Check for relocation overflows in write_reloc_value.
[ksplice.git] / kmodsrc / ksplice.c
blob a70b263e5b347fea6b2a2b7be65550932e9b2bbe
1 /* Copyright (C) 2007-2008 Jeffrey Brian Arnold <jbarnold@mit.edu>
2 * Copyright (C) 2008 Anders Kaseorg <andersk@mit.edu>,
3 * Tim Abbott <tabbott@mit.edu>
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
16 * 02110-1301, USA.
19 #include <linux/module.h>
20 #include <linux/version.h>
21 #ifdef CONFIG_DEBUG_FS
22 #include <linux/debugfs.h>
23 #else /* CONFIG_DEBUG_FS */
24 /* a7a76cefc4b12bb6508afa4c77f11c2752cc365d was after 2.6.11 */
25 #endif /* CONFIG_DEBUG_FS */
26 #include <linux/errno.h>
27 #include <linux/kallsyms.h>
28 #include <linux/kobject.h>
29 #include <linux/kthread.h>
30 #include <linux/pagemap.h>
31 #include <linux/sched.h>
32 #include <linux/stop_machine.h>
33 #include <linux/sysfs.h>
34 #include <linux/time.h>
35 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
36 #include <linux/uaccess.h>
37 #else /* LINUX_VERSION_CODE < */
38 /* linux/uaccess.h doesn't exist in kernels before 2.6.18 */
39 #include <asm/uaccess.h>
40 #endif /* LINUX_VERSION_CODE */
41 #include <linux/vmalloc.h>
42 #ifdef KSPLICE_STANDALONE
43 #include "ksplice.h"
44 #else /* !KSPLICE_STANDALONE */
45 #include <linux/ksplice.h>
46 #endif /* KSPLICE_STANDALONE */
47 #ifdef KSPLICE_NEED_PARAINSTRUCTIONS
48 #include <asm/alternative.h>
49 #endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
51 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
52 /* 6e21828743247270d09a86756a0c11702500dbfb was after 2.6.18 */
53 #define bool _Bool
54 #define false 0
55 #define true 1
56 #endif /* LINUX_VERSION_CODE */
58 #if BITS_PER_LONG == 32
59 #define ADDR "08lx"
60 #elif BITS_PER_LONG == 64
61 #define ADDR "016lx"
62 #endif /* BITS_PER_LONG */
64 enum stage {
65 STAGE_PREPARING, STAGE_APPLIED, STAGE_REVERSED
68 enum run_pre_mode {
69 RUN_PRE_INITIAL, RUN_PRE_DEBUG, RUN_PRE_FINAL
72 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
73 /* 5d7b32de9935c65ca8285ac6ec2382afdbb5d479 was after 2.6.8 */
74 #define __bitwise__
75 #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
76 /* af4ca457eaf2d6682059c18463eb106e2ce58198 was after 2.6.14 */
77 #define __bitwise__ __bitwise
78 #endif
80 typedef int __bitwise__ abort_t;
82 #define OK ((__force abort_t) 0)
83 #define NO_MATCH ((__force abort_t) 1)
84 #define BAD_SYSTEM_MAP ((__force abort_t) 2)
85 #define CODE_BUSY ((__force abort_t) 3)
86 #define MODULE_BUSY ((__force abort_t) 4)
87 #define OUT_OF_MEMORY ((__force abort_t) 5)
88 #define FAILED_TO_FIND ((__force abort_t) 6)
89 #define ALREADY_REVERSED ((__force abort_t) 7)
90 #define MISSING_EXPORT ((__force abort_t) 8)
91 #define UNEXPECTED_RUNNING_TASK ((__force abort_t) 9)
92 #define UNEXPECTED ((__force abort_t) 10)
94 struct update_bundle {
95 const char *kid;
96 const char *name;
97 struct kobject kobj;
98 enum stage stage;
99 abort_t abort_cause;
100 int debug;
101 #ifdef CONFIG_DEBUG_FS
102 struct debugfs_blob_wrapper debug_blob;
103 struct dentry *debugfs_dentry;
104 #else /* !CONFIG_DEBUG_FS */
105 bool debug_continue_line;
106 #endif /* CONFIG_DEBUG_FS */
107 struct list_head packs;
108 struct list_head conflicts;
109 struct list_head list;
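/* A conflict records one task whose stack (or instruction pointer) was
 * found to reference code this update wants to replace; each
 * conflict_frame below is one address pulled off that task's stack, with
 * has_conflict set when the address falls inside a to-be-patched region.
 * (Filled in by check_task()/check_address() and reported through the
 * "conflicts" sysfs attribute and print_conflicts().) */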
112 struct conflict {
113 const char *process_name;
114 pid_t pid;
115 struct list_head stack;
116 struct list_head list;
119 struct conflict_frame {
120 unsigned long addr;
121 int has_conflict;
122 const char *label;
123 struct list_head list;
126 #if defined(CONFIG_DEBUG_FS) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
127 /* Old kernels don't have debugfs_create_blob */
128 struct debugfs_blob_wrapper {
129 void *data;
130 unsigned long size;
132 #endif /* CONFIG_DEBUG_FS && LINUX_VERSION_CODE */
134 struct reloc_nameval {
135 struct list_head list;
136 const char *label;
137 unsigned long val;
138 enum { NOVAL, TEMP, VAL } status;
141 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
142 /* c33fa9f5609e918824446ef9a75319d4a802f1f4 was after 2.6.25 */
144 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
145 /* 2fff0a48416af891dce38fd425246e337831e0bb was after 2.6.19 */
146 static int virtual_address_mapped(unsigned long addr)
148 char retval;
149 return probe_kernel_address(addr, retval) != -EFAULT;
151 #else /* LINUX_VERSION_CODE < */
152 static int virtual_address_mapped(unsigned long addr);
153 #endif /* LINUX_VERSION_CODE */
155 static long probe_kernel_read(void *dst, void *src, size_t size)
157 if (!virtual_address_mapped((unsigned long)src) ||
158 !virtual_address_mapped((unsigned long)src + size))
159 return -EFAULT;
161 memcpy(dst, src, size);
162 return 0;
164 #endif /* LINUX_VERSION_CODE */
166 static struct reloc_nameval *find_nameval(struct module_pack *pack,
167 const char *label);
168 static abort_t create_nameval(struct module_pack *pack, const char *label,
169 unsigned long val, int status);
170 static abort_t lookup_reloc(struct module_pack *pack, unsigned long addr,
171 const struct ksplice_reloc **relocp);
172 static abort_t handle_reloc(struct module_pack *pack,
173 const struct ksplice_reloc *r,
174 unsigned long run_addr, enum run_pre_mode mode);
176 struct safety_record {
177 struct list_head list;
178 const char *label;
179 unsigned long addr;
180 unsigned long size;
181 bool first_byte_safe;
184 struct candidate_val {
185 struct list_head list;
186 unsigned long val;
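/* singular() is true when the list holds exactly one element: it is
 * non-empty and the first node's ->next already points back at the
 * head. */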
189 static bool singular(struct list_head *list)
191 return !list_empty(list) && list->next->next == list;
194 static int __attribute__((format(printf, 2, 3)))
195 _ksdebug(struct update_bundle *bundle, const char *fmt, ...);
196 #ifdef CONFIG_DEBUG_FS
197 static abort_t init_debug_buf(struct update_bundle *bundle);
198 static void clear_debug_buf(struct update_bundle *bundle);
199 #else /* !CONFIG_DEBUG_FS */
200 static inline abort_t init_debug_buf(struct update_bundle *bundle)
202 return OK;
205 static inline void clear_debug_buf(struct update_bundle *bundle)
207 return;
209 #endif /* CONFIG_DEBUG_FS */
211 #define ksdebug(pack, fmt, ...) \
212 _ksdebug(pack->bundle, fmt, ## __VA_ARGS__)
213 #define failed_to_find(pack, sym_name) \
214 ksdebug(pack, "Failed to find symbol %s at " \
215 "%s:%d\n", sym_name, __FILE__, __LINE__)
217 static inline void print_abort(struct module_pack *pack, const char *str)
219 ksdebug(pack, "Aborted. (%s)\n", str);
222 static LIST_HEAD(update_bundles);
223 #ifdef KSPLICE_STANDALONE
224 #if defined(CONFIG_KSPLICE) || defined(CONFIG_KSPLICE_MODULE)
225 extern struct list_head ksplice_module_list;
226 #else /* !CONFIG_KSPLICE */
227 LIST_HEAD(ksplice_module_list);
228 #endif /* CONFIG_KSPLICE */
229 #else /* !KSPLICE_STANDALONE */
230 LIST_HEAD(ksplice_module_list);
231 EXPORT_SYMBOL_GPL(ksplice_module_list);
232 #endif /* KSPLICE_STANDALONE */
234 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
235 /* Old kernels do not have kcalloc
236 * e629946abd0bb8266e9c3d0fd1bff2ef8dec5443 was after 2.6.8
238 static inline void *kcalloc(size_t n, size_t size, typeof(GFP_KERNEL) flags)
240 char *mem;
241 if (n != 0 && size > ULONG_MAX / n)
242 return NULL;
243 mem = kmalloc(n * size, flags);
244 if (mem)
245 memset(mem, 0, n * size);
246 return mem;
248 #endif /* LINUX_VERSION_CODE */
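/* Note on the kcalloc() fallback above: the "n != 0 && size > ULONG_MAX / n"
 * test rejects any n * size product that would wrap around, mirroring the
 * multiplication-overflow check done by the real kcalloc(). */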
250 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13)
251 /* Old kernels do not have kstrdup
252 * 543537bd922692bc978e2e356fcd8bfc9c2ee7d5 was 2.6.13-rc4
254 static char *kstrdup(const char *s, typeof(GFP_KERNEL) gfp)
256 size_t len;
257 char *buf;
259 if (!s)
260 return NULL;
262 len = strlen(s) + 1;
263 buf = kmalloc(len, gfp);
264 if (buf)
265 memcpy(buf, s, len);
266 return buf;
268 #endif /* LINUX_VERSION_CODE */
270 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
271 /* Old kernels use semaphore instead of mutex
272 * 97d1f15b7ef52c1e9c28dc48b454024bb53a5fd2 was after 2.6.16
274 #define mutex semaphore
275 #define mutex_lock down
276 #define mutex_unlock up
277 #endif /* LINUX_VERSION_CODE */
279 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
280 /* 11443ec7d9286dd25663516436a14edfb5f43857 was after 2.6.21 */
281 static char * __attribute_used__
282 kvasprintf(typeof(GFP_KERNEL) gfp, const char *fmt, va_list ap)
284 unsigned int len;
285 char *p, dummy[1];
286 va_list aq;
288 va_copy(aq, ap);
289 len = vsnprintf(dummy, 0, fmt, aq);
290 va_end(aq);
292 p = kmalloc(len + 1, gfp);
293 if (!p)
294 return NULL;
296 vsnprintf(p, len + 1, fmt, ap);
298 return p;
300 #endif
302 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
303 /* e905914f96e11862b130dd229f73045dad9a34e8 was after 2.6.17 */
304 static char * __attribute__((format (printf, 2, 3)))
305 kasprintf(typeof(GFP_KERNEL) gfp, const char *fmt, ...)
307 va_list ap;
308 char *p;
310 va_start(ap, fmt);
311 p = kvasprintf(gfp, fmt, ap);
312 va_end(ap);
314 return p;
316 #endif
318 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
319 /* 06b2a76d25d3cfbd14680021c1d356c91be6904e was after 2.6.24 */
320 static int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
322 char *tail;
323 unsigned long val;
324 size_t len;
326 *res = 0;
327 len = strlen(cp);
328 if (len == 0)
329 return -EINVAL;
331 val = simple_strtoul(cp, &tail, base);
332 if ((*tail == '\0') ||
333 ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
334 *res = val;
335 return 0;
338 return -EINVAL;
340 #endif
342 #ifndef task_thread_info
343 #define task_thread_info(task) (task)->thread_info
344 #endif /* !task_thread_info */
346 #ifdef KSPLICE_STANDALONE
348 static int bootstrapped = 0;
350 #ifdef CONFIG_KALLSYMS
351 extern unsigned long kallsyms_addresses[], kallsyms_num_syms;
352 extern u8 kallsyms_names[];
353 #endif /* CONFIG_KALLSYMS */
355 /* defined by ksplice-create */
356 extern const struct ksplice_reloc ksplice_init_relocs[],
357 ksplice_init_relocs_end[];
359 /* Obtained via System.map */
360 extern struct list_head modules;
361 extern struct mutex module_mutex;
362 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) && defined(CONFIG_UNUSED_SYMBOLS)
363 /* f71d20e961474dde77e6558396efb93d6ac80a4b was after 2.6.17 */
364 #define KSPLICE_KSYMTAB_UNUSED_SUPPORT 1
365 #endif /* LINUX_VERSION_CODE */
366 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
367 /* 9f28bb7e1d0188a993403ab39b774785892805e1 was after 2.6.16 */
368 #define KSPLICE_KSYMTAB_FUTURE_SUPPORT 1
369 #endif /* LINUX_VERSION_CODE */
370 extern const struct kernel_symbol __start___ksymtab[];
371 extern const struct kernel_symbol __stop___ksymtab[];
372 extern const unsigned long __start___kcrctab[];
373 extern const struct kernel_symbol __start___ksymtab_gpl[];
374 extern const struct kernel_symbol __stop___ksymtab_gpl[];
375 extern const unsigned long __start___kcrctab_gpl[];
376 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
377 extern const struct kernel_symbol __start___ksymtab_unused[];
378 extern const struct kernel_symbol __stop___ksymtab_unused[];
379 extern const unsigned long __start___kcrctab_unused[];
380 extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
381 extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
382 extern const unsigned long __start___kcrctab_unused_gpl[];
383 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
384 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
385 extern const struct kernel_symbol __start___ksymtab_gpl_future[];
386 extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
387 extern const unsigned long __start___kcrctab_gpl_future[];
388 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
390 #endif /* KSPLICE_STANDALONE */
392 static abort_t apply_relocs(struct module_pack *pack,
393 const struct ksplice_reloc *relocs,
394 const struct ksplice_reloc *relocs_end);
395 static abort_t apply_reloc(struct module_pack *pack,
396 const struct ksplice_reloc *r);
397 static abort_t read_reloc_value(struct module_pack *pack,
398 const struct ksplice_reloc *r,
399 unsigned long addr, unsigned long *valp);
400 static abort_t write_reloc_value(struct module_pack *pack,
401 const struct ksplice_reloc *r,
402 unsigned long sym_addr);
403 static abort_t add_system_map_candidates(struct module_pack *pack,
404 const struct ksplice_symbol *symbol,
405 struct list_head *vals);
406 static abort_t compute_address(struct module_pack *pack,
407 const struct ksplice_symbol *ksym,
408 struct list_head *vals);
410 struct accumulate_struct {
411 const char *desired_name;
412 struct list_head *vals;
415 #ifdef CONFIG_KALLSYMS
416 static int accumulate_matching_names(void *data, const char *sym_name,
417 unsigned long sym_val);
418 static abort_t kernel_lookup(const char *name, struct list_head *vals);
419 static abort_t other_module_lookup(struct module_pack *pack, const char *name,
420 struct list_head *vals);
421 #ifdef KSPLICE_STANDALONE
422 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
423 static unsigned long ksplice_kallsyms_expand_symbol(unsigned long off,
424 char *result);
425 #endif /* LINUX_VERSION_CODE */
426 #endif /* KSPLICE_STANDALONE */
427 #endif /* CONFIG_KALLSYMS */
428 static abort_t exported_symbol_lookup(const char *name, struct list_head *vals);
429 static abort_t new_export_lookup(struct update_bundle *bundle,
430 const char *name, struct list_head *vals);
432 #ifdef KSPLICE_STANDALONE
433 static abort_t brute_search_all(struct module_pack *pack,
434 const struct ksplice_size *s,
435 struct list_head *vals);
436 #endif /* KSPLICE_STANDALONE */
438 static abort_t add_candidate_val(struct list_head *vals, unsigned long val);
439 static void release_vals(struct list_head *vals);
440 static void set_temp_myst_relocs(struct module_pack *pack, int status_val);
441 static int contains_canary(struct module_pack *pack, unsigned long blank_addr,
442 int size, long dst_mask);
443 static int starts_with(const char *str, const char *prefix);
444 static int ends_with(const char *str, const char *suffix);
446 #define clear_list(head, type, member) \
447 do { \
448 struct list_head *_pos, *_n; \
449 list_for_each_safe(_pos, _n, head) { \
450 list_del(_pos); \
451 kfree(list_entry(_pos, type, member)); \
453 } while (0)
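/* Example use of clear_list(), as seen later in this file when a bundle's
 * conflict list is torn down:
 *
 *	clear_list(&bundle->conflicts, struct conflict, list);
 *
 * Every entry is unlinked and kfree()d; the list head itself remains a
 * valid (now empty) list. */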
455 /* primary */
456 static abort_t activate_primary(struct module_pack *pack);
457 static abort_t process_exports(struct module_pack *pack);
458 static abort_t process_patches(struct module_pack *pack);
459 static int __apply_patches(void *bundle);
460 static int __reverse_patches(void *bundle);
461 static abort_t check_each_task(struct update_bundle *bundle);
462 static abort_t check_task(struct update_bundle *bundle,
463 const struct task_struct *t, int rerun);
464 static abort_t check_stack(struct update_bundle *bundle, struct conflict *conf,
465 const struct thread_info *tinfo,
466 const unsigned long *stack);
467 static abort_t check_address(struct update_bundle *bundle,
468 struct conflict *conf, unsigned long addr);
469 static abort_t check_record(struct conflict_frame *frame,
470 const struct safety_record *rec,
471 unsigned long addr);
472 static int valid_stack_ptr(const struct thread_info *tinfo, const void *p);
473 static int is_stop_machine(const struct task_struct *t);
474 static void cleanup_conflicts(struct update_bundle *bundle);
475 static void print_conflicts(struct update_bundle *bundle);
476 static void insert_trampoline(struct ksplice_patch *p);
477 static void remove_trampoline(const struct ksplice_patch *p);
478 /* Architecture-specific functions defined in ARCH/ksplice-arch.c */
479 static abort_t create_trampoline(struct ksplice_patch *p);
480 static unsigned long follow_trampolines(struct module_pack *pack,
481 unsigned long addr);
482 static abort_t handle_paravirt(struct module_pack *pack, unsigned long pre,
483 unsigned long run, int *matched);
485 static abort_t add_dependency_on_address(struct module_pack *pack,
486 unsigned long addr);
487 static abort_t add_patch_dependencies(struct module_pack *pack);
489 #if defined(KSPLICE_STANDALONE) && \
490 !defined(CONFIG_KSPLICE) && !defined(CONFIG_KSPLICE_MODULE)
491 #define KSPLICE_NO_KERNEL_SUPPORT 1
492 #endif /* KSPLICE_STANDALONE && !CONFIG_KSPLICE && !CONFIG_KSPLICE_MODULE */
494 #ifdef KSPLICE_NO_KERNEL_SUPPORT
495 /* Functions defined here that will be exported in later kernels */
496 #ifdef CONFIG_KALLSYMS
497 static int module_kallsyms_on_each_symbol(const struct module *mod,
498 int (*fn)(void *, const char *,
499 unsigned long),
500 void *data);
501 #endif /* CONFIG_KALLSYMS */
502 static struct module *find_module(const char *name);
503 static int use_module(struct module *a, struct module *b);
504 static const struct kernel_symbol *find_symbol(const char *name,
505 struct module **owner,
506 const unsigned long **crc,
507 bool gplok, bool warn);
508 static struct module *__module_data_address(unsigned long addr);
509 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
511 /* helper */
512 static abort_t activate_helper(struct module_pack *pack,
513 bool consider_data_sections);
514 static abort_t search_for_match(struct module_pack *pack,
515 const struct ksplice_size *s);
516 static abort_t try_addr(struct module_pack *pack, const struct ksplice_size *s,
517 unsigned long run_addr,
518 struct list_head *safety_records,
519 enum run_pre_mode mode);
520 static abort_t run_pre_cmp(struct module_pack *pack,
521 const struct ksplice_size *s,
522 unsigned long run_addr,
523 struct list_head *safety_records,
524 enum run_pre_mode mode);
525 #ifndef CONFIG_FUNCTION_DATA_SECTIONS
526 /* defined in $ARCH/ksplice-arch.c */
527 static abort_t arch_run_pre_cmp(struct module_pack *pack,
528 const struct ksplice_size *s,
529 unsigned long run_addr,
530 struct list_head *safety_records,
531 enum run_pre_mode mode);
532 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
533 static void print_bytes(struct module_pack *pack,
534 const unsigned char *run, int runc,
535 const unsigned char *pre, int prec);
536 static abort_t create_safety_record(struct module_pack *pack,
537 const struct ksplice_size *s,
538 struct list_head *record_list,
539 unsigned long run_addr,
540 unsigned long run_size);
542 static abort_t reverse_patches(struct update_bundle *bundle);
543 static abort_t apply_patches(struct update_bundle *bundle);
544 static abort_t apply_update(struct update_bundle *bundle);
545 static int register_ksplice_module(struct module_pack *pack);
546 static struct update_bundle *init_ksplice_bundle(const char *kid);
547 static void cleanup_ksplice_bundle(struct update_bundle *bundle);
548 static void add_to_bundle(struct module_pack *pack,
549 struct update_bundle *bundle);
550 static int ksplice_sysfs_init(struct update_bundle *bundle);
552 #ifndef KSPLICE_STANDALONE
553 #include "ksplice-arch.c"
554 #elif defined CONFIG_X86
555 #include "x86/ksplice-arch.c"
556 #elif defined CONFIG_ARM
557 #include "arm/ksplice-arch.c"
558 #endif /* KSPLICE_STANDALONE */
560 #ifndef KSPLICE_STANDALONE
561 static struct kobject *ksplice_kobj;
562 #endif /* !KSPLICE_STANDALONE */
564 struct ksplice_attribute {
565 struct attribute attr;
566 ssize_t (*show)(struct update_bundle *bundle, char *buf);
567 ssize_t (*store)(struct update_bundle *bundle, const char *buf,
568 size_t len);
571 static ssize_t ksplice_attr_show(struct kobject *kobj, struct attribute *attr,
572 char *buf)
574 struct ksplice_attribute *attribute =
575 container_of(attr, struct ksplice_attribute, attr);
576 struct update_bundle *bundle =
577 container_of(kobj, struct update_bundle, kobj);
578 if (attribute->show == NULL)
579 return -EIO;
580 return attribute->show(bundle, buf);
583 static ssize_t ksplice_attr_store(struct kobject *kobj, struct attribute *attr,
584 const char *buf, size_t len)
586 struct ksplice_attribute *attribute =
587 container_of(attr, struct ksplice_attribute, attr);
588 struct update_bundle *bundle =
589 container_of(kobj, struct update_bundle, kobj);
590 if (attribute->store == NULL)
591 return -EIO;
592 return attribute->store(bundle, buf, len);
595 static struct sysfs_ops ksplice_sysfs_ops = {
596 .show = ksplice_attr_show,
597 .store = ksplice_attr_store,
600 static void ksplice_release(struct kobject *kobj)
602 struct update_bundle *bundle;
603 bundle = container_of(kobj, struct update_bundle, kobj);
604 cleanup_ksplice_bundle(bundle);
607 static ssize_t stage_show(struct update_bundle *bundle, char *buf)
609 switch (bundle->stage) {
610 case STAGE_PREPARING:
611 return snprintf(buf, PAGE_SIZE, "preparing\n");
612 case STAGE_APPLIED:
613 return snprintf(buf, PAGE_SIZE, "applied\n");
614 case STAGE_REVERSED:
615 return snprintf(buf, PAGE_SIZE, "reversed\n");
617 return 0;
620 static ssize_t abort_cause_show(struct update_bundle *bundle, char *buf)
622 switch (bundle->abort_cause) {
623 case OK:
624 return snprintf(buf, PAGE_SIZE, "ok\n");
625 case NO_MATCH:
626 return snprintf(buf, PAGE_SIZE, "no_match\n");
627 case BAD_SYSTEM_MAP:
628 return snprintf(buf, PAGE_SIZE, "bad_system_map\n");
629 case CODE_BUSY:
630 return snprintf(buf, PAGE_SIZE, "code_busy\n");
631 case MODULE_BUSY:
632 return snprintf(buf, PAGE_SIZE, "module_busy\n");
633 case OUT_OF_MEMORY:
634 return snprintf(buf, PAGE_SIZE, "out_of_memory\n");
635 case FAILED_TO_FIND:
636 return snprintf(buf, PAGE_SIZE, "failed_to_find\n");
637 case ALREADY_REVERSED:
638 return snprintf(buf, PAGE_SIZE, "already_reversed\n");
639 case MISSING_EXPORT:
640 return snprintf(buf, PAGE_SIZE, "missing_export\n");
641 case UNEXPECTED_RUNNING_TASK:
642 return snprintf(buf, PAGE_SIZE, "unexpected_running_task\n");
643 case UNEXPECTED:
644 return snprintf(buf, PAGE_SIZE, "unexpected\n");
646 return 0;
649 static ssize_t conflict_show(struct update_bundle *bundle, char *buf)
651 const struct conflict *conf;
652 const struct conflict_frame *frame;
653 int used = 0;
654 list_for_each_entry(conf, &bundle->conflicts, list) {
655 used += snprintf(buf + used, PAGE_SIZE - used, "%s %d",
656 conf->process_name, conf->pid);
657 list_for_each_entry(frame, &conf->stack, list) {
658 if (!frame->has_conflict)
659 continue;
660 used += snprintf(buf + used, PAGE_SIZE - used, " %s",
661 frame->label);
663 used += snprintf(buf + used, PAGE_SIZE - used, "\n");
665 return used;
668 static ssize_t stage_store(struct update_bundle *bundle,
669 const char *buf, size_t len)
671 if (strncmp(buf, "applied\n", len) == 0 &&
672 bundle->stage == STAGE_PREPARING)
673 bundle->abort_cause = apply_update(bundle);
674 else if (strncmp(buf, "reversed\n", len) == 0 &&
675 bundle->stage == STAGE_APPLIED)
676 bundle->abort_cause = reverse_patches(bundle);
677 return len;
680 static ssize_t debug_show(struct update_bundle *bundle, char *buf)
682 return snprintf(buf, PAGE_SIZE, "%d\n", bundle->debug);
685 static ssize_t debug_store(struct update_bundle *bundle, const char *buf,
686 size_t len)
688 unsigned long l;
689 int ret = strict_strtoul(buf, 10, &l);
690 if (ret != 0)
691 return ret;
692 bundle->debug = l;
693 return len;
696 static struct ksplice_attribute stage_attribute =
697 __ATTR(stage, 0600, stage_show, stage_store);
698 static struct ksplice_attribute abort_cause_attribute =
699 __ATTR(abort_cause, 0400, abort_cause_show, NULL);
700 static struct ksplice_attribute debug_attribute =
701 __ATTR(debug, 0600, debug_show, debug_store);
702 static struct ksplice_attribute conflict_attribute =
703 __ATTR(conflicts, 0400, conflict_show, NULL);
705 static struct attribute *ksplice_attrs[] = {
706 &stage_attribute.attr,
707 &abort_cause_attribute.attr,
708 &debug_attribute.attr,
709 &conflict_attribute.attr,
710 NULL
713 static struct kobj_type ksplice_ktype = {
714 .sysfs_ops = &ksplice_sysfs_ops,
715 .release = ksplice_release,
716 .default_attrs = ksplice_attrs,
719 static abort_t activate_primary(struct module_pack *pack)
721 abort_t ret;
722 ret = apply_relocs(pack, pack->primary_relocs,
723 pack->primary_relocs_end);
724 if (ret != OK)
725 return ret;
727 ret = process_patches(pack);
728 if (ret != OK)
729 return ret;
731 ret = process_exports(pack);
732 if (ret != OK)
733 return ret;
735 ret = add_patch_dependencies(pack);
736 if (ret != OK)
737 return ret;
739 return OK;
742 static void __attribute__((noreturn)) ksplice_deleted(void)
744 printk(KERN_CRIT "Attempted call of kernel function deleted by Ksplice "
745 "update!\n");
746 BUG();
747 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
748 /* 91768d6c2bad0d2766a166f13f2f57e197de3458 was after 2.6.19 */
749 for (;;);
750 #endif
753 static abort_t process_patches(struct module_pack *pack)
755 struct ksplice_patch *p;
756 struct safety_record *rec;
757 abort_t ret;
759 /* Check every patch has a safety_record */
760 for (p = pack->patches; p < pack->patches_end; p++) {
761 struct reloc_nameval *nv = find_nameval(pack, p->label);
762 int found = 0;
763 if (nv == NULL) {
764 failed_to_find(pack, p->label);
765 return FAILED_TO_FIND;
767 p->oldaddr = nv->val;
769 list_for_each_entry(rec, &pack->safety_records, list) {
770 if (strcmp(rec->label, p->label) == 0 &&
771 follow_trampolines(pack, p->oldaddr) == rec->addr) {
772 found = 1;
773 break;
776 if (!found) {
777 ksdebug(pack, "No safety record for patch %s\n",
778 p->label);
779 return UNEXPECTED;
781 if (rec->size < p->size) {
782 ksdebug(pack, "Symbol %s is too short for trampoline\n",
783 p->label);
784 return UNEXPECTED;
787 if (p->repladdr == 0)
788 p->repladdr = (unsigned long)ksplice_deleted;
789 else
790 rec->first_byte_safe = true;
792 ret = create_trampoline(p);
793 if (ret != OK)
794 return ret;
796 return OK;
799 static abort_t process_exports(struct module_pack *pack)
801 struct ksplice_export *export;
802 struct module *m;
803 const struct kernel_symbol *sym;
805 for (export = pack->exports; export < pack->exports_end; export++) {
806 sym = find_symbol(export->name, &m, NULL, true, false);
807 if (sym == NULL) {
808 ksdebug(pack, "Could not find kernel_symbol struct for "
809 "%s\n", export->name);
810 return MISSING_EXPORT;
813 /* Cast away const since we are planning to mutate the
814 * kernel_symbol structure. */
815 export->sym = (struct kernel_symbol *)sym;
816 export->saved_name = export->sym->name;
817 if (m != pack->primary && use_module(pack->primary, m) != 1)
818 return UNEXPECTED;
820 return OK;
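/* The const cast in process_exports() exists because __apply_patches()
 * later overwrites the exported symbol's name in place (sym->name =
 * export->new_name) and __reverse_patches() restores the saved_name kept
 * here.  The use_module() call records a dependency of the primary module
 * on whichever module owns the export, so that owner cannot be unloaded
 * out from under the renamed symbol. */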
823 static void insert_trampoline(struct ksplice_patch *p)
825 mm_segment_t old_fs = get_fs();
826 set_fs(KERNEL_DS);
827 memcpy((void *)p->saved, (void *)p->oldaddr, p->size);
828 memcpy((void *)p->oldaddr, (void *)p->trampoline, p->size);
829 flush_icache_range(p->oldaddr, p->oldaddr + p->size);
830 set_fs(old_fs);
833 static void remove_trampoline(const struct ksplice_patch *p)
835 mm_segment_t old_fs = get_fs();
836 set_fs(KERNEL_DS);
837 memcpy((void *)p->oldaddr, (void *)p->saved, p->size);
838 flush_icache_range(p->oldaddr, p->oldaddr + p->size);
839 set_fs(old_fs);
842 static abort_t apply_patches(struct update_bundle *bundle)
844 int i;
845 abort_t ret;
847 for (i = 0; i < 5; i++) {
848 cleanup_conflicts(bundle);
849 #ifdef KSPLICE_STANDALONE
850 bust_spinlocks(1);
851 #endif /* KSPLICE_STANDALONE */
852 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
853 ret = (__force abort_t)stop_machine(__apply_patches, bundle,
854 NULL);
855 #else /* LINUX_VERSION_CODE < */
856 /* 9b1a4d38373a5581a4e01032a3ccdd94cd93477b was after 2.6.26 */
857 ret = (__force abort_t)stop_machine_run(__apply_patches, bundle,
858 NR_CPUS);
859 #endif /* LINUX_VERSION_CODE */
860 #ifdef KSPLICE_STANDALONE
861 bust_spinlocks(0);
862 #endif /* KSPLICE_STANDALONE */
863 if (ret != CODE_BUSY)
864 break;
865 set_current_state(TASK_INTERRUPTIBLE);
866 schedule_timeout(msecs_to_jiffies(1000));
869 if (ret == OK) {
870 struct module_pack *pack;
871 const struct ksplice_size *s;
872 struct safety_record *rec;
873 list_for_each_entry(pack, &bundle->packs, list) {
874 for (s = pack->primary_sizes;
875 s < pack->primary_sizes_end; s++) {
876 rec = kmalloc(sizeof(*rec), GFP_KERNEL);
877 if (rec == NULL)
878 return OUT_OF_MEMORY;
879 rec->addr = s->thismod_addr;
880 rec->size = s->size;
881 rec->label = s->symbol->label;
882 list_add(&rec->list, &pack->safety_records);
885 _ksdebug(bundle, "Update %s applied successfully\n",
886 bundle->kid);
 887         return OK;
888 } else if (ret == CODE_BUSY) {
889 print_conflicts(bundle);
890 _ksdebug(bundle, "Aborted %s. stack check: to-be-replaced "
891 "code is busy\n", bundle->kid);
892 } else if (ret == ALREADY_REVERSED) {
893 _ksdebug(bundle, "Aborted %s. Ksplice update %s is already "
894 "reversed.\n", bundle->kid, bundle->kid);
896 return ret;
899 static abort_t reverse_patches(struct update_bundle *bundle)
901 int i;
902 abort_t ret;
903 struct module_pack *pack;
905 clear_debug_buf(bundle);
906 ret = init_debug_buf(bundle);
907 if (ret != OK)
908 return ret;
910 _ksdebug(bundle, "Preparing to reverse %s\n", bundle->kid);
912 for (i = 0; i < 5; i++) {
913 cleanup_conflicts(bundle);
914 clear_list(&bundle->conflicts, struct conflict, list);
915 #ifdef KSPLICE_STANDALONE
916 bust_spinlocks(1);
917 #endif /* KSPLICE_STANDALONE */
918 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
919 ret = (__force abort_t)stop_machine(__reverse_patches, bundle,
920 NULL);
921 #else /* LINUX_VERSION_CODE < */
922 /* 9b1a4d38373a5581a4e01032a3ccdd94cd93477b was after 2.6.26 */
923 ret = (__force abort_t)stop_machine_run(__reverse_patches,
924 bundle, NR_CPUS);
925 #endif /* LINUX_VERSION_CODE */
926 #ifdef KSPLICE_STANDALONE
927 bust_spinlocks(0);
928 #endif /* KSPLICE_STANDALONE */
929 if (ret != CODE_BUSY)
930 break;
931 set_current_state(TASK_INTERRUPTIBLE);
932 schedule_timeout(msecs_to_jiffies(1000));
934 list_for_each_entry(pack, &bundle->packs, list)
935 clear_list(&pack->safety_records, struct safety_record, list);
936 if (ret == OK) {
937 _ksdebug(bundle, "Update %s reversed successfully\n",
938 bundle->kid);
939 } else if (ret == CODE_BUSY) {
940 print_conflicts(bundle);
941 _ksdebug(bundle, "Aborted %s. stack check: to-be-reversed "
942 "code is busy\n", bundle->kid);
943 } else if (ret == MODULE_BUSY) {
944 _ksdebug(bundle, "Update %s is in use by another module\n",
945 bundle->kid);
947 return ret;
950 static int __apply_patches(void *bundleptr)
952 struct update_bundle *bundle = bundleptr;
953 struct module_pack *pack;
954 struct ksplice_patch *p;
955 struct ksplice_export *export;
956 abort_t ret;
958 if (bundle->stage == STAGE_APPLIED)
959 return (__force int)OK;
961 if (bundle->stage != STAGE_PREPARING)
962 return (__force int)UNEXPECTED;
964 ret = check_each_task(bundle);
965 if (ret != OK)
966 return (__force int)ret;
968 list_for_each_entry(pack, &bundle->packs, list) {
969 if (try_module_get(pack->primary) != 1) {
970 struct module_pack *pack1;
971 list_for_each_entry(pack1, &bundle->packs, list) {
972 if (pack1 == pack)
973 break;
974 module_put(pack1->primary);
976 return (__force int)UNEXPECTED;
980 bundle->stage = STAGE_APPLIED;
982 list_for_each_entry(pack, &bundle->packs, list) {
983 for (export = pack->exports; export < pack->exports_end;
984 export++)
985 export->sym->name = export->new_name;
988 list_for_each_entry(pack, &bundle->packs, list) {
989 for (p = pack->patches; p < pack->patches_end; p++)
990 insert_trampoline(p);
992 return (__force int)OK;
995 static int __reverse_patches(void *bundleptr)
997 struct update_bundle *bundle = bundleptr;
998 struct module_pack *pack;
999 const struct ksplice_patch *p;
1000 struct ksplice_export *export;
1001 abort_t ret;
1003 if (bundle->stage != STAGE_APPLIED)
1004 return (__force int)OK;
1006 #ifdef CONFIG_MODULE_UNLOAD
1007 /* primary's refcount isn't changed by accessing ksplice.ko's sysfs */
1008 list_for_each_entry(pack, &bundle->packs, list) {
1009 if (module_refcount(pack->primary) != 1)
1010 return (__force int)MODULE_BUSY;
1012 #endif /* CONFIG_MODULE_UNLOAD */
1014 ret = check_each_task(bundle);
1015 if (ret != OK)
1016 return (__force int)ret;
1018 bundle->stage = STAGE_REVERSED;
1020 list_for_each_entry(pack, &bundle->packs, list)
1021 module_put(pack->primary);
1023 list_for_each_entry(pack, &bundle->packs, list) {
1024 for (export = pack->exports; export < pack->exports_end;
1025 export++)
1026 export->sym->name = export->saved_name;
1029 list_for_each_entry(pack, &bundle->packs, list) {
1030 for (p = pack->patches; p < pack->patches_end; p++)
1031 remove_trampoline(p);
1033 return (__force int)OK;
1036 static abort_t check_each_task(struct update_bundle *bundle)
1038 const struct task_struct *g, *p;
1039 abort_t status = OK, ret;
1040 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
1041 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
1042 read_lock(&tasklist_lock);
1043 #endif /* LINUX_VERSION_CODE */
1044 do_each_thread(g, p) {
1045 /* do_each_thread is a double loop! */
1046 ret = check_task(bundle, p, 0);
1047 if (ret != OK) {
1048 check_task(bundle, p, 1);
1049 status = ret;
1051 if (ret != OK && ret != CODE_BUSY)
1052 goto out;
1053 } while_each_thread(g, p);
1054 out:
1055 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
1056 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
1057 read_unlock(&tasklist_lock);
1058 #endif /* LINUX_VERSION_CODE */
1059 return status;
1062 static abort_t check_task(struct update_bundle *bundle,
1063 const struct task_struct *t, int rerun)
1065 abort_t status, ret;
1066 struct conflict *conf = NULL;
1068 if (rerun) {
1069 conf = kmalloc(sizeof(*conf), GFP_ATOMIC);
1070 if (conf == NULL)
1071 return OUT_OF_MEMORY;
1072 conf->process_name = kstrdup(t->comm, GFP_ATOMIC);
1073 if (conf->process_name == NULL) {
1074 kfree(conf);
1075 return OUT_OF_MEMORY;
1077 conf->pid = t->pid;
1078 INIT_LIST_HEAD(&conf->stack);
1079 list_add(&conf->list, &bundle->conflicts);
1082 status = check_address(bundle, conf, KSPLICE_IP(t));
1083 if (t == current) {
1084 ret = check_stack(bundle, conf, task_thread_info(t),
1085 (unsigned long *)__builtin_frame_address(0));
1086 if (status == OK)
1087 status = ret;
1088 } else if (!task_curr(t)) {
1089 ret = check_stack(bundle, conf, task_thread_info(t),
1090 (unsigned long *)KSPLICE_SP(t));
1091 if (status == OK)
1092 status = ret;
1093 } else if (!is_stop_machine(t)) {
1094 status = UNEXPECTED_RUNNING_TASK;
1096 return status;
1099 /* Modified version of Linux's print_context_stack */
1100 static abort_t check_stack(struct update_bundle *bundle, struct conflict *conf,
1101 const struct thread_info *tinfo,
1102 const unsigned long *stack)
1104 abort_t status = OK, ret;
1105 unsigned long addr;
1107 while (valid_stack_ptr(tinfo, stack)) {
1108 addr = *stack++;
1109 ret = check_address(bundle, conf, addr);
1110 if (ret != OK)
1111 status = ret;
1113 return status;
1116 static abort_t check_address(struct update_bundle *bundle,
1117 struct conflict *conf, unsigned long addr)
1119 abort_t status = OK, ret;
1120 const struct safety_record *rec;
1121 struct module_pack *pack;
1122 struct conflict_frame *frame = NULL;
1124 if (conf != NULL) {
1125 frame = kmalloc(sizeof(*frame), GFP_ATOMIC);
1126 if (frame == NULL)
1127 return OUT_OF_MEMORY;
1128 frame->addr = addr;
1129 frame->has_conflict = 0;
1130 frame->label = NULL;
1131 list_add(&frame->list, &conf->stack);
1134 list_for_each_entry(pack, &bundle->packs, list) {
1135 list_for_each_entry(rec, &pack->safety_records, list) {
1136 ret = check_record(frame, rec, addr);
1137 if (ret != OK)
1138 status = ret;
1141 return status;
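/* check_record(): an address conflicts with a safety record when it
 * points strictly inside the to-be-replaced region, or at its very first
 * byte when first_byte_safe is not set.  (process_patches() sets
 * first_byte_safe only for patches that install a replacement,
 * p->repladdr != 0, where a thread sitting at the old entry point would
 * simply be redirected through the new trampoline.) */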
1144 static abort_t check_record(struct conflict_frame *frame,
1145 const struct safety_record *rec, unsigned long addr)
1147 if ((addr > rec->addr && addr < rec->addr + rec->size) ||
1148 (addr == rec->addr && !rec->first_byte_safe)) {
1149 if (frame != NULL) {
1150 frame->label = rec->label;
1151 frame->has_conflict = 1;
1153 return CODE_BUSY;
1155 return OK;
1158 /* Modified version of Linux's valid_stack_ptr */
1159 static int valid_stack_ptr(const struct thread_info *tinfo, const void *p)
1161 return p > (const void *)tinfo
1162 && p <= (const void *)tinfo + THREAD_SIZE - sizeof(long);
1165 static int is_stop_machine(const struct task_struct *t)
1167 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
1168 const char *num;
1169 if (!starts_with(t->comm, "kstop"))
1170 return 0;
1171 num = t->comm + strlen("kstop");
1172 return num[strspn(num, "0123456789")] == '\0';
1173 #else /* LINUX_VERSION_CODE < */
1174 return strcmp(t->comm, "kstopmachine") == 0;
1175 #endif /* LINUX_VERSION_CODE */
1178 static void cleanup_conflicts(struct update_bundle *bundle)
1180 struct conflict *conf;
1181 list_for_each_entry(conf, &bundle->conflicts, list) {
1182 clear_list(&conf->stack, struct conflict_frame, list);
1183 kfree(conf->process_name);
1185 clear_list(&bundle->conflicts, struct conflict, list);
1188 static void print_conflicts(struct update_bundle *bundle)
1190 const struct conflict *conf;
1191 const struct conflict_frame *frame;
1192 list_for_each_entry(conf, &bundle->conflicts, list) {
1193 _ksdebug(bundle, "stack check: pid %d (%s):", conf->pid,
1194 conf->process_name);
1195 list_for_each_entry(frame, &conf->stack, list) {
1196 _ksdebug(bundle, " %" ADDR, frame->addr);
1197 if (frame->has_conflict)
1198 _ksdebug(bundle, " [<-CONFLICT]");
1200 _ksdebug(bundle, "\n");
1204 #ifdef KSPLICE_NO_KERNEL_SUPPORT
1205 static struct module *find_module(const char *name)
1207 struct module *mod;
1209 list_for_each_entry(mod, &modules, list) {
1210 if (strcmp(mod->name, name) == 0)
1211 return mod;
1213 return NULL;
1215 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
1217 static int register_ksplice_module(struct module_pack *pack)
1219 struct update_bundle *bundle;
1220 int ret = 0;
1222 INIT_LIST_HEAD(&pack->reloc_namevals);
1223 INIT_LIST_HEAD(&pack->safety_records);
1225 mutex_lock(&module_mutex);
1226 if (strcmp(pack->target_name, "vmlinux") == 0) {
1227 pack->target = NULL;
1228 } else {
1229 pack->target = find_module(pack->target_name);
1230 if (pack->target == NULL || !module_is_live(pack->target)) {
1231 ret = -ENODEV;
1232 goto out;
1235 list_for_each_entry(bundle, &update_bundles, list) {
1236 if (strcmp(pack->kid, bundle->kid) == 0) {
1237 if (bundle->stage != STAGE_PREPARING) {
1238 ret = -EPERM;
1239 goto out;
1241 add_to_bundle(pack, bundle);
1242 list_add(&pack->module_list_entry.list,
1243 &ksplice_module_list);
1244 goto out;
1247 bundle = init_ksplice_bundle(pack->kid);
1248 if (bundle == NULL) {
1249 ret = -ENOMEM;
1250 goto out;
1252 ret = ksplice_sysfs_init(bundle);
1253 if (ret != 0) {
1254 cleanup_ksplice_bundle(bundle);
1255 goto out;
1257 add_to_bundle(pack, bundle);
1258 list_add(&pack->module_list_entry.list, &ksplice_module_list);
1259 out:
1260 mutex_unlock(&module_mutex);
1261 return ret;
1264 void cleanup_ksplice_module(struct module_pack *pack)
1266 if (pack->bundle == NULL || pack->bundle->stage == STAGE_APPLIED)
1267 return;
1268 mutex_lock(&module_mutex);
1269 list_del(&pack->list);
1270 list_del(&pack->module_list_entry.list);
1271 mutex_unlock(&module_mutex);
1272 if (list_empty(&pack->bundle->packs))
1273 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
1274 kobject_put(&pack->bundle->kobj);
1275 #else /* LINUX_VERSION_CODE < */
1276 /* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
1277 kobject_unregister(&pack->bundle->kobj);
1278 #endif /* LINUX_VERSION_CODE */
1279 pack->bundle = NULL;
1281 EXPORT_SYMBOL_GPL(cleanup_ksplice_module);
1283 static void add_to_bundle(struct module_pack *pack,
1284 struct update_bundle *bundle)
1286 pack->bundle = bundle;
1287 list_add(&pack->list, &bundle->packs);
1288 pack->module_list_entry.target = pack->target;
1289 pack->module_list_entry.primary = pack->primary;
1292 static void cleanup_ksplice_bundle(struct update_bundle *bundle)
1294 mutex_lock(&module_mutex);
1295 list_del(&bundle->list);
1296 mutex_unlock(&module_mutex);
1297 cleanup_conflicts(bundle);
1298 clear_debug_buf(bundle);
1299 kfree(bundle->kid);
1300 kfree(bundle->name);
1301 kfree(bundle);
1304 static struct update_bundle *init_ksplice_bundle(const char *kid)
1306 struct update_bundle *bundle;
1307 bundle = kcalloc(1, sizeof(struct update_bundle), GFP_KERNEL);
1308 if (bundle == NULL)
1309 return NULL;
1310 bundle->name = kasprintf(GFP_KERNEL, "ksplice_%s", kid);
1311 if (bundle->name == NULL) {
1312 kfree(bundle);
1313 return NULL;
1315 bundle->kid = kstrdup(kid, GFP_KERNEL);
1316 if (bundle->kid == NULL) {
1317 kfree(bundle->name);
1318 kfree(bundle);
1319 return NULL;
1321 INIT_LIST_HEAD(&bundle->packs);
1322 if (init_debug_buf(bundle) != OK) {
1323 kfree(bundle->kid);
1324 kfree(bundle->name);
1325 kfree(bundle);
1326 return NULL;
1328 list_add(&bundle->list, &update_bundles);
1329 bundle->stage = STAGE_PREPARING;
1330 bundle->abort_cause = OK;
1331 INIT_LIST_HEAD(&bundle->conflicts);
1332 return bundle;
1335 static int ksplice_sysfs_init(struct update_bundle *bundle)
1337 int ret = 0;
1338 memset(&bundle->kobj, 0, sizeof(bundle->kobj));
1339 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
1340 #ifndef KSPLICE_STANDALONE
1341 ret = kobject_init_and_add(&bundle->kobj, &ksplice_ktype,
1342 ksplice_kobj, "%s", bundle->kid);
1343 #else /* KSPLICE_STANDALONE */
1344 /* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
1345 ret = kobject_init_and_add(&bundle->kobj, &ksplice_ktype,
1346 &THIS_MODULE->mkobj.kobj, "ksplice");
1347 #endif /* KSPLICE_STANDALONE */
1348 #else /* LINUX_VERSION_CODE < */
1349 ret = kobject_set_name(&bundle->kobj, "%s", "ksplice");
1350 if (ret != 0)
1351 return ret;
1352 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
1353 bundle->kobj.parent = &THIS_MODULE->mkobj.kobj;
1354 #else /* LINUX_VERSION_CODE < */
1355 /* b86ab02803095190d6b72bcc18dcf620bf378df9 was after 2.6.10 */
1356 bundle->kobj.parent = &THIS_MODULE->mkobj->kobj;
1357 #endif /* LINUX_VERSION_CODE */
1358 bundle->kobj.ktype = &ksplice_ktype;
1359 ret = kobject_register(&bundle->kobj);
1360 #endif /* LINUX_VERSION_CODE */
1361 if (ret != 0)
1362 return ret;
1363 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
1364 kobject_uevent(&bundle->kobj, KOBJ_ADD);
1365 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
1366 /* 312c004d36ce6c739512bac83b452f4c20ab1f62 was after 2.6.14 */
1367 /* 12025235884570ba7f02a6f427f973ac6be7ec54 was after 2.6.9 */
1368 kobject_uevent(&bundle->kobj, KOBJ_ADD, NULL);
1369 #endif /* LINUX_VERSION_CODE */
1370 return 0;
1373 int init_ksplice_module(struct module_pack *pack)
1375 #ifdef KSPLICE_STANDALONE
1376 if (bootstrapped == 0)
1377 return -1;
1378 #endif /* KSPLICE_STANDALONE */
1379 return register_ksplice_module(pack);
1381 EXPORT_SYMBOL(init_ksplice_module);
1383 static abort_t apply_update(struct update_bundle *bundle)
1385 struct module_pack *pack;
1386 abort_t ret;
1388 mutex_lock(&module_mutex);
1389 #ifdef KSPLICE_NEED_PARAINSTRUCTIONS
1390 list_for_each_entry(pack, &bundle->packs, list) {
1391 if (pack->target == NULL) {
1392 apply_paravirt(pack->primary_parainstructions,
1393 pack->primary_parainstructions_end);
1394 apply_paravirt(pack->helper_parainstructions,
1395 pack->helper_parainstructions_end);
1398 #endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
1400 list_for_each_entry(pack, &bundle->packs, list) {
1401 ksdebug(pack, "Preparing and checking %s\n", pack->name);
1402 ret = activate_helper(pack, false);
1403 if (ret == NO_MATCH) {
1404 ksdebug(pack, "Trying to continue without the "
1405 "unmatched sections; we will find them later."
1406 "\n");
1407 ret = activate_primary(pack);
1408 if (ret != OK) {
1409 ksdebug(pack, "Aborted. Unable to continue "
1410 "without the unmatched sections.\n");
1411 goto out;
1413 ksdebug(pack, "run-pre: Considering .data sections to "
1414 "find the unmatched sections\n");
1415 ret = activate_helper(pack, true);
1416 if (ret != OK)
1417 goto out;
1418 ksdebug(pack, "run-pre: Found all previously unmatched "
1419 "sections\n");
1420 } else if (ret != OK) {
1421 goto out;
1422 } else {
1423 ret = activate_primary(pack);
1424 if (ret != OK)
1425 goto out;
1428 ret = apply_patches(bundle);
1429 out:
1430 list_for_each_entry(pack, &bundle->packs, list) {
1431 clear_list(&pack->reloc_namevals, struct reloc_nameval, list);
1432 if (bundle->stage == STAGE_PREPARING)
1433 clear_list(&pack->safety_records, struct safety_record,
1434 list);
1436 mutex_unlock(&module_mutex);
1437 return ret;
1440 static abort_t activate_helper(struct module_pack *pack,
1441 bool consider_data_sections)
1443 const struct ksplice_size *s;
1444 abort_t ret;
1445 char *finished;
1446 int i, remaining = 0;
1447 bool progress;
1449 finished = kcalloc(pack->helper_sizes_end - pack->helper_sizes,
1450 sizeof(*finished), GFP_KERNEL);
1451 if (finished == NULL)
1452 return OUT_OF_MEMORY;
1453 for (s = pack->helper_sizes; s < pack->helper_sizes_end; s++) {
1454 if ((s->flags & KSPLICE_SIZE_DATA) == 0)
1455 remaining++;
1458 while (remaining > 0) {
1459 progress = false;
1460 for (s = pack->helper_sizes; s < pack->helper_sizes_end; s++) {
1461 i = s - pack->helper_sizes;
1462 if (finished[i])
1463 continue;
1464 if (!consider_data_sections &&
1465 (s->flags & KSPLICE_SIZE_DATA) != 0)
1466 continue;
1467 ret = search_for_match(pack, s);
1468 if (ret == OK) {
1469 finished[i] = 1;
1470 if ((s->flags & KSPLICE_SIZE_DATA) == 0)
1471 remaining--;
1472 progress = true;
1473 } else if (ret != NO_MATCH) {
1474 kfree(finished);
1475 return ret;
1479 if (progress)
1480 continue;
1482 for (s = pack->helper_sizes; s < pack->helper_sizes_end; s++) {
1483 i = s - pack->helper_sizes;
1484 if (finished[i] == 0)
1485 ksdebug(pack, "run-pre: could not match "
1486 "section %s\n", s->symbol->label);
1488 print_abort(pack, "run-pre: could not match some sections");
1489 kfree(finished);
1490 return NO_MATCH;
1492 kfree(finished);
1493 return OK;
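/* Note on activate_helper() above: the outer while loop re-scans the
 * unmatched helper sections because a successful search_for_match() can
 * record namevals (via try_addr()/create_nameval() and handle_reloc())
 * that disambiguate sections which previously had no unique candidate
 * address; the routine gives up with NO_MATCH only once a full sweep
 * makes no progress. */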
1496 static abort_t search_for_match(struct module_pack *pack,
1497 const struct ksplice_size *s)
1499 int i;
1500 abort_t ret;
1501 unsigned long run_addr;
1502 LIST_HEAD(vals);
1503 struct candidate_val *v, *n;
1505 ret = add_system_map_candidates(pack, s->symbol, &vals);
1506 if (ret != OK) {
1507 release_vals(&vals);
1508 return ret;
1510 ret = compute_address(pack, s->symbol, &vals);
1511 if (ret != OK) {
1512 release_vals(&vals);
1513 return ret;
1516 ksdebug(pack, "run-pre: starting sect search for %s\n",
1517 s->symbol->label);
1519 list_for_each_entry_safe(v, n, &vals, list) {
1520 run_addr = v->val;
1522 yield();
1523 ret = try_addr(pack, s, run_addr, NULL, RUN_PRE_INITIAL);
1524 if (ret == NO_MATCH) {
1525 list_del(&v->list);
1526 kfree(v);
1527 } else if (ret != OK) {
1528 release_vals(&vals);
1529 return ret;
1533 #ifdef KSPLICE_STANDALONE
1534 if (list_empty(&vals) && (s->flags & KSPLICE_SIZE_DATA) == 0) {
1535 ret = brute_search_all(pack, s, &vals);
1536 if (ret != OK) {
1537 release_vals(&vals);
1538 return ret;
1540 /* Make sure run-pre matching output is displayed if
1541 brute_search succeeds */
1542 if (singular(&vals)) {
1543 run_addr = list_entry(vals.next, struct candidate_val,
1544 list)->val;
1545 ret = try_addr(pack, s, run_addr, NULL,
1546 RUN_PRE_INITIAL);
1547 if (ret != OK) {
1548 ksdebug(pack, "run-pre: Debug run failed for "
1549 "sect %s:\n", s->symbol->label);
1550 release_vals(&vals);
1551 return ret;
1555 #endif /* KSPLICE_STANDALONE */
1557 if (singular(&vals)) {
1558 LIST_HEAD(safety_records);
1559 run_addr = list_entry(vals.next, struct candidate_val,
1560 list)->val;
1561 ret = try_addr(pack, s, run_addr, &safety_records,
1562 RUN_PRE_FINAL);
1563 release_vals(&vals);
1564 if (ret != OK) {
1565 clear_list(&safety_records, struct safety_record, list);
1566 ksdebug(pack, "run-pre: Final run failed for sect "
1567 "%s:\n", s->symbol->label);
1568 } else {
1569 list_splice(&safety_records, &pack->safety_records);
1571 return ret;
1572 } else if (!list_empty(&vals)) {
1573 struct candidate_val *val;
1574 ksdebug(pack, "run-pre: multiple candidates for sect %s:\n",
1575 s->symbol->label);
1576 i = 0;
1577 list_for_each_entry(val, &vals, list) {
1578 i++;
1579 ksdebug(pack, "%lx\n", val->val);
1580 if (i > 5) {
1581 ksdebug(pack, "...\n");
1582 break;
1585 release_vals(&vals);
1586 return NO_MATCH;
1588 release_vals(&vals);
1589 return NO_MATCH;
1592 static void print_bytes(struct module_pack *pack,
1593 const unsigned char *run, int runc,
1594 const unsigned char *pre, int prec)
1596 int o;
1597 int matched = min(runc, prec);
1598 for (o = 0; o < matched; o++) {
1599 if (run[o] == pre[o])
1600 ksdebug(pack, "%02x ", run[o]);
1601 else
1602 ksdebug(pack, "%02x/%02x ", run[o], pre[o]);
1604 for (o = matched; o < runc; o++)
1605 ksdebug(pack, "%02x/ ", run[o]);
1606 for (o = matched; o < prec; o++)
1607 ksdebug(pack, "/%02x ", pre[o]);
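/* run_pre_cmp() walks the helper ("pre") copy of a section and the
 * candidate run address in lockstep: offsets that carry a Ksplice
 * relocation are compared symbolically via handle_reloc() and the field
 * is skipped, paravirt/alternative instructions are handled by
 * handle_paravirt(), and every remaining byte must match exactly (data
 * sections are exempt from the byte-for-byte check).  On success it
 * finishes by calling create_safety_record() for the matched run-time
 * range. */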
1610 static abort_t run_pre_cmp(struct module_pack *pack,
1611 const struct ksplice_size *s,
1612 unsigned long run_addr,
1613 struct list_head *safety_records,
1614 enum run_pre_mode mode)
1616 int matched = 0;
1617 abort_t ret;
1618 unsigned long pre_addr = s->thismod_addr;
1619 const struct ksplice_reloc *r;
1620 const unsigned char *pre, *run;
1621 unsigned char runval;
1623 if ((s->flags & KSPLICE_SIZE_TEXT) != 0)
1624 run_addr = follow_trampolines(pack, run_addr);
1626 pre = (const unsigned char *)pre_addr;
1627 run = (const unsigned char *)run_addr;
1628 while (pre < (const unsigned char *)pre_addr + s->size) {
1629 ret = lookup_reloc(pack, (unsigned long)pre, &r);
1630 if (ret == OK) {
1631 ret = handle_reloc(pack, r, (unsigned long)run, mode);
1632 if (ret != OK) {
1633 if (mode == RUN_PRE_INITIAL)
1634 ksdebug(pack, "reloc in sect does not "
1635 "match after %lx/%lx bytes\n",
1636 (unsigned long)pre - pre_addr,
1637 s->size);
1638 return ret;
1640 if (mode == RUN_PRE_DEBUG)
1641 print_bytes(pack, run, r->size, pre, r->size);
1642 pre += r->size;
1643 run += r->size;
1644 continue;
1645 } else if (ret != NO_MATCH) {
1646 return ret;
1649 if ((s->flags & KSPLICE_SIZE_TEXT) != 0) {
1650 ret = handle_paravirt(pack, (unsigned long)pre,
1651 (unsigned long)run, &matched);
1652 if (ret != OK)
1653 return ret;
1654 if (matched != 0) {
1655 if (mode == RUN_PRE_DEBUG)
1656 print_bytes(pack, run, matched, pre,
1657 matched);
1658 pre += matched;
1659 run += matched;
1660 continue;
1664 if (probe_kernel_read(&runval, (void *)run, 1) == -EFAULT) {
1665 if (mode == RUN_PRE_INITIAL)
1666 ksdebug(pack, "sect unmapped after %lx/%lx "
1667 "bytes\n",
1668 (unsigned long)pre - pre_addr, s->size);
1669 return NO_MATCH;
1672 if (runval != *pre && (s->flags & KSPLICE_SIZE_DATA) == 0) {
1673 if (mode == RUN_PRE_INITIAL)
1674 ksdebug(pack, "sect does not match after "
1675 "%lx/%lx bytes\n",
1676 (unsigned long)pre - pre_addr, s->size);
1677 if (mode == RUN_PRE_DEBUG) {
1678 print_bytes(pack, run, 1, pre, 1);
1679 ksdebug(pack, "[p_o=%lx] ! ",
1680 (unsigned long)pre - pre_addr);
1681 print_bytes(pack, run + 1, 2, pre + 1, 2);
1683 return NO_MATCH;
1685 if (mode == RUN_PRE_DEBUG)
1686 print_bytes(pack, run, 1, pre, 1);
1687 pre++;
1688 run++;
1690 return create_safety_record(pack, s, safety_records, run_addr,
1691 (unsigned long)run - run_addr);
1694 #ifdef KSPLICE_NO_KERNEL_SUPPORT
1695 static struct module *__module_data_address(unsigned long addr)
1697 struct module *mod;
1699 list_for_each_entry(mod, &modules, list) {
1700 if (addr >= (unsigned long)mod->module_core +
1701 mod->core_text_size &&
1702 addr < (unsigned long)mod->module_core + mod->core_size)
1703 return mod;
1705 return NULL;
1707 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
1709 static abort_t try_addr(struct module_pack *pack, const struct ksplice_size *s,
1710 unsigned long run_addr,
1711 struct list_head *safety_records,
1712 enum run_pre_mode mode)
1714 abort_t ret;
1715 const struct module *run_module;
1717 if ((s->flags & KSPLICE_SIZE_RODATA) != 0 ||
1718 (s->flags & KSPLICE_SIZE_DATA) != 0)
1719 run_module = __module_data_address(run_addr);
1720 else
1721 run_module = __module_text_address(run_addr);
1722 if (run_module != pack->target) {
1723 ksdebug(pack, "run-pre: ignoring address %" ADDR " in other "
1724 "module %s for sect %s\n", run_addr,
1725 run_module == NULL ? "vmlinux" : run_module->name,
1726 s->symbol->label);
1727 return NO_MATCH;
1730 ret = create_nameval(pack, s->symbol->label, run_addr, TEMP);
1731 if (ret != OK)
1732 return ret;
1734 #ifdef CONFIG_FUNCTION_DATA_SECTIONS
1735 ret = run_pre_cmp(pack, s, run_addr, safety_records, mode);
1736 #else /* !CONFIG_FUNCTION_DATA_SECTIONS */
1737 if ((s->flags & KSPLICE_SIZE_TEXT) != 0)
1738 ret = arch_run_pre_cmp(pack, s, run_addr, safety_records, mode);
1739 else
1740 ret = run_pre_cmp(pack, s, run_addr, safety_records, mode);
1741 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
1742 if (ret == NO_MATCH && mode != RUN_PRE_FINAL) {
1743 set_temp_myst_relocs(pack, NOVAL);
1744 ksdebug(pack, "run-pre: %s sect %s does not match ",
1745 (s->flags & KSPLICE_SIZE_RODATA) != 0 ? "data" : "text",
1746 s->symbol->label);
1747 ksdebug(pack, "(r_a=%" ADDR " p_a=%" ADDR " s=%lx)\n",
1748 run_addr, s->thismod_addr, s->size);
1749 ksdebug(pack, "run-pre: ");
1750 if (pack->bundle->debug >= 1) {
1751 #ifdef CONFIG_FUNCTION_DATA_SECTIONS
1752 ret = run_pre_cmp(pack, s, run_addr, safety_records,
1753 RUN_PRE_DEBUG);
1754 #else /* !CONFIG_FUNCTION_DATA_SECTIONS */
1755 if ((s->flags & KSPLICE_SIZE_TEXT) != 0)
1756 ret = arch_run_pre_cmp(pack, s, run_addr,
1757 safety_records,
1758 RUN_PRE_DEBUG);
1759 else
1760 ret = run_pre_cmp(pack, s, run_addr,
1761 safety_records,
1762 RUN_PRE_DEBUG);
1763 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
1764 set_temp_myst_relocs(pack, NOVAL);
1766 ksdebug(pack, "\n");
1767 return ret;
1768 } else if (ret != OK) {
1769 set_temp_myst_relocs(pack, NOVAL);
1770 return ret;
1771 } else if (mode != RUN_PRE_FINAL) {
1772 set_temp_myst_relocs(pack, NOVAL);
1773 ksdebug(pack, "run-pre: candidate for sect %s=%" ADDR "\n",
1774 s->symbol->label, run_addr);
1775 return OK;
1778 set_temp_myst_relocs(pack, VAL);
1779 ksdebug(pack, "run-pre: found sect %s=%" ADDR "\n", s->symbol->label,
1780 run_addr);
1781 return OK;
1784 static abort_t create_safety_record(struct module_pack *pack,
1785 const struct ksplice_size *s,
1786 struct list_head *record_list,
1787 unsigned long run_addr,
1788 unsigned long run_size)
1790 struct safety_record *rec;
1791 struct ksplice_patch *p;
1793 if (record_list == NULL)
1794 return OK;
1796 for (p = pack->patches; p < pack->patches_end; p++) {
1797 if (strcmp(s->symbol->label, p->label) == 0)
1798 break;
1800 if (p >= pack->patches_end)
1801 return OK;
1803 if ((s->flags & KSPLICE_SIZE_TEXT) == 0 && p->repladdr != 0) {
1804 ksdebug(pack, "Error: ksplice_patch %s is matched to a "
1805 "non-deleted non-text section!\n", s->symbol->label);
1806 return UNEXPECTED;
1809 rec = kmalloc(sizeof(*rec), GFP_KERNEL);
1810 if (rec == NULL)
1811 return OUT_OF_MEMORY;
1812 rec->addr = run_addr;
1813 rec->size = run_size;
1814 rec->label = s->symbol->label;
1815 rec->first_byte_safe = false;
1817 list_add(&rec->list, record_list);
1818 return OK;
1821 static abort_t handle_reloc(struct module_pack *pack,
1822 const struct ksplice_reloc *r,
1823 unsigned long run_addr, enum run_pre_mode mode)
1825 unsigned long val;
1826 abort_t ret;
1828 ret = read_reloc_value(pack, r, run_addr, &val);
1829 if (ret != OK)
1830 return ret;
1832 if (mode == RUN_PRE_INITIAL)
1833 ksdebug(pack, "run-pre: reloc at r_a=%" ADDR " p_a=%" ADDR
1834 " to %s+%lx: found %s = %" ADDR "\n",
1835 run_addr, r->blank_addr, r->symbol->label, r->addend,
1836 r->symbol->label, val);
1838 if (starts_with(r->symbol->label, ".rodata.str"))
1839 return OK;
1841 if (contains_canary(pack, run_addr, r->size, r->dst_mask) != 0)
1842 return UNEXPECTED;
1844 ret = create_nameval(pack, r->symbol->label, val, TEMP);
1845 if (ret == NO_MATCH && mode == RUN_PRE_INITIAL) {
1846 struct reloc_nameval *nv = find_nameval(pack, r->symbol->label);
1847 ksdebug(pack, "run-pre: reloc at r_a=%" ADDR " p_a=%" ADDR
1848 ": nameval %s = %" ADDR "(%d) does not match expected "
1849 "%" ADDR "\n", run_addr, r->blank_addr,
1850 r->symbol->label, nv->val, nv->status, val);
1852 return ret;
1855 static abort_t read_reloc_value(struct module_pack *pack,
1856 const struct ksplice_reloc *r,
1857 unsigned long addr, unsigned long *valp)
1859 unsigned char bytes[sizeof(long)];
1860 unsigned long val;
1862 if (probe_kernel_read(bytes, (void *)addr, r->size) == -EFAULT)
1863 return NO_MATCH;
1865 switch (r->size) {
1866 case 1:
1867 val = *(uint8_t *)bytes;
1868 break;
1869 case 2:
1870 val = *(uint16_t *)bytes;
1871 break;
1872 case 4:
1873 val = *(uint32_t *)bytes;
1874 break;
1875 #if BITS_PER_LONG >= 64
1876 case 8:
1877 val = *(uint64_t *)bytes;
1878 break;
1879 #endif /* BITS_PER_LONG */
1880 default:
1881 print_abort(pack, "Invalid relocation size");
1882 return UNEXPECTED;
1885 val &= r->dst_mask;
1886 if (r->signed_addend)
1887 val |= -(val & (r->dst_mask & ~(r->dst_mask >> 1)));
1888 val <<= r->rightshift;
1889 if (r->pcrel)
1890 val += (unsigned long)addr;
1891 val -= r->addend;
1892 *valp = val;
1893 return OK;
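/* Editorial worked example (made-up addresses, not part of the original
 * file): decoding a 32-bit PC-relative x86 call. Suppose the rel32 field
 * lives at addr = 0xc0100005 and holds 0x00000ff6, with size = 4,
 * dst_mask = 0xffffffff, rightshift = 0, pcrel = 1 and addend = -4 (the -4
 * accounts for the displacement being measured from the end of the
 * instruction). Then:
 *
 *     val  = 0x00000ff6        bytes read and masked
 *     val += 0xc0100005        pcrel: add the field's run address
 *     val -= (-4)              subtract the addend
 *          = 0xc0100fff        the run-time address of the call target
 *
 * write_reloc_value() below performs exactly the inverse computation. */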
1896 static abort_t write_reloc_value(struct module_pack *pack,
1897 const struct ksplice_reloc *r,
1898 unsigned long sym_addr)
1900 unsigned long val = sym_addr + r->addend;
1901 if (r->pcrel)
1902 val -= r->blank_addr;
1903 val >>= r->rightshift;
1904 switch (r->size) {
1905 case 1:
1906 *(uint8_t *)r->blank_addr =
1907 (*(uint8_t *)r->blank_addr & ~r->dst_mask) |
1908 (val & r->dst_mask);
1909 break;
1910 case 2:
1911 *(uint16_t *)r->blank_addr =
1912 (*(uint16_t *)r->blank_addr & ~r->dst_mask) |
1913 (val & r->dst_mask);
1914 break;
1915 case 4:
1916 *(uint32_t *)r->blank_addr =
1917 (*(uint32_t *)r->blank_addr & ~r->dst_mask) |
1918 (val & r->dst_mask);
1919 break;
1920 #if BITS_PER_LONG >= 64
1921 case 8:
1922 *(uint64_t *)r->blank_addr =
1923 (*(uint64_t *)r->blank_addr & ~r->dst_mask) |
1924 (val & r->dst_mask);
1925 break;
1926 #endif /* BITS_PER_LONG */
1927 default:
1928 print_abort(pack, "Invalid relocation size");
1929 return UNEXPECTED;
1932 if (read_reloc_value(pack, r, r->blank_addr, &val) != OK ||
1933 val != sym_addr) {
1934 print_abort(pack, "relocation overflow");
1935 return UNEXPECTED;
1938 return OK;
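/* Editorial note (illustrative, made-up values): the read-back above is the
 * overflow check. For example, with size = 1, dst_mask = 0xff, pcrel = 0,
 * rightshift = 0 and addend = 0, a sym_addr of 0x1234 can only store 0x34 in
 * the one-byte field; read_reloc_value() then recovers 0x34 != 0x1234, so we
 * return UNEXPECTED instead of silently emitting a truncated relocation. */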
1941 static abort_t apply_relocs(struct module_pack *pack,
1942 const struct ksplice_reloc *relocs,
1943 const struct ksplice_reloc *relocs_end)
1945 const struct ksplice_reloc *r;
1946 for (r = relocs; r < relocs_end; r++) {
1947 abort_t ret = apply_reloc(pack, r);
1948 if (ret != OK)
1949 return ret;
1951 return OK;
1954 static abort_t apply_reloc(struct module_pack *pack,
1955 const struct ksplice_reloc *r)
1957 abort_t ret;
1958 int canary_ret;
1959 unsigned long sym_addr;
1960 LIST_HEAD(vals);
1962 canary_ret = contains_canary(pack, r->blank_addr, r->size, r->dst_mask);
1963 if (canary_ret < 0)
1964 return UNEXPECTED;
1965 if (canary_ret == 0) {
1966 ksdebug(pack, "reloc: skipped %s:%" ADDR "(altinstr)\n",
1967 r->symbol->label, r->blank_offset);
1968 return OK;
1971 #ifdef KSPLICE_STANDALONE
1972 if (!bootstrapped) {
1973 ret = add_system_map_candidates(pack, r->symbol, &vals);
1974 if (ret != OK) {
1975 release_vals(&vals);
1976 return ret;
1979 #else /* !KSPLICE_STANDALONE */
1980 #ifdef CONFIG_KALLSYMS
1981 ret = add_system_map_candidates(pack, r->symbol, &vals);
1982 if (ret != OK) {
1983 release_vals(&vals);
1984 return ret;
1986 #endif /* CONFIG_KALLSYMS */
1987 #endif /* KSPLICE_STANDALONE */
1988 ret = compute_address(pack, r->symbol, &vals);
1989 if (ret != OK) {
1990 release_vals(&vals);
1991 return ret;
1993 if (!singular(&vals)) {
1994 release_vals(&vals);
1995 failed_to_find(pack, r->symbol->label);
1996 return FAILED_TO_FIND;
1998 sym_addr = list_entry(vals.next, struct candidate_val, list)->val;
1999 release_vals(&vals);
2001 ret = write_reloc_value(pack, r, sym_addr);
2002 if (ret != OK)
2003 return ret;
2005 ksdebug(pack, "reloc: %s:%" ADDR " ", r->symbol->label,
2006 r->blank_offset);
2007 ksdebug(pack, "(S=%" ADDR " A=%" ADDR " ", sym_addr, r->addend);
2008 switch (r->size) {
2009 case 1:
2010 ksdebug(pack, "aft=%02x)\n", *(uint8_t *)r->blank_addr);
2011 break;
2012 case 2:
2013 ksdebug(pack, "aft=%04x)\n", *(uint16_t *)r->blank_addr);
2014 break;
2015 case 4:
2016 ksdebug(pack, "aft=%08x)\n", *(uint32_t *)r->blank_addr);
2017 break;
2018 #if BITS_PER_LONG >= 64
2019 case 8:
2020 ksdebug(pack, "aft=%016llx)\n", (unsigned long long)*(uint64_t *)r->blank_addr);
2021 break;
2022 #endif /* BITS_PER_LONG */
2023 default:
2024 print_abort(pack, "Invalid relocation size");
2025 return UNEXPECTED;
2027 #ifdef KSPLICE_STANDALONE
2028 if (!bootstrapped)
2029 return OK;
2030 #endif /* KSPLICE_STANDALONE */
2031 /* Create namevals so that we can verify our choices in the second
2032 round of run-pre matching that considers data sections. */
2033 ret = create_nameval(pack, r->symbol->label, sym_addr, VAL);
2034 if (ret != OK)
2035 return ret;
2036 return add_dependency_on_address(pack, sym_addr);
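/* Editorial sketch, not part of the original file. apply_reloc() follows a
 * "gather every plausible address, then refuse to guess" pattern: candidates
 * are accumulated from System.map, kallsyms and friends, and the relocation
 * is written only if exactly one distinct value survives. A simplified
 * stand-in for the singular() check, over a plain array: */
#if 0
static int example_pick_unique(const unsigned long *cand, int n,
                               unsigned long *out)
{
        int i, found = 0;
        for (i = 0; i < n; i++) {
                if (found && cand[i] != *out)
                        return 0;       /* ambiguous: caller must abort */
                *out = cand[i];
                found = 1;
        }
        return found;                   /* 0 also means "no candidate at all" */
}
#endif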
2039 static abort_t add_system_map_candidates(struct module_pack *pack,
2040 const struct ksplice_symbol *symbol,
2041 struct list_head *vals)
2043 abort_t ret;
2044 long off;
2045 int i;
2047 /* Some Fedora kernel releases have System.map files whose symbol
2048 * addresses disagree with the running kernel by a constant address
2049 * offset because of the CONFIG_PHYSICAL_START and CONFIG_PHYSICAL_ALIGN
2050 * values used to compile these kernels. This constant address offset
2051 * is always a multiple of 0x100000.
2053 * If we observe an offset that is NOT a multiple of 0x100000, then the
2054 * user provided us with an incorrect System.map file, and we should
2055 * abort.
2056 * If we observe an offset that is a multiple of 0x100000, then we can
2057 * adjust the System.map address values accordingly and proceed.
2059 off = (unsigned long)printk - pack->map_printk;
2060 if (off & 0xfffff) {
2061 print_abort(pack, "System.map does not match kernel");
2062 return BAD_SYSTEM_MAP;
2064 for (i = 0; i < symbol->nr_candidates; i++) {
2065 ret = add_candidate_val(vals, symbol->candidates[i] + off);
2066 if (ret != OK)
2067 return ret;
2069 return OK;
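/* Editorial worked example (made-up addresses): if the running kernel has
 * printk at 0xc0300000 but the user's System.map says 0xc0200000, then
 * off = 0x100000 and (off & 0xfffff) == 0, so every System.map candidate is
 * shifted up by 0x100000 before being tried. If the two addresses instead
 * differed by, say, 0x1040, the low 20 bits would be non-zero and the
 * System.map would be rejected outright rather than risk matching against
 * the wrong kernel. */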
2072 static abort_t add_dependency_on_address(struct module_pack *pack,
2073 unsigned long addr)
2075 struct module *m =
2076 __module_text_address(follow_trampolines(pack, addr));
2077 if (m == NULL || starts_with(m->name, pack->name) ||
2078 ends_with(m->name, "_helper"))
2079 return OK;
2080 if (use_module(pack->primary, m) != 1)
2081 return MODULE_BUSY;
2082 return OK;
2085 static abort_t add_patch_dependencies(struct module_pack *pack)
2087 abort_t ret;
2088 const struct ksplice_patch *p;
2089 for (p = pack->patches; p < pack->patches_end; p++) {
2090 ret = add_dependency_on_address(pack, p->oldaddr);
2091 if (ret != OK)
2092 return ret;
2094 return OK;
2097 #ifdef KSPLICE_NO_KERNEL_SUPPORT
2098 #ifdef CONFIG_MODULE_UNLOAD
2099 struct module_use {
2100 struct list_head list;
2101 struct module *module_which_uses;
2104 /* I'm not yet certain whether we need the strong form of this. */
2105 static inline int strong_try_module_get(struct module *mod)
2107 if (mod && mod->state != MODULE_STATE_LIVE)
2108 return -EBUSY;
2109 if (try_module_get(mod))
2110 return 0;
2111 return -ENOENT;
2114 /* Does a already use b? */
2115 static int already_uses(struct module *a, struct module *b)
2117 struct module_use *use;
2118 list_for_each_entry(use, &b->modules_which_use_me, list) {
2119 if (use->module_which_uses == a)
2120 return 1;
2122 return 0;
2125 /* Make it so module a uses b. Must be holding module_mutex */
2126 static int use_module(struct module *a, struct module *b)
2128 struct module_use *use;
2129 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
2130 /* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
2131 int no_warn;
2132 #endif /* LINUX_VERSION_CODE */
2133 if (b == NULL || already_uses(a, b))
2134 return 1;
2136 if (strong_try_module_get(b) < 0)
2137 return 0;
2139 use = kmalloc(sizeof(*use), GFP_ATOMIC);
2140 if (!use) {
2141 module_put(b);
2142 return 0;
2144 use->module_which_uses = a;
2145 list_add(&use->list, &b->modules_which_use_me);
2146 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
2147 /* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
2148 no_warn = sysfs_create_link(b->holders_dir, &a->mkobj.kobj, a->name);
2149 #endif /* LINUX_VERSION_CODE */
2150 return 1;
2152 #else /* CONFIG_MODULE_UNLOAD */
2153 static int use_module(struct module *a, struct module *b)
2155 return 1;
2157 #endif /* CONFIG_MODULE_UNLOAD */
2158 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
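/* Editorial usage sketch (hypothetical helper, not part of the original
 * file): add_dependency_on_address() above is how an applied update pins
 * whatever module owns code it now branches into, so that module cannot be
 * unloaded out from under the patch. */
#if 0
static void example_pin_owner(struct module_pack *pack, unsigned long addr)
{
        if (add_dependency_on_address(pack, addr) == MODULE_BUSY)
                ksdebug(pack, "could not pin the module owning %" ADDR "\n",
                        addr);
}
#endif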
2160 static abort_t compute_address(struct module_pack *pack,
2161 const struct ksplice_symbol *ksym,
2162 struct list_head *vals)
2164 abort_t ret;
2165 struct reloc_nameval *nv;
2167 #ifdef KSPLICE_STANDALONE
2168 if (!bootstrapped)
2169 return OK;
2170 #endif /* KSPLICE_STANDALONE */
2172 nv = find_nameval(pack, ksym->label);
2173 if (nv != NULL) {
2174 release_vals(vals);
2175 ksdebug(pack, "using detected sym %s=%" ADDR "\n", ksym->label,
2176 nv->val);
2177 return add_candidate_val(vals, nv->val);
2180 if (starts_with(ksym->label, ".rodata"))
2181 return OK;
2183 #ifdef CONFIG_MODULE_UNLOAD
2184 if (strcmp(ksym->label, "cleanup_module") == 0 && pack->target != NULL
2185 && pack->target->exit != NULL) {
2186 ret = add_candidate_val(vals,
2187 (unsigned long)pack->target->exit);
2188 if (ret != OK)
2189 return ret;
2191 #endif
2193 ret = exported_symbol_lookup(ksym->name, vals);
2194 if (ret == OK)
2195 ret = new_export_lookup(pack->bundle, ksym->name, vals);
2196 #ifdef CONFIG_KALLSYMS
2197 if (ret == OK)
2198 ret = kernel_lookup(ksym->name, vals);
2199 if (ret == OK)
2200 ret = other_module_lookup(pack, ksym->name, vals);
2201 #endif /* CONFIG_KALLSYMS */
2202 if (ret != OK)
2203 return ret;
2205 return OK;
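/* Editorial usage sketch (hypothetical helper, not part of the original
 * file): compute_address() layers its sources: a nameval pinned by earlier
 * run-pre matching wins outright; otherwise candidates are gathered from the
 * export tables, exports created by this update, kallsyms and other loaded
 * Ksplice packs, and the caller resolves or rejects any ambiguity.  For
 * example, resolving a single symbol by hand: */
#if 0
static unsigned long example_resolve(struct module_pack *pack,
                                     const struct ksplice_symbol *ksym)
{
        unsigned long addr = 0;
        LIST_HEAD(vals);
        if (compute_address(pack, ksym, &vals) == OK && singular(&vals))
                addr = list_entry(vals.next, struct candidate_val, list)->val;
        release_vals(&vals);
        return addr;
}
#endif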
2208 static abort_t new_export_lookup(struct update_bundle *bundle,
2209 const char *name, struct list_head *vals)
2211 struct module_pack *pack;
2212 struct ksplice_export *exp;
2213 list_for_each_entry(pack, &bundle->packs, list) {
2214 for (exp = pack->exports; exp < pack->exports_end; exp++) {
2215 if (strcmp(exp->new_name, name) == 0 &&
2216 exp->sym != NULL &&
2217 contains_canary(pack,
2218 (unsigned long)&exp->sym->value,
2219 sizeof(unsigned long), -1) == 0)
2220 return add_candidate_val(vals, exp->sym->value);
2223 return OK;
2226 static abort_t exported_symbol_lookup(const char *name, struct list_head *vals)
2228 const struct kernel_symbol *sym;
2229 sym = find_symbol(name, NULL, NULL, true, false);
2230 if (sym == NULL)
2231 return OK;
2232 return add_candidate_val(vals, sym->value);
2235 #ifdef KSPLICE_NO_KERNEL_SUPPORT
2236 #ifndef CONFIG_MODVERSIONS
2237 #define symversion(base, idx) NULL
2238 #else
2239 #define symversion(base, idx) (((base) != NULL) ? ((base) + (idx)) : NULL)
2240 #endif
2242 struct symsearch {
2243 const struct kernel_symbol *start, *stop;
2244 const unsigned long *crcs;
2245 enum {
2246 NOT_GPL_ONLY,
2247 GPL_ONLY,
2248 WILL_BE_GPL_ONLY,
2249 } licence;
2250 bool unused;
2253 static bool each_symbol_in_section(const struct symsearch *arr,
2254 unsigned int arrsize,
2255 struct module *owner,
2256 bool (*fn)(const struct symsearch *syms,
2257 struct module *owner,
2258 unsigned int symnum, void *data),
2259 void *data)
2261 unsigned int i, j;
2263 for (j = 0; j < arrsize; j++) {
2264 for (i = 0; i < arr[j].stop - arr[j].start; i++)
2265 if (fn(&arr[j], owner, i, data))
2266 return true;
2269 return false;
2272 /* Returns true as soon as fn returns true, otherwise false. */
2273 static bool each_symbol(bool (*fn)(const struct symsearch *arr,
2274 struct module *owner,
2275 unsigned int symnum, void *data),
2276 void *data)
2278 struct module *mod;
2279 const struct symsearch arr[] = {
2280 { __start___ksymtab, __stop___ksymtab, __start___kcrctab,
2281 NOT_GPL_ONLY, false },
2282 { __start___ksymtab_gpl, __stop___ksymtab_gpl,
2283 __start___kcrctab_gpl,
2284 GPL_ONLY, false },
2285 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
2286 { __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
2287 __start___kcrctab_gpl_future,
2288 WILL_BE_GPL_ONLY, false },
2289 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
2290 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
2291 { __start___ksymtab_unused, __stop___ksymtab_unused,
2292 __start___kcrctab_unused,
2293 NOT_GPL_ONLY, true },
2294 { __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
2295 __start___kcrctab_unused_gpl,
2296 GPL_ONLY, true },
2297 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
2300 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
2301 return true;
2303 list_for_each_entry(mod, &modules, list) {
2304 struct symsearch module_arr[] = {
2305 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
2306 NOT_GPL_ONLY, false },
2307 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
2308 mod->gpl_crcs,
2309 GPL_ONLY, false },
2310 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
2311 { mod->gpl_future_syms,
2312 mod->gpl_future_syms + mod->num_gpl_future_syms,
2313 mod->gpl_future_crcs,
2314 WILL_BE_GPL_ONLY, false },
2315 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
2316 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
2317 { mod->unused_syms,
2318 mod->unused_syms + mod->num_unused_syms,
2319 mod->unused_crcs,
2320 NOT_GPL_ONLY, true },
2321 { mod->unused_gpl_syms,
2322 mod->unused_gpl_syms + mod->num_unused_gpl_syms,
2323 mod->unused_gpl_crcs,
2324 GPL_ONLY, true },
2325 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
2328 if (each_symbol_in_section(module_arr, ARRAY_SIZE(module_arr),
2329 mod, fn, data))
2330 return true;
2332 return false;
2335 struct find_symbol_arg {
2336 /* Input */
2337 const char *name;
2338 bool gplok;
2339 bool warn;
2341 /* Output */
2342 struct module *owner;
2343 const unsigned long *crc;
2344 const struct kernel_symbol *sym;
2347 static bool find_symbol_in_section(const struct symsearch *syms,
2348 struct module *owner,
2349 unsigned int symnum, void *data)
2351 struct find_symbol_arg *fsa = data;
2353 if (strcmp(syms->start[symnum].name, fsa->name) != 0)
2354 return false;
2356 if (!fsa->gplok) {
2357 if (syms->licence == GPL_ONLY)
2358 return false;
2359 if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
2360 printk(KERN_WARNING "Symbol %s is being used "
2361 "by a non-GPL module, which will not "
2362 "be allowed in the future\n", fsa->name);
2363 printk(KERN_WARNING "Please see the file "
2364 "Documentation/feature-removal-schedule.txt "
2365 "in the kernel source tree for more details.\n");
2369 #ifdef CONFIG_UNUSED_SYMBOLS
2370 if (syms->unused && fsa->warn) {
2371 printk(KERN_WARNING "Symbol %s is marked as UNUSED, "
2372 "however this module is using it.\n", fsa->name);
2373 printk(KERN_WARNING
2374 "This symbol will go away in the future.\n");
2375 printk(KERN_WARNING
2376 "Please evalute if this is the right api to use and if "
2377 "it really is, submit a report the linux kernel "
2378 "mailinglist together with submitting your code for "
2379 "inclusion.\n");
2381 #endif
2383 fsa->owner = owner;
2384 fsa->crc = symversion(syms->crcs, symnum);
2385 fsa->sym = &syms->start[symnum];
2386 return true;
2389 /* Find a symbol and return it, along with (optional) crc and
2390 * (optional) module which owns it */
2391 static const struct kernel_symbol *find_symbol(const char *name,
2392 struct module **owner,
2393 const unsigned long **crc,
2394 bool gplok, bool warn)
2396 struct find_symbol_arg fsa;
2398 fsa.name = name;
2399 fsa.gplok = gplok;
2400 fsa.warn = warn;
2402 if (each_symbol(find_symbol_in_section, &fsa)) {
2403 if (owner)
2404 *owner = fsa.owner;
2405 if (crc)
2406 *crc = fsa.crc;
2407 return fsa.sym;
2410 return NULL;
2412 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
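/* Editorial usage sketch (illustrative): the reimplemented find_symbol()
 * mirrors the kernel's interface: look up an exported symbol by name and
 * optionally learn its owner and CRC.  "printk" is used here only because it
 * is always exported. */
#if 0
static unsigned long example_lookup_printk(void)
{
        struct module *owner;
        const unsigned long *crc;
        const struct kernel_symbol *sym =
                find_symbol("printk", &owner, &crc, true, false);
        return sym == NULL ? 0 : sym->value;
}
#endif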
2414 #ifdef CONFIG_KALLSYMS
2415 #ifdef KSPLICE_NO_KERNEL_SUPPORT
2416 static abort_t other_module_lookup(struct module_pack *pack, const char *name,
2417 struct list_head *vals)
2419 abort_t ret = OK;
2420 struct accumulate_struct acc = { name, vals };
2421 const struct module *m;
2423 list_for_each_entry(m, &modules, list) {
2424 if (starts_with(m->name, pack->name) ||
2425 !ends_with(m->name, pack->target_name))
2426 continue;
2427 ret = (__force abort_t)
2428 module_kallsyms_on_each_symbol(m, accumulate_matching_names,
2429 &acc);
2430 if (ret != OK)
2431 break;
2433 return ret;
2435 #else /* !KSPLICE_NO_KERNEL_SUPPORT */
2436 static abort_t other_module_lookup(struct module_pack *pack, const char *name,
2437 struct list_head *vals)
2439 struct accumulate_struct acc = { name, vals };
2440 struct ksplice_module_list_entry *entry;
2441 abort_t ret;
2443 list_for_each_entry(entry, &ksplice_module_list, list) {
2444 if (entry->target != pack->target ||
2445 entry->primary == pack->primary)
2446 continue;
2447 ret = (__force abort_t)
2448 module_kallsyms_on_each_symbol(entry->primary,
2449 accumulate_matching_names,
2450 &acc);
2451 if (ret != OK)
2452 return ret;
2454 if (pack->target == NULL)
2455 return OK;
2456 ret = (__force abort_t)
2457 module_kallsyms_on_each_symbol(pack->target,
2458 accumulate_matching_names, &acc);
2459 return ret;
2461 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
2463 static int accumulate_matching_names(void *data, const char *sym_name,
2464 unsigned long sym_val)
2466 abort_t ret = OK;
2467 struct accumulate_struct *acc = data;
2469 if (strcmp(sym_name, acc->desired_name) == 0)
2470 ret = add_candidate_val(acc->vals, sym_val);
2471 return (__force int)ret;
2473 #endif /* CONFIG_KALLSYMS */
2475 #ifdef KSPLICE_STANDALONE
2476 static abort_t brute_search(struct module_pack *pack,
2477 const struct ksplice_size *s,
2478 const void *start, unsigned long len,
2479 struct list_head *vals)
2481 unsigned long addr;
2482 char run, pre;
2483 abort_t ret;
2485 for (addr = (unsigned long)start; addr < (unsigned long)start + len;
2486 addr++) {
2487 if (addr % 100000 == 0)
2488 yield();
2490 if (probe_kernel_read(&run, (void *)addr, 1) == -EFAULT)
2491 return OK;
2493 pre = *(const unsigned char *)(s->thismod_addr);
2495 if (run != pre)
2496 continue;
2498 ret = try_addr(pack, s, addr, NULL, RUN_PRE_INITIAL);
2499 if (ret == OK) {
2500 ret = add_candidate_val(vals, addr);
2501 if (ret != OK)
2502 return ret;
2503 } else if (ret != NO_MATCH) {
2504 return ret;
2508 return OK;
2511 static abort_t brute_search_all(struct module_pack *pack,
2512 const struct ksplice_size *s,
2513 struct list_head *vals)
2515 struct module *m;
2516 abort_t ret = OK;
2517 int saved_debug;
2519 ksdebug(pack, "brute_search: searching for %s\n", s->symbol->label);
2520 saved_debug = pack->bundle->debug;
2521 pack->bundle->debug = 0;
2523 list_for_each_entry(m, &modules, list) {
2524 if (starts_with(m->name, pack->name) ||
2525 ends_with(m->name, "_helper"))
2526 continue;
2527 ret = brute_search(pack, s, m->module_core, m->core_size, vals);
2528 if (ret != OK)
2529 break;
2530 ret = brute_search(pack, s, m->module_init, m->init_size, vals);
2531 if (ret != OK)
2532 break;
2534 if (ret == OK)
2535 ret = brute_search(pack, s, (const void *)init_mm.start_code,
2536 init_mm.end_code - init_mm.start_code, vals);
2537 pack->bundle->debug = saved_debug;
2539 return ret;
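/* Editorial sketch (simplified, not part of the original file): the heart of
 * brute_search() is a byte-at-a-time scan that cheaply rejects almost every
 * offset by comparing one leading byte before paying for a full run-pre
 * comparison via try_addr(). */
#if 0
static const unsigned char *example_scan(const unsigned char *region,
                                         unsigned long len,
                                         unsigned char first_pre_byte)
{
        unsigned long i;
        for (i = 0; i < len; i++)
                if (region[i] == first_pre_byte)
                        return &region[i];      /* candidate worth a full check */
        return NULL;
}
#endif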
2542 #ifdef CONFIG_KALLSYMS
2543 /* Modified version of Linux's kallsyms_lookup_name */
2544 static abort_t kernel_lookup(const char *name, struct list_head *vals)
2546 abort_t ret;
2547 char namebuf[KSYM_NAME_LEN + 1];
2548 unsigned long i;
2549 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
2550 unsigned long off;
2551 #endif /* LINUX_VERSION_CODE */
2553 /* kallsyms compression was added by 5648d78927ca65e74aadc88a2b1d6431e55e78ec
2554 * 2.6.10 was the first release after this commit
2556 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
2557 for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
2558 off = ksplice_kallsyms_expand_symbol(off, namebuf);
2560 if (strcmp(namebuf, name) == 0) {
2561 ret = add_candidate_val(vals, kallsyms_addresses[i]);
2562 if (ret != OK)
2563 return ret;
2566 #else /* LINUX_VERSION_CODE < */
2567 char *knames;
2569 for (i = 0, knames = kallsyms_names; i < kallsyms_num_syms; i++) {
2570 unsigned prefix = *knames++;
2572 strlcpy(namebuf + prefix, knames, KSYM_NAME_LEN - prefix);
2574 if (strcmp(namebuf, name) == 0) {
2575 ret = add_candidate_val(vals, kallsyms_addresses[i]);
2576 if (ret != OK)
2577 return ret;
2580 knames += strlen(knames) + 1;
2582 #endif /* LINUX_VERSION_CODE */
2584 return OK;
2587 /* kallsyms compression was added by 5648d78927ca65e74aadc88a2b1d6431e55e78ec
2588 * 2.6.10 was the first release after this commit
2590 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
2591 extern u8 kallsyms_token_table[];
2592 extern u16 kallsyms_token_index[];
2593 /* Modified version of Linux's kallsyms_expand_symbol */
2594 static unsigned long ksplice_kallsyms_expand_symbol(unsigned long off,
2595 char *result)
2597 long len, skipped_first = 0;
2598 const u8 *tptr, *data;
2600 data = &kallsyms_names[off];
2601 len = *data;
2602 data++;
2604 off += len + 1;
2606 while (len) {
2607 tptr = &kallsyms_token_table[kallsyms_token_index[*data]];
2608 data++;
2609 len--;
2611 while (*tptr) {
2612 if (skipped_first) {
2613 *result = *tptr;
2614 result++;
2615 } else
2616 skipped_first = 1;
2617 tptr++;
2621 *result = '\0';
2623 return off;
2625 #endif /* LINUX_VERSION_CODE */
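/* Editorial note on the expansion above (illustrative): a compressed kallsyms
 * entry is a length byte followed by that many token indices; each index
 * selects a NUL-terminated fragment from kallsyms_token_table via
 * kallsyms_token_index.  The first expanded character is the symbol's type
 * code (t, T, d, ...), which is why skipped_first drops it before copying
 * into result.  E.g. a hypothetical entry {0x03, i1, i2, i3} whose three
 * tokens expand to "T", "prin" and "tk" yields the name "printk". */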
2627 #ifdef KSPLICE_NO_KERNEL_SUPPORT
2628 static int module_kallsyms_on_each_symbol(const struct module *mod,
2629 int (*fn)(void *, const char *,
2630 unsigned long),
2631 void *data)
2633 unsigned int i;
2634 int ret;
2636 for (i = 0; i < mod->num_symtab; i++) {
2637 if ((ret =
2638 fn(data, mod->strtab + mod->symtab[i].st_name,
2639 mod->symtab[i].st_value)) != 0)
2640 return ret;
2642 return 0;
2644 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
2645 #endif /* CONFIG_KALLSYMS */
2646 #else /* !KSPLICE_STANDALONE */
2648 static abort_t kernel_lookup(const char *name, struct list_head *vals)
2650 struct accumulate_struct acc = { name, vals };
2651 return (__force abort_t)
2652 kernel_kallsyms_on_each_symbol(accumulate_matching_names, &acc);
2654 #endif /* KSPLICE_STANDALONE */
2656 static abort_t add_candidate_val(struct list_head *vals, unsigned long val)
2658 struct candidate_val *tmp, *new;
2660 list_for_each_entry(tmp, vals, list) {
2661 if (tmp->val == val)
2662 return OK;
2664 new = kmalloc(sizeof(*new), GFP_KERNEL);
2665 if (new == NULL)
2666 return OUT_OF_MEMORY;
2667 new->val = val;
2668 list_add(&new->list, vals);
2669 return OK;
2672 static void release_vals(struct list_head *vals)
2674 clear_list(vals, struct candidate_val, list);
2677 static struct reloc_nameval *find_nameval(struct module_pack *pack,
2678 const char *label)
2680 struct reloc_nameval *nv;
2681 list_for_each_entry(nv, &pack->reloc_namevals, list) {
2682 if (strcmp(nv->label, label) == 0)
2683 return nv;
2685 return NULL;
2688 static abort_t create_nameval(struct module_pack *pack, const char *label,
2689 unsigned long val, int status)
2691 struct reloc_nameval *nv = find_nameval(pack, label);
2692 if (nv != NULL)
2693 return nv->val == val ? OK : NO_MATCH;
2695 nv = kmalloc(sizeof(*nv), GFP_KERNEL);
2696 if (nv == NULL)
2697 return OUT_OF_MEMORY;
2698 nv->label = label;
2699 nv->val = val;
2700 nv->status = status;
2701 list_add(&nv->list, &pack->reloc_namevals);
2702 return OK;
2705 static abort_t lookup_reloc(struct module_pack *pack, unsigned long addr,
2706 const struct ksplice_reloc **relocp)
2708 const struct ksplice_reloc *r;
2709 int canary_ret;
2710 for (r = pack->helper_relocs; r < pack->helper_relocs_end; r++) {
2711 if (addr >= r->blank_addr && addr < r->blank_addr + r->size) {
2712 canary_ret = contains_canary(pack, r->blank_addr,
2713 r->size, r->dst_mask);
2714 if (canary_ret < 0)
2715 return UNEXPECTED;
2716 if (canary_ret == 0) {
2717 ksdebug(pack, "reloc: skipped %s:%" ADDR
2718 " (altinstr)\n", r->symbol->label,
2719 r->blank_offset);
2720 return NO_MATCH;
2722 if (addr != r->blank_addr) {
2723 ksdebug(pack, "Invalid nonzero relocation "
2724 "offset\n");
2725 return UNEXPECTED;
2727 *relocp = r;
2728 return OK;
2731 return NO_MATCH;
2734 static void set_temp_myst_relocs(struct module_pack *pack, int status_val)
2736 struct reloc_nameval *nv, *n;
2737 list_for_each_entry_safe(nv, n, &pack->reloc_namevals, list) {
2738 if (nv->status == TEMP) {
2739 if (status_val == NOVAL) {
2740 list_del(&nv->list);
2741 kfree(nv);
2742 } else {
2743 nv->status = status_val;
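/* Editorial note (illustrative): namevals carry a small status lifecycle,
 * driven by handle_reloc() and the run-pre matching code above:
 *
 *     create_nameval(pack, label, val, TEMP);   tentative, during run-pre
 *     set_temp_myst_relocs(pack, NOVAL);        candidate rejected: drop them
 *     set_temp_myst_relocs(pack, VAL);          candidate accepted: keep them
 *
 * Once promoted to VAL, a nameval is binding for the rest of the update. */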
2749 static int contains_canary(struct module_pack *pack, unsigned long blank_addr,
2750 int size, long dst_mask)
2752 switch (size) {
2753 case 1:
2754 return (*(uint8_t *)blank_addr & dst_mask) ==
2755 (0x77 & dst_mask);
2756 case 2:
2757 return (*(uint16_t *)blank_addr & dst_mask) ==
2758 (0x7777 & dst_mask);
2759 case 4:
2760 return (*(uint32_t *)blank_addr & dst_mask) ==
2761 (0x77777777 & dst_mask);
2762 #if BITS_PER_LONG >= 64
2763 case 8:
2764 return (*(uint64_t *)blank_addr & dst_mask) ==
2765 (0x7777777777777777L & dst_mask);
2766 #endif /* BITS_PER_LONG */
2767 default:
2768 print_abort(pack, "Invalid relocation size");
2769 return -1;
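/* Editorial worked example (made-up values): the 0x77... canary is the
 * placeholder that Ksplice pre-processing leaves in unfilled relocation
 * fields.  A 4-byte field still holding 0x77777777 "contains the canary" and
 * must be filled in by apply_reloc(); if the kernel's alternative-instruction
 * machinery has already rewritten those bytes, the test fails and the
 * relocation is skipped as "(altinstr)".  With dst_mask = 0x00ffffff only the
 * low three bytes are compared, so any value of the top byte still counts as
 * blank. */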
2773 static int starts_with(const char *str, const char *prefix)
2775 return strncmp(str, prefix, strlen(prefix)) == 0;
2778 static int ends_with(const char *str, const char *suffix)
2780 return strlen(str) >= strlen(suffix) &&
2781 strcmp(&str[strlen(str) - strlen(suffix)], suffix) == 0;
2784 #ifdef CONFIG_DEBUG_FS
2785 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
2786 /* Old kernels don't have debugfs_create_blob */
2787 static ssize_t read_file_blob(struct file *file, char __user *user_buf,
2788 size_t count, loff_t *ppos)
2790 struct debugfs_blob_wrapper *blob = file->private_data;
2791 return simple_read_from_buffer(user_buf, count, ppos, blob->data,
2792 blob->size);
2795 static int blob_open(struct inode *inode, struct file *file)
2797 if (inode->i_private)
2798 file->private_data = inode->i_private;
2799 return 0;
2802 static struct file_operations fops_blob = {
2803 .read = read_file_blob,
2804 .open = blob_open,
2807 static struct dentry *debugfs_create_blob(const char *name, mode_t mode,
2808 struct dentry *parent,
2809 struct debugfs_blob_wrapper *blob)
2811 return debugfs_create_file(name, mode, parent, blob, &fops_blob);
2813 #endif /* LINUX_VERSION_CODE */
2815 static void clear_debug_buf(struct update_bundle *bundle)
2817 if (bundle->debugfs_dentry == NULL)
2818 return;
2819 debugfs_remove(bundle->debugfs_dentry);
2820 bundle->debugfs_dentry = NULL;
2821 bundle->debug_blob.size = 0;
2822 vfree(bundle->debug_blob.data);
2823 bundle->debug_blob.data = NULL;
2826 static abort_t init_debug_buf(struct update_bundle *bundle)
2828 bundle->debug_blob.size = 0;
2829 bundle->debug_blob.data = NULL;
2830 bundle->debugfs_dentry =
2831 debugfs_create_blob(bundle->name, S_IFREG | S_IRUSR, NULL,
2832 &bundle->debug_blob);
2833 if (bundle->debugfs_dentry == NULL)
2834 return OUT_OF_MEMORY;
2835 return OK;
2838 static int _ksdebug(struct update_bundle *bundle, const char *fmt, ...)
2840 va_list args;
2841 unsigned long size, old_size, new_size;
2843 if (bundle->debug == 0)
2844 return 0;
2846 /* size includes the trailing '\0' */
2847 va_start(args, fmt);
2848 size = 1 + vsnprintf(bundle->debug_blob.data, 0, fmt, args);
2849 va_end(args);
2850 old_size = bundle->debug_blob.size == 0 ? 0 :
2851 max(PAGE_SIZE, roundup_pow_of_two(bundle->debug_blob.size));
2852 new_size = bundle->debug_blob.size + size == 0 ? 0 :
2853 max(PAGE_SIZE, roundup_pow_of_two(bundle->debug_blob.size + size));
2854 if (new_size > old_size) {
2855 char *buf = vmalloc(new_size);
2856 if (buf == NULL)
2857 return -ENOMEM;
2858 memcpy(buf, bundle->debug_blob.data, bundle->debug_blob.size);
2859 vfree(bundle->debug_blob.data);
2860 bundle->debug_blob.data = buf;
2862 va_start(args, fmt);
2863 bundle->debug_blob.size += vsnprintf(bundle->debug_blob.data +
2864 bundle->debug_blob.size,
2865 size, fmt, args);
2866 va_end(args);
2867 return 0;
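/* Editorial note (illustrative arithmetic): the blob is grown only when its
 * rounded-up size changes.  With PAGE_SIZE = 4096, appending roughly 100
 * bytes to a 3900-byte blob leaves old_size == new_size == 4096, so the
 * existing buffer is reused; appending roughly 300 bytes pushes new_size to
 * 8192, so a fresh vmalloc'd buffer replaces the old one.  vsnprintf() is
 * called twice: once with a zero size just to measure, then again to append
 * for real. */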
2869 #else /* CONFIG_DEBUG_FS */
2870 static int _ksdebug(struct update_bundle *bundle, const char *fmt, ...)
2872 va_list args;
2874 if (bundle->debug == 0)
2875 return 0;
2877 if (!bundle->debug_continue_line)
2878 printk(KERN_DEBUG "ksplice: ");
2880 va_start(args, fmt);
2881 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
2882 vprintk(fmt, args);
2883 #else /* LINUX_VERSION_CODE < */
2884 /* 683b229286b429244f35726b3c18caec429233bd was after 2.6.8 */
2886 char *buf = kvasprintf(GFP_KERNEL, fmt, args);
2887 printk("%s", buf);
2888 kfree(buf);
2890 #endif /* LINUX_VERSION_CODE */
2891 va_end(args);
2893 bundle->debug_continue_line =
2894 fmt[0] == '\0' || fmt[strlen(fmt) - 1] != '\n';
2895 return 0;
2897 #endif /* CONFIG_DEBUG_FS */
2899 #ifdef KSPLICE_STANDALONE
2900 static int debug;
2901 module_param(debug, int, 0600);
2902 MODULE_PARM_DESC(debug, "Debug level");
2904 static struct module_pack ksplice_pack = {
2905 .name = "ksplice_" STR(KSPLICE_KID),
2906 .kid = "init_" STR(KSPLICE_KID),
2907 .target_name = NULL,
2908 .target = NULL,
2909 .map_printk = MAP_PRINTK,
2910 .primary = THIS_MODULE,
2911 .reloc_namevals = LIST_HEAD_INIT(ksplice_pack.reloc_namevals),
2913 #endif /* KSPLICE_STANDALONE */
2915 static int init_ksplice(void)
2917 #ifdef KSPLICE_STANDALONE
2918 struct module_pack *pack = &ksplice_pack;
2919 pack->bundle = init_ksplice_bundle(pack->kid);
2920 if (pack->bundle == NULL)
2921 return -ENOMEM;
2922 add_to_bundle(pack, pack->bundle);
2923 pack->bundle->debug = debug;
2924 pack->bundle->abort_cause =
2925 apply_relocs(pack, ksplice_init_relocs, ksplice_init_relocs_end);
2926 if (pack->bundle->abort_cause == OK)
2927 bootstrapped = 1;
2928 #else /* !KSPLICE_STANDALONE */
2929 ksplice_kobj = kobject_create_and_add("ksplice", kernel_kobj);
2930 if (ksplice_kobj == NULL)
2931 return -ENOMEM;
2932 #endif /* KSPLICE_STANDALONE */
2933 return 0;
2936 static void cleanup_ksplice(void)
2938 #ifdef KSPLICE_STANDALONE
2939 cleanup_ksplice_bundle(ksplice_pack.bundle);
2940 #else /* !KSPLICE_STANDALONE */
2941 kobject_put(ksplice_kobj);
2942 #endif /* KSPLICE_STANDALONE */
2945 module_init(init_ksplice);
2946 module_exit(cleanup_ksplice);
2948 MODULE_AUTHOR("Jeffrey Brian Arnold <jbarnold@mit.edu>");
2949 MODULE_DESCRIPTION("Ksplice rebootless update system");
2950 #ifdef KSPLICE_VERSION
2951 MODULE_VERSION(KSPLICE_VERSION);
2952 #endif
2953 MODULE_LICENSE("GPL v2");