Patch .rodata sections whose size hasn't changed in place.
kmodsrc/ksplice.c
1 /* Copyright (C) 2007-2008 Jeffrey Brian Arnold <jbarnold@mit.edu>
2 * Copyright (C) 2008 Anders Kaseorg <andersk@mit.edu>,
3 * Tim Abbott <tabbott@mit.edu>
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
 16  * 02110-1301, USA.
 17  */
19 #include <linux/module.h>
20 #include <linux/version.h>
21 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
22 #include <linux/bug.h>
23 #else /* LINUX_VERSION_CODE */
24 /* 7664c5a1da4711bb6383117f51b94c8dc8f3f1cd was after 2.6.19 */
25 #endif /* LINUX_VERSION_CODE */
26 #include <linux/ctype.h>
27 #if defined CONFIG_DEBUG_FS || LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
28 #include <linux/debugfs.h>
29 #else /* CONFIG_DEBUG_FS */
30 /* a7a76cefc4b12bb6508afa4c77f11c2752cc365d was after 2.6.11 */
31 #endif /* CONFIG_DEBUG_FS */
32 #include <linux/errno.h>
33 #include <linux/kallsyms.h>
34 #include <linux/kobject.h>
35 #include <linux/kthread.h>
36 #include <linux/pagemap.h>
37 #include <linux/sched.h>
38 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
39 /* 8c63b6d337534a6b5fb111dc27d0850f535118c0 was after 2.6.11 */
40 #include <linux/sort.h>
41 #endif /* LINUX_VERSION_CODE < */
42 #include <linux/stop_machine.h>
43 #include <linux/sysfs.h>
44 #include <linux/time.h>
45 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
46 #include <linux/uaccess.h>
47 #else /* LINUX_VERSION_CODE < */
48 /* linux/uaccess.h doesn't exist in kernels before 2.6.18 */
49 #include <asm/uaccess.h>
50 #endif /* LINUX_VERSION_CODE */
51 #include <linux/vmalloc.h>
52 #ifdef KSPLICE_STANDALONE
53 #include "ksplice.h"
54 #else /* !KSPLICE_STANDALONE */
55 #include <linux/ksplice.h>
56 #endif /* KSPLICE_STANDALONE */
57 #ifdef KSPLICE_NEED_PARAINSTRUCTIONS
58 #include <asm/alternative.h>
59 #endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
61 #if defined(KSPLICE_STANDALONE) && \
62 !defined(CONFIG_KSPLICE) && !defined(CONFIG_KSPLICE_MODULE)
63 #define KSPLICE_NO_KERNEL_SUPPORT 1
64 #endif /* KSPLICE_STANDALONE && !CONFIG_KSPLICE && !CONFIG_KSPLICE_MODULE */
66 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
67 /* 6e21828743247270d09a86756a0c11702500dbfb was after 2.6.18 */
68 #define bool _Bool
69 #define false 0
70 #define true 1
71 #endif /* LINUX_VERSION_CODE */
73 enum stage {
 74 	STAGE_PREPARING, STAGE_APPLIED, STAGE_REVERSED
 75 };
77 enum run_pre_mode {
 78 	RUN_PRE_INITIAL, RUN_PRE_DEBUG, RUN_PRE_FINAL, RUN_PRE_SILENT
 79 };
81 enum { NOVAL, TEMP, VAL };
83 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
84 /* 5d7b32de9935c65ca8285ac6ec2382afdbb5d479 was after 2.6.8 */
85 #define __bitwise__
86 #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
87 /* af4ca457eaf2d6682059c18463eb106e2ce58198 was after 2.6.14 */
88 #define __bitwise__ __bitwise
89 #endif
91 typedef int __bitwise__ abort_t;
93 #define OK ((__force abort_t) 0)
94 #define NO_MATCH ((__force abort_t) 1)
95 #define CODE_BUSY ((__force abort_t) 2)
96 #define MODULE_BUSY ((__force abort_t) 3)
97 #define OUT_OF_MEMORY ((__force abort_t) 4)
98 #define FAILED_TO_FIND ((__force abort_t) 5)
99 #define ALREADY_REVERSED ((__force abort_t) 6)
100 #define MISSING_EXPORT ((__force abort_t) 7)
101 #define UNEXPECTED_RUNNING_TASK ((__force abort_t) 8)
102 #define UNEXPECTED ((__force abort_t) 9)
103 #define TARGET_NOT_LOADED ((__force abort_t) 10)
104 #ifdef KSPLICE_STANDALONE
105 #define BAD_SYSTEM_MAP ((__force abort_t) 11)
106 #endif /* KSPLICE_STANDALONE */
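/*
 * abort_t values carry failure reasons up from the matching and patching
 * code; anything other than OK aborts the current operation, and the most
 * recent cause is kept in update->abort_cause.
 */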
108 struct update {
109 const char *kid;
110 const char *name;
111 struct kobject kobj;
112 enum stage stage;
113 abort_t abort_cause;
114 int debug;
115 #ifdef CONFIG_DEBUG_FS
116 struct debugfs_blob_wrapper debug_blob;
117 struct dentry *debugfs_dentry;
118 #else /* !CONFIG_DEBUG_FS */
119 bool debug_continue_line;
120 #endif /* CONFIG_DEBUG_FS */
121 bool partial;
122 struct list_head packs;
123 struct list_head unused_packs;
124 struct list_head conflicts;
 125 	struct list_head list;
 126 };
128 struct conflict {
129 const char *process_name;
130 pid_t pid;
131 struct list_head stack;
 132 	struct list_head list;
 133 };
135 struct conflict_addr {
136 unsigned long addr;
137 bool has_conflict;
138 const char *label;
 139 	struct list_head list;
 140 };
142 #if defined(CONFIG_DEBUG_FS) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
143 /* Old kernels don't have debugfs_create_blob */
144 struct debugfs_blob_wrapper {
145 void *data;
 146 	unsigned long size;
 147 };
148 #endif /* CONFIG_DEBUG_FS && LINUX_VERSION_CODE */
150 struct labelval {
151 struct list_head list;
152 struct ksplice_symbol *symbol;
 153 	struct list_head *saved_vals;
 154 };
156 struct safety_record {
157 struct list_head list;
158 const char *label;
159 unsigned long addr;
160 unsigned long size;
 161 	bool first_byte_safe;
 162 };
164 struct candidate_val {
165 struct list_head list;
 166 	unsigned long val;
 167 };
169 struct accumulate_struct {
170 struct ksplice_pack *pack;
171 const char *desired_name;
 172 	struct list_head *vals;
 173 };
175 struct ksplice_lookup {
176 /* input */
177 struct ksplice_pack *pack;
178 struct ksplice_symbol **arr;
179 size_t size;
180 /* output */
 181 	abort_t ret;
 182 };
184 #ifdef KSPLICE_NO_KERNEL_SUPPORT
185 struct symsearch {
186 const struct kernel_symbol *start, *stop;
187 const unsigned long *crcs;
188 enum {
189 NOT_GPL_ONLY,
190 GPL_ONLY,
191 WILL_BE_GPL_ONLY,
192 } licence;
 193 	bool unused;
 194 };
195 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
197 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
198 /* c33fa9f5609e918824446ef9a75319d4a802f1f4 was after 2.6.25 */
200 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
201 /* 2fff0a48416af891dce38fd425246e337831e0bb was after 2.6.19 */
202 static bool virtual_address_mapped(unsigned long addr)
204 char retval;
205 return probe_kernel_address(addr, retval) != -EFAULT;
207 #else /* LINUX_VERSION_CODE < */
208 static bool virtual_address_mapped(unsigned long addr);
209 #endif /* LINUX_VERSION_CODE */
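/*
 * Fallback for kernels older than 2.6.26, which lack probe_kernel_read():
 * check that the first and last byte of the source range are mapped and
 * then copy with a plain memcpy().
 */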
211 static long probe_kernel_read(void *dst, void *src, size_t size)
213 if (size == 0)
214 return 0;
215 if (!virtual_address_mapped((unsigned long)src) ||
216 !virtual_address_mapped((unsigned long)src + size - 1))
217 return -EFAULT;
219 memcpy(dst, src, size);
220 return 0;
222 #endif /* LINUX_VERSION_CODE */
224 static LIST_HEAD(updates);
225 #ifdef KSPLICE_STANDALONE
226 #if defined(CONFIG_KSPLICE) || defined(CONFIG_KSPLICE_MODULE)
227 extern struct list_head ksplice_module_list;
228 #else /* !CONFIG_KSPLICE */
229 LIST_HEAD(ksplice_module_list);
230 #endif /* CONFIG_KSPLICE */
231 #else /* !KSPLICE_STANDALONE */
232 LIST_HEAD(ksplice_module_list);
233 EXPORT_SYMBOL_GPL(ksplice_module_list);
234 static struct kobject *ksplice_kobj;
235 #endif /* KSPLICE_STANDALONE */
237 static struct kobj_type ksplice_ktype;
239 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
240 /* Old kernels do not have kcalloc
 241  * e629946abd0bb8266e9c3d0fd1bff2ef8dec5443 was after 2.6.8
 242  */
243 static void *kcalloc(size_t n, size_t size, typeof(GFP_KERNEL) flags)
245 char *mem;
246 if (n != 0 && size > ULONG_MAX / n)
247 return NULL;
248 mem = kmalloc(n * size, flags);
249 if (mem)
250 memset(mem, 0, n * size);
251 return mem;
253 #endif /* LINUX_VERSION_CODE */
255 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
256 /* 8c63b6d337534a6b5fb111dc27d0850f535118c0 was after 2.6.11 */
257 static void u32_swap(void *a, void *b, int size)
259 u32 t = *(u32 *)a;
260 *(u32 *)a = *(u32 *)b;
261 *(u32 *)b = t;
264 static void generic_swap(void *a, void *b, int size)
266 char t;
268 do {
269 t = *(char *)a;
270 *(char *)a++ = *(char *)b;
271 *(char *)b++ = t;
272 } while (--size > 0);
 275 /**
 276  * sort - sort an array of elements
277 * @base: pointer to data to sort
278 * @num: number of elements
279 * @size: size of each element
280 * @cmp: pointer to comparison function
281 * @swap: pointer to swap function or NULL
283 * This function does a heapsort on the given array. You may provide a
284 * swap function optimized to your element type.
286 * Sorting time is O(n log n) both on average and worst-case. While
287 * qsort is about 20% faster on average, it suffers from exploitable
288 * O(n*n) worst-case behavior and extra memory requirements that make
 289  * it less suitable for kernel use.
 290  */
292 void sort(void *base, size_t num, size_t size,
293 int (*cmp)(const void *, const void *),
294 void (*swap)(void *, void *, int size))
296 /* pre-scale counters for performance */
297 int i = (num / 2 - 1) * size, n = num * size, c, r;
299 if (!swap)
300 swap = (size == 4 ? u32_swap : generic_swap);
302 /* heapify */
303 for (; i >= 0; i -= size) {
304 for (r = i; r * 2 + size < n; r = c) {
305 c = r * 2 + size;
306 if (c < n - size && cmp(base + c, base + c + size) < 0)
307 c += size;
308 if (cmp(base + r, base + c) >= 0)
309 break;
310 swap(base + r, base + c, size);
314 /* sort */
315 for (i = n - size; i > 0; i -= size) {
316 swap(base, base + i, size);
317 for (r = 0; r * 2 + size < i; r = c) {
318 c = r * 2 + size;
319 if (c < i - size && cmp(base + c, base + c + size) < 0)
320 c += size;
321 if (cmp(base + r, base + c) >= 0)
322 break;
323 swap(base + r, base + c, size);
327 #endif /* LINUX_VERSION_CODE < */
329 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13)
330 /* Old kernels do not have kstrdup
 331  * 543537bd922692bc978e2e356fcd8bfc9c2ee7d5 was 2.6.13-rc4
 332  */
333 static char *kstrdup(const char *s, typeof(GFP_KERNEL) gfp)
335 size_t len;
336 char *buf;
338 if (!s)
339 return NULL;
341 len = strlen(s) + 1;
342 buf = kmalloc(len, gfp);
343 if (buf)
344 memcpy(buf, s, len);
345 return buf;
347 #endif /* LINUX_VERSION_CODE */
349 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
350 /* Old kernels use semaphore instead of mutex
 351  * 97d1f15b7ef52c1e9c28dc48b454024bb53a5fd2 was after 2.6.16
 352  */
353 #define mutex semaphore
354 #define mutex_lock down
355 #define mutex_unlock up
356 #endif /* LINUX_VERSION_CODE */
358 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
359 /* 11443ec7d9286dd25663516436a14edfb5f43857 was after 2.6.21 */
360 static char * __attribute_used__
361 kvasprintf(typeof(GFP_KERNEL) gfp, const char *fmt, va_list ap)
363 unsigned int len;
364 char *p, dummy[1];
365 va_list aq;
367 va_copy(aq, ap);
368 len = vsnprintf(dummy, 0, fmt, aq);
369 va_end(aq);
371 p = kmalloc(len + 1, gfp);
372 if (!p)
373 return NULL;
375 vsnprintf(p, len + 1, fmt, ap);
377 return p;
379 #endif
381 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
382 /* e905914f96e11862b130dd229f73045dad9a34e8 was after 2.6.17 */
383 static char * __attribute__((format (printf, 2, 3)))
384 kasprintf(typeof(GFP_KERNEL) gfp, const char *fmt, ...)
386 va_list ap;
387 char *p;
389 va_start(ap, fmt);
390 p = kvasprintf(gfp, fmt, ap);
391 va_end(ap);
393 return p;
395 #endif
397 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
398 /* 06b2a76d25d3cfbd14680021c1d356c91be6904e was after 2.6.24 */
399 static int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
401 char *tail;
402 unsigned long val;
403 size_t len;
405 *res = 0;
406 len = strlen(cp);
407 if (len == 0)
408 return -EINVAL;
410 val = simple_strtoul(cp, &tail, base);
411 if ((*tail == '\0') ||
412 ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
413 *res = val;
414 return 0;
417 return -EINVAL;
419 #endif
421 #ifndef task_thread_info
422 #define task_thread_info(task) (task)->thread_info
423 #endif /* !task_thread_info */
425 #ifdef KSPLICE_STANDALONE
427 static bool bootstrapped = false;
429 #ifdef CONFIG_KALLSYMS
430 extern unsigned long kallsyms_addresses[], kallsyms_num_syms;
431 extern u8 kallsyms_names[];
432 #endif /* CONFIG_KALLSYMS */
434 /* defined by ksplice-create */
435 extern const struct ksplice_reloc ksplice_init_relocs[],
436 ksplice_init_relocs_end[];
438 /* Obtained via System.map */
439 extern struct list_head modules;
440 extern struct mutex module_mutex;
441 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) && defined(CONFIG_UNUSED_SYMBOLS)
442 /* f71d20e961474dde77e6558396efb93d6ac80a4b was after 2.6.17 */
443 #define KSPLICE_KSYMTAB_UNUSED_SUPPORT 1
444 #endif /* LINUX_VERSION_CODE */
445 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
446 /* 9f28bb7e1d0188a993403ab39b774785892805e1 was after 2.6.16 */
447 #define KSPLICE_KSYMTAB_FUTURE_SUPPORT 1
448 #endif /* LINUX_VERSION_CODE */
449 extern const struct kernel_symbol __start___ksymtab[];
450 extern const struct kernel_symbol __stop___ksymtab[];
451 extern const unsigned long __start___kcrctab[];
452 extern const struct kernel_symbol __start___ksymtab_gpl[];
453 extern const struct kernel_symbol __stop___ksymtab_gpl[];
454 extern const unsigned long __start___kcrctab_gpl[];
455 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
456 extern const struct kernel_symbol __start___ksymtab_unused[];
457 extern const struct kernel_symbol __stop___ksymtab_unused[];
458 extern const unsigned long __start___kcrctab_unused[];
459 extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
460 extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
461 extern const unsigned long __start___kcrctab_unused_gpl[];
462 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
463 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
464 extern const struct kernel_symbol __start___ksymtab_gpl_future[];
465 extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
466 extern const unsigned long __start___kcrctab_gpl_future[];
467 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
469 #endif /* KSPLICE_STANDALONE */
471 static struct update *init_ksplice_update(const char *kid);
472 static void cleanup_ksplice_update(struct update *update);
473 static void maybe_cleanup_ksplice_update(struct update *update);
474 static void add_to_update(struct ksplice_pack *pack, struct update *update);
475 static int ksplice_sysfs_init(struct update *update);
477 /* Preparing the relocations and patches for application */
478 static abort_t apply_update(struct update *update);
479 static abort_t prepare_pack(struct ksplice_pack *pack);
480 static abort_t finalize_pack(struct ksplice_pack *pack);
481 static abort_t finalize_exports(struct ksplice_pack *pack);
482 static abort_t finalize_patches(struct ksplice_pack *pack);
483 static abort_t add_dependency_on_address(struct ksplice_pack *pack,
484 unsigned long addr);
485 static abort_t map_trampoline_pages(struct update *update);
486 static void unmap_trampoline_pages(struct update *update);
487 static void *map_writable(void *addr, size_t len);
488 static abort_t apply_relocs(struct ksplice_pack *pack,
489 const struct ksplice_reloc *relocs,
490 const struct ksplice_reloc *relocs_end);
491 static abort_t apply_reloc(struct ksplice_pack *pack,
492 const struct ksplice_reloc *r);
493 static abort_t apply_howto_reloc(struct ksplice_pack *pack,
494 const struct ksplice_reloc *r);
495 static abort_t apply_howto_date(struct ksplice_pack *pack,
496 const struct ksplice_reloc *r);
497 static abort_t read_reloc_value(struct ksplice_pack *pack,
498 const struct ksplice_reloc *r,
499 unsigned long addr, unsigned long *valp);
500 static abort_t write_reloc_value(struct ksplice_pack *pack,
501 const struct ksplice_reloc *r,
502 unsigned long addr, unsigned long sym_addr);
503 static void __attribute__((noreturn)) ksplice_deleted(void);
505 /* run-pre matching */
506 static abort_t match_pack_sections(struct ksplice_pack *pack,
507 bool consider_data_sections);
508 static abort_t find_section(struct ksplice_pack *pack,
509 struct ksplice_section *sect);
510 static abort_t try_addr(struct ksplice_pack *pack,
511 struct ksplice_section *sect,
512 unsigned long run_addr,
513 struct list_head *safety_records,
514 enum run_pre_mode mode);
515 static abort_t run_pre_cmp(struct ksplice_pack *pack,
516 const struct ksplice_section *sect,
517 unsigned long run_addr,
518 struct list_head *safety_records,
519 enum run_pre_mode mode);
520 #ifndef CONFIG_FUNCTION_DATA_SECTIONS
521 /* defined in arch/ARCH/kernel/ksplice-arch.c */
522 static abort_t arch_run_pre_cmp(struct ksplice_pack *pack,
523 struct ksplice_section *sect,
524 unsigned long run_addr,
525 struct list_head *safety_records,
526 enum run_pre_mode mode);
527 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
528 static void print_bytes(struct ksplice_pack *pack,
529 const unsigned char *run, int runc,
530 const unsigned char *pre, int prec);
531 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
532 static abort_t brute_search(struct ksplice_pack *pack,
533 struct ksplice_section *sect,
534 const void *start, unsigned long len,
535 struct list_head *vals);
536 static abort_t brute_search_all(struct ksplice_pack *pack,
537 struct ksplice_section *sect,
538 struct list_head *vals);
539 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
540 static const struct ksplice_reloc *
541 init_reloc_search(struct ksplice_pack *pack,
542 const struct ksplice_section *sect);
543 static const struct ksplice_reloc *find_reloc(const struct ksplice_reloc *start,
544 const struct ksplice_reloc *end,
545 unsigned long address,
546 unsigned long size);
547 static abort_t lookup_reloc(struct ksplice_pack *pack,
548 const struct ksplice_reloc **fingerp,
549 unsigned long addr,
550 const struct ksplice_reloc **relocp);
551 static abort_t handle_reloc(struct ksplice_pack *pack,
552 const struct ksplice_section *sect,
553 const struct ksplice_reloc *r,
554 unsigned long run_addr, enum run_pre_mode mode);
555 static abort_t handle_howto_date(struct ksplice_pack *pack,
556 const struct ksplice_section *sect,
557 const struct ksplice_reloc *r,
558 unsigned long run_addr,
559 enum run_pre_mode mode);
560 static abort_t handle_howto_reloc(struct ksplice_pack *pack,
561 const struct ksplice_section *sect,
562 const struct ksplice_reloc *r,
563 unsigned long run_addr,
564 enum run_pre_mode mode);
565 static struct ksplice_section *symbol_section(struct ksplice_pack *pack,
566 const struct ksplice_symbol *sym);
567 static int compare_section_labels(const void *va, const void *vb);
568 static int symbol_section_bsearch_compare(const void *a, const void *b);
569 static const struct ksplice_reloc *patch_reloc(struct ksplice_pack *pack,
570 const struct ksplice_patch *p);
572 /* Computing possible addresses for symbols */
573 static abort_t lookup_symbol(struct ksplice_pack *pack,
574 const struct ksplice_symbol *ksym,
575 struct list_head *vals);
576 static void cleanup_symbol_arrays(struct ksplice_pack *pack);
577 static abort_t init_symbol_arrays(struct ksplice_pack *pack);
578 static abort_t init_symbol_array(struct ksplice_pack *pack,
579 struct ksplice_symbol *start,
580 struct ksplice_symbol *end);
581 static abort_t uniquify_symbols(struct ksplice_pack *pack);
582 static abort_t add_matching_values(struct ksplice_lookup *lookup,
583 const char *sym_name, unsigned long sym_val);
584 static bool add_export_values(const struct symsearch *syms,
585 struct module *owner,
586 unsigned int symnum, void *data);
587 static int symbolp_bsearch_compare(const void *key, const void *elt);
588 static int compare_symbolp_names(const void *a, const void *b);
589 static int compare_symbolp_labels(const void *a, const void *b);
590 #ifdef CONFIG_KALLSYMS
591 static int add_kallsyms_values(void *data, const char *name,
592 struct module *owner, unsigned long val);
593 #endif /* CONFIG_KALLSYMS */
594 #ifdef KSPLICE_STANDALONE
595 static abort_t
596 add_system_map_candidates(struct ksplice_pack *pack,
597 const struct ksplice_system_map *start,
598 const struct ksplice_system_map *end,
599 const char *label, struct list_head *vals);
600 static int compare_system_map(const void *a, const void *b);
601 static int system_map_bsearch_compare(const void *key, const void *elt);
602 #endif /* KSPLICE_STANDALONE */
603 static abort_t new_export_lookup(struct ksplice_pack *p, struct update *update,
604 const char *name, struct list_head *vals);
606 /* Atomic update insertion and removal */
607 static abort_t apply_patches(struct update *update);
608 static abort_t reverse_patches(struct update *update);
609 static int __apply_patches(void *update);
610 static int __reverse_patches(void *update);
611 static abort_t check_each_task(struct update *update);
612 static abort_t check_task(struct update *update,
613 const struct task_struct *t, bool rerun);
614 static abort_t check_stack(struct update *update, struct conflict *conf,
615 const struct thread_info *tinfo,
616 const unsigned long *stack);
617 static abort_t check_address(struct update *update,
618 struct conflict *conf, unsigned long addr);
619 static abort_t check_record(struct conflict_addr *ca,
620 const struct safety_record *rec,
621 unsigned long addr);
622 static bool is_stop_machine(const struct task_struct *t);
623 static void cleanup_conflicts(struct update *update);
624 static void print_conflicts(struct update *update);
625 static void insert_trampoline(struct ksplice_patch *p);
626 static abort_t verify_trampoline(struct ksplice_pack *pack,
627 const struct ksplice_patch *p);
628 static void remove_trampoline(const struct ksplice_patch *p);
630 static abort_t create_labelval(struct ksplice_pack *pack,
631 struct ksplice_symbol *ksym,
632 unsigned long val, int status);
633 static abort_t create_safety_record(struct ksplice_pack *pack,
634 const struct ksplice_section *sect,
635 struct list_head *record_list,
636 unsigned long run_addr,
637 unsigned long run_size);
638 static abort_t add_candidate_val(struct ksplice_pack *pack,
639 struct list_head *vals, unsigned long val);
640 static void release_vals(struct list_head *vals);
641 static void set_temp_labelvals(struct ksplice_pack *pack, int status_val);
643 static int contains_canary(struct ksplice_pack *pack, unsigned long blank_addr,
644 const struct ksplice_reloc_howto *howto);
645 static unsigned long follow_trampolines(struct ksplice_pack *pack,
646 unsigned long addr);
647 static bool patches_module(const struct module *a, const struct module *b);
648 static bool starts_with(const char *str, const char *prefix);
649 static bool singular(struct list_head *list);
650 static void *bsearch(const void *key, const void *base, size_t n,
651 size_t size, int (*cmp)(const void *key, const void *elt));
652 static int compare_relocs(const void *a, const void *b);
653 static int reloc_bsearch_compare(const void *key, const void *elt);
655 /* Debugging */
656 static abort_t init_debug_buf(struct update *update);
657 static void clear_debug_buf(struct update *update);
658 static int __attribute__((format(printf, 2, 3)))
659 _ksdebug(struct update *update, const char *fmt, ...);
660 #define ksdebug(pack, fmt, ...) \
661 _ksdebug(pack->update, fmt, ## __VA_ARGS__)
663 #ifdef KSPLICE_NO_KERNEL_SUPPORT
664 /* Functions defined here that will be exported in later kernels */
665 #ifdef CONFIG_KALLSYMS
666 static int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
667 struct module *, unsigned long),
668 void *data);
669 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
670 static unsigned int kallsyms_expand_symbol(unsigned int off, char *result);
671 #endif /* LINUX_VERSION_CODE */
672 static int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
673 struct module *,
674 unsigned long),
675 void *data);
676 #endif /* CONFIG_KALLSYMS */
677 static struct module *find_module(const char *name);
678 static int use_module(struct module *a, struct module *b);
679 static const struct kernel_symbol *find_symbol(const char *name,
680 struct module **owner,
681 const unsigned long **crc,
682 bool gplok, bool warn);
683 static bool each_symbol(bool (*fn)(const struct symsearch *arr,
684 struct module *owner,
685 unsigned int symnum, void *data),
686 void *data);
687 static struct module *__module_data_address(unsigned long addr);
688 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
690 /* Architecture-specific functions defined in arch/ARCH/kernel/ksplice-arch.c */
691 static abort_t prepare_trampoline(struct ksplice_pack *pack,
692 struct ksplice_patch *p);
693 static abort_t trampoline_target(struct ksplice_pack *pack, unsigned long addr,
694 unsigned long *new_addr);
695 static abort_t handle_paravirt(struct ksplice_pack *pack, unsigned long pre,
696 unsigned long run, int *matched);
697 static abort_t handle_bug(struct ksplice_pack *pack,
698 const struct ksplice_reloc *r,
699 unsigned long run_addr);
700 static abort_t handle_extable(struct ksplice_pack *pack,
701 const struct ksplice_reloc *r,
702 unsigned long run_addr);
703 static bool valid_stack_ptr(const struct thread_info *tinfo, const void *p);
705 #ifndef KSPLICE_STANDALONE
706 #include "ksplice-arch.c"
707 #elif defined CONFIG_X86
708 #include "x86/ksplice-arch.c"
709 #elif defined CONFIG_ARM
710 #include "arm/ksplice-arch.c"
711 #endif /* KSPLICE_STANDALONE */
713 #define clear_list(head, type, member) \
714 do { \
715 struct list_head *_pos, *_n; \
716 list_for_each_safe(_pos, _n, head) { \
717 list_del(_pos); \
 718 			kfree(list_entry(_pos, type, member));	\
 719 		}						\
720 } while (0)
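/*
 * Entry point called from each Ksplice update module's init routine: sort
 * the relocation, section and System.map tables so they can be binary
 * searched later, then attach this pack to the update with a matching kid,
 * creating the update (and its sysfs directory) if this is its first pack.
 */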
722 int init_ksplice_pack(struct ksplice_pack *pack)
724 struct update *update;
725 struct ksplice_patch *p;
726 struct ksplice_section *s;
727 int ret = 0;
729 #ifdef KSPLICE_STANDALONE
730 if (!bootstrapped)
731 return -1;
732 #endif /* KSPLICE_STANDALONE */
734 INIT_LIST_HEAD(&pack->temp_labelvals);
735 INIT_LIST_HEAD(&pack->safety_records);
737 sort(pack->helper_relocs,
738 (pack->helper_relocs_end - pack->helper_relocs),
739 sizeof(*pack->helper_relocs), compare_relocs, NULL);
740 sort(pack->primary_relocs,
741 (pack->primary_relocs_end - pack->primary_relocs),
742 sizeof(*pack->primary_relocs), compare_relocs, NULL);
743 sort(pack->helper_sections,
744 (pack->helper_sections_end - pack->helper_sections),
745 sizeof(*pack->helper_sections), compare_section_labels, NULL);
746 #ifdef KSPLICE_STANDALONE
747 sort(pack->primary_system_map,
748 (pack->primary_system_map_end - pack->primary_system_map),
749 sizeof(*pack->primary_system_map), compare_system_map, NULL);
750 sort(pack->helper_system_map,
751 (pack->helper_system_map_end - pack->helper_system_map),
752 sizeof(*pack->helper_system_map), compare_system_map, NULL);
753 #endif /* KSPLICE_STANDALONE */
755 mutex_lock(&module_mutex);
756 for (p = pack->patches; p < pack->patches_end; p++)
757 p->vaddr = NULL;
758 for (s = pack->helper_sections; s < pack->helper_sections_end; s++)
759 s->match_map = NULL;
761 list_for_each_entry(update, &updates, list) {
762 if (strcmp(pack->kid, update->kid) == 0) {
763 if (update->stage != STAGE_PREPARING) {
764 ret = -EPERM;
765 goto out;
767 add_to_update(pack, update);
768 ret = 0;
769 goto out;
772 update = init_ksplice_update(pack->kid);
773 if (update == NULL) {
774 ret = -ENOMEM;
775 goto out;
777 ret = ksplice_sysfs_init(update);
778 if (ret != 0) {
779 cleanup_ksplice_update(update);
780 goto out;
782 add_to_update(pack, update);
783 out:
784 mutex_unlock(&module_mutex);
785 return ret;
787 EXPORT_SYMBOL_GPL(init_ksplice_pack);
789 void cleanup_ksplice_pack(struct ksplice_pack *pack)
791 if (pack->update == NULL)
792 return;
793 if (pack->update->stage == STAGE_APPLIED) {
794 /* If the pack wasn't actually applied (because we
795 only applied this update to loaded modules and this
796 target wasn't loaded), then unregister the pack
797 from the list of unused packs */
798 struct ksplice_pack *p;
799 bool found = false;
801 mutex_lock(&module_mutex);
802 list_for_each_entry(p, &pack->update->unused_packs, list) {
803 if (p == pack)
804 found = true;
806 if (found)
807 list_del(&pack->list);
808 mutex_unlock(&module_mutex);
809 return;
811 mutex_lock(&module_mutex);
812 list_del(&pack->list);
813 mutex_unlock(&module_mutex);
814 if (pack->update->stage == STAGE_PREPARING)
815 maybe_cleanup_ksplice_update(pack->update);
816 pack->update = NULL;
818 EXPORT_SYMBOL_GPL(cleanup_ksplice_pack);
820 static struct update *init_ksplice_update(const char *kid)
822 struct update *update;
823 update = kcalloc(1, sizeof(struct update), GFP_KERNEL);
824 if (update == NULL)
825 return NULL;
826 update->name = kasprintf(GFP_KERNEL, "ksplice_%s", kid);
827 if (update->name == NULL) {
828 kfree(update);
829 return NULL;
831 update->kid = kstrdup(kid, GFP_KERNEL);
832 if (update->kid == NULL) {
833 kfree(update->name);
834 kfree(update);
835 return NULL;
837 if (try_module_get(THIS_MODULE) != 1) {
838 kfree(update->kid);
839 kfree(update->name);
840 kfree(update);
841 return NULL;
843 INIT_LIST_HEAD(&update->packs);
844 INIT_LIST_HEAD(&update->unused_packs);
845 if (init_debug_buf(update) != OK) {
846 module_put(THIS_MODULE);
847 kfree(update->kid);
848 kfree(update->name);
849 kfree(update);
850 return NULL;
852 list_add(&update->list, &updates);
853 update->stage = STAGE_PREPARING;
854 update->abort_cause = OK;
855 update->partial = 0;
856 INIT_LIST_HEAD(&update->conflicts);
857 return update;
860 static void cleanup_ksplice_update(struct update *update)
862 #ifdef KSPLICE_STANDALONE
863 if (bootstrapped)
864 mutex_lock(&module_mutex);
865 list_del(&update->list);
866 if (bootstrapped)
867 mutex_unlock(&module_mutex);
868 #else /* !KSPLICE_STANDALONE */
869 mutex_lock(&module_mutex);
870 list_del(&update->list);
871 mutex_unlock(&module_mutex);
872 #endif /* KSPLICE_STANDALONE */
873 cleanup_conflicts(update);
874 clear_debug_buf(update);
875 kfree(update->kid);
876 kfree(update->name);
877 kfree(update);
878 module_put(THIS_MODULE);
881 static void maybe_cleanup_ksplice_update(struct update *update)
883 if (list_empty(&update->packs) && list_empty(&update->unused_packs))
884 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
885 kobject_put(&update->kobj);
886 #else /* LINUX_VERSION_CODE < */
887 /* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
888 kobject_unregister(&update->kobj);
889 #endif /* LINUX_VERSION_CODE */
892 static void add_to_update(struct ksplice_pack *pack, struct update *update)
894 pack->update = update;
895 list_add(&pack->list, &update->unused_packs);
896 pack->module_list_entry.primary = pack->primary;
899 static int ksplice_sysfs_init(struct update *update)
901 int ret = 0;
902 memset(&update->kobj, 0, sizeof(update->kobj));
903 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
904 #ifndef KSPLICE_STANDALONE
905 ret = kobject_init_and_add(&update->kobj, &ksplice_ktype,
906 ksplice_kobj, "%s", update->kid);
907 #else /* KSPLICE_STANDALONE */
908 /* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
909 ret = kobject_init_and_add(&update->kobj, &ksplice_ktype,
910 &THIS_MODULE->mkobj.kobj, "ksplice");
911 #endif /* KSPLICE_STANDALONE */
912 #else /* LINUX_VERSION_CODE < */
913 ret = kobject_set_name(&update->kobj, "%s", "ksplice");
914 if (ret != 0)
915 return ret;
916 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
917 update->kobj.parent = &THIS_MODULE->mkobj.kobj;
918 #else /* LINUX_VERSION_CODE < */
919 /* b86ab02803095190d6b72bcc18dcf620bf378df9 was after 2.6.10 */
920 update->kobj.parent = &THIS_MODULE->mkobj->kobj;
921 #endif /* LINUX_VERSION_CODE */
922 update->kobj.ktype = &ksplice_ktype;
923 ret = kobject_register(&update->kobj);
924 #endif /* LINUX_VERSION_CODE */
925 if (ret != 0)
926 return ret;
927 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
928 kobject_uevent(&update->kobj, KOBJ_ADD);
929 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
930 /* 312c004d36ce6c739512bac83b452f4c20ab1f62 was after 2.6.14 */
931 /* 12025235884570ba7f02a6f427f973ac6be7ec54 was after 2.6.9 */
932 kobject_uevent(&update->kobj, KOBJ_ADD, NULL);
933 #endif /* LINUX_VERSION_CODE */
934 return 0;
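/*
 * Resolve each pack's target module and move packs whose target is loaded
 * from update->unused_packs onto update->packs; then run symbol lookup,
 * run-pre matching and relocation application for every pack before
 * inserting the trampolines via apply_patches().  All of this happens
 * under module_mutex.
 */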
937 static abort_t apply_update(struct update *update)
939 struct ksplice_pack *pack, *n;
940 abort_t ret;
941 int retval;
943 mutex_lock(&module_mutex);
944 list_for_each_entry_safe(pack, n, &update->unused_packs, list) {
945 if (strcmp(pack->target_name, "vmlinux") == 0) {
946 pack->target = NULL;
947 } else if (pack->target == NULL) {
948 pack->target = find_module(pack->target_name);
949 if (pack->target == NULL ||
950 !module_is_live(pack->target)) {
951 if (update->partial) {
952 continue;
953 } else {
954 ret = TARGET_NOT_LOADED;
955 goto out;
958 retval = use_module(pack->primary, pack->target);
959 if (retval != 1) {
960 ret = UNEXPECTED;
961 goto out;
964 list_del(&pack->list);
965 list_add_tail(&pack->list, &update->packs);
966 pack->module_list_entry.target = pack->target;
968 #ifdef KSPLICE_NEED_PARAINSTRUCTIONS
969 if (pack->target == NULL) {
970 apply_paravirt(pack->primary_parainstructions,
971 pack->primary_parainstructions_end);
972 apply_paravirt(pack->helper_parainstructions,
973 pack->helper_parainstructions_end);
975 #endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
978 list_for_each_entry(pack, &update->packs, list) {
979 ret = init_symbol_arrays(pack);
980 if (ret != OK) {
981 cleanup_symbol_arrays(pack);
982 goto out;
984 ret = prepare_pack(pack);
985 cleanup_symbol_arrays(pack);
986 if (ret != OK)
987 goto out;
989 ret = apply_patches(update);
990 out:
991 list_for_each_entry(pack, &update->packs, list) {
992 struct ksplice_section *s;
993 if (update->stage == STAGE_PREPARING)
994 clear_list(&pack->safety_records, struct safety_record,
995 list);
996 for (s = pack->helper_sections; s < pack->helper_sections_end;
997 s++) {
998 if (s->match_map != NULL) {
999 vfree(s->match_map);
1000 s->match_map = NULL;
1004 mutex_unlock(&module_mutex);
1005 return ret;
1008 static int compare_symbolp_names(const void *a, const void *b)
1010 const struct ksplice_symbol *const *sympa = a, *const *sympb = b;
1011 if ((*sympa)->name == NULL && (*sympb)->name == NULL)
1012 return 0;
1013 if ((*sympa)->name == NULL)
1014 return -1;
1015 if ((*sympb)->name == NULL)
1016 return 1;
1017 return strcmp((*sympa)->name, (*sympb)->name);
1020 static int compare_symbolp_labels(const void *a, const void *b)
1022 const struct ksplice_symbol *const *sympa = a, *const *sympb = b;
1023 return strcmp((*sympa)->label, (*sympb)->label);
1026 static int symbolp_bsearch_compare(const void *key, const void *elt)
1028 const char *name = key;
1029 const struct ksplice_symbol *const *symp = elt;
1030 const struct ksplice_symbol *sym = *symp;
1031 if (sym->name == NULL)
1032 return 1;
1033 return strcmp(name, sym->name);
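/*
 * lookup->arr is sorted by symbol name; bsearch() lands on some entry with
 * the desired name, so rewind to the first duplicate and record sym_val as
 * a candidate value for every symbol sharing that name.
 */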
1036 static abort_t add_matching_values(struct ksplice_lookup *lookup,
1037 const char *sym_name, unsigned long sym_val)
1039 struct ksplice_symbol **symp;
1040 abort_t ret;
1042 symp = bsearch(sym_name, lookup->arr, lookup->size,
1043 sizeof(*lookup->arr), symbolp_bsearch_compare);
1044 if (symp == NULL)
1045 return OK;
1047 while (symp > lookup->arr &&
1048 symbolp_bsearch_compare(sym_name, symp - 1) == 0)
1049 symp--;
1051 for (; symp < lookup->arr + lookup->size; symp++) {
1052 struct ksplice_symbol *sym = *symp;
1053 if (sym->name == NULL || strcmp(sym_name, sym->name) != 0)
1054 break;
1055 ret = add_candidate_val(lookup->pack, sym->vals, sym_val);
1056 if (ret != OK)
1057 return ret;
1059 return OK;
1062 #ifdef CONFIG_KALLSYMS
1063 static int add_kallsyms_values(void *data, const char *name,
1064 struct module *owner, unsigned long val)
1066 struct ksplice_lookup *lookup = data;
1067 if (owner == lookup->pack->primary ||
1068 !patches_module(owner, lookup->pack->target))
1069 return (__force int)OK;
1070 return (__force int)add_matching_values(lookup, name, val);
1072 #endif /* CONFIG_KALLSYMS */
1074 static bool add_export_values(const struct symsearch *syms,
1075 struct module *owner,
1076 unsigned int symnum, void *data)
1078 struct ksplice_lookup *lookup = data;
1079 abort_t ret;
1081 ret = add_matching_values(lookup, syms->start[symnum].name,
1082 syms->start[symnum].value);
1083 if (ret != OK) {
1084 lookup->ret = ret;
1085 return true;
1087 return false;
1090 static void cleanup_symbol_arrays(struct ksplice_pack *pack)
1092 struct ksplice_symbol *sym;
1093 for (sym = pack->primary_symbols; sym < pack->primary_symbols_end;
1094 sym++) {
1095 if (sym->vals != NULL) {
1096 clear_list(sym->vals, struct candidate_val, list);
1097 kfree(sym->vals);
1098 sym->vals = NULL;
1101 for (sym = pack->helper_symbols; sym < pack->helper_symbols_end; sym++) {
1102 if (sym->vals != NULL) {
1103 clear_list(sym->vals, struct candidate_val, list);
1104 kfree(sym->vals);
1105 sym->vals = NULL;
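/*
 * Make the primary module's ksplice_symbols canonical: sort pointers to
 * them by label and redirect the helper relocations and sections to the
 * matching primary symbol, so each label is resolved exactly once.
 */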
1110 static abort_t uniquify_symbols(struct ksplice_pack *pack)
1112 struct ksplice_reloc *r;
1113 struct ksplice_section *s;
1114 struct ksplice_symbol *sym, **sym_arr, **symp;
1115 size_t size = pack->primary_symbols_end - pack->primary_symbols;
1117 if (size == 0)
1118 return OK;
1120 sym_arr = vmalloc(sizeof(*sym_arr) * size);
1121 if (sym_arr == NULL)
1122 return OUT_OF_MEMORY;
1124 for (symp = sym_arr, sym = pack->primary_symbols;
1125 symp < sym_arr + size && sym < pack->primary_symbols_end;
1126 sym++, symp++)
1127 *symp = sym;
1129 sort(sym_arr, size, sizeof(*sym_arr), compare_symbolp_labels, NULL);
1131 for (r = pack->helper_relocs; r < pack->helper_relocs_end; r++) {
1132 symp = bsearch(&r->symbol, sym_arr, size, sizeof(*sym_arr),
1133 compare_symbolp_labels);
1134 if (symp != NULL) {
1135 if ((*symp)->name == NULL)
1136 (*symp)->name = r->symbol->name;
1137 r->symbol = *symp;
1141 for (s = pack->helper_sections; s < pack->helper_sections_end; s++) {
1142 symp = bsearch(&s->symbol, sym_arr, size, sizeof(*sym_arr),
1143 compare_symbolp_labels);
1144 if (symp != NULL) {
1145 if ((*symp)->name == NULL)
1146 (*symp)->name = s->symbol->name;
1147 s->symbol = *symp;
1151 vfree(sym_arr);
1152 return OK;
1155 static abort_t init_symbol_array(struct ksplice_pack *pack,
1156 struct ksplice_symbol *start,
1157 struct ksplice_symbol *end)
1159 struct ksplice_symbol *sym, **sym_arr, **symp;
1160 struct ksplice_lookup lookup;
1161 size_t size = end - start;
1162 abort_t ret;
1164 if (size == 0)
1165 return OK;
1167 for (sym = start; sym < end; sym++) {
1168 sym->vals = kmalloc(sizeof(*sym->vals), GFP_KERNEL);
1169 if (sym->vals == NULL)
1170 return OUT_OF_MEMORY;
1171 INIT_LIST_HEAD(sym->vals);
1172 sym->value = 0;
1175 sym_arr = vmalloc(sizeof(*sym_arr) * size);
1176 if (sym_arr == NULL)
1177 return OUT_OF_MEMORY;
1179 for (symp = sym_arr, sym = start; symp < sym_arr + size && sym < end;
1180 sym++, symp++)
1181 *symp = sym;
1183 sort(sym_arr, size, sizeof(*sym_arr), compare_symbolp_names, NULL);
1185 lookup.pack = pack;
1186 lookup.arr = sym_arr;
1187 lookup.size = size;
1188 lookup.ret = OK;
1190 each_symbol(add_export_values, &lookup);
1191 ret = lookup.ret;
1192 #ifdef CONFIG_KALLSYMS
1193 if (ret == OK)
1194 ret = (__force abort_t)
1195 kallsyms_on_each_symbol(add_kallsyms_values, &lookup);
1196 #endif /* CONFIG_KALLSYMS */
1197 vfree(sym_arr);
1198 return ret;
1201 static abort_t init_symbol_arrays(struct ksplice_pack *pack)
1203 abort_t ret;
1205 ret = uniquify_symbols(pack);
1206 if (ret != OK)
1207 return ret;
1209 ret = init_symbol_array(pack, pack->helper_symbols,
1210 pack->helper_symbols_end);
1211 if (ret != OK)
1212 return ret;
1214 ret = init_symbol_array(pack, pack->primary_symbols,
1215 pack->primary_symbols_end);
1216 if (ret != OK)
1217 return ret;
1219 return OK;
1222 static abort_t prepare_pack(struct ksplice_pack *pack)
1224 abort_t ret;
1226 ksdebug(pack, "Preparing and checking %s\n", pack->name);
1227 ret = match_pack_sections(pack, false);
1228 if (ret == NO_MATCH) {
1229 /* It is possible that by using relocations from .data sections
1230 we can successfully run-pre match the rest of the sections.
1231 To avoid using any symbols obtained from .data sections
1232 (which may be unreliable) in the post code, we first prepare
1233 the post code and then try to run-pre match the remaining
 1234 		   sections with the help of .data sections.
 1235 		*/
1236 ksdebug(pack, "Continuing without some sections; we might "
1237 "find them later.\n");
1238 ret = finalize_pack(pack);
1239 if (ret != OK) {
1240 ksdebug(pack, "Aborted. Unable to continue without "
1241 "the unmatched sections.\n");
1242 return ret;
1245 ksdebug(pack, "run-pre: Considering .data sections to find the "
1246 "unmatched sections\n");
1247 ret = match_pack_sections(pack, true);
1248 if (ret != OK)
1249 return ret;
1251 ksdebug(pack, "run-pre: Found all previously unmatched "
1252 "sections\n");
1253 return OK;
1254 } else if (ret != OK) {
1255 return ret;
1258 return finalize_pack(pack);
1261 static abort_t finalize_pack(struct ksplice_pack *pack)
1263 abort_t ret;
1264 ret = apply_relocs(pack, pack->primary_relocs,
1265 pack->primary_relocs_end);
1266 if (ret != OK)
1267 return ret;
1269 ret = finalize_patches(pack);
1270 if (ret != OK)
1271 return ret;
1273 ret = finalize_exports(pack);
1274 if (ret != OK)
1275 return ret;
1277 return OK;
1280 static abort_t finalize_exports(struct ksplice_pack *pack)
1282 struct ksplice_export *exp;
1283 struct module *m;
1284 const struct kernel_symbol *sym;
1286 for (exp = pack->exports; exp < pack->exports_end; exp++) {
1287 sym = find_symbol(exp->name, &m, NULL, true, false);
1288 if (sym == NULL) {
1289 ksdebug(pack, "Could not find kernel_symbol struct for "
1290 "%s\n", exp->name);
1291 return MISSING_EXPORT;
1294 /* Cast away const since we are planning to mutate the
1295 * kernel_symbol structure. */
1296 exp->sym = (struct kernel_symbol *)sym;
1297 exp->saved_name = exp->sym->name;
1298 if (m != pack->primary && use_module(pack->primary, m) != 1) {
1299 ksdebug(pack, "Aborted. Could not add dependency on "
1300 "symbol %s from module %s.\n", sym->name,
1301 m->name);
1302 return UNEXPECTED;
1305 return OK;
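/*
 * Every patch's oldaddr must fall inside a safety record established during
 * run-pre matching; the record proves we matched the code being replaced
 * and gives the address range the conflict checks compare stack addresses
 * against.
 */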
1308 static abort_t finalize_patches(struct ksplice_pack *pack)
1310 struct ksplice_patch *p;
1311 struct safety_record *rec;
1312 abort_t ret;
1314 for (p = pack->patches; p < pack->patches_end; p++) {
1315 bool found = false;
1316 list_for_each_entry(rec, &pack->safety_records, list) {
1317 if (rec->addr <= p->oldaddr &&
1318 p->oldaddr < rec->addr + rec->size) {
1319 found = true;
1320 break;
1323 if (!found) {
1324 const struct ksplice_reloc *r = patch_reloc(pack, p);
1325 if (r == NULL) {
1326 ksdebug(pack, "A patch with no ksplice_reloc at"
1327 " its oldaddr has no safety record\n");
1328 return NO_MATCH;
1330 ksdebug(pack, "No safety record for patch with oldaddr "
1331 "%s+%lx\n", r->symbol->label, r->target_addend);
1332 return NO_MATCH;
1335 if (p->type == KSPLICE_PATCH_TEXT) {
1336 ret = prepare_trampoline(pack, p);
1337 if (ret != OK)
1338 return ret;
1341 if (rec->addr + rec->size < p->oldaddr + p->size) {
1342 ksdebug(pack, "Safety record %s is too short for "
1343 "patch\n", rec->label);
1344 return UNEXPECTED;
1347 if (p->type == KSPLICE_PATCH_TEXT) {
1348 if (p->repladdr == 0)
1349 p->repladdr = (unsigned long)ksplice_deleted;
1350 else
1351 rec->first_byte_safe = true;
1354 ret = add_dependency_on_address(pack, p->oldaddr);
1355 if (ret != OK)
1356 return ret;
1358 return OK;
1361 static abort_t map_trampoline_pages(struct update *update)
1363 struct ksplice_pack *pack;
1364 list_for_each_entry(pack, &update->packs, list) {
1365 struct ksplice_patch *p;
1366 for (p = pack->patches; p < pack->patches_end; p++) {
1367 p->vaddr = map_writable((void *)p->oldaddr, p->size);
1368 if (p->vaddr == NULL) {
1369 ksdebug(pack, "Unable to map oldaddr read/write"
1370 "\n");
1371 unmap_trampoline_pages(update);
1372 return UNEXPECTED;
1376 return OK;
1379 static void unmap_trampoline_pages(struct update *update)
1381 struct ksplice_pack *pack;
1382 list_for_each_entry(pack, &update->packs, list) {
1383 struct ksplice_patch *p;
1384 for (p = pack->patches; p < pack->patches_end; p++) {
1385 vunmap((void *)((unsigned long)p->vaddr & PAGE_MASK));
1386 p->vaddr = NULL;
1391 /* Based off of linux's text_poke. */
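/*
 * Map the one or two pages backing [addr, addr + len) into a fresh writable
 * vmalloc mapping and return the corresponding address inside it, so that
 * read-only kernel text can be patched without changing the protections on
 * the original mapping.
 */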
1392 static void *map_writable(void *addr, size_t len)
1394 void *vaddr;
1395 int nr_pages = 2;
1396 unsigned long laddr = (unsigned long)addr;
1397 struct page *pages[2];
1399 if ((laddr >= init_mm.start_code && laddr < init_mm.end_code) ||
1400 (laddr >= init_mm.start_data && laddr < init_mm.end_data)) {
1401 #if defined(CONFIG_X86_64) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
1402 /* e3ebadd95cb621e2c7436f3d3646447ac9d5c16d was after 2.6.21 */
1403 pages[0] = pfn_to_page(__pa_symbol(addr) >> PAGE_SHIFT);
1404 WARN_ON(!PageReserved(pages[0]));
1405 pages[1] = pfn_to_page(__pa_symbol(addr + PAGE_SIZE) >>
1406 PAGE_SHIFT);
1407 #else /* !CONFIG_X86_64 || LINUX_VERSION_CODE >= */
1408 pages[0] = virt_to_page(addr);
1409 WARN_ON(!PageReserved(pages[0]));
1410 pages[1] = virt_to_page(addr + PAGE_SIZE);
1411 #endif /* CONFIG_X86_64 && LINUX_VERSION_CODE */
1412 } else {
1413 pages[0] = vmalloc_to_page(addr);
1414 pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
1416 if (!pages[0])
1417 return NULL;
1418 if (!pages[1])
1419 nr_pages = 1;
1420 vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
1421 if (vaddr == NULL)
1422 return NULL;
1423 return vaddr + offset_in_page(addr);
1426 static abort_t add_dependency_on_address(struct ksplice_pack *pack,
1427 unsigned long addr)
1429 struct ksplice_pack *p;
1430 struct module *m =
1431 __module_text_address(follow_trampolines(pack, addr));
1432 if (m == NULL)
1433 return OK;
1434 list_for_each_entry(p, &pack->update->packs, list) {
1435 if (m == p->primary)
1436 return OK;
1438 if (use_module(pack->primary, m) != 1)
1439 return MODULE_BUSY;
1440 return OK;
1443 static abort_t apply_relocs(struct ksplice_pack *pack,
1444 const struct ksplice_reloc *relocs,
1445 const struct ksplice_reloc *relocs_end)
1447 const struct ksplice_reloc *r;
1448 for (r = relocs; r < relocs_end; r++) {
1449 abort_t ret = apply_reloc(pack, r);
1450 if (ret != OK)
1451 return ret;
1453 return OK;
1456 static abort_t apply_reloc(struct ksplice_pack *pack,
1457 const struct ksplice_reloc *r)
1459 switch (r->howto->type) {
1460 case KSPLICE_HOWTO_RELOC:
1461 case KSPLICE_HOWTO_RELOC_PATCH:
1462 return apply_howto_reloc(pack, r);
1463 case KSPLICE_HOWTO_DATE:
1464 case KSPLICE_HOWTO_TIME:
1465 return apply_howto_date(pack, r);
1466 default:
1467 ksdebug(pack, "Unexpected howto type %d\n", r->howto->type);
1468 return UNEXPECTED;
1472 static abort_t apply_howto_reloc(struct ksplice_pack *pack,
1473 const struct ksplice_reloc *r)
1475 abort_t ret;
1476 int canary_ret;
1477 unsigned long sym_addr;
1478 LIST_HEAD(vals);
1480 canary_ret = contains_canary(pack, r->blank_addr, r->howto);
1481 if (canary_ret < 0)
1482 return UNEXPECTED;
1483 if (canary_ret == 0) {
1484 ksdebug(pack, "reloc: skipped %lx to %s+%lx (altinstr)\n",
1485 r->blank_addr, r->symbol->label, r->target_addend);
1486 return OK;
1489 #ifdef KSPLICE_STANDALONE
1490 if (!bootstrapped) {
1491 ret = add_system_map_candidates(pack,
1492 pack->primary_system_map,
1493 pack->primary_system_map_end,
1494 r->symbol->label, &vals);
1495 if (ret != OK) {
1496 release_vals(&vals);
1497 return ret;
1500 #endif /* KSPLICE_STANDALONE */
1501 ret = lookup_symbol(pack, r->symbol, &vals);
1502 if (ret != OK) {
1503 release_vals(&vals);
1504 return ret;
1506 if (!singular(&vals) || (r->symbol->vals != NULL &&
1507 r->howto->type == KSPLICE_HOWTO_RELOC_PATCH)) {
1508 release_vals(&vals);
1509 ksdebug(pack, "Failed to find %s for reloc\n",
1510 r->symbol->label);
1511 return FAILED_TO_FIND;
1513 sym_addr = list_entry(vals.next, struct candidate_val, list)->val;
1514 release_vals(&vals);
1516 ret = write_reloc_value(pack, r, r->blank_addr,
1517 r->howto->pcrel ? sym_addr - r->blank_addr :
1518 sym_addr);
1519 if (ret != OK)
1520 return ret;
1522 ksdebug(pack, "reloc: %lx to %s+%lx (S=%lx ", r->blank_addr,
1523 r->symbol->label, r->target_addend, sym_addr);
1524 switch (r->howto->size) {
1525 case 1:
1526 ksdebug(pack, "aft=%02x)\n", *(uint8_t *)r->blank_addr);
1527 break;
1528 case 2:
1529 ksdebug(pack, "aft=%04x)\n", *(uint16_t *)r->blank_addr);
1530 break;
1531 case 4:
1532 ksdebug(pack, "aft=%08x)\n", *(uint32_t *)r->blank_addr);
1533 break;
1534 #if BITS_PER_LONG >= 64
1535 case 8:
1536 ksdebug(pack, "aft=%016llx)\n", *(uint64_t *)r->blank_addr);
1537 break;
1538 #endif /* BITS_PER_LONG */
1539 default:
1540 ksdebug(pack, "Aborted. Invalid relocation size.\n");
1541 return UNEXPECTED;
1543 #ifdef KSPLICE_STANDALONE
1544 if (!bootstrapped)
1545 return OK;
1546 #endif /* KSPLICE_STANDALONE */
1548 /* Create labelvals so that we can verify our choices in the second
1549 round of run-pre matching that considers data sections. */
1550 ret = create_labelval(pack, r->symbol, sym_addr, VAL);
1551 if (ret != OK)
1552 return ret;
1554 return add_dependency_on_address(pack, sym_addr);
1557 static abort_t apply_howto_date(struct ksplice_pack *pack,
1558 const struct ksplice_reloc *r)
1560 if (r->symbol->vals != NULL) {
1561 ksdebug(pack, "Failed to find %s for date\n", r->symbol->label);
1562 return FAILED_TO_FIND;
1564 memcpy((unsigned char *)r->blank_addr,
1565 (const unsigned char *)r->symbol->value, r->howto->size);
1566 return OK;
1569 static abort_t read_reloc_value(struct ksplice_pack *pack,
1570 const struct ksplice_reloc *r,
1571 unsigned long addr, unsigned long *valp)
1573 unsigned char bytes[sizeof(long)];
1574 unsigned long val;
1575 const struct ksplice_reloc_howto *howto = r->howto;
1577 if (howto->size <= 0 || howto->size > sizeof(long)) {
1578 ksdebug(pack, "Aborted. Invalid relocation size.\n");
1579 return UNEXPECTED;
1582 if (probe_kernel_read(bytes, (void *)addr, howto->size) == -EFAULT)
1583 return NO_MATCH;
1585 switch (howto->size) {
1586 case 1:
1587 val = *(uint8_t *)bytes;
1588 break;
1589 case 2:
1590 val = *(uint16_t *)bytes;
1591 break;
1592 case 4:
1593 val = *(uint32_t *)bytes;
1594 break;
1595 #if BITS_PER_LONG >= 64
1596 case 8:
1597 val = *(uint64_t *)bytes;
1598 break;
1599 #endif /* BITS_PER_LONG */
1600 default:
1601 ksdebug(pack, "Aborted. Invalid relocation size.\n");
1602 return UNEXPECTED;
1605 val &= howto->dst_mask;
1606 if (howto->signed_addend)
1607 val |= -(val & (howto->dst_mask & ~(howto->dst_mask >> 1)));
1608 val <<= howto->rightshift;
1609 val -= r->insn_addend + r->target_addend;
1610 *valp = val;
1611 return OK;
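/*
 * The value stored at addr is (sym_addr + r->target_addend + r->insn_addend),
 * shifted right by howto->rightshift and merged into the existing bytes
 * under howto->dst_mask.  For PC-relative relocations the caller passes
 * sym_addr - blank_addr, so a 4-byte pcrel reloc, for example, ends up
 * holding the usual S + A - P.  read_reloc_value() is then used to verify
 * that the value survives the round trip, i.e. that it did not overflow.
 */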
1614 static abort_t write_reloc_value(struct ksplice_pack *pack,
1615 const struct ksplice_reloc *r,
1616 unsigned long addr, unsigned long sym_addr)
1618 unsigned long val = sym_addr + r->target_addend + r->insn_addend;
1619 const struct ksplice_reloc_howto *howto = r->howto;
1620 val >>= howto->rightshift;
1621 switch (howto->size) {
1622 case 1:
1623 *(uint8_t *)addr = (*(uint8_t *)addr & ~howto->dst_mask) |
1624 (val & howto->dst_mask);
1625 break;
1626 case 2:
1627 *(uint16_t *)addr = (*(uint16_t *)addr & ~howto->dst_mask) |
1628 (val & howto->dst_mask);
1629 break;
1630 case 4:
1631 *(uint32_t *)addr = (*(uint32_t *)addr & ~howto->dst_mask) |
1632 (val & howto->dst_mask);
1633 break;
1634 #if BITS_PER_LONG >= 64
1635 case 8:
1636 *(uint64_t *)addr = (*(uint64_t *)addr & ~howto->dst_mask) |
1637 (val & howto->dst_mask);
1638 break;
1639 #endif /* BITS_PER_LONG */
1640 default:
1641 ksdebug(pack, "Aborted. Invalid relocation size.\n");
1642 return UNEXPECTED;
1645 if (read_reloc_value(pack, r, addr, &val) != OK || val != sym_addr) {
1646 ksdebug(pack, "Aborted. Relocation overflow.\n");
1647 return UNEXPECTED;
1650 return OK;
1653 static void __attribute__((noreturn)) ksplice_deleted(void)
1655 printk(KERN_CRIT "Called a kernel function deleted by Ksplice!\n");
1656 BUG();
1657 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
1658 /* 91768d6c2bad0d2766a166f13f2f57e197de3458 was after 2.6.19 */
1659 for (;;);
1660 #endif
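/*
 * Run-pre match the helper sections against the running kernel.  Sections
 * are retried in passes, since matching one section can pin down symbol
 * values that let another section match later; string sections are never
 * matched directly, and data sections are only considered when
 * consider_data_sections is set.  We give up once a pass makes no progress
 * while non-data sections remain unmatched.
 */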
1663 static abort_t match_pack_sections(struct ksplice_pack *pack,
1664 bool consider_data_sections)
1666 struct ksplice_section *sect;
1667 abort_t ret;
1668 int remaining = 0;
1669 bool progress;
1671 for (sect = pack->helper_sections; sect < pack->helper_sections_end;
1672 sect++) {
1673 if ((sect->flags & KSPLICE_SECTION_DATA) == 0 &&
1674 (sect->flags & KSPLICE_SECTION_STRING) == 0 &&
1675 (sect->flags & KSPLICE_SECTION_MATCHED) == 0)
1676 remaining++;
1679 while (remaining > 0) {
1680 progress = false;
1681 for (sect = pack->helper_sections;
1682 sect < pack->helper_sections_end; sect++) {
1683 if ((sect->flags & KSPLICE_SECTION_MATCHED) != 0)
1684 continue;
1685 if ((!consider_data_sections &&
1686 (sect->flags & KSPLICE_SECTION_DATA) != 0) ||
1687 (sect->flags & KSPLICE_SECTION_STRING) != 0)
1688 continue;
1689 ret = find_section(pack, sect);
1690 if (ret == OK) {
1691 sect->flags |= KSPLICE_SECTION_MATCHED;
1692 if ((sect->flags & KSPLICE_SECTION_DATA) == 0)
1693 remaining--;
1694 progress = true;
1695 } else if (ret != NO_MATCH) {
1696 return ret;
1700 if (progress)
1701 continue;
1703 for (sect = pack->helper_sections;
1704 sect < pack->helper_sections_end; sect++) {
1705 if ((sect->flags & KSPLICE_SECTION_MATCHED) != 0 ||
1706 (sect->flags & KSPLICE_SECTION_STRING) != 0)
1707 continue;
1708 ksdebug(pack, "run-pre: could not match %s "
1709 "section %s\n",
1710 (sect->flags & KSPLICE_SECTION_DATA) != 0 ?
1711 "data" :
1712 (sect->flags & KSPLICE_SECTION_RODATA) != 0 ?
1713 "rodata" : "text", sect->symbol->label);
1715 ksdebug(pack, "Aborted. run-pre: could not match some "
1716 "sections.\n");
1717 return NO_MATCH;
1719 return OK;
1722 static abort_t find_section(struct ksplice_pack *pack,
1723 struct ksplice_section *sect)
1725 int i;
1726 abort_t ret;
1727 unsigned long run_addr;
1728 LIST_HEAD(vals);
1729 struct candidate_val *v, *n;
1731 #ifdef KSPLICE_STANDALONE
1732 ret = add_system_map_candidates(pack, pack->helper_system_map,
1733 pack->helper_system_map_end,
1734 sect->symbol->label, &vals);
1735 if (ret != OK) {
1736 release_vals(&vals);
1737 return ret;
1739 #endif /* KSPLICE_STANDALONE */
1740 ret = lookup_symbol(pack, sect->symbol, &vals);
1741 if (ret != OK) {
1742 release_vals(&vals);
1743 return ret;
1746 ksdebug(pack, "run-pre: starting sect search for %s\n",
1747 sect->symbol->label);
1749 list_for_each_entry_safe(v, n, &vals, list) {
1750 run_addr = v->val;
1752 yield();
1753 ret = try_addr(pack, sect, run_addr, NULL, RUN_PRE_INITIAL);
1754 if (ret == NO_MATCH) {
1755 list_del(&v->list);
1756 kfree(v);
1757 } else if (ret != OK) {
1758 release_vals(&vals);
1759 return ret;
1763 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
1764 if (list_empty(&vals) && (sect->flags & KSPLICE_SECTION_DATA) == 0) {
1765 ret = brute_search_all(pack, sect, &vals);
1766 if (ret != OK) {
1767 release_vals(&vals);
1768 return ret;
1770 /* Make sure run-pre matching output is displayed if
1771 brute_search succeeds */
1772 if (singular(&vals)) {
1773 run_addr = list_entry(vals.next, struct candidate_val,
1774 list)->val;
1775 ret = try_addr(pack, sect, run_addr, NULL,
1776 RUN_PRE_INITIAL);
1777 if (ret != OK) {
1778 ksdebug(pack, "run-pre: Debug run failed for "
1779 "sect %s:\n", sect->symbol->label);
1780 release_vals(&vals);
1781 return ret;
1785 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
1787 if (singular(&vals)) {
1788 LIST_HEAD(safety_records);
1789 run_addr = list_entry(vals.next, struct candidate_val,
1790 list)->val;
1791 ret = try_addr(pack, sect, run_addr, &safety_records,
1792 RUN_PRE_FINAL);
1793 release_vals(&vals);
1794 if (ret != OK) {
1795 clear_list(&safety_records, struct safety_record, list);
1796 ksdebug(pack, "run-pre: Final run failed for sect "
1797 "%s:\n", sect->symbol->label);
1798 } else {
1799 list_splice(&safety_records, &pack->safety_records);
1801 return ret;
1802 } else if (!list_empty(&vals)) {
1803 struct candidate_val *val;
1804 ksdebug(pack, "run-pre: multiple candidates for sect %s:\n",
1805 sect->symbol->label);
1806 i = 0;
1807 list_for_each_entry(val, &vals, list) {
1808 i++;
1809 ksdebug(pack, "%lx\n", val->val);
1810 if (i > 5) {
1811 ksdebug(pack, "...\n");
1812 break;
1815 release_vals(&vals);
1816 return NO_MATCH;
1818 release_vals(&vals);
1819 return NO_MATCH;
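/*
 * Attempt a run-pre match of sect at run_addr.  A TEMP labelval is created
 * for the section symbol before comparing; in the trial modes the
 * temporaries are discarded again afterwards, while RUN_PRE_FINAL commits
 * them and keeps the safety records for the matched range.
 */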
1822 static abort_t try_addr(struct ksplice_pack *pack,
1823 struct ksplice_section *sect,
1824 unsigned long run_addr,
1825 struct list_head *safety_records,
1826 enum run_pre_mode mode)
1828 abort_t ret;
1829 const struct module *run_module;
1831 if ((sect->flags & KSPLICE_SECTION_RODATA) != 0 ||
1832 (sect->flags & KSPLICE_SECTION_DATA) != 0)
1833 run_module = __module_data_address(run_addr);
1834 else
1835 run_module = __module_text_address(run_addr);
1836 if (run_module == pack->primary) {
1837 ksdebug(pack, "run-pre: unexpected address %lx in primary "
1838 "module %s for sect %s\n", run_addr, run_module->name,
1839 sect->symbol->label);
1840 return UNEXPECTED;
1842 if (!patches_module(run_module, pack->target)) {
1843 ksdebug(pack, "run-pre: ignoring address %lx in other module "
1844 "%s for sect %s\n", run_addr, run_module == NULL ?
1845 "vmlinux" : run_module->name, sect->symbol->label);
1846 return NO_MATCH;
1849 ret = create_labelval(pack, sect->symbol, run_addr, TEMP);
1850 if (ret != OK)
1851 return ret;
1853 #ifdef CONFIG_FUNCTION_DATA_SECTIONS
1854 ret = run_pre_cmp(pack, sect, run_addr, safety_records, mode);
1855 #else /* !CONFIG_FUNCTION_DATA_SECTIONS */
1856 if ((sect->flags & KSPLICE_SECTION_TEXT) != 0)
1857 ret = arch_run_pre_cmp(pack, sect, run_addr, safety_records,
1858 mode);
1859 else
1860 ret = run_pre_cmp(pack, sect, run_addr, safety_records, mode);
1861 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
1862 if (ret == NO_MATCH && mode != RUN_PRE_FINAL) {
1863 set_temp_labelvals(pack, NOVAL);
1864 ksdebug(pack, "run-pre: %s sect %s does not match (r_a=%lx "
1865 "p_a=%lx s=%lx)\n",
1866 (sect->flags & KSPLICE_SECTION_RODATA) != 0 ? "rodata" :
1867 (sect->flags & KSPLICE_SECTION_DATA) != 0 ? "data" :
1868 "text", sect->symbol->label, run_addr, sect->address,
1869 sect->size);
1870 ksdebug(pack, "run-pre: ");
1871 if (pack->update->debug >= 1) {
1872 #ifdef CONFIG_FUNCTION_DATA_SECTIONS
1873 ret = run_pre_cmp(pack, sect, run_addr, safety_records,
1874 RUN_PRE_DEBUG);
1875 #else /* !CONFIG_FUNCTION_DATA_SECTIONS */
1876 if ((sect->flags & KSPLICE_SECTION_TEXT) != 0)
1877 ret = arch_run_pre_cmp(pack, sect, run_addr,
1878 safety_records,
1879 RUN_PRE_DEBUG);
1880 else
1881 ret = run_pre_cmp(pack, sect, run_addr,
1882 safety_records,
1883 RUN_PRE_DEBUG);
1884 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
1885 set_temp_labelvals(pack, NOVAL);
1887 ksdebug(pack, "\n");
1888 return ret;
1889 } else if (ret != OK) {
1890 set_temp_labelvals(pack, NOVAL);
1891 return ret;
1894 if (mode != RUN_PRE_FINAL) {
1895 set_temp_labelvals(pack, NOVAL);
1896 ksdebug(pack, "run-pre: candidate for sect %s=%lx\n",
1897 sect->symbol->label, run_addr);
1898 return OK;
1901 set_temp_labelvals(pack, VAL);
1902 ksdebug(pack, "run-pre: found sect %s=%lx\n", sect->symbol->label,
1903 run_addr);
1904 return OK;
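/*
 * run_pre_cmp: byte-by-byte comparison of the pre section against the run
 * code.  Relocations, paravirt alternatives, and compile-time date/time
 * strings are handled specially; for writable data sections the raw bytes
 * are not required to match.  In RUN_PRE_DEBUG mode each byte is echoed
 * via print_bytes(), e.g. "55 89 e5 8b/90 ..." where "8b/90" means the run
 * byte 0x8b differed from the pre byte 0x90 (illustrative bytes).
 */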
1907 static abort_t run_pre_cmp(struct ksplice_pack *pack,
1908 const struct ksplice_section *sect,
1909 unsigned long run_addr,
1910 struct list_head *safety_records,
1911 enum run_pre_mode mode)
1913 int matched = 0;
1914 abort_t ret;
1915 const struct ksplice_reloc *r, *finger;
1916 const unsigned char *pre, *run, *pre_start, *run_start;
1917 unsigned char runval;
1919 pre_start = (const unsigned char *)sect->address;
1920 run_start = (const unsigned char *)run_addr;
1922 finger = init_reloc_search(pack, sect);
1924 pre = pre_start;
1925 run = run_start;
1926 while (pre < pre_start + sect->size) {
1927 unsigned long offset = pre - pre_start;
1928 ret = lookup_reloc(pack, &finger, (unsigned long)pre, &r);
1929 if (ret == OK) {
1930 ret = handle_reloc(pack, sect, r, (unsigned long)run,
1931 mode);
1932 if (ret != OK) {
1933 if (mode == RUN_PRE_INITIAL)
1934 ksdebug(pack, "reloc in sect does not "
1935 "match after %lx/%lx bytes\n",
1936 offset, sect->size);
1937 return ret;
1939 if (mode == RUN_PRE_DEBUG)
1940 print_bytes(pack, run, r->howto->size, pre,
1941 r->howto->size);
1942 pre += r->howto->size;
1943 run += r->howto->size;
1944 finger++;
1945 continue;
1946 } else if (ret != NO_MATCH) {
1947 return ret;
1950 if ((sect->flags & KSPLICE_SECTION_TEXT) != 0) {
1951 ret = handle_paravirt(pack, (unsigned long)pre,
1952 (unsigned long)run, &matched);
1953 if (ret != OK)
1954 return ret;
1955 if (matched != 0) {
1956 if (mode == RUN_PRE_DEBUG)
1957 print_bytes(pack, run, matched, pre,
1958 matched);
1959 pre += matched;
1960 run += matched;
1961 continue;
1965 if (probe_kernel_read(&runval, (void *)run, 1) == -EFAULT) {
1966 if (mode == RUN_PRE_INITIAL)
1967 ksdebug(pack, "sect unmapped after %lx/%lx "
1968 "bytes\n", offset, sect->size);
1969 return NO_MATCH;
1972 if (runval != *pre &&
1973 (sect->flags & KSPLICE_SECTION_DATA) == 0) {
1974 if (mode == RUN_PRE_INITIAL)
1975 ksdebug(pack, "sect does not match after "
1976 "%lx/%lx bytes\n", offset, sect->size);
1977 if (mode == RUN_PRE_DEBUG) {
1978 print_bytes(pack, run, 1, pre, 1);
1979 ksdebug(pack, "[p_o=%lx] ! ", offset);
1980 print_bytes(pack, run + 1, 2, pre + 1, 2);
1982 return NO_MATCH;
1984 if (mode == RUN_PRE_DEBUG)
1985 print_bytes(pack, run, 1, pre, 1);
1986 pre++;
1987 run++;
1989 return create_safety_record(pack, sect, safety_records, run_addr,
1990 run - run_start);
1993 static void print_bytes(struct ksplice_pack *pack,
1994 const unsigned char *run, int runc,
1995 const unsigned char *pre, int prec)
1997 int o;
1998 int matched = min(runc, prec);
1999 for (o = 0; o < matched; o++) {
2000 if (run[o] == pre[o])
2001 ksdebug(pack, "%02x ", run[o]);
2002 else
2003 ksdebug(pack, "%02x/%02x ", run[o], pre[o]);
2005 for (o = matched; o < runc; o++)
2006 ksdebug(pack, "%02x/ ", run[o]);
2007 for (o = matched; o < prec; o++)
2008 ksdebug(pack, "/%02x ", pre[o]);
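/*
 * When KALLSYMS is unavailable in a standalone build, brute_search() falls
 * back to scanning candidate memory byte by byte: the first pre byte acts
 * as a cheap filter, and only matching addresses are handed to try_addr().
 * brute_search_all() applies this to every relevant module's core and init
 * regions and to the kernel text, with debug output suppressed meanwhile.
 */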
2011 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
2012 static abort_t brute_search(struct ksplice_pack *pack,
2013 struct ksplice_section *sect,
2014 const void *start, unsigned long len,
2015 struct list_head *vals)
2017 unsigned long addr;
2018 char run, pre;
2019 abort_t ret;
2021 for (addr = (unsigned long)start; addr < (unsigned long)start + len;
2022 addr++) {
2023 if (addr % 100000 == 0)
2024 yield();
2026 if (probe_kernel_read(&run, (void *)addr, 1) == -EFAULT)
2027 return OK;
2029 pre = *(const unsigned char *)(sect->address);
2031 if (run != pre)
2032 continue;
2034 ret = try_addr(pack, sect, addr, NULL, RUN_PRE_INITIAL);
2035 if (ret == OK) {
2036 ret = add_candidate_val(pack, vals, addr);
2037 if (ret != OK)
2038 return ret;
2039 } else if (ret != NO_MATCH) {
2040 return ret;
2044 return OK;
2047 static abort_t brute_search_all(struct ksplice_pack *pack,
2048 struct ksplice_section *sect,
2049 struct list_head *vals)
2051 struct module *m;
2052 abort_t ret = OK;
2053 int saved_debug;
2055 ksdebug(pack, "brute_search: searching for %s\n", sect->symbol->label);
2056 saved_debug = pack->update->debug;
2057 pack->update->debug = 0;
2059 list_for_each_entry(m, &modules, list) {
2060 if (!patches_module(m, pack->target) || m == pack->primary)
2061 continue;
2062 ret = brute_search(pack, sect, m->module_core, m->core_size,
2063 vals);
2064 if (ret != OK)
2065 goto out;
2066 ret = brute_search(pack, sect, m->module_init, m->init_size,
2067 vals);
2068 if (ret != OK)
2069 goto out;
2072 ret = brute_search(pack, sect, (const void *)init_mm.start_code,
2073 init_mm.end_code - init_mm.start_code, vals);
2075 out:
2076 pack->update->debug = saved_debug;
2077 return ret;
2079 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
2081 struct range {
2082 unsigned long address;
2083 unsigned long size;
2086 static int reloc_bsearch_compare(const void *key, const void *elt)
2088 const struct range *range = key;
2089 const struct ksplice_reloc *r = elt;
2090 if (range->address + range->size <= r->blank_addr)
2091 return -1;
2092 if (range->address > r->blank_addr)
2093 return 1;
2094 return 0;
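/*
 * find_reloc: the reloc tables are sorted by blank_addr (see
 * compare_relocs() below), so a binary search locates some reloc
 * overlapping [address, address + size), and the result is then rewound to
 * the first such reloc.  lookup_reloc() walks forward from that finger as
 * run_pre_cmp advances through the section.
 */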
2097 static const struct ksplice_reloc *find_reloc(const struct ksplice_reloc *start,
2098 const struct ksplice_reloc *end,
2099 unsigned long address,
2100 unsigned long size)
2102 const struct ksplice_reloc *r;
2103 struct range range = { address, size };
2104 r = bsearch((void *)&range, start, end - start, sizeof(*r),
2105 reloc_bsearch_compare);
2106 if (r == NULL)
2107 return NULL;
2108 while (r > start && (r - 1)->blank_addr >= address)
2109 r--;
2110 return r;
2113 static const struct ksplice_reloc *
2114 init_reloc_search(struct ksplice_pack *pack, const struct ksplice_section *sect)
2116 const struct ksplice_reloc *r;
2117 r = find_reloc(pack->helper_relocs, pack->helper_relocs_end,
2118 sect->address, sect->size);
2119 if (r == NULL)
2120 return pack->helper_relocs_end;
2121 return r;
2124 static abort_t lookup_reloc(struct ksplice_pack *pack,
2125 const struct ksplice_reloc **fingerp,
2126 unsigned long addr,
2127 const struct ksplice_reloc **relocp)
2129 const struct ksplice_reloc *r = *fingerp;
2130 int canary_ret;
2132 while (r < pack->helper_relocs_end &&
2133 addr >= r->blank_addr + r->howto->size &&
2134 !(addr == r->blank_addr && r->howto->size == 0))
2135 r++;
2136 *fingerp = r;
2137 if (r == pack->helper_relocs_end)
2138 return NO_MATCH;
2139 if (addr < r->blank_addr)
2140 return NO_MATCH;
2141 *relocp = r;
2142 if (r->howto->type != KSPLICE_HOWTO_RELOC)
2143 return OK;
2145 canary_ret = contains_canary(pack, r->blank_addr, r->howto);
2146 if (canary_ret < 0)
2147 return UNEXPECTED;
2148 if (canary_ret == 0) {
2149 ksdebug(pack, "run-pre: reloc skipped at p_a=%lx to %s+%lx "
2150 "(altinstr)\n", r->blank_addr, r->symbol->label,
2151 r->target_addend);
2152 return NO_MATCH;
2154 if (addr != r->blank_addr) {
2155 ksdebug(pack, "Invalid nonzero relocation offset\n");
2156 return UNEXPECTED;
2158 return OK;
2161 static abort_t handle_reloc(struct ksplice_pack *pack,
2162 const struct ksplice_section *sect,
2163 const struct ksplice_reloc *r,
2164 unsigned long run_addr, enum run_pre_mode mode)
2166 switch (r->howto->type) {
2167 case KSPLICE_HOWTO_RELOC:
2168 return handle_howto_reloc(pack, sect, r, run_addr, mode);
2169 case KSPLICE_HOWTO_DATE:
2170 case KSPLICE_HOWTO_TIME:
2171 return handle_howto_date(pack, sect, r, run_addr, mode);
2172 case KSPLICE_HOWTO_BUG:
2173 return handle_bug(pack, r, run_addr);
2174 case KSPLICE_HOWTO_EXTABLE:
2175 return handle_extable(pack, r, run_addr);
2176 default:
2177 ksdebug(pack, "Unexpected howto type %d\n", r->howto->type);
2178 return UNEXPECTED;
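/*
 * handle_howto_date: __DATE__/__TIME__ strings embedded in the compiled
 * code differ between the pre and run kernels, so instead of comparing
 * bytes we only check that the run bytes look like a plausible timestamp:
 * "HH:MM:SS" for KSPLICE_HOWTO_TIME and "Mmm dd yyyy" (e.g. "Jan  1 2008",
 * illustrative) for KSPLICE_HOWTO_DATE, then record the run address as the
 * symbol's value.
 */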
2182 static abort_t handle_howto_date(struct ksplice_pack *pack,
2183 const struct ksplice_section *sect,
2184 const struct ksplice_reloc *r,
2185 unsigned long run_addr, enum run_pre_mode mode)
2187 abort_t ret;
2188 char *buf = kmalloc(r->howto->size, GFP_KERNEL);
2190 if (buf == NULL)
2191 return OUT_OF_MEMORY;
2192 if (probe_kernel_read(buf, (void *)run_addr, r->howto->size) == -EFAULT) {
2193 ret = NO_MATCH;
2194 goto out;
2197 switch (r->howto->type) {
2198 case KSPLICE_HOWTO_TIME:
2199 if (isdigit(buf[0]) && isdigit(buf[1]) && buf[2] == ':' &&
2200 isdigit(buf[3]) && isdigit(buf[4]) && buf[5] == ':' &&
2201 isdigit(buf[6]) && isdigit(buf[7]))
2202 ret = OK;
2203 else
2204 ret = NO_MATCH;
2205 break;
2206 case KSPLICE_HOWTO_DATE:
2207 if (isalpha(buf[0]) && isalpha(buf[1]) && isalpha(buf[2]) &&
2208 buf[3] == ' ' && (buf[4] == ' ' || isdigit(buf[4])) &&
2209 isdigit(buf[5]) && buf[6] == ' ' && isdigit(buf[7]) &&
2210 isdigit(buf[8]) && isdigit(buf[9]) && isdigit(buf[10]))
2211 ret = OK;
2212 else
2213 ret = NO_MATCH;
2214 break;
2215 default:
2216 ret = UNEXPECTED;
2218 if (ret == NO_MATCH && mode == RUN_PRE_INITIAL)
2219 ksdebug(pack, "%s string: \"%.*s\" does not match format\n",
2220 r->howto->type == KSPLICE_HOWTO_DATE ? "date" : "time",
2221 r->howto->size, buf);
2223 if (ret != OK)
2224 goto out;
2225 ret = create_labelval(pack, r->symbol, run_addr, TEMP);
2226 out:
2227 kfree(buf);
2228 return ret;
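/*
 * handle_howto_reloc: read the value the run code stores at this
 * relocation and undo PC-relative addressing to recover the address the
 * run code actually references.  In standalone builds the target section's
 * match_map is consulted to reject relocations that do not land on a known
 * instruction boundary of an already-matched section.  The recovered value
 * is recorded as a temporary labelval for the referenced symbol, and
 * string sections reached this way are matched recursively.
 */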
2231 static abort_t handle_howto_reloc(struct ksplice_pack *pack,
2232 const struct ksplice_section *sect,
2233 const struct ksplice_reloc *r,
2234 unsigned long run_addr,
2235 enum run_pre_mode mode)
2237 struct ksplice_section *sym_sect = symbol_section(pack, r->symbol);
2238 unsigned long offset = r->target_addend;
2239 unsigned long val;
2240 abort_t ret;
2242 ret = read_reloc_value(pack, r, run_addr, &val);
2243 if (ret != OK)
2244 return ret;
2245 if (r->howto->pcrel)
2246 val += run_addr;
2248 #ifdef KSPLICE_STANDALONE
2249 /* The match_map is only used in KSPLICE_STANDALONE */
2250 if (sym_sect == NULL || sym_sect->match_map == NULL || offset == 0) {
2252 } else if (offset >= sym_sect->size) {
2253 ksdebug(pack, "Out of range relocation: %s+%lx -> %s+%lx",
2254 sect->symbol->label, r->blank_addr - sect->address,
2255 r->symbol->label, offset);
2256 return NO_MATCH;
2257 } else if (sect == sym_sect && sect->match_map[offset] == NULL) {
2258 sym_sect->match_map[offset] =
2259 (const unsigned char *)r->symbol->value + offset;
2260 } else if (sect == sym_sect && (unsigned long)sect->match_map[offset] ==
2261 r->symbol->value + offset) {
2263 } else if (sect == sym_sect) {
2264 ksdebug(pack, "Relocations to nonmatching locations within "
2265 "section %s: %lx does not match %lx\n",
2266 sect->symbol->label, offset,
2267 (unsigned long)sect->match_map[offset] -
2268 r->symbol->value);
2269 return NO_MATCH;
2270 } else if ((sym_sect->flags & KSPLICE_SECTION_MATCHED) == 0) {
2271 if (mode == RUN_PRE_INITIAL)
2272 ksdebug(pack, "Delaying matching of %s due to reloc "
2273 "from to unmatching section: %s+%lx\n",
2274 sect->symbol->label, r->symbol->label, offset);
2275 return NO_MATCH;
2276 } else if (sym_sect->match_map[offset] == NULL) {
2277 if (mode == RUN_PRE_INITIAL)
2278 ksdebug(pack, "Relocation not to instruction boundary: "
2279 "%s+%lx -> %s+%lx", sect->symbol->label,
2280 r->blank_addr - sect->address, r->symbol->label,
2281 offset);
2282 return NO_MATCH;
2283 } else if ((unsigned long)sym_sect->match_map[offset] !=
2284 r->symbol->value + offset) {
2285 if (mode == RUN_PRE_INITIAL)
2286 ksdebug(pack, "Match map shift %s+%lx: %lx != %lx\n",
2287 r->symbol->label, offset,
2288 r->symbol->value + offset,
2289 (unsigned long)sym_sect->match_map[offset]);
2290 val += r->symbol->value + offset -
2291 (unsigned long)sym_sect->match_map[offset];
2293 #endif /* KSPLICE_STANDALONE */
2295 if (mode == RUN_PRE_INITIAL)
2296 ksdebug(pack, "run-pre: reloc at r_a=%lx p_a=%lx to %s+%lx: "
2297 "found %s = %lx\n", run_addr, r->blank_addr,
2298 r->symbol->label, offset, r->symbol->label, val);
2300 if (contains_canary(pack, run_addr, r->howto) != 0) {
2301 ksdebug(pack, "Aborted. Unexpected canary in run code at %lx"
2302 "\n", run_addr);
2303 return UNEXPECTED;
2306 if ((sect->flags & KSPLICE_SECTION_DATA) != 0 &&
2307 sect->symbol == r->symbol)
2308 return OK;
2309 ret = create_labelval(pack, r->symbol, val, TEMP);
2310 if (ret == NO_MATCH && mode == RUN_PRE_INITIAL)
2311 ksdebug(pack, "run-pre: reloc at r_a=%lx p_a=%lx: labelval %s "
2312 "= %lx does not match expected %lx\n", run_addr,
2313 r->blank_addr, r->symbol->label, r->symbol->value, val);
2315 if (ret != OK)
2316 return ret;
2317 if (sym_sect != NULL && (sym_sect->flags & KSPLICE_SECTION_MATCHED) == 0
2318 && (sym_sect->flags & KSPLICE_SECTION_STRING) != 0) {
2319 if (mode == RUN_PRE_INITIAL)
2320 ksdebug(pack, "Recursively comparing string section "
2321 "%s\n", sym_sect->symbol->label);
2322 else if (mode == RUN_PRE_DEBUG)
2323 ksdebug(pack, "[str start] ");
2324 ret = run_pre_cmp(pack, sym_sect, val, NULL, mode);
2325 if (mode == RUN_PRE_DEBUG)
2326 ksdebug(pack, "[str end] ");
2327 if (ret == OK && mode == RUN_PRE_INITIAL)
2328 ksdebug(pack, "Successfully matched string section %s"
2329 "\n", sym_sect->symbol->label);
2330 else if (mode == RUN_PRE_INITIAL)
2331 ksdebug(pack, "Failed to match string section %s\n",
2332 sym_sect->symbol->label);
2334 return ret;
2337 static int symbol_section_bsearch_compare(const void *a, const void *b)
2339 const struct ksplice_symbol *sym = a;
2340 const struct ksplice_section *sect = b;
2341 return strcmp(sym->label, sect->symbol->label);
2344 static int compare_section_labels(const void *va, const void *vb)
2346 const struct ksplice_section *a = va, *b = vb;
2347 return strcmp(a->symbol->label, b->symbol->label);
2350 static struct ksplice_section *symbol_section(struct ksplice_pack *pack,
2351 const struct ksplice_symbol *sym)
2353 return bsearch(sym, pack->helper_sections, pack->helper_sections_end -
2354 pack->helper_sections, sizeof(struct ksplice_section),
2355 symbol_section_bsearch_compare);
2358 static const struct ksplice_reloc *patch_reloc(struct ksplice_pack *pack,
2359 const struct ksplice_patch *p)
2361 unsigned long addr = (unsigned long)&p->oldaddr;
2362 const struct ksplice_reloc *r =
2363 find_reloc(pack->primary_relocs, pack->primary_relocs_end, addr,
2364 sizeof(addr));
2365 if (r == NULL || r->blank_addr < addr ||
2366 r->blank_addr >= addr + sizeof(addr))
2367 return NULL;
2368 return r;
2371 static abort_t lookup_symbol(struct ksplice_pack *pack,
2372 const struct ksplice_symbol *ksym,
2373 struct list_head *vals)
2375 abort_t ret;
2377 #ifdef KSPLICE_STANDALONE
2378 if (!bootstrapped)
2379 return OK;
2380 #endif /* KSPLICE_STANDALONE */
2382 if (ksym->vals == NULL) {
2383 release_vals(vals);
2384 ksdebug(pack, "using detected sym %s=%lx\n", ksym->label,
2385 ksym->value);
2386 return add_candidate_val(pack, vals, ksym->value);
2389 #ifdef CONFIG_MODULE_UNLOAD
2390 if (strcmp(ksym->label, "cleanup_module") == 0 && pack->target != NULL
2391 && pack->target->exit != NULL) {
2392 ret = add_candidate_val(pack, vals,
2393 (unsigned long)pack->target->exit);
2394 if (ret != OK)
2395 return ret;
2397 #endif
2399 if (ksym->name != NULL) {
2400 struct candidate_val *val;
2401 list_for_each_entry(val, ksym->vals, list) {
2402 ret = add_candidate_val(pack, vals, val->val);
2403 if (ret != OK)
2404 return ret;
2407 ret = new_export_lookup(pack, pack->update, ksym->name, vals);
2408 if (ret != OK)
2409 return ret;
2412 return OK;
2415 #ifdef KSPLICE_STANDALONE
2416 static abort_t
2417 add_system_map_candidates(struct ksplice_pack *pack,
2418 const struct ksplice_system_map *start,
2419 const struct ksplice_system_map *end,
2420 const char *label, struct list_head *vals)
2422 abort_t ret;
2423 long off;
2424 int i;
2425 const struct ksplice_system_map *smap;
2427 /* Some Fedora kernel releases have System.map files whose symbol
2428 * addresses disagree with the running kernel by a constant address
2429 * offset because of the CONFIG_PHYSICAL_START and CONFIG_PHYSICAL_ALIGN
2430 * values used to compile these kernels. This constant address offset
2431 * is always a multiple of 0x100000.
2433 * If we observe an offset that is NOT a multiple of 0x100000, then the
2434 * user provided us with an incorrect System.map file, and we should
2435 * abort.
2436 * If we observe an offset that is a multiple of 0x100000, then we can
2437 * adjust the System.map address values accordingly and proceed.
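*
* For instance (illustrative addresses): if the running kernel's printk is
* at 0xffffffff80232f40 but this System.map reports 0xffffffff80132f40,
* then off = 0x100000 and every candidate address below is adjusted by
* +0x100000 before being added to the list.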
2439 off = (unsigned long)printk - pack->map_printk;
2440 if (off & 0xfffff) {
2441 ksdebug(pack, "Aborted. System.map does not match kernel.\n");
2442 return BAD_SYSTEM_MAP;
2445 smap = bsearch(label, start, end - start, sizeof(*smap),
2446 system_map_bsearch_compare);
2447 if (smap == NULL)
2448 return OK;
2450 for (i = 0; i < smap->nr_candidates; i++) {
2451 ret = add_candidate_val(pack, vals, smap->candidates[i] + off);
2452 if (ret != OK)
2453 return ret;
2455 return OK;
2458 static int system_map_bsearch_compare(const void *key, const void *elt)
2460 const struct ksplice_system_map *map = elt;
2461 const char *label = key;
2462 return strcmp(label, map->label);
2464 #endif /* KSPLICE_STANDALONE */
2466 static abort_t new_export_lookup(struct ksplice_pack *p, struct update *update,
2467 const char *name, struct list_head *vals)
2469 struct ksplice_pack *pack;
2470 struct ksplice_export *exp;
2471 list_for_each_entry(pack, &update->packs, list) {
2472 for (exp = pack->exports; exp < pack->exports_end; exp++) {
2473 struct ksplice_reloc_howto howto;
2474 howto.size = sizeof(unsigned long);
2475 howto.type = KSPLICE_HOWTO_RELOC;
2476 howto.dst_mask = -1;
2477 if (strcmp(exp->new_name, name) == 0 &&
2478 exp->sym != NULL &&
2479 contains_canary(pack,
2480 (unsigned long)&exp->sym->value,
2481 &howto) == 0)
2482 return add_candidate_val(p, vals,
2483 exp->sym->value);
2486 return OK;
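/*
 * apply_patches: record a safety range for every primary section, then try
 * up to five times to insert all trampolines atomically under
 * stop_machine().  Each attempt first re-runs the stack check; if some
 * task is executing to-be-replaced code the attempt returns CODE_BUSY and
 * we sleep for about a second before retrying.
 */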
2489 static abort_t apply_patches(struct update *update)
2491 int i;
2492 abort_t ret;
2493 struct ksplice_pack *pack;
2494 const struct ksplice_section *sect;
2496 list_for_each_entry(pack, &update->packs, list) {
2497 for (sect = pack->primary_sections;
2498 sect < pack->primary_sections_end; sect++) {
2499 struct safety_record *rec = kmalloc(sizeof(*rec),
2500 GFP_KERNEL);
2501 if (rec == NULL)
2502 return OUT_OF_MEMORY;
2503 rec->addr = sect->address;
2504 rec->size = sect->size;
2505 rec->label = sect->symbol->label;
2506 rec->first_byte_safe = false;
2507 list_add(&rec->list, &pack->safety_records);
2511 ret = map_trampoline_pages(update);
2512 if (ret != OK)
2513 return ret;
2514 for (i = 0; i < 5; i++) {
2515 cleanup_conflicts(update);
2516 #ifdef KSPLICE_STANDALONE
2517 bust_spinlocks(1);
2518 #endif /* KSPLICE_STANDALONE */
2519 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
2520 ret = (__force abort_t)stop_machine(__apply_patches, update,
2521 NULL);
2522 #else /* LINUX_VERSION_CODE < */
2523 /* 9b1a4d38373a5581a4e01032a3ccdd94cd93477b was after 2.6.26 */
2524 ret = (__force abort_t)stop_machine_run(__apply_patches, update,
2525 NR_CPUS);
2526 #endif /* LINUX_VERSION_CODE */
2527 #ifdef KSPLICE_STANDALONE
2528 bust_spinlocks(0);
2529 #endif /* KSPLICE_STANDALONE */
2530 if (ret != CODE_BUSY)
2531 break;
2532 set_current_state(TASK_INTERRUPTIBLE);
2533 schedule_timeout(msecs_to_jiffies(1000));
2535 unmap_trampoline_pages(update);
2537 if (ret == CODE_BUSY) {
2538 print_conflicts(update);
2539 _ksdebug(update, "Aborted %s. stack check: to-be-replaced "
2540 "code is busy.\n", update->kid);
2541 } else if (ret == ALREADY_REVERSED) {
2542 _ksdebug(update, "Aborted %s. Ksplice update %s is already "
2543 "reversed.\n", update->kid, update->kid);
2546 if (ret != OK)
2547 return ret;
2549 _ksdebug(update, "Atomic patch insertion for %s complete\n",
2550 update->kid);
2551 return OK;
2554 static abort_t reverse_patches(struct update *update)
2556 int i;
2557 abort_t ret;
2558 struct ksplice_pack *pack;
2560 clear_debug_buf(update);
2561 ret = init_debug_buf(update);
2562 if (ret != OK)
2563 return ret;
2565 _ksdebug(update, "Preparing to reverse %s\n", update->kid);
2567 ret = map_trampoline_pages(update);
2568 if (ret != OK)
2569 return ret;
2570 for (i = 0; i < 5; i++) {
2571 cleanup_conflicts(update);
2572 clear_list(&update->conflicts, struct conflict, list);
2573 #ifdef KSPLICE_STANDALONE
2574 bust_spinlocks(1);
2575 #endif /* KSPLICE_STANDALONE */
2576 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
2577 ret = (__force abort_t)stop_machine(__reverse_patches, update,
2578 NULL);
2579 #else /* LINUX_VERSION_CODE < */
2580 /* 9b1a4d38373a5581a4e01032a3ccdd94cd93477b was after 2.6.26 */
2581 ret = (__force abort_t)stop_machine_run(__reverse_patches,
2582 update, NR_CPUS);
2583 #endif /* LINUX_VERSION_CODE */
2584 #ifdef KSPLICE_STANDALONE
2585 bust_spinlocks(0);
2586 #endif /* KSPLICE_STANDALONE */
2587 if (ret != CODE_BUSY)
2588 break;
2589 set_current_state(TASK_INTERRUPTIBLE);
2590 schedule_timeout(msecs_to_jiffies(1000));
2592 unmap_trampoline_pages(update);
2594 if (ret == CODE_BUSY) {
2595 print_conflicts(update);
2596 _ksdebug(update, "Aborted %s. stack check: to-be-reversed "
2597 "code is busy.\n", update->kid);
2598 } else if (ret == MODULE_BUSY) {
2599 _ksdebug(update, "Update %s is in use by another module\n",
2600 update->kid);
2603 if (ret != OK)
2604 return ret;
2606 list_for_each_entry(pack, &update->packs, list)
2607 clear_list(&pack->safety_records, struct safety_record, list);
2609 _ksdebug(update, "Atomic patch removal for %s complete\n", update->kid);
2610 return OK;
2613 static int __apply_patches(void *updateptr)
2615 struct update *update = updateptr;
2616 struct ksplice_pack *pack;
2617 struct ksplice_patch *p;
2618 struct ksplice_export *exp;
2619 abort_t ret;
2621 if (update->stage == STAGE_APPLIED)
2622 return (__force int)OK;
2624 if (update->stage != STAGE_PREPARING)
2625 return (__force int)UNEXPECTED;
2627 ret = check_each_task(update);
2628 if (ret != OK)
2629 return (__force int)ret;
2631 list_for_each_entry(pack, &update->packs, list) {
2632 if (try_module_get(pack->primary) != 1) {
2633 struct ksplice_pack *pack1;
2634 list_for_each_entry(pack1, &update->packs, list) {
2635 if (pack1 == pack)
2636 break;
2637 module_put(pack1->primary);
2639 module_put(THIS_MODULE);
2640 return (__force int)UNEXPECTED;
2644 update->stage = STAGE_APPLIED;
2645 #ifdef TAINT_KSPLICE
2646 add_taint(TAINT_KSPLICE);
2647 #endif
2649 list_for_each_entry(pack, &update->packs, list)
2650 list_add(&pack->module_list_entry.list, &ksplice_module_list);
2652 list_for_each_entry(pack, &update->packs, list) {
2653 for (exp = pack->exports; exp < pack->exports_end; exp++)
2654 exp->sym->name = exp->new_name;
2657 list_for_each_entry(pack, &update->packs, list) {
2658 for (p = pack->patches; p < pack->patches_end; p++)
2659 insert_trampoline(p);
2661 return (__force int)OK;
2664 static int __reverse_patches(void *updateptr)
2666 struct update *update = updateptr;
2667 struct ksplice_pack *pack;
2668 const struct ksplice_patch *p;
2669 struct ksplice_export *exp;
2670 abort_t ret;
2672 if (update->stage != STAGE_APPLIED)
2673 return (__force int)OK;
2675 #ifdef CONFIG_MODULE_UNLOAD
2676 list_for_each_entry(pack, &update->packs, list) {
2677 if (module_refcount(pack->primary) != 1)
2678 return (__force int)MODULE_BUSY;
2680 #endif /* CONFIG_MODULE_UNLOAD */
2682 ret = check_each_task(update);
2683 if (ret != OK)
2684 return (__force int)ret;
2686 list_for_each_entry(pack, &update->packs, list) {
2687 for (p = pack->patches; p < pack->patches_end; p++) {
2688 ret = verify_trampoline(pack, p);
2689 if (ret != OK)
2690 return (__force int)ret;
2694 update->stage = STAGE_REVERSED;
2696 list_for_each_entry(pack, &update->packs, list)
2697 module_put(pack->primary);
2699 list_for_each_entry(pack, &update->packs, list)
2700 list_del(&pack->module_list_entry.list);
2702 list_for_each_entry(pack, &update->packs, list) {
2703 for (exp = pack->exports; exp < pack->exports_end; exp++)
2704 exp->sym->name = exp->saved_name;
2707 list_for_each_entry(pack, &update->packs, list) {
2708 for (p = pack->patches; p < pack->patches_end; p++)
2709 remove_trampoline(p);
2711 return (__force int)OK;
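/*
 * check_each_task: the stack check run inside stop_machine.  Every task's
 * saved instruction pointer and kernel stack are scanned for addresses
 * that fall inside an active safety record; any hit aborts the atomic
 * switch with CODE_BUSY, and the offending task is re-scanned with
 * rerun=true so that a conflict report can be built for the sysfs
 * "conflicts" attribute and the debug log.
 */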
2714 static abort_t check_each_task(struct update *update)
2716 const struct task_struct *g, *p;
2717 abort_t status = OK, ret;
2718 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
2719 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
2720 read_lock(&tasklist_lock);
2721 #endif /* LINUX_VERSION_CODE */
2722 do_each_thread(g, p) {
2723 /* do_each_thread is a double loop! */
2724 ret = check_task(update, p, false);
2725 if (ret != OK) {
2726 check_task(update, p, true);
2727 status = ret;
2729 if (ret != OK && ret != CODE_BUSY)
2730 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
2731 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
2732 goto out;
2733 #else /* LINUX_VERSION_CODE < */
2734 return ret;
2735 #endif /* LINUX_VERSION_CODE */
2736 } while_each_thread(g, p);
2737 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
2738 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
2739 out:
2740 read_unlock(&tasklist_lock);
2741 #endif /* LINUX_VERSION_CODE */
2742 return status;
2745 static abort_t check_task(struct update *update,
2746 const struct task_struct *t, bool rerun)
2748 abort_t status, ret;
2749 struct conflict *conf = NULL;
2751 if (rerun) {
2752 conf = kmalloc(sizeof(*conf), GFP_ATOMIC);
2753 if (conf == NULL)
2754 return OUT_OF_MEMORY;
2755 conf->process_name = kstrdup(t->comm, GFP_ATOMIC);
2756 if (conf->process_name == NULL) {
2757 kfree(conf);
2758 return OUT_OF_MEMORY;
2760 conf->pid = t->pid;
2761 INIT_LIST_HEAD(&conf->stack);
2762 list_add(&conf->list, &update->conflicts);
2765 status = check_address(update, conf, KSPLICE_IP(t));
2766 if (t == current) {
2767 ret = check_stack(update, conf, task_thread_info(t),
2768 (unsigned long *)__builtin_frame_address(0));
2769 if (status == OK)
2770 status = ret;
2771 } else if (!task_curr(t)) {
2772 ret = check_stack(update, conf, task_thread_info(t),
2773 (unsigned long *)KSPLICE_SP(t));
2774 if (status == OK)
2775 status = ret;
2776 } else if (!is_stop_machine(t)) {
2777 status = UNEXPECTED_RUNNING_TASK;
2779 return status;
2782 static abort_t check_stack(struct update *update, struct conflict *conf,
2783 const struct thread_info *tinfo,
2784 const unsigned long *stack)
2786 abort_t status = OK, ret;
2787 unsigned long addr;
2789 while (valid_stack_ptr(tinfo, stack)) {
2790 addr = *stack++;
2791 ret = check_address(update, conf, addr);
2792 if (ret != OK)
2793 status = ret;
2795 return status;
2798 static abort_t check_address(struct update *update,
2799 struct conflict *conf, unsigned long addr)
2801 abort_t status = OK, ret;
2802 const struct safety_record *rec;
2803 struct ksplice_pack *pack;
2804 struct conflict_addr *ca = NULL;
2806 if (conf != NULL) {
2807 ca = kmalloc(sizeof(*ca), GFP_ATOMIC);
2808 if (ca == NULL)
2809 return OUT_OF_MEMORY;
2810 ca->addr = addr;
2811 ca->has_conflict = false;
2812 ca->label = NULL;
2813 list_add(&ca->list, &conf->stack);
2816 list_for_each_entry(pack, &update->packs, list) {
2817 list_for_each_entry(rec, &pack->safety_records, list) {
2818 ret = check_record(ca, rec, addr);
2819 if (ret != OK)
2820 status = ret;
2823 return status;
2826 static abort_t check_record(struct conflict_addr *ca,
2827 const struct safety_record *rec, unsigned long addr)
2829 if ((addr > rec->addr && addr < rec->addr + rec->size) ||
2830 (addr == rec->addr && !rec->first_byte_safe)) {
2831 if (ca != NULL) {
2832 ca->label = rec->label;
2833 ca->has_conflict = true;
2835 return CODE_BUSY;
2837 return OK;
2840 static bool is_stop_machine(const struct task_struct *t)
2842 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
2843 const char *num;
2844 if (!starts_with(t->comm, "kstop"))
2845 return false;
2846 num = t->comm + strlen("kstop");
2847 return num[strspn(num, "0123456789")] == '\0';
2848 #else /* LINUX_VERSION_CODE < */
2849 return strcmp(t->comm, "kstopmachine") == 0;
2850 #endif /* LINUX_VERSION_CODE */
2853 static void cleanup_conflicts(struct update *update)
2855 struct conflict *conf;
2856 list_for_each_entry(conf, &update->conflicts, list) {
2857 clear_list(&conf->stack, struct conflict_addr, list);
2858 kfree(conf->process_name);
2860 clear_list(&update->conflicts, struct conflict, list);
2863 static void print_conflicts(struct update *update)
2865 const struct conflict *conf;
2866 const struct conflict_addr *ca;
2867 list_for_each_entry(conf, &update->conflicts, list) {
2868 _ksdebug(update, "stack check: pid %d (%s):", conf->pid,
2869 conf->process_name);
2870 list_for_each_entry(ca, &conf->stack, list) {
2871 _ksdebug(update, " %lx", ca->addr);
2872 if (ca->has_conflict)
2873 _ksdebug(update, " [<-CONFLICT]");
2875 _ksdebug(update, "\n");
2879 static void insert_trampoline(struct ksplice_patch *p)
2881 mm_segment_t old_fs = get_fs();
2882 set_fs(KERNEL_DS);
2883 memcpy(p->saved, p->vaddr, p->size);
2884 memcpy(p->vaddr, p->contents, p->size);
2885 flush_icache_range(p->oldaddr, p->oldaddr + p->size);
2886 set_fs(old_fs);
2889 static abort_t verify_trampoline(struct ksplice_pack *pack,
2890 const struct ksplice_patch *p)
2892 if (memcmp(p->vaddr, p->contents, p->size) != 0) {
2893 ksdebug(pack, "Aborted. Trampoline at %lx has been "
2894 "overwritten.\n", p->oldaddr);
2895 return CODE_BUSY;
2897 return OK;
2900 static void remove_trampoline(const struct ksplice_patch *p)
2902 mm_segment_t old_fs = get_fs();
2903 set_fs(KERNEL_DS);
2904 memcpy(p->vaddr, p->saved, p->size);
2905 flush_icache_range(p->oldaddr, p->oldaddr + p->size);
2906 set_fs(old_fs);
2909 static abort_t create_labelval(struct ksplice_pack *pack,
2910 struct ksplice_symbol *ksym,
2911 unsigned long val, int status)
2913 val = follow_trampolines(pack, val);
2914 if (ksym->vals == NULL)
2915 return ksym->value == val ? OK : NO_MATCH;
2917 ksym->value = val;
2918 if (status == TEMP) {
2919 struct labelval *lv = kmalloc(sizeof(*lv), GFP_KERNEL);
2920 if (lv == NULL)
2921 return OUT_OF_MEMORY;
2922 lv->symbol = ksym;
2923 lv->saved_vals = ksym->vals;
2924 list_add(&lv->list, &pack->temp_labelvals);
2926 ksym->vals = NULL;
2927 return OK;
2930 static abort_t create_safety_record(struct ksplice_pack *pack,
2931 const struct ksplice_section *sect,
2932 struct list_head *record_list,
2933 unsigned long run_addr,
2934 unsigned long run_size)
2936 struct safety_record *rec;
2937 struct ksplice_patch *p;
2939 if (record_list == NULL)
2940 return OK;
2942 for (p = pack->patches; p < pack->patches_end; p++) {
2943 const struct ksplice_reloc *r = patch_reloc(pack, p);
2944 if (r != NULL && strcmp(sect->symbol->label, r->symbol->label) == 0)
2945 break;
2947 if (p >= pack->patches_end)
2948 return OK;
2950 rec = kmalloc(sizeof(*rec), GFP_KERNEL);
2951 if (rec == NULL)
2952 return OUT_OF_MEMORY;
2953 rec->label = kstrdup(sect->symbol->label, GFP_KERNEL);
2954 if (rec->label == NULL) {
2955 kfree(rec);
2956 return OUT_OF_MEMORY;
2958 rec->addr = run_addr;
2959 rec->size = run_size;
2960 rec->first_byte_safe = false;
2962 list_add(&rec->list, record_list);
2963 return OK;
2966 static abort_t add_candidate_val(struct ksplice_pack *pack,
2967 struct list_head *vals, unsigned long val)
2969 struct candidate_val *tmp, *new;
2970 val = follow_trampolines(pack, val);
2972 list_for_each_entry(tmp, vals, list) {
2973 if (tmp->val == val)
2974 return OK;
2976 new = kmalloc(sizeof(*new), GFP_KERNEL);
2977 if (new == NULL)
2978 return OUT_OF_MEMORY;
2979 new->val = val;
2980 list_add(&new->list, vals);
2981 return OK;
2984 static void release_vals(struct list_head *vals)
2986 clear_list(vals, struct candidate_val, list);
2989 static void set_temp_labelvals(struct ksplice_pack *pack, int status)
2991 struct labelval *lv, *n;
2992 list_for_each_entry_safe(lv, n, &pack->temp_labelvals, list) {
2993 if (status == NOVAL) {
2994 lv->symbol->vals = lv->saved_vals;
2995 } else {
2996 release_vals(lv->saved_vals);
2997 kfree(lv->saved_vals);
2999 list_del(&lv->list);
3000 kfree(lv);
3004 static int contains_canary(struct ksplice_pack *pack, unsigned long blank_addr,
3005 const struct ksplice_reloc_howto *howto)
3007 switch (howto->size) {
3008 case 1:
3009 return (*(uint8_t *)blank_addr & howto->dst_mask) ==
3010 (KSPLICE_CANARY & howto->dst_mask);
3011 case 2:
3012 return (*(uint16_t *)blank_addr & howto->dst_mask) ==
3013 (KSPLICE_CANARY & howto->dst_mask);
3014 case 4:
3015 return (*(uint32_t *)blank_addr & howto->dst_mask) ==
3016 (KSPLICE_CANARY & howto->dst_mask);
3017 #if BITS_PER_LONG >= 64
3018 case 8:
3019 return (*(uint64_t *)blank_addr & howto->dst_mask) ==
3020 (KSPLICE_CANARY & howto->dst_mask);
3021 #endif /* BITS_PER_LONG */
3022 default:
3023 ksdebug(pack, "Aborted. Invalid relocation size.\n");
3024 return -1;
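/*
 * follow_trampolines: if addr already begins with a trampoline installed
 * by another ksplice_* primary module (i.e. an earlier update already
 * patched this code), follow the jump, repeatedly, presumably so that
 * labelvals and candidate values refer to the code actually running now.
 */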
3028 static unsigned long follow_trampolines(struct ksplice_pack *pack,
3029 unsigned long addr)
3031 unsigned long new_addr;
3032 struct module *m;
3034 while (1) {
3035 if (trampoline_target(pack, addr, &new_addr) != OK)
3036 return addr;
3037 m = __module_text_address(new_addr);
3038 if (m == NULL || m == pack->target ||
3039 !starts_with(m->name, "ksplice"))
3040 return addr;
3041 ksdebug(pack, "Following trampoline %lx %lx(%s)\n", addr,
3042 new_addr, m->name);
3043 addr = new_addr;
3047 /* Does module a patch module b? */
3048 static bool patches_module(const struct module *a, const struct module *b)
3050 #ifdef KSPLICE_NO_KERNEL_SUPPORT
3051 const char *name;
3052 if (a == b)
3053 return true;
3054 if (a == NULL || !starts_with(a->name, "ksplice_"))
3055 return false;
3056 name = a->name + strlen("ksplice_");
3057 name += strcspn(name, "_");
3058 if (name[0] != '_')
3059 return false;
3060 name++;
3061 return strcmp(name, b == NULL ? "vmlinux" : b->name) == 0;
3062 #else /* !KSPLICE_NO_KERNEL_SUPPORT */
3063 struct ksplice_module_list_entry *entry;
3064 if (a == b)
3065 return true;
3066 list_for_each_entry(entry, &ksplice_module_list, list) {
3067 if (entry->target == b && entry->primary == a)
3068 return true;
3070 return false;
3071 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
3074 static bool starts_with(const char *str, const char *prefix)
3076 return strncmp(str, prefix, strlen(prefix)) == 0;
3079 static bool singular(struct list_head *list)
3081 return !list_empty(list) && list->next->next == list;
3084 static void *bsearch(const void *key, const void *base, size_t n,
3085 size_t size, int (*cmp)(const void *key, const void *elt))
3087 int start = 0, end = n - 1, mid, result;
3088 if (n == 0)
3089 return NULL;
3090 while (start <= end) {
3091 mid = (start + end) / 2;
3092 result = cmp(key, base + mid * size);
3093 if (result < 0)
3094 end = mid - 1;
3095 else if (result > 0)
3096 start = mid + 1;
3097 else
3098 return (void *)base + mid * size;
3100 return NULL;
3103 static int compare_relocs(const void *a, const void *b)
3105 const struct ksplice_reloc *ra = a, *rb = b;
3106 if (ra->blank_addr > rb->blank_addr)
3107 return 1;
3108 else if (ra->blank_addr < rb->blank_addr)
3109 return -1;
3110 else
3111 return ra->howto->size - rb->howto->size;
3114 #ifdef KSPLICE_STANDALONE
3115 static int compare_system_map(const void *a, const void *b)
3117 const struct ksplice_system_map *sa = a, *sb = b;
3118 return strcmp(sa->label, sb->label);
3120 #endif /* KSPLICE_STANDALONE */
3122 #ifdef CONFIG_DEBUG_FS
3123 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
3124 /* Old kernels don't have debugfs_create_blob */
3125 static ssize_t read_file_blob(struct file *file, char __user *user_buf,
3126 size_t count, loff_t *ppos)
3128 struct debugfs_blob_wrapper *blob = file->private_data;
3129 return simple_read_from_buffer(user_buf, count, ppos, blob->data,
3130 blob->size);
3133 static int blob_open(struct inode *inode, struct file *file)
3135 if (inode->i_private)
3136 file->private_data = inode->i_private;
3137 return 0;
3140 static struct file_operations fops_blob = {
3141 .read = read_file_blob,
3142 .open = blob_open,
3145 static struct dentry *debugfs_create_blob(const char *name, mode_t mode,
3146 struct dentry *parent,
3147 struct debugfs_blob_wrapper *blob)
3149 return debugfs_create_file(name, mode, parent, blob, &fops_blob);
3151 #endif /* LINUX_VERSION_CODE */
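/*
 * Debug output accumulates in update->debug_blob, which _ksdebug() grows
 * in power-of-two steps, and is exposed read-only through a debugfs blob
 * file named after the update (with debugfs mounted in the usual place,
 * presumably /sys/kernel/debug/<update name>).  Without CONFIG_DEBUG_FS
 * the fallback _ksdebug() further below simply printk()s each message.
 */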
3153 static abort_t init_debug_buf(struct update *update)
3155 update->debug_blob.size = 0;
3156 update->debug_blob.data = NULL;
3157 update->debugfs_dentry =
3158 debugfs_create_blob(update->name, S_IFREG | S_IRUSR, NULL,
3159 &update->debug_blob);
3160 if (update->debugfs_dentry == NULL)
3161 return OUT_OF_MEMORY;
3162 return OK;
3165 static void clear_debug_buf(struct update *update)
3167 if (update->debugfs_dentry == NULL)
3168 return;
3169 debugfs_remove(update->debugfs_dentry);
3170 update->debugfs_dentry = NULL;
3171 update->debug_blob.size = 0;
3172 vfree(update->debug_blob.data);
3173 update->debug_blob.data = NULL;
3176 static int _ksdebug(struct update *update, const char *fmt, ...)
3178 va_list args;
3179 unsigned long size, old_size, new_size;
3181 if (update->debug == 0)
3182 return 0;
3184 /* size includes the trailing '\0' */
3185 va_start(args, fmt);
3186 size = 1 + vsnprintf(update->debug_blob.data, 0, fmt, args);
3187 va_end(args);
3188 old_size = update->debug_blob.size == 0 ? 0 :
3189 max(PAGE_SIZE, roundup_pow_of_two(update->debug_blob.size));
3190 new_size = update->debug_blob.size + size == 0 ? 0 :
3191 max(PAGE_SIZE, roundup_pow_of_two(update->debug_blob.size + size));
3192 if (new_size > old_size) {
3193 char *buf = vmalloc(new_size);
3194 if (buf == NULL)
3195 return -ENOMEM;
3196 memcpy(buf, update->debug_blob.data, update->debug_blob.size);
3197 vfree(update->debug_blob.data);
3198 update->debug_blob.data = buf;
3200 va_start(args, fmt);
3201 update->debug_blob.size += vsnprintf(update->debug_blob.data +
3202 update->debug_blob.size,
3203 size, fmt, args);
3204 va_end(args);
3205 return 0;
3207 #else /* CONFIG_DEBUG_FS */
3208 static abort_t init_debug_buf(struct update *update)
3210 return OK;
3213 static void clear_debug_buf(struct update *update)
3215 return;
3218 static int _ksdebug(struct update *update, const char *fmt, ...)
3220 va_list args;
3222 if (update->debug == 0)
3223 return 0;
3225 if (!update->debug_continue_line)
3226 printk(KERN_DEBUG "ksplice: ");
3228 va_start(args, fmt);
3229 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
3230 vprintk(fmt, args);
3231 #else /* LINUX_VERSION_CODE < */
3232 /* 683b229286b429244f35726b3c18caec429233bd was after 2.6.8 */
3234 char *buf = kvasprintf(GFP_KERNEL, fmt, args);
3235 printk("%s", buf);
3236 kfree(buf);
3238 #endif /* LINUX_VERSION_CODE */
3239 va_end(args);
3241 update->debug_continue_line =
3242 fmt[0] == '\0' || fmt[strlen(fmt) - 1] != '\n';
3243 return 0;
3245 #endif /* CONFIG_DEBUG_FS */
3247 #ifdef KSPLICE_NO_KERNEL_SUPPORT
3248 #ifdef CONFIG_KALLSYMS
3249 static int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
3250 struct module *, unsigned long),
3251 void *data)
3253 char namebuf[KSYM_NAME_LEN];
3254 unsigned long i;
3255 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3256 unsigned int off;
3257 #endif /* LINUX_VERSION_CODE */
3258 int ret;
3260 /* kallsyms compression was added by 5648d78927ca65e74aadc88a2b1d6431e55e78ec
3261 * 2.6.10 was the first release after this commit
3263 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3264 for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
3265 off = kallsyms_expand_symbol(off, namebuf);
3266 ret = fn(data, namebuf, NULL, kallsyms_addresses[i]);
3267 if (ret != 0)
3268 return ret;
3270 #else /* LINUX_VERSION_CODE < */
3271 char *knames;
3273 for (i = 0, knames = kallsyms_names; i < kallsyms_num_syms; i++) {
3274 unsigned prefix = *knames++;
3276 strlcpy(namebuf + prefix, knames, KSYM_NAME_LEN - prefix);
3278 ret = fn(data, namebuf, NULL, kallsyms_addresses[i]);
3279 if (ret != OK)
3280 return ret;
3282 knames += strlen(knames) + 1;
3284 #endif /* LINUX_VERSION_CODE */
3285 return module_kallsyms_on_each_symbol(fn, data);
3288 /* kallsyms compression was added by 5648d78927ca65e74aadc88a2b1d6431e55e78ec
3289 * 2.6.10 was the first release after this commit
3291 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3292 extern u8 kallsyms_token_table[];
3293 extern u16 kallsyms_token_index[];
3295 static unsigned int kallsyms_expand_symbol(unsigned int off, char *result)
3297 long len, skipped_first = 0;
3298 const u8 *tptr, *data;
3300 data = &kallsyms_names[off];
3301 len = *data;
3302 data++;
3304 off += len + 1;
3306 while (len) {
3307 tptr = &kallsyms_token_table[kallsyms_token_index[*data]];
3308 data++;
3309 len--;
3311 while (*tptr) {
3312 if (skipped_first) {
3313 *result = *tptr;
3314 result++;
3315 } else
3316 skipped_first = 1;
3317 tptr++;
3321 *result = '\0';
3323 return off;
3325 #endif /* LINUX_VERSION_CODE */
3327 static int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
3328 struct module *,
3329 unsigned long),
3330 void *data)
3332 struct module *mod;
3333 unsigned int i;
3334 int ret;
3336 list_for_each_entry(mod, &modules, list) {
3337 for (i = 0; i < mod->num_symtab; i++) {
3338 ret = fn(data, mod->strtab + mod->symtab[i].st_name,
3339 mod, mod->symtab[i].st_value);
3340 if (ret != 0)
3341 return ret;
3344 return 0;
3346 #endif /* CONFIG_KALLSYMS */
3348 static struct module *find_module(const char *name)
3350 struct module *mod;
3352 list_for_each_entry(mod, &modules, list) {
3353 if (strcmp(mod->name, name) == 0)
3354 return mod;
3356 return NULL;
3359 #ifdef CONFIG_MODULE_UNLOAD
3360 struct module_use {
3361 struct list_head list;
3362 struct module *module_which_uses;
3365 /* I'm not yet certain whether we need the strong form of this. */
3366 static inline int strong_try_module_get(struct module *mod)
3368 if (mod && mod->state != MODULE_STATE_LIVE)
3369 return -EBUSY;
3370 if (try_module_get(mod))
3371 return 0;
3372 return -ENOENT;
3375 /* Does a already use b? */
3376 static int already_uses(struct module *a, struct module *b)
3378 struct module_use *use;
3379 list_for_each_entry(use, &b->modules_which_use_me, list) {
3380 if (use->module_which_uses == a)
3381 return 1;
3383 return 0;
3386 /* Make it so module a uses b. Must be holding module_mutex */
3387 static int use_module(struct module *a, struct module *b)
3389 struct module_use *use;
3390 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
3391 /* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
3392 int no_warn;
3393 #endif /* LINUX_VERSION_CODE */
3394 if (b == NULL || already_uses(a, b))
3395 return 1;
3397 if (strong_try_module_get(b) < 0)
3398 return 0;
3400 use = kmalloc(sizeof(*use), GFP_ATOMIC);
3401 if (!use) {
3402 module_put(b);
3403 return 0;
3405 use->module_which_uses = a;
3406 list_add(&use->list, &b->modules_which_use_me);
3407 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
3408 /* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
3409 no_warn = sysfs_create_link(b->holders_dir, &a->mkobj.kobj, a->name);
3410 #endif /* LINUX_VERSION_CODE */
3411 return 1;
3413 #else /* CONFIG_MODULE_UNLOAD */
3414 static int use_module(struct module *a, struct module *b)
3416 return 1;
3418 #endif /* CONFIG_MODULE_UNLOAD */
3420 #ifndef CONFIG_MODVERSIONS
3421 #define symversion(base, idx) NULL
3422 #else
3423 #define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
3424 #endif
3426 static bool each_symbol_in_section(const struct symsearch *arr,
3427 unsigned int arrsize,
3428 struct module *owner,
3429 bool (*fn)(const struct symsearch *syms,
3430 struct module *owner,
3431 unsigned int symnum, void *data),
3432 void *data)
3434 unsigned int i, j;
3436 for (j = 0; j < arrsize; j++) {
3437 for (i = 0; i < arr[j].stop - arr[j].start; i++)
3438 if (fn(&arr[j], owner, i, data))
3439 return true;
3442 return false;
3445 /* Returns true as soon as fn returns true, otherwise false. */
3446 static bool each_symbol(bool (*fn)(const struct symsearch *arr,
3447 struct module *owner,
3448 unsigned int symnum, void *data),
3449 void *data)
3451 struct module *mod;
3452 const struct symsearch arr[] = {
3453 { __start___ksymtab, __stop___ksymtab, __start___kcrctab,
3454 NOT_GPL_ONLY, false },
3455 { __start___ksymtab_gpl, __stop___ksymtab_gpl,
3456 __start___kcrctab_gpl,
3457 GPL_ONLY, false },
3458 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
3459 { __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
3460 __start___kcrctab_gpl_future,
3461 WILL_BE_GPL_ONLY, false },
3462 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
3463 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
3464 { __start___ksymtab_unused, __stop___ksymtab_unused,
3465 __start___kcrctab_unused,
3466 NOT_GPL_ONLY, true },
3467 { __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
3468 __start___kcrctab_unused_gpl,
3469 GPL_ONLY, true },
3470 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
3473 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
3474 return true;
3476 list_for_each_entry(mod, &modules, list) {
3477 struct symsearch module_arr[] = {
3478 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
3479 NOT_GPL_ONLY, false },
3480 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
3481 mod->gpl_crcs,
3482 GPL_ONLY, false },
3483 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
3484 { mod->gpl_future_syms,
3485 mod->gpl_future_syms + mod->num_gpl_future_syms,
3486 mod->gpl_future_crcs,
3487 WILL_BE_GPL_ONLY, false },
3488 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
3489 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
3490 { mod->unused_syms,
3491 mod->unused_syms + mod->num_unused_syms,
3492 mod->unused_crcs,
3493 NOT_GPL_ONLY, true },
3494 { mod->unused_gpl_syms,
3495 mod->unused_gpl_syms + mod->num_unused_gpl_syms,
3496 mod->unused_gpl_crcs,
3497 GPL_ONLY, true },
3498 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
3501 if (each_symbol_in_section(module_arr, ARRAY_SIZE(module_arr),
3502 mod, fn, data))
3503 return true;
3505 return false;
3508 struct find_symbol_arg {
3509 /* Input */
3510 const char *name;
3511 bool gplok;
3512 bool warn;
3514 /* Output */
3515 struct module *owner;
3516 const unsigned long *crc;
3517 const struct kernel_symbol *sym;
3520 static bool find_symbol_in_section(const struct symsearch *syms,
3521 struct module *owner,
3522 unsigned int symnum, void *data)
3524 struct find_symbol_arg *fsa = data;
3526 if (strcmp(syms->start[symnum].name, fsa->name) != 0)
3527 return false;
3529 if (!fsa->gplok) {
3530 if (syms->licence == GPL_ONLY)
3531 return false;
3532 if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
3533 printk(KERN_WARNING "Symbol %s is being used "
3534 "by a non-GPL module, which will not "
3535 "be allowed in the future\n", fsa->name);
3536 printk(KERN_WARNING "Please see the file "
3537 "Documentation/feature-removal-schedule.txt "
3538 "in the kernel source tree for more details.\n");
3542 #ifdef CONFIG_UNUSED_SYMBOLS
3543 if (syms->unused && fsa->warn) {
3544 printk(KERN_WARNING "Symbol %s is marked as UNUSED, "
3545 "however this module is using it.\n", fsa->name);
3546 printk(KERN_WARNING
3547 "This symbol will go away in the future.\n");
3548 printk(KERN_WARNING
3549 "Please evalute if this is the right api to use and if "
3550 "it really is, submit a report the linux kernel "
3551 "mailinglist together with submitting your code for "
3552 "inclusion.\n");
3554 #endif
3556 fsa->owner = owner;
3557 fsa->crc = symversion(syms->crcs, symnum);
3558 fsa->sym = &syms->start[symnum];
3559 return true;
3562 /* Find a symbol and return it, along with (optional) crc and
3563 * (optional) module which owns it */
3564 static const struct kernel_symbol *find_symbol(const char *name,
3565 struct module **owner,
3566 const unsigned long **crc,
3567 bool gplok, bool warn)
3569 struct find_symbol_arg fsa;
3571 fsa.name = name;
3572 fsa.gplok = gplok;
3573 fsa.warn = warn;
3575 if (each_symbol(find_symbol_in_section, &fsa)) {
3576 if (owner)
3577 *owner = fsa.owner;
3578 if (crc)
3579 *crc = fsa.crc;
3580 return fsa.sym;
3583 return NULL;
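/*
 * __module_data_address: fallback counterpart of __module_text_address for
 * the non-text part of a module's core area (addresses past core_text_size
 * but within core_size); used by try_addr() when matching rodata and data
 * sections.
 */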
3586 static struct module *__module_data_address(unsigned long addr)
3588 struct module *mod;
3590 list_for_each_entry(mod, &modules, list) {
3591 if (addr >= (unsigned long)mod->module_core +
3592 mod->core_text_size &&
3593 addr < (unsigned long)mod->module_core + mod->core_size)
3594 return mod;
3596 return NULL;
3598 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
3600 struct ksplice_attribute {
3601 struct attribute attr;
3602 ssize_t (*show)(struct update *update, char *buf);
3603 ssize_t (*store)(struct update *update, const char *buf, size_t len);
3606 static ssize_t ksplice_attr_show(struct kobject *kobj, struct attribute *attr,
3607 char *buf)
3609 struct ksplice_attribute *attribute =
3610 container_of(attr, struct ksplice_attribute, attr);
3611 struct update *update = container_of(kobj, struct update, kobj);
3612 if (attribute->show == NULL)
3613 return -EIO;
3614 return attribute->show(update, buf);
3617 static ssize_t ksplice_attr_store(struct kobject *kobj, struct attribute *attr,
3618 const char *buf, size_t len)
3620 struct ksplice_attribute *attribute =
3621 container_of(attr, struct ksplice_attribute, attr);
3622 struct update *update = container_of(kobj, struct update, kobj);
3623 if (attribute->store == NULL)
3624 return -EIO;
3625 return attribute->store(update, buf, len);
3628 static struct sysfs_ops ksplice_sysfs_ops = {
3629 .show = ksplice_attr_show,
3630 .store = ksplice_attr_store,
3633 static void ksplice_release(struct kobject *kobj)
3635 struct update *update;
3636 update = container_of(kobj, struct update, kobj);
3637 cleanup_ksplice_update(update);
3640 static ssize_t stage_show(struct update *update, char *buf)
3642 switch (update->stage) {
3643 case STAGE_PREPARING:
3644 return snprintf(buf, PAGE_SIZE, "preparing\n");
3645 case STAGE_APPLIED:
3646 return snprintf(buf, PAGE_SIZE, "applied\n");
3647 case STAGE_REVERSED:
3648 return snprintf(buf, PAGE_SIZE, "reversed\n");
3650 return 0;
3653 static ssize_t abort_cause_show(struct update *update, char *buf)
3655 switch (update->abort_cause) {
3656 case OK:
3657 return snprintf(buf, PAGE_SIZE, "ok\n");
3658 case NO_MATCH:
3659 return snprintf(buf, PAGE_SIZE, "no_match\n");
3660 #ifdef KSPLICE_STANDALONE
3661 case BAD_SYSTEM_MAP:
3662 return snprintf(buf, PAGE_SIZE, "bad_system_map\n");
3663 #endif /* KSPLICE_STANDALONE */
3664 case CODE_BUSY:
3665 return snprintf(buf, PAGE_SIZE, "code_busy\n");
3666 case MODULE_BUSY:
3667 return snprintf(buf, PAGE_SIZE, "module_busy\n");
3668 case OUT_OF_MEMORY:
3669 return snprintf(buf, PAGE_SIZE, "out_of_memory\n");
3670 case FAILED_TO_FIND:
3671 return snprintf(buf, PAGE_SIZE, "failed_to_find\n");
3672 case ALREADY_REVERSED:
3673 return snprintf(buf, PAGE_SIZE, "already_reversed\n");
3674 case MISSING_EXPORT:
3675 return snprintf(buf, PAGE_SIZE, "missing_export\n");
3676 case UNEXPECTED_RUNNING_TASK:
3677 return snprintf(buf, PAGE_SIZE, "unexpected_running_task\n");
3678 case TARGET_NOT_LOADED:
3679 return snprintf(buf, PAGE_SIZE, "target_not_loaded\n");
3680 case UNEXPECTED:
3681 return snprintf(buf, PAGE_SIZE, "unexpected\n");
3683 return 0;
3686 static ssize_t conflict_show(struct update *update, char *buf)
3688 const struct conflict *conf;
3689 const struct conflict_addr *ca;
3690 int used = 0;
3691 list_for_each_entry(conf, &update->conflicts, list) {
3692 used += snprintf(buf + used, PAGE_SIZE - used, "%s %d",
3693 conf->process_name, conf->pid);
3694 list_for_each_entry(ca, &conf->stack, list) {
3695 if (!ca->has_conflict)
3696 continue;
3697 used += snprintf(buf + used, PAGE_SIZE - used, " %s",
3698 ca->label);
3700 used += snprintf(buf + used, PAGE_SIZE - used, "\n");
3702 return used;
3705 static int maybe_cleanup_ksplice_update_wrapper(void *updateptr)
3707 struct update *update = updateptr;
3708 maybe_cleanup_ksplice_update(update);
3709 return 0;
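/*
 * stage_store: sysfs entry point driving an update through its lifecycle.
 * Writing "applied" to a preparing update applies it, "reversed" reverses
 * an applied update, and "cleanup" hands a reversed update to a kthread
 * for removal.  In non-standalone builds the "ksplice" kobject is created
 * under kernel_kobj in init_ksplice(), so this is presumably exercised
 * from userspace roughly as
 *     echo applied > /sys/kernel/ksplice/<kid>/stage
 * (illustrative path; the per-update kobject is registered elsewhere).
 */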
3712 static ssize_t stage_store(struct update *update, const char *buf, size_t len)
3714 enum stage old_stage = update->stage;
3715 if ((strncmp(buf, "applied", len) == 0 ||
3716 strncmp(buf, "applied\n", len) == 0) &&
3717 update->stage == STAGE_PREPARING)
3718 update->abort_cause = apply_update(update);
3719 else if ((strncmp(buf, "reversed", len) == 0 ||
3720 strncmp(buf, "reversed\n", len) == 0) &&
3721 update->stage == STAGE_APPLIED)
3722 update->abort_cause = reverse_patches(update);
3723 else if ((strncmp(buf, "cleanup", len) == 0 ||
3724 strncmp(buf, "cleanup\n", len) == 0) &&
3725 update->stage == STAGE_REVERSED)
3726 kthread_run(maybe_cleanup_ksplice_update_wrapper, update,
3727 "ksplice_cleanup_%s", update->kid);
3729 if (old_stage != STAGE_REVERSED && update->abort_cause == OK)
3730 printk(KERN_INFO "ksplice: Update %s %s successfully\n",
3731 update->kid,
3732 update->stage == STAGE_APPLIED ? "applied" : "reversed");
3733 return len;
3736 static ssize_t debug_show(struct update *update, char *buf)
3738 return snprintf(buf, PAGE_SIZE, "%d\n", update->debug);
3741 static ssize_t debug_store(struct update *update, const char *buf, size_t len)
3743 unsigned long l;
3744 int ret = strict_strtoul(buf, 10, &l);
3745 if (ret != 0)
3746 return ret;
3747 update->debug = l;
3748 return len;
3751 static ssize_t partial_show(struct update *update, char *buf)
3753 return snprintf(buf, PAGE_SIZE, "%d\n", update->partial);
3756 static ssize_t partial_store(struct update *update, const char *buf, size_t len)
3758 unsigned long l;
3759 int ret = strict_strtoul(buf, 10, &l);
3760 if (ret != 0)
3761 return ret;
3762 update->partial = l;
3763 return len;
3766 static struct ksplice_attribute stage_attribute =
3767 __ATTR(stage, 0600, stage_show, stage_store);
3768 static struct ksplice_attribute abort_cause_attribute =
3769 __ATTR(abort_cause, 0400, abort_cause_show, NULL);
3770 static struct ksplice_attribute debug_attribute =
3771 __ATTR(debug, 0600, debug_show, debug_store);
3772 static struct ksplice_attribute partial_attribute =
3773 __ATTR(partial, 0600, partial_show, partial_store);
3774 static struct ksplice_attribute conflict_attribute =
3775 __ATTR(conflicts, 0400, conflict_show, NULL);
3777 static struct attribute *ksplice_attrs[] = {
3778 &stage_attribute.attr,
3779 &abort_cause_attribute.attr,
3780 &debug_attribute.attr,
3781 &partial_attribute.attr,
3782 &conflict_attribute.attr,
3783 NULL
3786 static struct kobj_type ksplice_ktype = {
3787 .sysfs_ops = &ksplice_sysfs_ops,
3788 .release = ksplice_release,
3789 .default_attrs = ksplice_attrs,
3792 #ifdef KSPLICE_STANDALONE
3793 static int debug;
3794 module_param(debug, int, 0600);
3795 MODULE_PARM_DESC(debug, "Debug level");
3797 extern struct ksplice_system_map ksplice_system_map[], ksplice_system_map_end[];
3799 static struct ksplice_pack bootstrap_pack = {
3800 .name = "ksplice_" __stringify(KSPLICE_KID),
3801 .kid = "init_" __stringify(KSPLICE_KID),
3802 .target_name = NULL,
3803 .target = NULL,
3804 .map_printk = MAP_PRINTK,
3805 .primary = THIS_MODULE,
3806 .primary_system_map = ksplice_system_map,
3807 .primary_system_map_end = ksplice_system_map_end,
3809 #endif /* KSPLICE_STANDALONE */
3811 static int init_ksplice(void)
3813 #ifdef KSPLICE_STANDALONE
3814 struct ksplice_pack *pack = &bootstrap_pack;
3815 pack->update = init_ksplice_update(pack->kid);
3816 #ifdef KSPLICE_STANDALONE
3817 sort(pack->primary_system_map,
3818 (pack->primary_system_map_end - pack->primary_system_map),
3819 sizeof(struct ksplice_system_map), compare_system_map, NULL);
3820 #endif /* KSPLICE_STANDALONE */
3821 if (pack->update == NULL)
3822 return -ENOMEM;
3823 add_to_update(pack, pack->update);
3824 pack->update->debug = debug;
3825 pack->update->abort_cause =
3826 apply_relocs(pack, ksplice_init_relocs, ksplice_init_relocs_end);
3827 if (pack->update->abort_cause == OK)
3828 bootstrapped = true;
3829 cleanup_ksplice_update(bootstrap_pack.update);
3830 #else /* !KSPLICE_STANDALONE */
3831 ksplice_kobj = kobject_create_and_add("ksplice", kernel_kobj);
3832 if (ksplice_kobj == NULL)
3833 return -ENOMEM;
3834 #endif /* KSPLICE_STANDALONE */
3835 return 0;
3838 static void cleanup_ksplice(void)
3840 #ifndef KSPLICE_STANDALONE
3841 kobject_put(ksplice_kobj);
3842 #endif /* KSPLICE_STANDALONE */
3845 module_init(init_ksplice);
3846 module_exit(cleanup_ksplice);
3848 MODULE_AUTHOR("Jeffrey Brian Arnold <jbarnold@mit.edu>");
3849 MODULE_DESCRIPTION("Ksplice rebootless update system");
3850 #ifdef KSPLICE_VERSION
3851 MODULE_VERSION(KSPLICE_VERSION);
3852 #endif
3853 MODULE_LICENSE("GPL v2");