Merge apply_patches() and reverse_patches() into patch_action().
kmodsrc/ksplice.c

/* Copyright (C) 2007-2009  Ksplice, Inc.
 * Authors: Jeff Arnold, Anders Kaseorg, Tim Abbott
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/module.h>
#include <linux/version.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
#include <linux/bug.h>
#else /* LINUX_VERSION_CODE */
/* 7664c5a1da4711bb6383117f51b94c8dc8f3f1cd was after 2.6.19 */
#endif /* LINUX_VERSION_CODE */
#include <linux/ctype.h>
#if defined CONFIG_DEBUG_FS || LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
#include <linux/debugfs.h>
#else /* CONFIG_DEBUG_FS */
/* a7a76cefc4b12bb6508afa4c77f11c2752cc365d was after 2.6.11 */
#endif /* CONFIG_DEBUG_FS */
#include <linux/errno.h>
#include <linux/kallsyms.h>
#include <linux/kobject.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
#include <linux/sort.h>
#else /* LINUX_VERSION_CODE < */
/* 8c63b6d337534a6b5fb111dc27d0850f535118c0 was after 2.6.11 */
#endif /* LINUX_VERSION_CODE */
#include <linux/stop_machine.h>
#include <linux/sysfs.h>
#include <linux/time.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
#include <linux/uaccess.h>
#else /* LINUX_VERSION_CODE < */
/* linux/uaccess.h doesn't exist in kernels before 2.6.18 */
#include <asm/uaccess.h>
#endif /* LINUX_VERSION_CODE */
#include <linux/vmalloc.h>
#ifdef KSPLICE_STANDALONE
#include "ksplice.h"
#else /* !KSPLICE_STANDALONE */
#include <linux/ksplice.h>
#endif /* KSPLICE_STANDALONE */
#ifdef KSPLICE_NEED_PARAINSTRUCTIONS
#include <asm/alternative.h>
#endif /* KSPLICE_NEED_PARAINSTRUCTIONS */

#if defined(KSPLICE_STANDALONE) && \
    !defined(CONFIG_KSPLICE) && !defined(CONFIG_KSPLICE_MODULE)
#define KSPLICE_NO_KERNEL_SUPPORT 1
#endif /* KSPLICE_STANDALONE && !CONFIG_KSPLICE && !CONFIG_KSPLICE_MODULE */

enum stage {
	STAGE_PREPARING,	/* the update is not yet applied */
	STAGE_APPLIED,		/* the update is applied */
	STAGE_REVERSED,		/* the update has been applied and reversed */
};

/* parameter to modify run-pre matching */
enum run_pre_mode {
	RUN_PRE_INITIAL,	/* dry run (only change temp_labelvals) */
	RUN_PRE_DEBUG,		/* dry run with byte-by-byte debugging */
	RUN_PRE_FINAL,		/* finalizes the matching */
#ifdef KSPLICE_STANDALONE
	RUN_PRE_SILENT,
#endif /* KSPLICE_STANDALONE */
};

enum { NOVAL, TEMP, VAL };

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
/* 5d7b32de9935c65ca8285ac6ec2382afdbb5d479 was after 2.6.8 */
#define __bitwise__
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
/* af4ca457eaf2d6682059c18463eb106e2ce58198 was after 2.6.14 */
#define __bitwise__ __bitwise
#endif

typedef int __bitwise__ abort_t;

#define OK ((__force abort_t) 0)
#define NO_MATCH ((__force abort_t) 1)
#define CODE_BUSY ((__force abort_t) 2)
#define MODULE_BUSY ((__force abort_t) 3)
#define OUT_OF_MEMORY ((__force abort_t) 4)
#define FAILED_TO_FIND ((__force abort_t) 5)
#define ALREADY_REVERSED ((__force abort_t) 6)
#define MISSING_EXPORT ((__force abort_t) 7)
#define UNEXPECTED_RUNNING_TASK ((__force abort_t) 8)
#define UNEXPECTED ((__force abort_t) 9)
#define TARGET_NOT_LOADED ((__force abort_t) 10)
#define CALL_FAILED ((__force abort_t) 11)
#define COLD_UPDATE_LOADED ((__force abort_t) 12)
#ifdef KSPLICE_STANDALONE
#define BAD_SYSTEM_MAP ((__force abort_t) 13)
#endif /* KSPLICE_STANDALONE */
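
/*
 * Illustrative sketch (not from the original file): abort_t is declared
 * __bitwise so that sparse flags any mixing of these codes with plain
 * integers.  A caller propagates them explicitly, e.g.:
 *
 *	abort_t ret = apply_update(update);
 *	if (ret != OK)
 *		update->abort_cause = ret;
 */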

struct update {
	const char *kid;
	const char *name;
	struct kobject kobj;
	enum stage stage;
	abort_t abort_cause;
	int debug;
#ifdef CONFIG_DEBUG_FS
	struct debugfs_blob_wrapper debug_blob;
	struct dentry *debugfs_dentry;
#else /* !CONFIG_DEBUG_FS */
	bool debug_continue_line;
#endif /* CONFIG_DEBUG_FS */
	bool partial;		/* is it OK if some target mods aren't loaded */
	struct list_head changes,	/* changes for loaded target mods */
	    unused_changes;		/* changes for non-loaded target mods */
	struct list_head conflicts;
	struct list_head list;
	struct list_head ksplice_module_list;
};

/* a process conflicting with an update */
struct conflict {
	const char *process_name;
	pid_t pid;
	struct list_head stack;
	struct list_head list;
};

/* an address on the stack of a conflict */
struct conflict_addr {
	unsigned long addr;	/* the address on the stack */
	bool has_conflict;	/* does this address in particular conflict? */
	const char *label;	/* the label of the conflicting safety_record */
	struct list_head list;
};

#if defined(CONFIG_DEBUG_FS) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
/* Old kernels don't have debugfs_create_blob */
struct debugfs_blob_wrapper {
	void *data;
	unsigned long size;
};
#endif /* CONFIG_DEBUG_FS && LINUX_VERSION_CODE */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
/* 930631edd4b1fe2781d9fe90edbe35d89dfc94cc was after 2.6.18 */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#endif
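
/*
 * Worked example (illustrative only): DIV_ROUND_UP(5, 4) == 2.
 * map_writable() below relies on this to count how many pages cover the
 * range [addr, addr + len), including partial first and last pages.
 */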

struct labelval {
	struct list_head list;
	struct ksplice_symbol *symbol;
	struct list_head *saved_vals;
};

/* region to be checked for conflicts in the stack check */
struct safety_record {
	struct list_head list;
	const char *label;
	unsigned long addr;	/* the address to be checked for conflicts
				 * (e.g. an obsolete function's starting addr)
				 */
	unsigned long size;	/* the size of the region to be checked */
};

/* possible value for a symbol */
struct candidate_val {
	struct list_head list;
	unsigned long val;
};

/* private struct used by init_symbol_array */
struct ksplice_lookup {
/* input */
	struct ksplice_mod_change *change;
	struct ksplice_symbol **arr;
	size_t size;
/* output */
	abort_t ret;
};

#ifdef KSPLICE_NO_KERNEL_SUPPORT
struct symsearch {
	const struct kernel_symbol *start, *stop;
	const unsigned long *crcs;
	enum {
		NOT_GPL_ONLY,
		GPL_ONLY,
		WILL_BE_GPL_ONLY,
	} licence;
	bool unused;
};
#endif /* KSPLICE_NO_KERNEL_SUPPORT */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
/* c33fa9f5609e918824446ef9a75319d4a802f1f4 was after 2.6.25 */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
/* 2fff0a48416af891dce38fd425246e337831e0bb was after 2.6.19 */
static bool virtual_address_mapped(unsigned long addr)
{
	char retval;
	return probe_kernel_address(addr, retval) != -EFAULT;
}
#else /* LINUX_VERSION_CODE < */
static bool virtual_address_mapped(unsigned long addr);
#endif /* LINUX_VERSION_CODE */

static long probe_kernel_read(void *dst, void *src, size_t size)
{
	if (size == 0)
		return 0;
	if (!virtual_address_mapped((unsigned long)src) ||
	    !virtual_address_mapped((unsigned long)src + size - 1))
		return -EFAULT;

	memcpy(dst, src, size);
	return 0;
}
#endif /* LINUX_VERSION_CODE */
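
/*
 * Illustrative sketch (hypothetical variable names): probe_kernel_read
 * lets the matching code probe memory that may be unmapped without
 * faulting, as read_reloc_value() below does:
 *
 *	unsigned char b;
 *	if (probe_kernel_read(&b, (void *)run_addr, 1) == -EFAULT)
 *		return NO_MATCH;
 */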

static LIST_HEAD(updates);
#ifdef KSPLICE_STANDALONE
#if defined(CONFIG_KSPLICE) || defined(CONFIG_KSPLICE_MODULE)
extern struct list_head ksplice_modules;
#else /* !CONFIG_KSPLICE */
LIST_HEAD(ksplice_modules);
#endif /* CONFIG_KSPLICE */
#else /* !KSPLICE_STANDALONE */
LIST_HEAD(ksplice_modules);
EXPORT_SYMBOL_GPL(ksplice_modules);
static struct kobject *ksplice_kobj;
#endif /* KSPLICE_STANDALONE */

static struct kobj_type update_ktype;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
/* Old kernels do not have kcalloc
 * e629946abd0bb8266e9c3d0fd1bff2ef8dec5443 was after 2.6.8
 */
static void *kcalloc(size_t n, size_t size, typeof(GFP_KERNEL) flags)
{
	char *mem;
	if (n != 0 && size > ULONG_MAX / n)
		return NULL;
	mem = kmalloc(n * size, flags);
	if (mem)
		memset(mem, 0, n * size);
	return mem;
}
#endif /* LINUX_VERSION_CODE */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
/* 8c63b6d337534a6b5fb111dc27d0850f535118c0 was after 2.6.11 */
static void u32_swap(void *a, void *b, int size)
{
	u32 t = *(u32 *)a;
	*(u32 *)a = *(u32 *)b;
	*(u32 *)b = t;
}

static void generic_swap(void *a, void *b, int size)
{
	char t;

	do {
		t = *(char *)a;
		*(char *)a++ = *(char *)b;
		*(char *)b++ = t;
	} while (--size > 0);
}

/**
 * sort - sort an array of elements
 * @base: pointer to data to sort
 * @num: number of elements
 * @size: size of each element
 * @cmp: pointer to comparison function
 * @swap: pointer to swap function or NULL
 *
 * This function does a heapsort on the given array.  You may provide a
 * swap function optimized to your element type.
 *
 * Sorting time is O(n log n) both on average and worst-case.  While
 * qsort is about 20% faster on average, it suffers from exploitable
 * O(n*n) worst-case behavior and extra memory requirements that make
 * it less suitable for kernel use.
 */
void sort(void *base, size_t num, size_t size,
	  int (*cmp)(const void *, const void *),
	  void (*swap)(void *, void *, int size))
{
	/* pre-scale counters for performance */
	int i = (num / 2 - 1) * size, n = num * size, c, r;

	if (!swap)
		swap = (size == 4 ? u32_swap : generic_swap);

	/* heapify */
	for (; i >= 0; i -= size) {
		for (r = i; r * 2 + size < n; r = c) {
			c = r * 2 + size;
			if (c < n - size && cmp(base + c, base + c + size) < 0)
				c += size;
			if (cmp(base + r, base + c) >= 0)
				break;
			swap(base + r, base + c, size);
		}
	}

	/* sort */
	for (i = n - size; i > 0; i -= size) {
		swap(base, base + i, size);
		for (r = 0; r * 2 + size < i; r = c) {
			c = r * 2 + size;
			if (c < i - size && cmp(base + c, base + c + size) < 0)
				c += size;
			if (cmp(base + r, base + c) >= 0)
				break;
			swap(base + r, base + c, size);
		}
	}
}
#endif /* LINUX_VERSION_CODE < */
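
/*
 * Usage sketch (mirrors init_ksplice_mod_change() below): the compat
 * sort() takes the same arguments as the linux/sort.h version, e.g.:
 *
 *	sort(change->old_code.relocs,
 *	     change->old_code.relocs_end - change->old_code.relocs,
 *	     sizeof(*change->old_code.relocs), compare_relocs, NULL);
 */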

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13)
/* Old kernels do not have kstrdup
 * 543537bd922692bc978e2e356fcd8bfc9c2ee7d5 was 2.6.13-rc4
 */
static char *kstrdup(const char *s, typeof(GFP_KERNEL) gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
#endif /* LINUX_VERSION_CODE */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
/* Old kernels use semaphore instead of mutex
 * 97d1f15b7ef52c1e9c28dc48b454024bb53a5fd2 was after 2.6.16
 */
#define mutex semaphore
#define mutex_lock down
#define mutex_unlock up
#endif /* LINUX_VERSION_CODE */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
/* 11443ec7d9286dd25663516436a14edfb5f43857 was after 2.6.21 */
static char * __attribute_used__
kvasprintf(typeof(GFP_KERNEL) gfp, const char *fmt, va_list ap)
{
	unsigned int len;
	char *p, dummy[1];
	va_list aq;

	va_copy(aq, ap);
	len = vsnprintf(dummy, 0, fmt, aq);
	va_end(aq);

	p = kmalloc(len + 1, gfp);
	if (!p)
		return NULL;

	vsnprintf(p, len + 1, fmt, ap);

	return p;
}
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
/* e905914f96e11862b130dd229f73045dad9a34e8 was after 2.6.17 */
static char * __attribute__((format (printf, 2, 3)))
kasprintf(typeof(GFP_KERNEL) gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = kvasprintf(gfp, fmt, ap);
	va_end(ap);

	return p;
}
#endif
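
/*
 * Usage sketch (as in init_ksplice_update() below): these printf-style
 * allocators back the naming of updates, e.g.:
 *
 *	update->name = kasprintf(GFP_KERNEL, "ksplice_%s", kid);
 */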

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
/* 06b2a76d25d3cfbd14680021c1d356c91be6904e was after 2.6.24 */
static int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
{
	char *tail;
	unsigned long val;
	size_t len;

	*res = 0;
	len = strlen(cp);
	if (len == 0)
		return -EINVAL;

	val = simple_strtoul(cp, &tail, base);
	if ((*tail == '\0') ||
	    ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
		*res = val;
		return 0;
	}

	return -EINVAL;
}
#endif
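
/*
 * Worked example (illustrative only): strict_strtoul("42\n", 10, &val)
 * returns 0 and sets val to 42; a trailing newline is the one piece of
 * trailing junk tolerated, so sysfs writes like "echo 42 > debug" parse
 * cleanly.  strict_strtoul("42x", 10, &val) returns -EINVAL and leaves
 * val == 0.
 */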

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
/* 9b1a4d38373a5581a4e01032a3ccdd94cd93477b was after 2.6.26 */
/* Assume cpus == NULL. */
#define stop_machine(fn, data, cpus) stop_machine_run(fn, data, NR_CPUS);
#endif /* LINUX_VERSION_CODE */

#ifndef task_thread_info
#define task_thread_info(task) (task)->thread_info
#endif /* !task_thread_info */

#ifdef KSPLICE_STANDALONE

static bool bootstrapped = false;

#ifdef CONFIG_KALLSYMS
extern unsigned long kallsyms_addresses[], kallsyms_num_syms;
extern u8 kallsyms_names[];
#endif /* CONFIG_KALLSYMS */

/* defined by ksplice-create */
extern const struct ksplice_reloc ksplice_init_relocs[],
    ksplice_init_relocs_end[];

/* Obtained via System.map */
extern struct list_head modules;
extern struct mutex module_mutex;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) && defined(CONFIG_UNUSED_SYMBOLS)
/* f71d20e961474dde77e6558396efb93d6ac80a4b was after 2.6.17 */
#define KSPLICE_KSYMTAB_UNUSED_SUPPORT 1
#endif /* LINUX_VERSION_CODE */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
/* 9f28bb7e1d0188a993403ab39b774785892805e1 was after 2.6.16 */
#define KSPLICE_KSYMTAB_FUTURE_SUPPORT 1
#endif /* LINUX_VERSION_CODE */
extern const struct kernel_symbol __start___ksymtab[];
extern const struct kernel_symbol __stop___ksymtab[];
extern const unsigned long __start___kcrctab[];
extern const struct kernel_symbol __start___ksymtab_gpl[];
extern const struct kernel_symbol __stop___ksymtab_gpl[];
extern const unsigned long __start___kcrctab_gpl[];
#ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
extern const struct kernel_symbol __start___ksymtab_unused[];
extern const struct kernel_symbol __stop___ksymtab_unused[];
extern const unsigned long __start___kcrctab_unused[];
extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
extern const unsigned long __start___kcrctab_unused_gpl[];
#endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
#ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
extern const struct kernel_symbol __start___ksymtab_gpl_future[];
extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
extern const unsigned long __start___kcrctab_gpl_future[];
#endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */

#endif /* KSPLICE_STANDALONE */

static struct update *init_ksplice_update(const char *kid);
static void cleanup_ksplice_update(struct update *update);
static void maybe_cleanup_ksplice_update(struct update *update);
static void add_to_update(struct ksplice_mod_change *change,
			  struct update *update);
static int ksplice_sysfs_init(struct update *update);

/* Preparing the relocations and patches for application */
static abort_t apply_update(struct update *update);
static abort_t reverse_update(struct update *update);
static abort_t prepare_change(struct ksplice_mod_change *change);
static abort_t finalize_change(struct ksplice_mod_change *change);
static abort_t finalize_patches(struct ksplice_mod_change *change);
static abort_t add_dependency_on_address(struct ksplice_mod_change *change,
					 unsigned long addr);
static abort_t map_trampoline_pages(struct update *update);
static void unmap_trampoline_pages(struct update *update);
static void *map_writable(void *addr, size_t len);
static abort_t apply_relocs(struct ksplice_mod_change *change,
			    const struct ksplice_reloc *relocs,
			    const struct ksplice_reloc *relocs_end);
static abort_t apply_reloc(struct ksplice_mod_change *change,
			   const struct ksplice_reloc *r);
static abort_t apply_howto_reloc(struct ksplice_mod_change *change,
				 const struct ksplice_reloc *r);
static abort_t apply_howto_date(struct ksplice_mod_change *change,
				const struct ksplice_reloc *r);
static abort_t read_reloc_value(struct ksplice_mod_change *change,
				const struct ksplice_reloc *r,
				unsigned long addr, unsigned long *valp);
static abort_t write_reloc_value(struct ksplice_mod_change *change,
				 const struct ksplice_reloc *r,
				 unsigned long addr, unsigned long sym_addr);
static abort_t create_module_list_entry(struct ksplice_mod_change *change,
					bool to_be_applied);
static void cleanup_module_list_entries(struct update *update);
static void __attribute__((noreturn)) ksplice_deleted(void);

/* run-pre matching */
static abort_t match_change_sections(struct ksplice_mod_change *change,
				     bool consider_data_sections);
static abort_t find_section(struct ksplice_mod_change *change,
			    struct ksplice_section *sect);
static abort_t try_addr(struct ksplice_mod_change *change,
			struct ksplice_section *sect,
			unsigned long run_addr,
			struct list_head *safety_records,
			enum run_pre_mode mode);
static abort_t run_pre_cmp(struct ksplice_mod_change *change,
			   const struct ksplice_section *sect,
			   unsigned long run_addr,
			   struct list_head *safety_records,
			   enum run_pre_mode mode);
#ifndef CONFIG_FUNCTION_DATA_SECTIONS
/* defined in arch/ARCH/kernel/ksplice-arch.c */
static abort_t arch_run_pre_cmp(struct ksplice_mod_change *change,
				struct ksplice_section *sect,
				unsigned long run_addr,
				struct list_head *safety_records,
				enum run_pre_mode mode);
#endif /* CONFIG_FUNCTION_DATA_SECTIONS */
static void print_bytes(struct ksplice_mod_change *change,
			const unsigned char *run, int runc,
			const unsigned char *pre, int prec);
#if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
static abort_t brute_search(struct ksplice_mod_change *change,
			    struct ksplice_section *sect,
			    const void *start, unsigned long len,
			    struct list_head *vals);
static abort_t brute_search_all(struct ksplice_mod_change *change,
				struct ksplice_section *sect,
				struct list_head *vals);
#endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
static const struct ksplice_reloc *
init_reloc_search(struct ksplice_mod_change *change,
		  const struct ksplice_section *sect);
static const struct ksplice_reloc *find_reloc(const struct ksplice_reloc *start,
					      const struct ksplice_reloc *end,
					      unsigned long address,
					      unsigned long size);
static abort_t lookup_reloc(struct ksplice_mod_change *change,
			    const struct ksplice_reloc **fingerp,
			    unsigned long addr,
			    const struct ksplice_reloc **relocp);
static abort_t handle_reloc(struct ksplice_mod_change *change,
			    const struct ksplice_section *sect,
			    const struct ksplice_reloc *r,
			    unsigned long run_addr, enum run_pre_mode mode);
static abort_t handle_howto_date(struct ksplice_mod_change *change,
				 const struct ksplice_section *sect,
				 const struct ksplice_reloc *r,
				 unsigned long run_addr,
				 enum run_pre_mode mode);
static abort_t handle_howto_reloc(struct ksplice_mod_change *change,
				  const struct ksplice_section *sect,
				  const struct ksplice_reloc *r,
				  unsigned long run_addr,
				  enum run_pre_mode mode);
static struct ksplice_section *symbol_section(struct ksplice_mod_change *change,
					      const struct ksplice_symbol *sym);
static int compare_section_labels(const void *va, const void *vb);
static int symbol_section_bsearch_compare(const void *a, const void *b);
static const struct ksplice_reloc *
patch_reloc(struct ksplice_mod_change *change,
	    const struct ksplice_patch *p);

/* Computing possible addresses for symbols */
static abort_t lookup_symbol(struct ksplice_mod_change *change,
			     const struct ksplice_symbol *ksym,
			     struct list_head *vals);
static void cleanup_symbol_arrays(struct ksplice_mod_change *change);
static abort_t init_symbol_arrays(struct ksplice_mod_change *change);
static abort_t init_symbol_array(struct ksplice_mod_change *change,
				 struct ksplice_symbol *start,
				 struct ksplice_symbol *end);
static abort_t uniquify_symbols(struct ksplice_mod_change *change);
static abort_t add_matching_values(struct ksplice_lookup *lookup,
				   const char *sym_name, unsigned long sym_val);
static bool add_export_values(const struct symsearch *syms,
			      struct module *owner,
			      unsigned int symnum, void *data);
static int symbolp_bsearch_compare(const void *key, const void *elt);
static int compare_symbolp_names(const void *a, const void *b);
static int compare_symbolp_labels(const void *a, const void *b);
#ifdef CONFIG_KALLSYMS
static int add_kallsyms_values(void *data, const char *name,
			       struct module *owner, unsigned long val);
#endif /* CONFIG_KALLSYMS */
#ifdef KSPLICE_STANDALONE
static abort_t
add_system_map_candidates(struct ksplice_mod_change *change,
			  const struct ksplice_system_map *start,
			  const struct ksplice_system_map *end,
			  const char *label, struct list_head *vals);
static int compare_system_map(const void *a, const void *b);
static int system_map_bsearch_compare(const void *key, const void *elt);
#endif /* KSPLICE_STANDALONE */
static abort_t new_export_lookup(struct ksplice_mod_change *ichange,
				 const char *name, struct list_head *vals);

/* Atomic update trampoline insertion and removal */
static abort_t patch_action(struct update *update, enum ksplice_action action);
static int __apply_patches(void *update);
static int __reverse_patches(void *update);
static abort_t check_each_task(struct update *update);
static abort_t check_task(struct update *update,
			  const struct task_struct *t, bool rerun);
static abort_t check_stack(struct update *update, struct conflict *conf,
			   const struct thread_info *tinfo,
			   const unsigned long *stack);
static abort_t check_address(struct update *update,
			     struct conflict *conf, unsigned long addr);
static abort_t check_record(struct conflict_addr *ca,
			    const struct safety_record *rec,
			    unsigned long addr);
static bool is_stop_machine(const struct task_struct *t);
static void cleanup_conflicts(struct update *update);
static void print_conflicts(struct update *update);
static void insert_trampoline(struct ksplice_patch *p);
static abort_t verify_trampoline(struct ksplice_mod_change *change,
				 const struct ksplice_patch *p);
static void remove_trampoline(const struct ksplice_patch *p);

static abort_t create_labelval(struct ksplice_mod_change *change,
			       struct ksplice_symbol *ksym,
			       unsigned long val, int status);
static abort_t create_safety_record(struct ksplice_mod_change *change,
				    const struct ksplice_section *sect,
				    struct list_head *record_list,
				    unsigned long run_addr,
				    unsigned long run_size);
static abort_t add_candidate_val(struct ksplice_mod_change *change,
				 struct list_head *vals, unsigned long val);
static void release_vals(struct list_head *vals);
static void set_temp_labelvals(struct ksplice_mod_change *change, int status);

static int contains_canary(struct ksplice_mod_change *change,
			   unsigned long blank_addr,
			   const struct ksplice_reloc_howto *howto);
static unsigned long follow_trampolines(struct ksplice_mod_change *change,
					unsigned long addr);
static bool patches_module(const struct module *a, const struct module *b);
static bool strstarts(const char *str, const char *prefix);
static bool singular(struct list_head *list);
static void *bsearch(const void *key, const void *base, size_t n,
		     size_t size, int (*cmp)(const void *key, const void *elt));
static int compare_relocs(const void *a, const void *b);
static int reloc_bsearch_compare(const void *key, const void *elt);

/* Debugging */
static abort_t init_debug_buf(struct update *update);
static void clear_debug_buf(struct update *update);
static int __attribute__((format(printf, 2, 3)))
_ksdebug(struct update *update, const char *fmt, ...);
#define ksdebug(change, fmt, ...) \
	_ksdebug(change->update, fmt, ## __VA_ARGS__)

#ifdef KSPLICE_NO_KERNEL_SUPPORT
/* Functions defined here that will be exported in later kernels */
#ifdef CONFIG_KALLSYMS
static int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
					     struct module *, unsigned long),
				   void *data);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
static unsigned int kallsyms_expand_symbol(unsigned int off, char *result);
#endif /* LINUX_VERSION_CODE */
static int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
						    struct module *,
						    unsigned long),
					  void *data);
#endif /* CONFIG_KALLSYMS */
static struct module *find_module(const char *name);
static int use_module(struct module *a, struct module *b);
static const struct kernel_symbol *find_symbol(const char *name,
					       struct module **owner,
					       const unsigned long **crc,
					       bool gplok, bool warn);
static bool each_symbol(bool (*fn)(const struct symsearch *arr,
				   struct module *owner,
				   unsigned int symnum, void *data),
			void *data);
static struct module *__module_address(unsigned long addr);
#endif /* KSPLICE_NO_KERNEL_SUPPORT */

/* Architecture-specific functions defined in arch/ARCH/kernel/ksplice-arch.c */

/* Prepare a trampoline for the given patch */
static abort_t prepare_trampoline(struct ksplice_mod_change *change,
				  struct ksplice_patch *p);
/* What address does the trampoline at addr jump to? */
static abort_t trampoline_target(struct ksplice_mod_change *change,
				 unsigned long addr, unsigned long *new_addr);
/* Hook to handle pc-relative jumps inserted by parainstructions */
static abort_t handle_paravirt(struct ksplice_mod_change *change,
			       unsigned long pre, unsigned long run,
			       int *matched);
/* Called for relocations of type KSPLICE_HOWTO_BUG */
static abort_t handle_bug(struct ksplice_mod_change *change,
			  const struct ksplice_reloc *r,
			  unsigned long run_addr);
/* Called for relocations of type KSPLICE_HOWTO_EXTABLE */
static abort_t handle_extable(struct ksplice_mod_change *change,
			      const struct ksplice_reloc *r,
			      unsigned long run_addr);
/* Is address p on the stack of the given thread? */
static bool valid_stack_ptr(const struct thread_info *tinfo, const void *p);

#ifndef KSPLICE_STANDALONE
#include "ksplice-arch.c"
#elif defined CONFIG_X86
#include "x86/ksplice-arch.c"
#elif defined CONFIG_ARM
#include "arm/ksplice-arch.c"
#endif /* KSPLICE_STANDALONE */

#define clear_list(head, type, member)				\
	do {							\
		struct list_head *_pos, *_n;			\
		list_for_each_safe(_pos, _n, head) {		\
			list_del(_pos);				\
			kfree(list_entry(_pos, type, member));	\
		}						\
	} while (0)
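
/*
 * Usage sketch (illustrative; roughly what cleanup_conflicts() does):
 * free every node of a list in one statement:
 *
 *	clear_list(&update->conflicts, struct conflict, list);
 *
 * Each entry is unlinked with list_del() and then kfree()d via the
 * safe iterator, so no entry is touched after it has been freed.
 */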

/**
 * init_ksplice_mod_change() - Initializes a ksplice change
 * @change:	The change to be initialized.  All of the public fields of the
 *		change and its associated data structures should be populated
 *		before this function is called.  The values of the private
 *		fields will be ignored.
 */
int init_ksplice_mod_change(struct ksplice_mod_change *change)
{
	struct update *update;
	struct ksplice_patch *p;
	struct ksplice_section *s;
	int ret = 0;

#ifdef KSPLICE_STANDALONE
	if (!bootstrapped)
		return -1;
#endif /* KSPLICE_STANDALONE */

	INIT_LIST_HEAD(&change->temp_labelvals);
	INIT_LIST_HEAD(&change->safety_records);

	sort(change->old_code.relocs,
	     change->old_code.relocs_end - change->old_code.relocs,
	     sizeof(*change->old_code.relocs), compare_relocs, NULL);
	sort(change->new_code.relocs,
	     change->new_code.relocs_end - change->new_code.relocs,
	     sizeof(*change->new_code.relocs), compare_relocs, NULL);
	sort(change->old_code.sections,
	     change->old_code.sections_end - change->old_code.sections,
	     sizeof(*change->old_code.sections), compare_section_labels, NULL);
#ifdef KSPLICE_STANDALONE
	sort(change->new_code.system_map,
	     change->new_code.system_map_end - change->new_code.system_map,
	     sizeof(*change->new_code.system_map), compare_system_map, NULL);
	sort(change->old_code.system_map,
	     change->old_code.system_map_end - change->old_code.system_map,
	     sizeof(*change->old_code.system_map), compare_system_map, NULL);
#endif /* KSPLICE_STANDALONE */

	for (p = change->patches; p < change->patches_end; p++)
		p->vaddr = NULL;
	for (s = change->old_code.sections; s < change->old_code.sections_end;
	     s++)
		s->match_map = NULL;
	for (p = change->patches; p < change->patches_end; p++) {
		const struct ksplice_reloc *r = patch_reloc(change, p);
		if (r == NULL)
			return -ENOENT;
		if (p->type == KSPLICE_PATCH_DATA) {
			s = symbol_section(change, r->symbol);
			if (s == NULL)
				return -ENOENT;
			/* Ksplice creates KSPLICE_PATCH_DATA patches in order
			 * to modify rodata sections that have been explicitly
			 * marked for patching using the ksplice-patch.h macro
			 * ksplice_assume_rodata.  Here we modify the section
			 * flags appropriately.
			 */
			if (s->flags & KSPLICE_SECTION_DATA)
				s->flags = (s->flags & ~KSPLICE_SECTION_DATA) |
				    KSPLICE_SECTION_RODATA;
		}
	}

	mutex_lock(&module_mutex);
	list_for_each_entry(update, &updates, list) {
		if (strcmp(change->kid, update->kid) == 0) {
			if (update->stage != STAGE_PREPARING) {
				ret = -EPERM;
				goto out;
			}
			add_to_update(change, update);
			ret = 0;
			goto out;
		}
	}
	update = init_ksplice_update(change->kid);
	if (update == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	ret = ksplice_sysfs_init(update);
	if (ret != 0) {
		cleanup_ksplice_update(update);
		goto out;
	}
	add_to_update(change, update);
out:
	mutex_unlock(&module_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(init_ksplice_mod_change);
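
/*
 * Illustrative sketch (hypothetical, not part of this file): a generated
 * new_code module would typically register its change from module_init:
 *
 *	static int __init patch_init(void)
 *	{
 *		return init_ksplice_mod_change(&this_change);
 *	}
 *
 * where this_change is the module's statically built ksplice_mod_change.
 */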

/**
 * cleanup_ksplice_mod_change() - Cleans up a change
 * @change:	The change to be cleaned up
 */
void cleanup_ksplice_mod_change(struct ksplice_mod_change *change)
{
	if (change->update == NULL)
		return;

	mutex_lock(&module_mutex);
	if (change->update->stage == STAGE_APPLIED) {
		/* If the change wasn't actually applied (because we
		 * only applied this update to loaded modules and this
		 * target was not loaded), then unregister the change
		 * from the list of unused changes.
		 */
		struct ksplice_mod_change *c;
		bool found = false;

		list_for_each_entry(c, &change->update->unused_changes, list) {
			if (c == change)
				found = true;
		}
		if (found)
			list_del(&change->list);
		mutex_unlock(&module_mutex);
		return;
	}
	list_del(&change->list);
	if (change->update->stage == STAGE_PREPARING)
		maybe_cleanup_ksplice_update(change->update);
	change->update = NULL;
	mutex_unlock(&module_mutex);
}
EXPORT_SYMBOL_GPL(cleanup_ksplice_mod_change);

static struct update *init_ksplice_update(const char *kid)
{
	struct update *update;
	update = kcalloc(1, sizeof(struct update), GFP_KERNEL);
	if (update == NULL)
		return NULL;
	update->name = kasprintf(GFP_KERNEL, "ksplice_%s", kid);
	if (update->name == NULL) {
		kfree(update);
		return NULL;
	}
	update->kid = kstrdup(kid, GFP_KERNEL);
	if (update->kid == NULL) {
		kfree(update->name);
		kfree(update);
		return NULL;
	}
	if (try_module_get(THIS_MODULE) != 1) {
		kfree(update->kid);
		kfree(update->name);
		kfree(update);
		return NULL;
	}
	INIT_LIST_HEAD(&update->changes);
	INIT_LIST_HEAD(&update->unused_changes);
	INIT_LIST_HEAD(&update->ksplice_module_list);
	if (init_debug_buf(update) != OK) {
		module_put(THIS_MODULE);
		kfree(update->kid);
		kfree(update->name);
		kfree(update);
		return NULL;
	}
	list_add(&update->list, &updates);
	update->stage = STAGE_PREPARING;
	update->abort_cause = OK;
	update->partial = 0;
	INIT_LIST_HEAD(&update->conflicts);
	return update;
}

static void cleanup_ksplice_update(struct update *update)
{
	list_del(&update->list);
	cleanup_conflicts(update);
	clear_debug_buf(update);
	cleanup_module_list_entries(update);
	kfree(update->kid);
	kfree(update->name);
	kfree(update);
	module_put(THIS_MODULE);
}

/* Clean up the update if it no longer has any changes */
static void maybe_cleanup_ksplice_update(struct update *update)
{
	if (list_empty(&update->changes) && list_empty(&update->unused_changes))
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
		kobject_put(&update->kobj);
#else /* LINUX_VERSION_CODE < */
/* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
		kobject_unregister(&update->kobj);
#endif /* LINUX_VERSION_CODE */
}

static void add_to_update(struct ksplice_mod_change *change,
			  struct update *update)
{
	change->update = update;
	list_add(&change->list, &update->unused_changes);
}

static int ksplice_sysfs_init(struct update *update)
{
	int ret = 0;
	memset(&update->kobj, 0, sizeof(update->kobj));
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
#ifndef KSPLICE_STANDALONE
	ret = kobject_init_and_add(&update->kobj, &update_ktype,
				   ksplice_kobj, "%s", update->kid);
#else /* KSPLICE_STANDALONE */
/* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
	ret = kobject_init_and_add(&update->kobj, &update_ktype,
				   &THIS_MODULE->mkobj.kobj, "ksplice");
#endif /* KSPLICE_STANDALONE */
#else /* LINUX_VERSION_CODE < */
	ret = kobject_set_name(&update->kobj, "%s", "ksplice");
	if (ret != 0)
		return ret;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
	update->kobj.parent = &THIS_MODULE->mkobj.kobj;
#else /* LINUX_VERSION_CODE < */
/* b86ab02803095190d6b72bcc18dcf620bf378df9 was after 2.6.10 */
	update->kobj.parent = &THIS_MODULE->mkobj->kobj;
#endif /* LINUX_VERSION_CODE */
	update->kobj.ktype = &update_ktype;
	ret = kobject_register(&update->kobj);
#endif /* LINUX_VERSION_CODE */
	if (ret != 0)
		return ret;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
	kobject_uevent(&update->kobj, KOBJ_ADD);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
/* 312c004d36ce6c739512bac83b452f4c20ab1f62 was after 2.6.14 */
/* 12025235884570ba7f02a6f427f973ac6be7ec54 was after 2.6.9 */
	kobject_uevent(&update->kobj, KOBJ_ADD, NULL);
#endif /* LINUX_VERSION_CODE */
	return 0;
}

static abort_t apply_update(struct update *update)
{
	struct ksplice_mod_change *change, *n;
	abort_t ret;
	int retval;

	list_for_each_entry(change, &update->changes, list) {
		ret = create_module_list_entry(change, true);
		if (ret != OK)
			goto out;
	}

	list_for_each_entry_safe(change, n, &update->unused_changes, list) {
		if (strcmp(change->target_name, "vmlinux") == 0) {
			change->target = NULL;
		} else if (change->target == NULL) {
			change->target = find_module(change->target_name);
			if (change->target == NULL ||
			    !module_is_live(change->target)) {
				if (!update->partial) {
					ret = TARGET_NOT_LOADED;
					goto out;
				}
				ret = create_module_list_entry(change, false);
				if (ret != OK)
					goto out;
				continue;
			}
			retval = use_module(change->new_code_mod,
					    change->target);
			if (retval != 1) {
				ret = UNEXPECTED;
				goto out;
			}
		}
		ret = create_module_list_entry(change, true);
		if (ret != OK)
			goto out;
		list_del(&change->list);
		list_add_tail(&change->list, &update->changes);

#ifdef KSPLICE_NEED_PARAINSTRUCTIONS
		if (change->target == NULL) {
			apply_paravirt(change->new_code.parainstructions,
				       change->new_code.parainstructions_end);
			apply_paravirt(change->old_code.parainstructions,
				       change->old_code.parainstructions_end);
		}
#endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
	}

	list_for_each_entry(change, &update->changes, list) {
		const struct ksplice_section *sect;
		for (sect = change->new_code.sections;
		     sect < change->new_code.sections_end; sect++) {
			struct safety_record *rec = kmalloc(sizeof(*rec),
							    GFP_KERNEL);
			if (rec == NULL) {
				ret = OUT_OF_MEMORY;
				goto out;
			}
			rec->addr = sect->address;
			rec->size = sect->size;
			rec->label = sect->symbol->label;
			list_add(&rec->list, &change->safety_records);
		}
	}

	list_for_each_entry(change, &update->changes, list) {
		ret = init_symbol_arrays(change);
		if (ret != OK) {
			cleanup_symbol_arrays(change);
			goto out;
		}
		ret = prepare_change(change);
		cleanup_symbol_arrays(change);
		if (ret != OK)
			goto out;
	}
	ret = patch_action(update, KS_APPLY);
out:
	list_for_each_entry(change, &update->changes, list) {
		struct ksplice_section *s;
		if (update->stage == STAGE_PREPARING)
			clear_list(&change->safety_records,
				   struct safety_record, list);
		for (s = change->old_code.sections;
		     s < change->old_code.sections_end; s++) {
			if (s->match_map != NULL) {
				vfree(s->match_map);
				s->match_map = NULL;
			}
		}
	}
	if (update->stage == STAGE_PREPARING)
		cleanup_module_list_entries(update);

	if (ret == OK)
		printk(KERN_INFO "ksplice: Update %s applied successfully\n",
		       update->kid);
	return ret;
}

static abort_t reverse_update(struct update *update)
{
	abort_t ret;
	struct ksplice_mod_change *change;

	clear_debug_buf(update);
	ret = init_debug_buf(update);
	if (ret != OK)
		return ret;

	_ksdebug(update, "Preparing to reverse %s\n", update->kid);

	ret = patch_action(update, KS_REVERSE);
	if (ret != OK)
		return ret;

	list_for_each_entry(change, &update->changes, list)
		clear_list(&change->safety_records, struct safety_record, list);

	printk(KERN_INFO "ksplice: Update %s reversed successfully\n",
	       update->kid);
	return OK;
}

static int compare_symbolp_names(const void *a, const void *b)
{
	const struct ksplice_symbol *const *sympa = a, *const *sympb = b;
	if ((*sympa)->name == NULL && (*sympb)->name == NULL)
		return 0;
	if ((*sympa)->name == NULL)
		return -1;
	if ((*sympb)->name == NULL)
		return 1;
	return strcmp((*sympa)->name, (*sympb)->name);
}

static int compare_symbolp_labels(const void *a, const void *b)
{
	const struct ksplice_symbol *const *sympa = a, *const *sympb = b;
	return strcmp((*sympa)->label, (*sympb)->label);
}

static int symbolp_bsearch_compare(const void *key, const void *elt)
{
	const char *name = key;
	const struct ksplice_symbol *const *symp = elt;
	const struct ksplice_symbol *sym = *symp;
	if (sym->name == NULL)
		return 1;
	return strcmp(name, sym->name);
}

static abort_t add_matching_values(struct ksplice_lookup *lookup,
				   const char *sym_name, unsigned long sym_val)
{
	struct ksplice_symbol **symp;
	abort_t ret;

	symp = bsearch(sym_name, lookup->arr, lookup->size,
		       sizeof(*lookup->arr), symbolp_bsearch_compare);
	if (symp == NULL)
		return OK;

	while (symp > lookup->arr &&
	       symbolp_bsearch_compare(sym_name, symp - 1) == 0)
		symp--;

	for (; symp < lookup->arr + lookup->size; symp++) {
		struct ksplice_symbol *sym = *symp;
		if (sym->name == NULL || strcmp(sym_name, sym->name) != 0)
			break;
		ret = add_candidate_val(lookup->change, sym->vals, sym_val);
		if (ret != OK)
			return ret;
	}
	return OK;
}

#ifdef CONFIG_KALLSYMS
static int add_kallsyms_values(void *data, const char *name,
			       struct module *owner, unsigned long val)
{
	struct ksplice_lookup *lookup = data;
	if (owner == lookup->change->new_code_mod ||
	    !patches_module(owner, lookup->change->target))
		return (__force int)OK;
	return (__force int)add_matching_values(lookup, name, val);
}
#endif /* CONFIG_KALLSYMS */

static bool add_export_values(const struct symsearch *syms,
			      struct module *owner,
			      unsigned int symnum, void *data)
{
	struct ksplice_lookup *lookup = data;
	abort_t ret;

	ret = add_matching_values(lookup, syms->start[symnum].name,
				  syms->start[symnum].value);
	if (ret != OK) {
		lookup->ret = ret;
		return true;
	}
	return false;
}

static void cleanup_symbol_arrays(struct ksplice_mod_change *change)
{
	struct ksplice_symbol *sym;
	for (sym = change->new_code.symbols; sym < change->new_code.symbols_end;
	     sym++) {
		if (sym->vals != NULL) {
			clear_list(sym->vals, struct candidate_val, list);
			kfree(sym->vals);
			sym->vals = NULL;
		}
	}
	for (sym = change->old_code.symbols; sym < change->old_code.symbols_end;
	     sym++) {
		if (sym->vals != NULL) {
			clear_list(sym->vals, struct candidate_val, list);
			kfree(sym->vals);
			sym->vals = NULL;
		}
	}
}

/*
 * The new_code and old_code modules each have their own independent
 * ksplice_symbol structures.  uniquify_symbols unifies these separate
 * pieces of kernel symbol information by replacing all references to
 * the old_code copy of symbols with references to the new_code copy.
 */
static abort_t uniquify_symbols(struct ksplice_mod_change *change)
{
	struct ksplice_reloc *r;
	struct ksplice_section *s;
	struct ksplice_symbol *sym, **sym_arr, **symp;
	size_t size = change->new_code.symbols_end - change->new_code.symbols;

	if (size == 0)
		return OK;

	sym_arr = vmalloc(sizeof(*sym_arr) * size);
	if (sym_arr == NULL)
		return OUT_OF_MEMORY;

	for (symp = sym_arr, sym = change->new_code.symbols;
	     symp < sym_arr + size && sym < change->new_code.symbols_end;
	     sym++, symp++)
		*symp = sym;

	sort(sym_arr, size, sizeof(*sym_arr), compare_symbolp_labels, NULL);

	for (r = change->old_code.relocs; r < change->old_code.relocs_end;
	     r++) {
		symp = bsearch(&r->symbol, sym_arr, size, sizeof(*sym_arr),
			       compare_symbolp_labels);
		if (symp != NULL) {
			if ((*symp)->name == NULL)
				(*symp)->name = r->symbol->name;
			r->symbol = *symp;
		}
	}

	for (s = change->old_code.sections; s < change->old_code.sections_end;
	     s++) {
		symp = bsearch(&s->symbol, sym_arr, size, sizeof(*sym_arr),
			       compare_symbolp_labels);
		if (symp != NULL) {
			if ((*symp)->name == NULL)
				(*symp)->name = s->symbol->name;
			s->symbol = *symp;
		}
	}

	vfree(sym_arr);
	return OK;
}

/*
 * Initialize the ksplice_symbol structures in the given array using
 * the kallsyms and exported symbol tables.
 */
static abort_t init_symbol_array(struct ksplice_mod_change *change,
				 struct ksplice_symbol *start,
				 struct ksplice_symbol *end)
{
	struct ksplice_symbol *sym, **sym_arr, **symp;
	struct ksplice_lookup lookup;
	size_t size = end - start;
	abort_t ret;

	if (size == 0)
		return OK;

	for (sym = start; sym < end; sym++) {
		if (strstarts(sym->label, "__ksymtab")) {
			const struct kernel_symbol *ksym;
			const char *colon = strchr(sym->label, ':');
			const char *name;
			/* Check for the colon before deriving name from it */
			if (colon == NULL)
				continue;
			name = colon + 1;
			ksym = find_symbol(name, NULL, NULL, true, false);
			if (ksym == NULL) {
				ksdebug(change, "Could not find kernel_symbol "
					"structure for %s\n", name);
				continue;
			}
			sym->value = (unsigned long)ksym;
			sym->vals = NULL;
			continue;
		}

		sym->vals = kmalloc(sizeof(*sym->vals), GFP_KERNEL);
		if (sym->vals == NULL)
			return OUT_OF_MEMORY;
		INIT_LIST_HEAD(sym->vals);
		sym->value = 0;
	}

	sym_arr = vmalloc(sizeof(*sym_arr) * size);
	if (sym_arr == NULL)
		return OUT_OF_MEMORY;

	for (symp = sym_arr, sym = start; symp < sym_arr + size && sym < end;
	     sym++, symp++)
		*symp = sym;

	sort(sym_arr, size, sizeof(*sym_arr), compare_symbolp_names, NULL);

	lookup.change = change;
	lookup.arr = sym_arr;
	lookup.size = size;
	lookup.ret = OK;

	each_symbol(add_export_values, &lookup);
	ret = lookup.ret;
#ifdef CONFIG_KALLSYMS
	if (ret == OK)
		ret = (__force abort_t)
		    kallsyms_on_each_symbol(add_kallsyms_values, &lookup);
#endif /* CONFIG_KALLSYMS */
	vfree(sym_arr);
	return ret;
}

/* Prepare the change's ksplice_symbol structures for run-pre matching */
static abort_t init_symbol_arrays(struct ksplice_mod_change *change)
{
	abort_t ret;

	ret = uniquify_symbols(change);
	if (ret != OK)
		return ret;

	ret = init_symbol_array(change, change->old_code.symbols,
				change->old_code.symbols_end);
	if (ret != OK)
		return ret;

	ret = init_symbol_array(change, change->new_code.symbols,
				change->new_code.symbols_end);
	if (ret != OK)
		return ret;

	return OK;
}

static abort_t prepare_change(struct ksplice_mod_change *change)
{
	abort_t ret;

	ksdebug(change, "Preparing and checking %s\n", change->name);
	ret = match_change_sections(change, false);
	if (ret == NO_MATCH) {
		/* It is possible that by using relocations from .data sections
		 * we can successfully run-pre match the rest of the sections.
		 * To avoid using any symbols obtained from .data sections
		 * (which may be unreliable) in the post code, we first prepare
		 * the post code and then try to run-pre match the remaining
		 * sections with the help of .data sections.
		 */
		ksdebug(change, "Continuing without some sections; we might "
			"find them later.\n");
		ret = finalize_change(change);
		if (ret != OK) {
			ksdebug(change, "Aborted.  Unable to continue without "
				"the unmatched sections.\n");
			return ret;
		}

		ksdebug(change, "run-pre: Considering .data sections to find "
			"the unmatched sections\n");
		ret = match_change_sections(change, true);
		if (ret != OK)
			return ret;

		ksdebug(change, "run-pre: Found all previously unmatched "
			"sections\n");
		return OK;
	} else if (ret != OK) {
		return ret;
	}

	return finalize_change(change);
}

/*
 * Finish preparing the change for insertion into the kernel.
 * Afterwards, the replacement code should be ready to run and the
 * ksplice_patches should all be ready for trampoline insertion.
 */
static abort_t finalize_change(struct ksplice_mod_change *change)
{
	abort_t ret;
	ret = apply_relocs(change, change->new_code.relocs,
			   change->new_code.relocs_end);
	if (ret != OK)
		return ret;

	ret = finalize_patches(change);
	if (ret != OK)
		return ret;

	return OK;
}

static abort_t finalize_patches(struct ksplice_mod_change *change)
{
	struct ksplice_patch *p;
	struct safety_record *rec;
	abort_t ret;

	for (p = change->patches; p < change->patches_end; p++) {
		bool found = false;
		list_for_each_entry(rec, &change->safety_records, list) {
			if (rec->addr <= p->oldaddr &&
			    p->oldaddr < rec->addr + rec->size) {
				found = true;
				break;
			}
		}
		if (!found && p->type != KSPLICE_PATCH_EXPORT) {
			const struct ksplice_reloc *r = patch_reloc(change, p);
			if (r == NULL) {
				ksdebug(change, "A patch with no reloc at its "
					"oldaddr has no safety record\n");
				return NO_MATCH;
			}
			ksdebug(change, "No safety record for patch with "
				"oldaddr %s+%lx\n", r->symbol->label,
				r->target_addend);
			return NO_MATCH;
		}

		if (p->type == KSPLICE_PATCH_TEXT) {
			ret = prepare_trampoline(change, p);
			if (ret != OK)
				return ret;
		}

		if (found && rec->addr + rec->size < p->oldaddr + p->size) {
			ksdebug(change, "Safety record %s is too short for "
				"patch\n", rec->label);
			return UNEXPECTED;
		}

		if (p->type == KSPLICE_PATCH_TEXT) {
			if (p->repladdr == 0)
				p->repladdr = (unsigned long)ksplice_deleted;
		}
	}

	return OK;
}

/* noinline to prevent garbage on the stack from confusing check_stack */
static noinline abort_t map_trampoline_pages(struct update *update)
{
	struct ksplice_mod_change *change;
	list_for_each_entry(change, &update->changes, list) {
		struct ksplice_patch *p;
		for (p = change->patches; p < change->patches_end; p++) {
			p->vaddr = map_writable((void *)p->oldaddr, p->size);
			if (p->vaddr == NULL) {
				ksdebug(change,
					"Unable to map oldaddr read/write\n");
				unmap_trampoline_pages(update);
				return UNEXPECTED;
			}
		}
	}
	return OK;
}

static void unmap_trampoline_pages(struct update *update)
{
	struct ksplice_mod_change *change;
	list_for_each_entry(change, &update->changes, list) {
		struct ksplice_patch *p;
		for (p = change->patches; p < change->patches_end; p++) {
			vunmap((void *)((unsigned long)p->vaddr & PAGE_MASK));
			p->vaddr = NULL;
		}
	}
}

/*
 * map_writable creates a shadow page mapping of the range
 * [addr, addr + len) so that we can write to code mapped read-only.
 *
 * It is similar to a generalized version of x86's text_poke.  But
 * because one cannot use vmalloc/vfree() inside stop_machine, we use
 * map_writable to map the pages before stop_machine, then use the
 * mapping inside stop_machine, and unmap the pages afterwards.
 */
static void *map_writable(void *addr, size_t len)
{
	void *vaddr;
	int nr_pages = DIV_ROUND_UP(offset_in_page(addr) + len, PAGE_SIZE);
	struct page **pages = kmalloc(nr_pages * sizeof(*pages), GFP_KERNEL);
	void *page_addr = (void *)((unsigned long)addr & PAGE_MASK);
	int i;

	if (pages == NULL)
		return NULL;

	for (i = 0; i < nr_pages; i++) {
		if (__module_address((unsigned long)page_addr) == NULL) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) || !defined(CONFIG_X86_64)
			pages[i] = virt_to_page(page_addr);
#else /* LINUX_VERSION_CODE < && CONFIG_X86_64 */
/* e3ebadd95cb621e2c7436f3d3646447ac9d5c16d was after 2.6.21 */
			pages[i] =
			    pfn_to_page(__pa_symbol(page_addr) >> PAGE_SHIFT);
#endif /* LINUX_VERSION_CODE || !CONFIG_X86_64 */
			WARN_ON(!PageReserved(pages[i]));
		} else {
			/* look up the current page, not the start of the
			 * whole range */
			pages[i] = vmalloc_to_page(page_addr);
		}
		if (pages[i] == NULL) {
			kfree(pages);
			return NULL;
		}
		page_addr += PAGE_SIZE;
	}
	vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	kfree(pages);
	if (vaddr == NULL)
		return NULL;
	return vaddr + offset_in_page(addr);
}
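
/*
 * Illustrative sketch (hypothetical values): patching one byte of
 * read-only text through the shadow mapping.  The write to *w lands at
 * addr because both map to the same physical page:
 *
 *	uint8_t *w = map_writable((void *)addr, 1);
 *	if (w != NULL)
 *		*w = 0xcc;
 *
 * The caller later releases the page-aligned mapping with vunmap(), as
 * unmap_trampoline_pages() above does.
 */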

/*
 * Ksplice adds a dependency on any symbol address used to resolve
 * relocations in the new_code module.
 *
 * Be careful to follow_trampolines so that we always depend on the
 * latest version of the target function, since that's the code that
 * will run if we call addr.
 */
static abort_t add_dependency_on_address(struct ksplice_mod_change *change,
					 unsigned long addr)
{
	struct ksplice_mod_change *c;
	struct module *m =
	    __module_text_address(follow_trampolines(change, addr));
	if (m == NULL)
		return OK;
	list_for_each_entry(c, &change->update->changes, list) {
		if (m == c->new_code_mod)
			return OK;
	}
	if (use_module(change->new_code_mod, m) != 1)
		return MODULE_BUSY;
	return OK;
}

static abort_t apply_relocs(struct ksplice_mod_change *change,
			    const struct ksplice_reloc *relocs,
			    const struct ksplice_reloc *relocs_end)
{
	const struct ksplice_reloc *r;
	for (r = relocs; r < relocs_end; r++) {
		abort_t ret = apply_reloc(change, r);
		if (ret != OK)
			return ret;
	}
	return OK;
}

static abort_t apply_reloc(struct ksplice_mod_change *change,
			   const struct ksplice_reloc *r)
{
	switch (r->howto->type) {
	case KSPLICE_HOWTO_RELOC:
	case KSPLICE_HOWTO_RELOC_PATCH:
		return apply_howto_reloc(change, r);
	case KSPLICE_HOWTO_DATE:
	case KSPLICE_HOWTO_TIME:
		return apply_howto_date(change, r);
	default:
		ksdebug(change, "Unexpected howto type %d\n", r->howto->type);
		return UNEXPECTED;
	}
}

/*
 * Applies a relocation.  Aborts if the symbol referenced in it has
 * not been uniquely resolved.
 */
static abort_t apply_howto_reloc(struct ksplice_mod_change *change,
				 const struct ksplice_reloc *r)
{
	abort_t ret;
	int canary_ret;
	unsigned long sym_addr;
	LIST_HEAD(vals);

	canary_ret = contains_canary(change, r->blank_addr, r->howto);
	if (canary_ret < 0)
		return UNEXPECTED;
	if (canary_ret == 0) {
		ksdebug(change, "reloc: skipped %lx to %s+%lx (altinstr)\n",
			r->blank_addr, r->symbol->label, r->target_addend);
		return OK;
	}

#ifdef KSPLICE_STANDALONE
	if (!bootstrapped) {
		ret = add_system_map_candidates(change,
						change->new_code.system_map,
						change->new_code.system_map_end,
						r->symbol->label, &vals);
		if (ret != OK) {
			release_vals(&vals);
			return ret;
		}
	}
#endif /* KSPLICE_STANDALONE */
	ret = lookup_symbol(change, r->symbol, &vals);
	if (ret != OK) {
		release_vals(&vals);
		return ret;
	}
	/*
	 * Relocations for the oldaddr fields of patches must have
	 * been resolved via run-pre matching.
	 */
	if (!singular(&vals) || (r->symbol->vals != NULL &&
				 r->howto->type == KSPLICE_HOWTO_RELOC_PATCH)) {
		release_vals(&vals);
		ksdebug(change, "Failed to find %s for reloc\n",
			r->symbol->label);
		return FAILED_TO_FIND;
	}
	sym_addr = list_entry(vals.next, struct candidate_val, list)->val;
	release_vals(&vals);

	ret = write_reloc_value(change, r, r->blank_addr,
				r->howto->pcrel ? sym_addr - r->blank_addr :
				sym_addr);
	if (ret != OK)
		return ret;

	ksdebug(change, "reloc: %lx to %s+%lx (S=%lx ", r->blank_addr,
		r->symbol->label, r->target_addend, sym_addr);
	switch (r->howto->size) {
	case 1:
		ksdebug(change, "aft=%02x)\n", *(uint8_t *)r->blank_addr);
		break;
	case 2:
		ksdebug(change, "aft=%04x)\n", *(uint16_t *)r->blank_addr);
		break;
	case 4:
		ksdebug(change, "aft=%08x)\n", *(uint32_t *)r->blank_addr);
		break;
#if BITS_PER_LONG >= 64
	case 8:
		ksdebug(change, "aft=%016llx)\n", *(uint64_t *)r->blank_addr);
		break;
#endif /* BITS_PER_LONG */
	default:
		ksdebug(change, "Aborted.  Invalid relocation size.\n");
		return UNEXPECTED;
	}
#ifdef KSPLICE_STANDALONE
	if (!bootstrapped)
		return OK;
#endif /* KSPLICE_STANDALONE */

	/*
	 * Create labelvals so that we can verify our choices in the
	 * second round of run-pre matching that considers data sections.
	 */
	ret = create_labelval(change, r->symbol, sym_addr, VAL);
	if (ret != OK)
		return ret;

	return add_dependency_on_address(change, sym_addr);
}

/*
 * Date relocations are created wherever __DATE__ or __TIME__ is used
 * in the kernel; we resolve them by simply copying in the date/time
 * obtained from run-pre matching the relevant compilation unit.
 */
static abort_t apply_howto_date(struct ksplice_mod_change *change,
				const struct ksplice_reloc *r)
{
	if (r->symbol->vals != NULL) {
		ksdebug(change, "Failed to find %s for date\n",
			r->symbol->label);
		return FAILED_TO_FIND;
	}
	memcpy((unsigned char *)r->blank_addr,
	       (const unsigned char *)r->symbol->value, r->howto->size);
	return OK;
}

/*
 * Given a relocation and its run address, compute the address of the
 * symbol the relocation referenced, and store it in *valp.
 */
static abort_t read_reloc_value(struct ksplice_mod_change *change,
				const struct ksplice_reloc *r,
				unsigned long addr, unsigned long *valp)
{
	unsigned char bytes[sizeof(long)];
	unsigned long val;
	const struct ksplice_reloc_howto *howto = r->howto;

	if (howto->size <= 0 || howto->size > sizeof(long)) {
		ksdebug(change, "Aborted.  Invalid relocation size.\n");
		return UNEXPECTED;
	}

	if (probe_kernel_read(bytes, (void *)addr, howto->size) == -EFAULT)
		return NO_MATCH;

	switch (howto->size) {
	case 1:
		val = *(uint8_t *)bytes;
		break;
	case 2:
		val = *(uint16_t *)bytes;
		break;
	case 4:
		val = *(uint32_t *)bytes;
		break;
#if BITS_PER_LONG >= 64
	case 8:
		val = *(uint64_t *)bytes;
		break;
#endif /* BITS_PER_LONG */
	default:
		ksdebug(change, "Aborted.  Invalid relocation size.\n");
		return UNEXPECTED;
	}

	val &= howto->dst_mask;
	if (howto->signed_addend)
		val |= -(val & (howto->dst_mask & ~(howto->dst_mask >> 1)));
	val <<= howto->rightshift;
	val -= r->insn_addend + r->target_addend;
	*valp = val;
	return OK;
}
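
/*
 * Worked example (hypothetical numbers): for a 4-byte field with
 * dst_mask 0xffffffff, rightshift 0, insn_addend 0 and target_addend
 * -4, a stored value of 0x1000 yields val = 0x1000 - (0 + -4) = 0x1004,
 * the address of the referenced symbol.  The signed_addend line
 * sign-extends the masked field: for an 8-bit field (dst_mask 0xff),
 * a stored 0x80 becomes -128 rather than 128.
 */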
1763 * Given a relocation, the address of its storage unit, and the
1764 * address of the symbol the relocation references, write the
1765 * relocation's final value into the storage unit.
1767 static abort_t write_reloc_value(struct ksplice_mod_change *change,
1768 const struct ksplice_reloc *r,
1769 unsigned long addr, unsigned long sym_addr)
1771 unsigned long val = sym_addr + r->target_addend + r->insn_addend;
1772 const struct ksplice_reloc_howto *howto = r->howto;
1773 val >>= howto->rightshift;
1774 switch (howto->size) {
1775 case 1:
1776 *(uint8_t *)addr = (*(uint8_t *)addr & ~howto->dst_mask) |
1777 (val & howto->dst_mask);
1778 break;
1779 case 2:
1780 *(uint16_t *)addr = (*(uint16_t *)addr & ~howto->dst_mask) |
1781 (val & howto->dst_mask);
1782 break;
1783 case 4:
1784 *(uint32_t *)addr = (*(uint32_t *)addr & ~howto->dst_mask) |
1785 (val & howto->dst_mask);
1786 break;
1787 #if BITS_PER_LONG >= 64
1788 case 8:
1789 *(uint64_t *)addr = (*(uint64_t *)addr & ~howto->dst_mask) |
1790 (val & howto->dst_mask);
1791 break;
1792 #endif /* BITS_PER_LONG */
1793 default:
1794 ksdebug(change, "Aborted. Invalid relocation size.\n");
1795 return UNEXPECTED;
1798 if (read_reloc_value(change, r, addr, &val) != OK || val != sym_addr) {
1799 ksdebug(change, "Aborted. Relocation overflow.\n");
1800 return UNEXPECTED;
1803 return OK;
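/* Sketch of the masked read-modify-write above (added commentary):
 * with dst_mask 0xffffffff the whole 32-bit field becomes the low
 * bits of sym_addr + target_addend + insn_addend, while a partial
 * mask (as on some RISC branch encodings) rewrites only the operand
 * field and preserves the surrounding opcode bits.  The
 * read_reloc_value() round trip at the end is the overflow check: if
 * the value read back is not sym_addr, the field was too narrow to
 * hold it. */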
1806 static abort_t create_module_list_entry(struct ksplice_mod_change *change,
1807 bool to_be_applied)
1809 struct ksplice_module_list_entry *entry =
1810 kmalloc(sizeof(*entry), GFP_KERNEL);
1811 if (entry == NULL)
1812 return OUT_OF_MEMORY;
1813 entry->new_code_mod_name =
1814 kstrdup(change->new_code_mod->name, GFP_KERNEL);
1815 if (entry->new_code_mod_name == NULL) {
1816 kfree(entry);
1817 return OUT_OF_MEMORY;
1819 entry->target_mod_name = kstrdup(change->target_name, GFP_KERNEL);
1820 if (entry->target_mod_name == NULL) {
1821 kfree(entry->new_code_mod_name);
1822 kfree(entry);
1823 return OUT_OF_MEMORY;
1825 /* The update's kid is guaranteed to outlast the module_list_entry */
1826 entry->kid = change->update->kid;
1827 entry->applied = to_be_applied;
1828 list_add(&entry->update_list, &change->update->ksplice_module_list);
1829 return OK;
1832 static void cleanup_module_list_entries(struct update *update)
1834 struct ksplice_module_list_entry *entry;
1835 list_for_each_entry(entry, &update->ksplice_module_list, update_list) {
1836 kfree(entry->target_mod_name);
1837 kfree(entry->new_code_mod_name);
1839 clear_list(&update->ksplice_module_list,
1840 struct ksplice_module_list_entry, update_list);
1843 /* Replacement address used for functions deleted by the patch */
1844 static void __attribute__((noreturn)) ksplice_deleted(void)
1846 printk(KERN_CRIT "Called a kernel function deleted by Ksplice!\n");
1847 BUG();
1848 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
1849 /* 91768d6c2bad0d2766a166f13f2f57e197de3458 was after 2.6.19 */
1850 for (;;);
1851 #endif /* LINUX_VERSION_CODE */
1854 /* Floodfill to run-pre match the sections within a change. */
1855 static abort_t match_change_sections(struct ksplice_mod_change *change,
1856 bool consider_data_sections)
1858 struct ksplice_section *sect;
1859 abort_t ret;
1860 int remaining = 0;
1861 bool progress;
1863 for (sect = change->old_code.sections;
1864 sect < change->old_code.sections_end; sect++) {
1865 if ((sect->flags & KSPLICE_SECTION_DATA) == 0 &&
1866 (sect->flags & KSPLICE_SECTION_STRING) == 0 &&
1867 (sect->flags & KSPLICE_SECTION_MATCHED) == 0)
1868 remaining++;
1871 while (remaining > 0) {
1872 progress = false;
1873 for (sect = change->old_code.sections;
1874 sect < change->old_code.sections_end; sect++) {
1875 if ((sect->flags & KSPLICE_SECTION_MATCHED) != 0)
1876 continue;
1877 if ((!consider_data_sections &&
1878 (sect->flags & KSPLICE_SECTION_DATA) != 0) ||
1879 (sect->flags & KSPLICE_SECTION_STRING) != 0)
1880 continue;
1881 ret = find_section(change, sect);
1882 if (ret == OK) {
1883 sect->flags |= KSPLICE_SECTION_MATCHED;
1884 if ((sect->flags & KSPLICE_SECTION_DATA) == 0)
1885 remaining--;
1886 progress = true;
1887 } else if (ret != NO_MATCH) {
1888 return ret;
1892 if (progress)
1893 continue;
1895 for (sect = change->old_code.sections;
1896 sect < change->old_code.sections_end; sect++) {
1897 if ((sect->flags & KSPLICE_SECTION_MATCHED) != 0 ||
1898 (sect->flags & KSPLICE_SECTION_STRING) != 0)
1899 continue;
1900 ksdebug(change, "run-pre: could not match %s "
1901 "section %s\n",
1902 (sect->flags & KSPLICE_SECTION_DATA) != 0 ?
1903 "data" :
1904 (sect->flags & KSPLICE_SECTION_RODATA) != 0 ?
1905 "rodata" : "text", sect->symbol->label);
1907 ksdebug(change, "Aborted. run-pre: could not match some "
1908 "sections.\n");
1909 return NO_MATCH;
1911 return OK;
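/* Added note: the loop above is a fixed-point computation.  A section
 * can only be matched once the symbols its relocations reference are
 * known, and those may be pinned down by sections matched on an
 * earlier pass, so match_change_sections keeps sweeping until a full
 * pass makes no progress.  Only non-data, non-string sections count
 * toward remaining; data sections are matched opportunistically once
 * consider_data_sections is set. */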
1915 * Search for the section in the running kernel. Returns OK if and
1916 * only if it finds precisely one address in the kernel matching the
1917 * section.
1919 static abort_t find_section(struct ksplice_mod_change *change,
1920 struct ksplice_section *sect)
1922 int i;
1923 abort_t ret;
1924 unsigned long run_addr;
1925 LIST_HEAD(vals);
1926 struct candidate_val *v, *n;
1928 #ifdef KSPLICE_STANDALONE
1929 ret = add_system_map_candidates(change, change->old_code.system_map,
1930 change->old_code.system_map_end,
1931 sect->symbol->label, &vals);
1932 if (ret != OK) {
1933 release_vals(&vals);
1934 return ret;
1936 #endif /* KSPLICE_STANDALONE */
1937 ret = lookup_symbol(change, sect->symbol, &vals);
1938 if (ret != OK) {
1939 release_vals(&vals);
1940 return ret;
1943 ksdebug(change, "run-pre: starting sect search for %s\n",
1944 sect->symbol->label);
1946 list_for_each_entry_safe(v, n, &vals, list) {
1947 run_addr = v->val;
1949 yield();
1950 ret = try_addr(change, sect, run_addr, NULL, RUN_PRE_INITIAL);
1951 if (ret == NO_MATCH) {
1952 list_del(&v->list);
1953 kfree(v);
1954 } else if (ret != OK) {
1955 release_vals(&vals);
1956 return ret;
1960 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
1961 if (list_empty(&vals) && (sect->flags & KSPLICE_SECTION_DATA) == 0) {
1962 ret = brute_search_all(change, sect, &vals);
1963 if (ret != OK) {
1964 release_vals(&vals);
1965 return ret;
1968 * Make sure run-pre matching output is displayed if
1969 * brute_search succeeds.
1971 if (singular(&vals)) {
1972 run_addr = list_entry(vals.next, struct candidate_val,
1973 list)->val;
1974 ret = try_addr(change, sect, run_addr, NULL,
1975 RUN_PRE_INITIAL);
1976 if (ret != OK) {
1977 ksdebug(change, "run-pre: Debug run failed for "
1978 "sect %s:\n", sect->symbol->label);
1979 release_vals(&vals);
1980 return ret;
1984 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
1986 if (singular(&vals)) {
1987 LIST_HEAD(safety_records);
1988 run_addr = list_entry(vals.next, struct candidate_val,
1989 list)->val;
1990 ret = try_addr(change, sect, run_addr, &safety_records,
1991 RUN_PRE_FINAL);
1992 release_vals(&vals);
1993 if (ret != OK) {
1994 clear_list(&safety_records, struct safety_record, list);
1995 ksdebug(change, "run-pre: Final run failed for sect "
1996 "%s:\n", sect->symbol->label);
1997 } else {
1998 list_splice(&safety_records, &change->safety_records);
2000 return ret;
2001 } else if (!list_empty(&vals)) {
2002 struct candidate_val *val;
2003 ksdebug(change, "run-pre: multiple candidates for sect %s:\n",
2004 sect->symbol->label);
2005 i = 0;
2006 list_for_each_entry(val, &vals, list) {
2007 i++;
2008 ksdebug(change, "%lx\n", val->val);
2009 if (i > 5) {
2010 ksdebug(change, "...\n");
2011 break;
2014 release_vals(&vals);
2015 return NO_MATCH;
2017 release_vals(&vals);
2018 return NO_MATCH;
2022 * try_addr is the interface to run-pre matching. Its primary
2023 * purpose is to manage debugging information for run-pre matching;
2024 * all the hard work is in run_pre_cmp.
2026 static abort_t try_addr(struct ksplice_mod_change *change,
2027 struct ksplice_section *sect,
2028 unsigned long run_addr,
2029 struct list_head *safety_records,
2030 enum run_pre_mode mode)
2032 abort_t ret;
2033 const struct module *run_module = __module_address(run_addr);
2035 if (run_module == change->new_code_mod) {
2036 ksdebug(change, "run-pre: unexpected address %lx in new_code "
2037 "module %s for sect %s\n", run_addr, run_module->name,
2038 sect->symbol->label);
2039 return UNEXPECTED;
2041 if (!patches_module(run_module, change->target)) {
2042 ksdebug(change, "run-pre: ignoring address %lx in other module "
2043 "%s for sect %s\n", run_addr, run_module == NULL ?
2044 "vmlinux" : run_module->name, sect->symbol->label);
2045 return NO_MATCH;
2048 ret = create_labelval(change, sect->symbol, run_addr, TEMP);
2049 if (ret != OK)
2050 return ret;
2052 #ifdef CONFIG_FUNCTION_DATA_SECTIONS
2053 ret = run_pre_cmp(change, sect, run_addr, safety_records, mode);
2054 #else /* !CONFIG_FUNCTION_DATA_SECTIONS */
2055 if ((sect->flags & KSPLICE_SECTION_TEXT) != 0)
2056 ret = arch_run_pre_cmp(change, sect, run_addr, safety_records,
2057 mode);
2058 else
2059 ret = run_pre_cmp(change, sect, run_addr, safety_records, mode);
2060 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
2061 if (ret == NO_MATCH && mode != RUN_PRE_FINAL) {
2062 set_temp_labelvals(change, NOVAL);
2063 ksdebug(change, "run-pre: %s sect %s does not match (r_a=%lx "
2064 "p_a=%lx s=%lx)\n",
2065 (sect->flags & KSPLICE_SECTION_RODATA) != 0 ? "rodata" :
2066 (sect->flags & KSPLICE_SECTION_DATA) != 0 ? "data" :
2067 "text", sect->symbol->label, run_addr, sect->address,
2068 sect->size);
2069 ksdebug(change, "run-pre: ");
2070 if (change->update->debug >= 1) {
2071 #ifdef CONFIG_FUNCTION_DATA_SECTIONS
2072 ret = run_pre_cmp(change, sect, run_addr,
2073 safety_records, RUN_PRE_DEBUG);
2074 #else /* !CONFIG_FUNCTION_DATA_SECTIONS */
2075 if ((sect->flags & KSPLICE_SECTION_TEXT) != 0)
2076 ret = arch_run_pre_cmp(change, sect, run_addr,
2077 safety_records,
2078 RUN_PRE_DEBUG);
2079 else
2080 ret = run_pre_cmp(change, sect, run_addr,
2081 safety_records,
2082 RUN_PRE_DEBUG);
2083 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
2084 set_temp_labelvals(change, NOVAL);
2086 ksdebug(change, "\n");
2087 return ret;
2088 } else if (ret != OK) {
2089 set_temp_labelvals(change, NOVAL);
2090 return ret;
2093 if (mode != RUN_PRE_FINAL) {
2094 set_temp_labelvals(change, NOVAL);
2095 ksdebug(change, "run-pre: candidate for sect %s=%lx\n",
2096 sect->symbol->label, run_addr);
2097 return OK;
2100 set_temp_labelvals(change, VAL);
2101 ksdebug(change, "run-pre: found sect %s=%lx\n", sect->symbol->label,
2102 run_addr);
2103 return OK;
2107 * run_pre_cmp is the primary run-pre matching function; it determines
2108 * whether the given ksplice_section matches the code or data in the
2109 * running kernel starting at run_addr.
2111 * If run_pre_mode is RUN_PRE_FINAL, a safety record for the matched
2112 * section is created.
2114 * The run_pre_mode is also used to determine what debugging
2115 * information to display.
2117 static abort_t run_pre_cmp(struct ksplice_mod_change *change,
2118 const struct ksplice_section *sect,
2119 unsigned long run_addr,
2120 struct list_head *safety_records,
2121 enum run_pre_mode mode)
2123 int matched = 0;
2124 abort_t ret;
2125 const struct ksplice_reloc *r, *finger;
2126 const unsigned char *pre, *run, *pre_start, *run_start;
2127 unsigned char runval;
2129 pre_start = (const unsigned char *)sect->address;
2130 run_start = (const unsigned char *)run_addr;
2132 finger = init_reloc_search(change, sect);
2134 pre = pre_start;
2135 run = run_start;
2136 while (pre < pre_start + sect->size) {
2137 unsigned long offset = pre - pre_start;
2138 ret = lookup_reloc(change, &finger, (unsigned long)pre, &r);
2139 if (ret == OK) {
2140 ret = handle_reloc(change, sect, r, (unsigned long)run,
2141 mode);
2142 if (ret != OK) {
2143 if (mode == RUN_PRE_INITIAL)
2144 ksdebug(change, "reloc in sect does "
2145 "not match after %lx/%lx "
2146 "bytes\n", offset, sect->size);
2147 return ret;
2149 if (mode == RUN_PRE_DEBUG)
2150 print_bytes(change, run, r->howto->size, pre,
2151 r->howto->size);
2152 pre += r->howto->size;
2153 run += r->howto->size;
2154 finger++;
2155 continue;
2156 } else if (ret != NO_MATCH) {
2157 return ret;
2160 if ((sect->flags & KSPLICE_SECTION_TEXT) != 0) {
2161 ret = handle_paravirt(change, (unsigned long)pre,
2162 (unsigned long)run, &matched);
2163 if (ret != OK)
2164 return ret;
2165 if (matched != 0) {
2166 if (mode == RUN_PRE_DEBUG)
2167 print_bytes(change, run, matched, pre,
2168 matched);
2169 pre += matched;
2170 run += matched;
2171 continue;
2175 if (probe_kernel_read(&runval, (void *)run, 1) == -EFAULT) {
2176 if (mode == RUN_PRE_INITIAL)
2177 ksdebug(change, "sect unmapped after %lx/%lx "
2178 "bytes\n", offset, sect->size);
2179 return NO_MATCH;
2182 if (runval != *pre &&
2183 (sect->flags & KSPLICE_SECTION_DATA) == 0) {
2184 if (mode == RUN_PRE_INITIAL)
2185 ksdebug(change, "sect does not match after "
2186 "%lx/%lx bytes\n", offset, sect->size);
2187 if (mode == RUN_PRE_DEBUG) {
2188 print_bytes(change, run, 1, pre, 1);
2189 ksdebug(change, "[p_o=%lx] ! ", offset);
2190 print_bytes(change, run + 1, 2, pre + 1, 2);
2192 return NO_MATCH;
2194 if (mode == RUN_PRE_DEBUG)
2195 print_bytes(change, run, 1, pre, 1);
2196 pre++;
2197 run++;
2199 return create_safety_record(change, sect, safety_records, run_addr,
2200 run - run_start);
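/* Added note: the pre and run cursors above advance in lockstep.
 * Bytes covered by a relocation are not compared literally; instead
 * handle_reloc() checks them symbolically, which is what lets the
 * same pre code match a running kernel whose pointers and call
 * offsets differ from the build the update was prepared against. */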
2203 static void print_bytes(struct ksplice_mod_change *change,
2204 const unsigned char *run, int runc,
2205 const unsigned char *pre, int prec)
2207 int o;
2208 int matched = min(runc, prec);
2209 for (o = 0; o < matched; o++) {
2210 if (run[o] == pre[o])
2211 ksdebug(change, "%02x ", run[o]);
2212 else
2213 ksdebug(change, "%02x/%02x ", run[o], pre[o]);
2215 for (o = matched; o < runc; o++)
2216 ksdebug(change, "%02x/ ", run[o]);
2217 for (o = matched; o < prec; o++)
2218 ksdebug(change, "/%02x ", pre[o]);
2221 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
2222 static abort_t brute_search(struct ksplice_mod_change *change,
2223 struct ksplice_section *sect,
2224 const void *start, unsigned long len,
2225 struct list_head *vals)
2227 unsigned long addr;
2228 char run, pre;
2229 abort_t ret;
2231 for (addr = (unsigned long)start; addr < (unsigned long)start + len;
2232 addr++) {
2233 if (addr % 100000 == 0)
2234 yield();
2236 if (probe_kernel_read(&run, (void *)addr, 1) == -EFAULT)
2237 return OK;
2239 pre = *(const unsigned char *)(sect->address);
2241 if (run != pre)
2242 continue;
2244 ret = try_addr(change, sect, addr, NULL, RUN_PRE_INITIAL);
2245 if (ret == OK) {
2246 ret = add_candidate_val(change, vals, addr);
2247 if (ret != OK)
2248 return ret;
2249 } else if (ret != NO_MATCH) {
2250 return ret;
2254 return OK;
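/* Added note: the single-byte comparison above is only a cheap
 * filter.  An address is handed to the full try_addr() comparison
 * only when its first byte matches the section's first pre byte,
 * which keeps this brute-force sweep over all loaded code tractable
 * on kernels without CONFIG_KALLSYMS. */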
2257 static abort_t brute_search_all(struct ksplice_mod_change *change,
2258 struct ksplice_section *sect,
2259 struct list_head *vals)
2261 struct module *m;
2262 abort_t ret = OK;
2263 int saved_debug;
2265 ksdebug(change, "brute_search: searching for %s\n",
2266 sect->symbol->label);
2267 saved_debug = change->update->debug;
2268 change->update->debug = 0;
2270 list_for_each_entry(m, &modules, list) {
2271 if (!patches_module(m, change->target) ||
2272 m == change->new_code_mod)
2273 continue;
2274 ret = brute_search(change, sect, m->module_core, m->core_size,
2275 vals);
2276 if (ret != OK)
2277 goto out;
2278 ret = brute_search(change, sect, m->module_init, m->init_size,
2279 vals);
2280 if (ret != OK)
2281 goto out;
2284 ret = brute_search(change, sect, (const void *)init_mm.start_code,
2285 init_mm.end_code - init_mm.start_code, vals);
2287 out:
2288 change->update->debug = saved_debug;
2289 return ret;
2291 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
2293 struct range {
2294 unsigned long address;
2295 unsigned long size;
2298 static int reloc_bsearch_compare(const void *key, const void *elt)
2300 const struct range *range = key;
2301 const struct ksplice_reloc *r = elt;
2302 if (range->address + range->size <= r->blank_addr)
2303 return -1;
2304 if (range->address > r->blank_addr)
2305 return 1;
2306 return 0;
2309 static const struct ksplice_reloc *find_reloc(const struct ksplice_reloc *start,
2310 const struct ksplice_reloc *end,
2311 unsigned long address,
2312 unsigned long size)
2314 const struct ksplice_reloc *r;
2315 struct range range = { address, size };
2316 r = bsearch((void *)&range, start, end - start, sizeof(*r),
2317 reloc_bsearch_compare);
2318 if (r == NULL)
2319 return NULL;
2320 while (r > start && (r - 1)->blank_addr >= address)
2321 r--;
2322 return r;
2325 static const struct ksplice_reloc *
2326 init_reloc_search(struct ksplice_mod_change *change,
2327 const struct ksplice_section *sect)
2329 const struct ksplice_reloc *r;
2330 r = find_reloc(change->old_code.relocs, change->old_code.relocs_end,
2331 sect->address, sect->size);
2332 if (r == NULL)
2333 return change->old_code.relocs_end;
2334 return r;
2338 * lookup_reloc implements an amortized O(1) lookup for the next
2339 * old_code relocation. It must be called with a strictly increasing
2340 * sequence of addresses.
2342 * The fingerp is private data for lookup_reloc, and needs to have
2343 * been initialized as a pointer to the result of find_reloc (or
2344 * init_reloc_search).
2346 static abort_t lookup_reloc(struct ksplice_mod_change *change,
2347 const struct ksplice_reloc **fingerp,
2348 unsigned long addr,
2349 const struct ksplice_reloc **relocp)
2351 const struct ksplice_reloc *r = *fingerp;
2352 int canary_ret;
2354 while (r < change->old_code.relocs_end &&
2355 addr >= r->blank_addr + r->howto->size &&
2356 !(addr == r->blank_addr && r->howto->size == 0))
2357 r++;
2358 *fingerp = r;
2359 if (r == change->old_code.relocs_end)
2360 return NO_MATCH;
2361 if (addr < r->blank_addr)
2362 return NO_MATCH;
2363 *relocp = r;
2364 if (r->howto->type != KSPLICE_HOWTO_RELOC)
2365 return OK;
2367 canary_ret = contains_canary(change, r->blank_addr, r->howto);
2368 if (canary_ret < 0)
2369 return UNEXPECTED;
2370 if (canary_ret == 0) {
2371 ksdebug(change, "run-pre: reloc skipped at p_a=%lx to %s+%lx "
2372 "(altinstr)\n", r->blank_addr, r->symbol->label,
2373 r->target_addend);
2374 return NO_MATCH;
2376 if (addr != r->blank_addr) {
2377 ksdebug(change, "Invalid nonzero relocation offset\n");
2378 return UNEXPECTED;
2380 return OK;
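/* Usage sketch (illustrative; mirrors run_pre_cmp above):
 *
 *	finger = init_reloc_search(change, sect);
 *	for (addr = sect->address; addr < sect->address + sect->size; addr++)
 *		if (lookup_reloc(change, &finger, addr, &r) == OK)
 *			... handle the relocation r found at addr ...
 *
 * Because the finger only ever moves forward, the total cost over a
 * section is O(bytes + relocations) rather than O(bytes * relocations). */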
2383 static abort_t handle_reloc(struct ksplice_mod_change *change,
2384 const struct ksplice_section *sect,
2385 const struct ksplice_reloc *r,
2386 unsigned long run_addr, enum run_pre_mode mode)
2388 switch (r->howto->type) {
2389 case KSPLICE_HOWTO_RELOC:
2390 return handle_howto_reloc(change, sect, r, run_addr, mode);
2391 case KSPLICE_HOWTO_DATE:
2392 case KSPLICE_HOWTO_TIME:
2393 return handle_howto_date(change, sect, r, run_addr, mode);
2394 case KSPLICE_HOWTO_BUG:
2395 return handle_bug(change, r, run_addr);
2396 case KSPLICE_HOWTO_EXTABLE:
2397 return handle_extable(change, r, run_addr);
2398 default:
2399 ksdebug(change, "Unexpected howto type %d\n", r->howto->type);
2400 return UNEXPECTED;
2405 * For date/time relocations, we check that the sequence of bytes
2406 * matches the format of a date or time.
2408 static abort_t handle_howto_date(struct ksplice_mod_change *change,
2409 const struct ksplice_section *sect,
2410 const struct ksplice_reloc *r,
2411 unsigned long run_addr, enum run_pre_mode mode)
2413 abort_t ret;
2414 char *buf = kmalloc(r->howto->size, GFP_KERNEL);
2416 if (buf == NULL)
2417 return OUT_OF_MEMORY;
2418 if (probe_kernel_read(buf, (void *)run_addr, r->howto->size) == -EFAULT) {
2419 ret = NO_MATCH;
2420 goto out;
2423 switch (r->howto->type) {
2424 case KSPLICE_HOWTO_TIME:
2425 if (isdigit(buf[0]) && isdigit(buf[1]) && buf[2] == ':' &&
2426 isdigit(buf[3]) && isdigit(buf[4]) && buf[5] == ':' &&
2427 isdigit(buf[6]) && isdigit(buf[7]))
2428 ret = OK;
2429 else
2430 ret = NO_MATCH;
2431 break;
2432 case KSPLICE_HOWTO_DATE:
2433 if (isalpha(buf[0]) && isalpha(buf[1]) && isalpha(buf[2]) &&
2434 buf[3] == ' ' && (buf[4] == ' ' || isdigit(buf[4])) &&
2435 isdigit(buf[5]) && buf[6] == ' ' && isdigit(buf[7]) &&
2436 isdigit(buf[8]) && isdigit(buf[9]) && isdigit(buf[10]))
2437 ret = OK;
2438 else
2439 ret = NO_MATCH;
2440 break;
2441 default:
2442 ret = UNEXPECTED;
2444 if (ret == NO_MATCH && mode == RUN_PRE_INITIAL)
2445 ksdebug(change, "%s string: \"%.*s\" does not match format\n",
2446 r->howto->type == KSPLICE_HOWTO_DATE ? "date" : "time",
2447 r->howto->size, buf);
2449 if (ret != OK)
2450 goto out;
2451 ret = create_labelval(change, r->symbol, run_addr, TEMP);
2452 out:
2453 kfree(buf);
2454 return ret;
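/* Examples of the formats checked above (added commentary): __TIME__
 * expands to strings like "23:59:01" (8 bytes of digits and colons)
 * and __DATE__ to "Jan  1 2009" or "Jan 12 2009" (11 bytes, day
 * space-padded), which is exactly what the isdigit()/isalpha()
 * patterns accept.  Matching the shape rather than the exact bytes is
 * enough, because apply_howto_date later copies the matched run bytes
 * verbatim into the new code. */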
2458 * Extract the value of a symbol used in a relocation in the pre code
2459 * during run-pre matching, giving an error if it conflicts with a
2460 * previously found value of that symbol
2462 static abort_t handle_howto_reloc(struct ksplice_mod_change *change,
2463 const struct ksplice_section *sect,
2464 const struct ksplice_reloc *r,
2465 unsigned long run_addr,
2466 enum run_pre_mode mode)
2468 struct ksplice_section *sym_sect = symbol_section(change, r->symbol);
2469 unsigned long offset = r->target_addend;
2470 unsigned long val;
2471 abort_t ret;
2473 ret = read_reloc_value(change, r, run_addr, &val);
2474 if (ret != OK)
2475 return ret;
2476 if (r->howto->pcrel)
2477 val += run_addr;
2479 #ifdef KSPLICE_STANDALONE
2480 /* The match_map is only used in KSPLICE_STANDALONE */
2481 if (sym_sect == NULL || sym_sect->match_map == NULL || offset == 0) {
2483 } else if (offset >= sym_sect->size) { /* unsigned: negative addends wrap past size */
2484 ksdebug(change, "Out of range relocation: %s+%lx -> %s+%lx",
2485 sect->symbol->label, r->blank_addr - sect->address,
2486 r->symbol->label, offset);
2487 return NO_MATCH;
2488 } else if (sect == sym_sect && sect->match_map[offset] == NULL) {
2489 sym_sect->match_map[offset] =
2490 (const unsigned char *)r->symbol->value + offset;
2491 } else if (sect == sym_sect && (unsigned long)sect->match_map[offset] ==
2492 r->symbol->value + offset) {
2494 } else if (sect == sym_sect) {
2495 ksdebug(change, "Relocations to nonmatching locations within "
2496 "section %s: %lx does not match %lx\n",
2497 sect->symbol->label, offset,
2498 (unsigned long)sect->match_map[offset] -
2499 r->symbol->value);
2500 return NO_MATCH;
2501 } else if ((sym_sect->flags & KSPLICE_SECTION_MATCHED) == 0) {
2502 if (mode == RUN_PRE_INITIAL)
2503 ksdebug(change, "Delaying matching of %s due to reloc "
2504 "from to unmatching section: %s+%lx\n",
2505 sect->symbol->label, r->symbol->label, offset);
2506 return NO_MATCH;
2507 } else if (sym_sect->match_map[offset] == NULL) {
2508 if (mode == RUN_PRE_INITIAL)
2509 ksdebug(change, "Relocation not to instruction "
2510 "boundary: %s+%lx -> %s+%lx",
2511 sect->symbol->label, r->blank_addr -
2512 sect->address, r->symbol->label, offset);
2513 return NO_MATCH;
2514 } else if ((unsigned long)sym_sect->match_map[offset] !=
2515 r->symbol->value + offset) {
2516 if (mode == RUN_PRE_INITIAL)
2517 ksdebug(change, "Match map shift %s+%lx: %lx != %lx\n",
2518 r->symbol->label, offset,
2519 r->symbol->value + offset,
2520 (unsigned long)sym_sect->match_map[offset]);
2521 val += r->symbol->value + offset -
2522 (unsigned long)sym_sect->match_map[offset];
2524 #endif /* KSPLICE_STANDALONE */
2526 if (mode == RUN_PRE_INITIAL)
2527 ksdebug(change, "run-pre: reloc at r_a=%lx p_a=%lx to %s+%lx: "
2528 "found %s = %lx\n", run_addr, r->blank_addr,
2529 r->symbol->label, offset, r->symbol->label, val);
2531 if (contains_canary(change, run_addr, r->howto) != 0) {
2532 ksdebug(change, "Aborted. Unexpected canary in run code at %lx"
2533 "\n", run_addr);
2534 return UNEXPECTED;
2537 if ((sect->flags & KSPLICE_SECTION_DATA) != 0 &&
2538 sect->symbol == r->symbol)
2539 return OK;
2540 ret = create_labelval(change, r->symbol, val, TEMP);
2541 if (ret == NO_MATCH && mode == RUN_PRE_INITIAL)
2542 ksdebug(change, "run-pre: reloc at r_a=%lx p_a=%lx: labelval "
2543 "%s = %lx does not match expected %lx\n", run_addr,
2544 r->blank_addr, r->symbol->label, r->symbol->value, val);
2546 if (ret != OK)
2547 return ret;
2548 if (sym_sect != NULL && (sym_sect->flags & KSPLICE_SECTION_MATCHED) == 0
2549 && (sym_sect->flags & KSPLICE_SECTION_STRING) != 0) {
2550 if (mode == RUN_PRE_INITIAL)
2551 ksdebug(change, "Recursively comparing string section "
2552 "%s\n", sym_sect->symbol->label);
2553 else if (mode == RUN_PRE_DEBUG)
2554 ksdebug(change, "[str start] ");
2555 ret = run_pre_cmp(change, sym_sect, val, NULL, mode);
2556 if (mode == RUN_PRE_DEBUG)
2557 ksdebug(change, "[str end] ");
2558 if (ret == OK && mode == RUN_PRE_INITIAL)
2559 ksdebug(change, "Successfully matched string section %s"
2560 "\n", sym_sect->symbol->label);
2561 else if (mode == RUN_PRE_INITIAL)
2562 ksdebug(change, "Failed to match string section %s\n",
2563 sym_sect->symbol->label);
2565 return ret;
2568 static int symbol_section_bsearch_compare(const void *a, const void *b)
2570 const struct ksplice_symbol *sym = a;
2571 const struct ksplice_section *sect = b;
2572 return strcmp(sym->label, sect->symbol->label);
2575 static int compare_section_labels(const void *va, const void *vb)
2577 const struct ksplice_section *a = va, *b = vb;
2578 return strcmp(a->symbol->label, b->symbol->label);
2581 static struct ksplice_section *symbol_section(struct ksplice_mod_change *change,
2582 const struct ksplice_symbol *sym)
2584 return bsearch(sym, change->old_code.sections,
2585 change->old_code.sections_end -
2586 change->old_code.sections,
2587 sizeof(struct ksplice_section),
2588 symbol_section_bsearch_compare);
2591 /* Find the relocation for the oldaddr of a ksplice_patch */
2592 static const struct ksplice_reloc *
2593 patch_reloc(struct ksplice_mod_change *change,
2594 const struct ksplice_patch *p)
2596 unsigned long addr = (unsigned long)&p->oldaddr;
2597 const struct ksplice_reloc *r =
2598 find_reloc(change->new_code.relocs, change->new_code.relocs_end,
2599 addr, sizeof(addr));
2600 if (r == NULL || r->blank_addr < addr ||
2601 r->blank_addr >= addr + sizeof(addr))
2602 return NULL;
2603 return r;
2607 * Populates vals with the possible values for ksym from the various
2608 * sources Ksplice uses to resolve symbols
2610 static abort_t lookup_symbol(struct ksplice_mod_change *change,
2611 const struct ksplice_symbol *ksym,
2612 struct list_head *vals)
2614 abort_t ret;
2616 #ifdef KSPLICE_STANDALONE
2617 if (!bootstrapped)
2618 return OK;
2619 #endif /* KSPLICE_STANDALONE */
2621 if (ksym->vals == NULL) {
2622 release_vals(vals);
2623 ksdebug(change, "using detected sym %s=%lx\n", ksym->label,
2624 ksym->value);
2625 return add_candidate_val(change, vals, ksym->value);
2628 #ifdef CONFIG_MODULE_UNLOAD
2629 if (strcmp(ksym->label, "cleanup_module") == 0 && change->target != NULL
2630 && change->target->exit != NULL) {
2631 ret = add_candidate_val(change, vals,
2632 (unsigned long)change->target->exit);
2633 if (ret != OK)
2634 return ret;
2636 #endif /* CONFIG_MODULE_UNLOAD */
2638 if (ksym->name != NULL) {
2639 struct candidate_val *val;
2640 list_for_each_entry(val, ksym->vals, list) {
2641 ret = add_candidate_val(change, vals, val->val);
2642 if (ret != OK)
2643 return ret;
2646 ret = new_export_lookup(change, ksym->name, vals);
2647 if (ret != OK)
2648 return ret;
2651 return OK;
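/* Added summary of the sources consulted above: (1) a value already
 * pinned down by earlier matching (ksym->vals == NULL); (2) the
 * target module's ->exit pointer for the "cleanup_module" label,
 * where module unloading is configured; (3) the candidate list
 * accumulated for the symbol; and (4) exports created by other
 * changes in the same update, via new_export_lookup(). */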
2654 #ifdef KSPLICE_STANDALONE
2655 static abort_t
2656 add_system_map_candidates(struct ksplice_mod_change *change,
2657 const struct ksplice_system_map *start,
2658 const struct ksplice_system_map *end,
2659 const char *label, struct list_head *vals)
2661 abort_t ret;
2662 long off;
2663 int i;
2664 const struct ksplice_system_map *smap;
2666 /* Some Fedora kernel releases have System.map files whose symbol
2667 * addresses disagree with the running kernel by a constant address
2668 * offset because of the CONFIG_PHYSICAL_START and CONFIG_PHYSICAL_ALIGN
2669 * values used to compile these kernels. This constant address offset
2670 * is always a multiple of 0x100000.
2672 * If we observe an offset that is NOT a multiple of 0x100000, then the
2673 * user provided us with an incorrect System.map file, and we should
2674 * abort.
2675 * If we observe an offset that is a multiple of 0x100000, then we can
2676 * adjust the System.map address values accordingly and proceed.
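 *
 * Worked example with hypothetical addresses: if the running kernel's
 * printk is at 0xffffffff80232f70 but System.map recorded
 * 0xffffffff80032f70, the subtraction below yields off = 0x200000,
 * which passes the check and is added to every candidate address; an
 * off of, say, 0x1230 would abort with BAD_SYSTEM_MAP instead.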
2678 off = (unsigned long)printk - change->map_printk;
2679 if (off & 0xfffff) {
2680 ksdebug(change,
2681 "Aborted. System.map does not match kernel.\n");
2682 return BAD_SYSTEM_MAP;
2685 smap = bsearch(label, start, end - start, sizeof(*smap),
2686 system_map_bsearch_compare);
2687 if (smap == NULL)
2688 return OK;
2690 for (i = 0; i < smap->nr_candidates; i++) {
2691 ret = add_candidate_val(change, vals,
2692 smap->candidates[i] + off);
2693 if (ret != OK)
2694 return ret;
2696 return OK;
2699 static int system_map_bsearch_compare(const void *key, const void *elt)
2701 const struct ksplice_system_map *map = elt;
2702 const char *label = key;
2703 return strcmp(label, map->label);
2705 #endif /* KSPLICE_STANDALONE */
2708 * An update could cause one module to export a symbol and at the
2709 * same time change another module to use that symbol. This breaks
2710 * the usual assumption that each change can be handled independently.
2712 * new_export_lookup obtains symbol values from the changes to the
2713 * exported symbol table made by other changes.
2715 static abort_t new_export_lookup(struct ksplice_mod_change *ichange,
2716 const char *name, struct list_head *vals)
2718 struct ksplice_mod_change *change;
2719 struct ksplice_patch *p;
2720 list_for_each_entry(change, &ichange->update->changes, list) {
2721 for (p = change->patches; p < change->patches_end; p++) {
2722 const struct kernel_symbol *sym;
2723 const struct ksplice_reloc *r;
2724 if (p->type != KSPLICE_PATCH_EXPORT ||
2725 strcmp(name, *(const char **)p->contents) != 0)
2726 continue;
2728 /* Check that the p->oldaddr reloc has been resolved. */
2729 r = patch_reloc(change, p);
2730 if (r == NULL ||
2731 contains_canary(change, r->blank_addr,
2732 r->howto) != 0)
2733 continue;
2734 sym = (const struct kernel_symbol *)r->symbol->value;
2737 * Check that the sym->value reloc has been resolved,
2738 * if there is a Ksplice relocation there.
2740 r = find_reloc(change->new_code.relocs,
2741 change->new_code.relocs_end,
2742 (unsigned long)&sym->value,
2743 sizeof(&sym->value));
2744 if (r != NULL &&
2745 r->blank_addr == (unsigned long)&sym->value &&
2746 contains_canary(change, r->blank_addr,
2747 r->howto) != 0)
2748 continue;
2749 return add_candidate_val(ichange, vals, sym->value);
2752 return OK;
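/* Scenario (illustrative): an update changes module A to export
 * foo_sym (a hypothetical name) and changes module B to call foo_sym.
 * B's reference cannot be resolved from the running kernel's export
 * tables, so the loop above finds A's KSPLICE_PATCH_EXPORT patch,
 * follows its already-resolved oldaddr relocation to the new struct
 * kernel_symbol, and reads the value from there -- skipping entries
 * whose relocations still hold the unresolved Ksplice canary. */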
2756 * When patch_action is called, the update should be fully prepared.
2757 * patch_action will try to actually insert or remove trampolines for
2758 * the update.
2760 static abort_t patch_action(struct update *update, enum ksplice_action action)
2762 static int (*const __patch_actions[KS_ACTIONS])(void *) = {
2763 [KS_APPLY] = __apply_patches,
2764 [KS_REVERSE] = __reverse_patches,
2766 int i;
2767 abort_t ret;
2768 struct ksplice_mod_change *change;
2770 ret = map_trampoline_pages(update);
2771 if (ret != OK)
2772 return ret;
2774 list_for_each_entry(change, &update->changes, list) {
2775 const typeof(int (*)(void)) *f;
2776 for (f = change->hooks[action].pre;
2777 f < change->hooks[action].pre_end; f++) {
2778 if ((*f)() != 0) {
2779 ret = CALL_FAILED;
2780 goto out;
2785 for (i = 0; i < 5; i++) {
2786 cleanup_conflicts(update);
2787 #ifdef KSPLICE_STANDALONE
2788 bust_spinlocks(1);
2789 #endif /* KSPLICE_STANDALONE */
2790 ret = (__force abort_t)stop_machine(__patch_actions[action],
2791 update, NULL);
2792 #ifdef KSPLICE_STANDALONE
2793 bust_spinlocks(0);
2794 #endif /* KSPLICE_STANDALONE */
2795 if (ret != CODE_BUSY)
2796 break;
2797 set_current_state(TASK_INTERRUPTIBLE);
2798 schedule_timeout(msecs_to_jiffies(1000));
2800 out:
2801 unmap_trampoline_pages(update);
2803 if (ret == CODE_BUSY) {
2804 print_conflicts(update);
2805 _ksdebug(update, "Aborted %s. stack check: to-be-%s "
2806 "code is busy.\n", update->kid,
2807 action == KS_APPLY ? "replaced" : "reversed");
2808 } else if (ret == ALREADY_REVERSED) {
2809 _ksdebug(update, "Aborted %s. Ksplice update %s is already "
2810 "reversed.\n", update->kid, update->kid);
2811 } else if (ret == MODULE_BUSY) {
2812 _ksdebug(update, "Update %s is in use by another module\n",
2813 update->kid);
2816 if (ret != OK) {
2817 list_for_each_entry(change, &update->changes, list) {
2818 const typeof(void (*)(void)) *f;
2819 for (f = change->hooks[action].fail;
2820 f < change->hooks[action].fail_end; f++)
2821 (*f)();
2824 return ret;
2827 list_for_each_entry(change, &update->changes, list) {
2828 const typeof(void (*)(void)) *f;
2829 for (f = change->hooks[action].post;
2830 f < change->hooks[action].post_end; f++)
2831 (*f)();
2834 _ksdebug(update, "Atomic patch %s for %s complete\n",
2835 action == KS_APPLY ? "insertion" : "removal", update->kid);
2836 return OK;
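/* Added note: CODE_BUSY from the stop_machine callback only means
 * that, at that instant, some thread was sleeping inside (or had a
 * return address into) code the update touches, so patch_action
 * retries up to five times with a one-second sleep in between; the
 * conflict list printed on final failure names the PIDs and stack
 * addresses that kept blocking it. */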
2839 /* Atomically insert the update; run from within stop_machine */
2840 static int __apply_patches(void *updateptr)
2842 struct update *update = updateptr;
2843 struct ksplice_mod_change *change;
2844 struct ksplice_module_list_entry *entry;
2845 struct ksplice_patch *p;
2846 abort_t ret;
2848 if (update->stage == STAGE_APPLIED)
2849 return (__force int)OK;
2851 if (update->stage != STAGE_PREPARING)
2852 return (__force int)UNEXPECTED;
2854 ret = check_each_task(update);
2855 if (ret != OK)
2856 return (__force int)ret;
2858 list_for_each_entry(change, &update->changes, list) {
2859 if (try_module_get(change->new_code_mod) != 1) {
2860 struct ksplice_mod_change *change1;
2861 list_for_each_entry(change1, &update->changes, list) {
2862 if (change1 == change)
2863 break;
2864 module_put(change1->new_code_mod);
2866 module_put(THIS_MODULE);
2867 return (__force int)UNEXPECTED;
2871 list_for_each_entry(change, &update->changes, list) {
2872 const typeof(int (*)(void)) *f;
2873 for (f = change->hooks[KS_APPLY].check;
2874 f < change->hooks[KS_APPLY].check_end; f++) {
2875 if ((*f)() != 0)
2876 return (__force int)CALL_FAILED;
2880 /* Commit point: the update application will succeed. */
2882 update->stage = STAGE_APPLIED;
2883 #ifdef TAINT_KSPLICE
2884 add_taint(TAINT_KSPLICE);
2885 #endif /* TAINT_KSPLICE */
2887 list_for_each_entry(entry, &update->ksplice_module_list, update_list)
2888 list_add(&entry->list, &ksplice_modules);
2890 list_for_each_entry(change, &update->changes, list) {
2891 for (p = change->patches; p < change->patches_end; p++)
2892 insert_trampoline(p);
2895 list_for_each_entry(change, &update->changes, list) {
2896 const typeof(void (*)(void)) *f;
2897 for (f = change->hooks[KS_APPLY].intra;
2898 f < change->hooks[KS_APPLY].intra_end; f++)
2899 (*f)();
2902 return (__force int)OK;
2905 /* Atomically remove the update; run from within stop_machine */
2906 static int __reverse_patches(void *updateptr)
2908 struct update *update = updateptr;
2909 struct ksplice_mod_change *change;
2910 struct ksplice_module_list_entry *entry;
2911 const struct ksplice_patch *p;
2912 abort_t ret;
2914 if (update->stage != STAGE_APPLIED)
2915 return (__force int)OK;
2917 #ifdef CONFIG_MODULE_UNLOAD
2918 list_for_each_entry(change, &update->changes, list) {
2919 if (module_refcount(change->new_code_mod) != 1)
2920 return (__force int)MODULE_BUSY;
2922 #endif /* CONFIG_MODULE_UNLOAD */
2924 list_for_each_entry(entry, &update->ksplice_module_list, update_list) {
2925 if (!entry->applied &&
2926 find_module(entry->target_mod_name) != NULL)
2927 return (__force int)COLD_UPDATE_LOADED;
2930 ret = check_each_task(update);
2931 if (ret != OK)
2932 return (__force int)ret;
2934 list_for_each_entry(change, &update->changes, list) {
2935 for (p = change->patches; p < change->patches_end; p++) {
2936 ret = verify_trampoline(change, p);
2937 if (ret != OK)
2938 return (__force int)ret;
2942 list_for_each_entry(change, &update->changes, list) {
2943 const typeof(int (*)(void)) *f;
2944 for (f = change->hooks[KS_REVERSE].check;
2945 f < change->hooks[KS_REVERSE].check_end; f++) {
2946 if ((*f)() != 0)
2947 return (__force int)CALL_FAILED;
2951 /* Commit point: the update reversal will succeed. */
2953 update->stage = STAGE_REVERSED;
2955 list_for_each_entry(change, &update->changes, list)
2956 module_put(change->new_code_mod);
2958 list_for_each_entry(entry, &update->ksplice_module_list, update_list)
2959 list_del(&entry->list);
2961 list_for_each_entry(change, &update->changes, list) {
2962 const typeof(void (*)(void)) *f;
2963 for (f = change->hooks[KS_REVERSE].intra;
2964 f < change->hooks[KS_REVERSE].intra_end; f++)
2965 (*f)();
2968 list_for_each_entry(change, &update->changes, list) {
2969 for (p = change->patches; p < change->patches_end; p++)
2970 remove_trampoline(p);
2973 return (__force int)OK;
2977 * Check whether any thread's instruction pointer or any address of
2978 * its stack is contained in one of the safety_records associated with
2979 * the update.
2981 * check_each_task must be called from inside stop_machine, because it
2982 * does not take tasklist_lock (which cannot be held by anyone else
2983 * during stop_machine).
2985 static abort_t check_each_task(struct update *update)
2987 const struct task_struct *g, *p;
2988 abort_t status = OK, ret;
2989 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
2990 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
2991 read_lock(&tasklist_lock);
2992 #endif /* LINUX_VERSION_CODE */
2993 do_each_thread(g, p) {
2994 /* do_each_thread is a double loop! */
2995 ret = check_task(update, p, false);
2996 if (ret != OK) {
2997 check_task(update, p, true);
2998 status = ret;
3000 if (ret != OK && ret != CODE_BUSY)
3001 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
3002 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
3003 goto out;
3004 #else /* LINUX_VERSION_CODE < */
3005 return ret;
3006 #endif /* LINUX_VERSION_CODE */
3007 } while_each_thread(g, p);
3008 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
3009 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
3010 out:
3011 read_unlock(&tasklist_lock);
3012 #endif /* LINUX_VERSION_CODE */
3013 return status;
3016 static abort_t check_task(struct update *update,
3017 const struct task_struct *t, bool rerun)
3019 abort_t status, ret;
3020 struct conflict *conf = NULL;
3022 if (rerun) {
3023 conf = kmalloc(sizeof(*conf), GFP_ATOMIC);
3024 if (conf == NULL)
3025 return OUT_OF_MEMORY;
3026 conf->process_name = kstrdup(t->comm, GFP_ATOMIC);
3027 if (conf->process_name == NULL) {
3028 kfree(conf);
3029 return OUT_OF_MEMORY;
3031 conf->pid = t->pid;
3032 INIT_LIST_HEAD(&conf->stack);
3033 list_add(&conf->list, &update->conflicts);
3036 status = check_address(update, conf, KSPLICE_IP(t));
3037 if (t == current) {
3038 ret = check_stack(update, conf, task_thread_info(t),
3039 (unsigned long *)__builtin_frame_address(0));
3040 if (status == OK)
3041 status = ret;
3042 } else if (!task_curr(t)) {
3043 ret = check_stack(update, conf, task_thread_info(t),
3044 (unsigned long *)KSPLICE_SP(t));
3045 if (status == OK)
3046 status = ret;
3047 } else if (!is_stop_machine(t)) {
3048 status = UNEXPECTED_RUNNING_TASK;
3050 return status;
3053 static abort_t check_stack(struct update *update, struct conflict *conf,
3054 const struct thread_info *tinfo,
3055 const unsigned long *stack)
3057 abort_t status = OK, ret;
3058 unsigned long addr;
3060 while (valid_stack_ptr(tinfo, stack)) {
3061 addr = *stack++;
3062 ret = check_address(update, conf, addr);
3063 if (ret != OK)
3064 status = ret;
3066 return status;
3069 static abort_t check_address(struct update *update,
3070 struct conflict *conf, unsigned long addr)
3072 abort_t status = OK, ret;
3073 const struct safety_record *rec;
3074 struct ksplice_mod_change *change;
3075 struct conflict_addr *ca = NULL;
3077 if (conf != NULL) {
3078 ca = kmalloc(sizeof(*ca), GFP_ATOMIC);
3079 if (ca == NULL)
3080 return OUT_OF_MEMORY;
3081 ca->addr = addr;
3082 ca->has_conflict = false;
3083 ca->label = NULL;
3084 list_add(&ca->list, &conf->stack);
3087 list_for_each_entry(change, &update->changes, list) {
3088 unsigned long tramp_addr = follow_trampolines(change, addr);
3089 list_for_each_entry(rec, &change->safety_records, list) {
3090 ret = check_record(ca, rec, tramp_addr);
3091 if (ret != OK)
3092 status = ret;
3095 return status;
3098 static abort_t check_record(struct conflict_addr *ca,
3099 const struct safety_record *rec, unsigned long addr)
3101 if (addr >= rec->addr && addr < rec->addr + rec->size) {
3102 if (ca != NULL) {
3103 ca->label = rec->label;
3104 ca->has_conflict = true;
3106 return CODE_BUSY;
3108 return OK;
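/* Added note: the stack check treats every word on every kernel
 * stack as a potential return address into a safety record.  That is
 * conservative -- stale data that merely looks like such an address
 * also yields CODE_BUSY -- but a false positive only delays the
 * update until the next retry; it can never corrupt a running
 * thread. */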
3111 /* Is the task one of the stop_machine tasks? */
3112 static bool is_stop_machine(const struct task_struct *t)
3114 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
3115 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
3116 const char *kstop_prefix = "kstop/";
3117 #else /* LINUX_VERSION_CODE < */
3118 /* c9583e55fa2b08a230c549bd1e3c0bde6c50d9cc was after 2.6.27 */
3119 const char *kstop_prefix = "kstop";
3120 #endif /* LINUX_VERSION_CODE */
3121 const char *num;
3122 if (!strstarts(t->comm, kstop_prefix))
3123 return false;
3124 num = t->comm + strlen(kstop_prefix);
3125 return num[strspn(num, "0123456789")] == '\0';
3126 #else /* LINUX_VERSION_CODE < */
3127 /* ffdb5976c47609c862917d4c186ecbb5706d2dda was after 2.6.26 */
3128 return strcmp(t->comm, "kstopmachine") == 0;
3129 #endif /* LINUX_VERSION_CODE */
3132 static void cleanup_conflicts(struct update *update)
3134 struct conflict *conf;
3135 list_for_each_entry(conf, &update->conflicts, list) {
3136 clear_list(&conf->stack, struct conflict_addr, list);
3137 kfree(conf->process_name);
3139 clear_list(&update->conflicts, struct conflict, list);
3142 static void print_conflicts(struct update *update)
3144 const struct conflict *conf;
3145 const struct conflict_addr *ca;
3146 list_for_each_entry(conf, &update->conflicts, list) {
3147 _ksdebug(update, "stack check: pid %d (%s):", conf->pid,
3148 conf->process_name);
3149 list_for_each_entry(ca, &conf->stack, list) {
3150 _ksdebug(update, " %lx", ca->addr);
3151 if (ca->has_conflict)
3152 _ksdebug(update, " [<-CONFLICT]");
3154 _ksdebug(update, "\n");
3158 static void insert_trampoline(struct ksplice_patch *p)
3160 mm_segment_t old_fs = get_fs();
3161 set_fs(KERNEL_DS);
3162 memcpy(p->saved, p->vaddr, p->size);
3163 memcpy(p->vaddr, p->contents, p->size);
3164 flush_icache_range(p->oldaddr, p->oldaddr + p->size);
3165 set_fs(old_fs);
3168 static abort_t verify_trampoline(struct ksplice_mod_change *change,
3169 const struct ksplice_patch *p)
3171 if (memcmp(p->vaddr, p->contents, p->size) != 0) {
3172 ksdebug(change, "Aborted. Trampoline at %lx has been "
3173 "overwritten.\n", p->oldaddr);
3174 return CODE_BUSY;
3176 return OK;
3179 static void remove_trampoline(const struct ksplice_patch *p)
3181 mm_segment_t old_fs = get_fs();
3182 set_fs(KERNEL_DS);
3183 memcpy(p->vaddr, p->saved, p->size);
3184 flush_icache_range(p->oldaddr, p->oldaddr + p->size);
3185 set_fs(old_fs);
3188 /* Returns NO_MATCH if there's already a labelval with a different value */
3189 static abort_t create_labelval(struct ksplice_mod_change *change,
3190 struct ksplice_symbol *ksym,
3191 unsigned long val, int status)
3193 val = follow_trampolines(change, val);
3194 if (ksym->vals == NULL)
3195 return ksym->value == val ? OK : NO_MATCH;
3197 ksym->value = val;
3198 if (status == TEMP) {
3199 struct labelval *lv = kmalloc(sizeof(*lv), GFP_KERNEL);
3200 if (lv == NULL)
3201 return OUT_OF_MEMORY;
3202 lv->symbol = ksym;
3203 lv->saved_vals = ksym->vals;
3204 list_add(&lv->list, &change->temp_labelvals);
3206 ksym->vals = NULL;
3207 return OK;
3211 * Creates a new safety_record for an old_code section based on its
3212 * ksplice_section and run-pre matching information.
3214 static abort_t create_safety_record(struct ksplice_mod_change *change,
3215 const struct ksplice_section *sect,
3216 struct list_head *record_list,
3217 unsigned long run_addr,
3218 unsigned long run_size)
3220 struct safety_record *rec;
3221 struct ksplice_patch *p;
3223 if (record_list == NULL)
3224 return OK;
3226 for (p = change->patches; p < change->patches_end; p++) {
3227 const struct ksplice_reloc *r = patch_reloc(change, p);
3228 if (strcmp(sect->symbol->label, r->symbol->label) == 0)
3229 break;
3231 if (p >= change->patches_end)
3232 return OK;
3234 rec = kmalloc(sizeof(*rec), GFP_KERNEL);
3235 if (rec == NULL)
3236 return OUT_OF_MEMORY;
3238 * The old_code module might already be unloaded by the time reversal
3239 * is checked, so we need to kstrdup the label here.
3241 rec->label = kstrdup(sect->symbol->label, GFP_KERNEL);
3242 if (rec->label == NULL) {
3243 kfree(rec);
3244 return OUT_OF_MEMORY;
3246 rec->addr = run_addr;
3247 rec->size = run_size;
3249 list_add(&rec->list, record_list);
3250 return OK;
3253 static abort_t add_candidate_val(struct ksplice_mod_change *change,
3254 struct list_head *vals, unsigned long val)
3256 struct candidate_val *tmp, *new;
3259 * Careful: follow trampolines before comparing values so that we do
3260 * not mistake the obsolete function for another copy of the function.
3262 val = follow_trampolines(change, val);
3264 list_for_each_entry(tmp, vals, list) {
3265 if (tmp->val == val)
3266 return OK;
3268 new = kmalloc(sizeof(*new), GFP_KERNEL);
3269 if (new == NULL)
3270 return OUT_OF_MEMORY;
3271 new->val = val;
3272 list_add(&new->list, vals);
3273 return OK;
3276 static void release_vals(struct list_head *vals)
3278 clear_list(vals, struct candidate_val, list);
3282 * The temp_labelvals list is used to cache those temporary labelvals
3283 * that have been created to cross-check the symbol values obtained
3284 * from different relocations within a single section being matched.
3286 * If status is VAL, commit the temp_labelvals as final values.
3288 * If status is NOVAL, restore the list of possible values to the
3289 * ksplice_symbol, so that it no longer has a known value.
3291 static void set_temp_labelvals(struct ksplice_mod_change *change, int status)
3293 struct labelval *lv, *n;
3294 list_for_each_entry_safe(lv, n, &change->temp_labelvals, list) {
3295 if (status == NOVAL) {
3296 lv->symbol->vals = lv->saved_vals;
3297 } else {
3298 release_vals(lv->saved_vals);
3299 kfree(lv->saved_vals);
3301 list_del(&lv->list);
3302 kfree(lv);
3306 /* Is there a Ksplice canary with the given howto at blank_addr? */
3307 static int contains_canary(struct ksplice_mod_change *change,
3308 unsigned long blank_addr,
3309 const struct ksplice_reloc_howto *howto)
3311 switch (howto->size) {
3312 case 1:
3313 return (*(uint8_t *)blank_addr & howto->dst_mask) ==
3314 (KSPLICE_CANARY & howto->dst_mask);
3315 case 2:
3316 return (*(uint16_t *)blank_addr & howto->dst_mask) ==
3317 (KSPLICE_CANARY & howto->dst_mask);
3318 case 4:
3319 return (*(uint32_t *)blank_addr & howto->dst_mask) ==
3320 (KSPLICE_CANARY & howto->dst_mask);
3321 #if BITS_PER_LONG >= 64
3322 case 8:
3323 return (*(uint64_t *)blank_addr & howto->dst_mask) ==
3324 (KSPLICE_CANARY & howto->dst_mask);
3325 #endif /* BITS_PER_LONG */
3326 default:
3327 ksdebug(change, "Aborted. Invalid relocation size.\n");
3328 return -1;
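/* Illustrative example (hypothetical howto with size 4 and dst_mask
 * 0xffffffff): a relocation slot that still holds the KSPLICE_CANARY
 * pattern makes contains_canary() return 1; once write_reloc_value()
 * has stored a real address there, the masked comparison no longer
 * matches and it returns 0.  Callers use this to distinguish
 * resolved relocation slots from blanks. */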
3333 * Compute the address of the code you would actually run if you were
3334 * to call the function at addr (i.e., follow the sequence of jumps
3335 * starting at addr)
3337 static unsigned long follow_trampolines(struct ksplice_mod_change *change,
3338 unsigned long addr)
3340 unsigned long new_addr;
3341 struct module *m;
3343 while (1) {
3344 #ifdef KSPLICE_STANDALONE
3345 if (!bootstrapped)
3346 return addr;
3347 #endif /* KSPLICE_STANDALONE */
3348 if (!__kernel_text_address(addr) ||
3349 trampoline_target(change, addr, &new_addr) != OK)
3350 return addr;
3351 m = __module_text_address(new_addr);
3352 if (m == NULL || m == change->target ||
3353 !strstarts(m->name, "ksplice"))
3354 return addr;
3355 addr = new_addr;
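/* Example with hypothetical addresses: once foo() at
 * 0xffffffff802d1000 has been patched, the bytes there form a jump
 * into a ksplice_*_new module, so follow_trampolines() returns the
 * replacement function's address instead; an address that does not
 * begin a Ksplice trampoline is returned unchanged.  This is why
 * candidate values and labelvals are canonicalized through this
 * function before being compared. */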
3359 /* Does module a patch module b? */
3360 static bool patches_module(const struct module *a, const struct module *b)
3362 #ifdef KSPLICE_NO_KERNEL_SUPPORT
3363 const char *name;
3364 const char *modname = b == NULL ? "vmlinux" : b->name;
3365 if (a == b)
3366 return true;
3367 if (a == NULL || !strstarts(a->name, "ksplice_"))
3368 return false;
3369 name = a->name + strlen("ksplice_");
3370 name += strcspn(name, "_");
3371 if (name[0] != '_')
3372 return false;
3373 name++;
3374 return strstarts(name, modname) &&
3375 strcmp(name + strlen(modname), "_new") == 0;
3376 #else /* !KSPLICE_NO_KERNEL_SUPPORT */
3377 struct ksplice_module_list_entry *entry;
3378 if (a == b)
3379 return true;
3380 list_for_each_entry(entry, &ksplice_modules, list) {
3381 if (strcmp(entry->target_mod_name, b == NULL ? "vmlinux" : b->name) == 0 &&
3382 strcmp(entry->new_code_mod_name, a == NULL ? "vmlinux" : a->name) == 0)
3383 return true;
3385 return false;
3386 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
3389 #ifdef KSPLICE_NO_KERNEL_SUPPORT
3390 static bool strstarts(const char *str, const char *prefix)
3392 return strncmp(str, prefix, strlen(prefix)) == 0;
3394 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
3396 static bool singular(struct list_head *list)
3398 return !list_empty(list) && list->next->next == list;
3401 static void *bsearch(const void *key, const void *base, size_t n,
3402 size_t size, int (*cmp)(const void *key, const void *elt))
3404 int start = 0, end = n - 1, mid, result;
3405 if (n == 0)
3406 return NULL;
3407 while (start <= end) {
3408 mid = (start + end) / 2;
3409 result = cmp(key, base + mid * size);
3410 if (result < 0)
3411 end = mid - 1;
3412 else if (result > 0)
3413 start = mid + 1;
3414 else
3415 return (void *)base + mid * size;
3417 return NULL;
3420 static int compare_relocs(const void *a, const void *b)
3422 const struct ksplice_reloc *ra = a, *rb = b;
3423 if (ra->blank_addr > rb->blank_addr)
3424 return 1;
3425 else if (ra->blank_addr < rb->blank_addr)
3426 return -1;
3427 else
3428 return ra->howto->size - rb->howto->size;
3431 #ifdef KSPLICE_STANDALONE
3432 static int compare_system_map(const void *a, const void *b)
3434 const struct ksplice_system_map *sa = a, *sb = b;
3435 return strcmp(sa->label, sb->label);
3437 #endif /* KSPLICE_STANDALONE */
3439 #ifdef CONFIG_DEBUG_FS
3440 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
3441 /* Old kernels don't have debugfs_create_blob */
3442 static ssize_t read_file_blob(struct file *file, char __user *user_buf,
3443 size_t count, loff_t *ppos)
3445 struct debugfs_blob_wrapper *blob = file->private_data;
3446 return simple_read_from_buffer(user_buf, count, ppos, blob->data,
3447 blob->size);
3450 static int blob_open(struct inode *inode, struct file *file)
3452 if (inode->i_private)
3453 file->private_data = inode->i_private;
3454 return 0;
3457 static struct file_operations fops_blob = {
3458 .read = read_file_blob,
3459 .open = blob_open,
3462 static struct dentry *debugfs_create_blob(const char *name, mode_t mode,
3463 struct dentry *parent,
3464 struct debugfs_blob_wrapper *blob)
3466 return debugfs_create_file(name, mode, parent, blob, &fops_blob);
3468 #endif /* LINUX_VERSION_CODE */
3470 static abort_t init_debug_buf(struct update *update)
3472 update->debug_blob.size = 0;
3473 update->debug_blob.data = NULL;
3474 update->debugfs_dentry =
3475 debugfs_create_blob(update->name, S_IFREG | S_IRUSR, NULL,
3476 &update->debug_blob);
3477 if (update->debugfs_dentry == NULL)
3478 return OUT_OF_MEMORY;
3479 return OK;
3482 static void clear_debug_buf(struct update *update)
3484 if (update->debugfs_dentry == NULL)
3485 return;
3486 debugfs_remove(update->debugfs_dentry);
3487 update->debugfs_dentry = NULL;
3488 update->debug_blob.size = 0;
3489 vfree(update->debug_blob.data);
3490 update->debug_blob.data = NULL;
3493 static int _ksdebug(struct update *update, const char *fmt, ...)
3495 va_list args;
3496 unsigned long size, old_size, new_size;
3498 if (update->debug == 0)
3499 return 0;
3501 /* size includes the trailing '\0' */
3502 va_start(args, fmt);
3503 size = 1 + vsnprintf(update->debug_blob.data, 0, fmt, args);
3504 va_end(args);
3505 old_size = update->debug_blob.size == 0 ? 0 :
3506 max(PAGE_SIZE, roundup_pow_of_two(update->debug_blob.size));
3507 new_size = update->debug_blob.size + size == 0 ? 0 :
3508 max(PAGE_SIZE, roundup_pow_of_two(update->debug_blob.size + size));
3509 if (new_size > old_size) {
3510 char *buf = vmalloc(new_size);
3511 if (buf == NULL)
3512 return -ENOMEM;
3513 memcpy(buf, update->debug_blob.data, update->debug_blob.size);
3514 vfree(update->debug_blob.data);
3515 update->debug_blob.data = buf;
3517 va_start(args, fmt);
3518 update->debug_blob.size += vsnprintf(update->debug_blob.data +
3519 update->debug_blob.size,
3520 size, fmt, args);
3521 va_end(args);
3522 return 0;
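/* Added note on the allocation strategy above: the blob grows in
 * power-of-two steps with a PAGE_SIZE floor, so repeated appends cost
 * amortized O(1) copying per byte; vsnprintf runs once with a zero
 * size to measure the formatted length, then a second time to append
 * for real. */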
3524 #else /* CONFIG_DEBUG_FS */
3525 static abort_t init_debug_buf(struct update *update)
3527 return OK;
3530 static void clear_debug_buf(struct update *update)
3532 return;
3535 static int _ksdebug(struct update *update, const char *fmt, ...)
3537 va_list args;
3539 if (update->debug == 0)
3540 return 0;
3542 if (!update->debug_continue_line)
3543 printk(KERN_DEBUG "ksplice: ");
3545 va_start(args, fmt);
3546 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
3547 vprintk(fmt, args);
3548 #else /* LINUX_VERSION_CODE < */
3549 /* 683b229286b429244f35726b3c18caec429233bd was after 2.6.8 */
3551 char *buf = kvasprintf(GFP_KERNEL, fmt, args);
3552 printk("%s", buf);
3553 kfree(buf);
3555 #endif /* LINUX_VERSION_CODE */
3556 va_end(args);
3558 update->debug_continue_line =
3559 fmt[0] == '\0' || fmt[strlen(fmt) - 1] != '\n';
3560 return 0;
3562 #endif /* CONFIG_DEBUG_FS */
3564 #ifdef KSPLICE_NO_KERNEL_SUPPORT
3565 #ifdef CONFIG_KALLSYMS
3566 static int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
3567 struct module *, unsigned long),
3568 void *data)
3570 char namebuf[KSYM_NAME_LEN];
3571 unsigned long i;
3572 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3573 unsigned int off;
3574 #endif /* LINUX_VERSION_CODE */
3575 int ret;
3577 /* kallsyms compression was added by 5648d78927ca65e74aadc88a2b1d6431e55e78ec
3578 * 2.6.10 was the first release after this commit
3580 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3581 for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
3582 off = kallsyms_expand_symbol(off, namebuf);
3583 ret = fn(data, namebuf, NULL, kallsyms_addresses[i]);
3584 if (ret != 0)
3585 return ret;
3587 #else /* LINUX_VERSION_CODE < */
3588 char *knames;
3590 for (i = 0, knames = kallsyms_names; i < kallsyms_num_syms; i++) {
3591 unsigned prefix = *knames++;
3593 strlcpy(namebuf + prefix, knames, KSYM_NAME_LEN - prefix);
3595 ret = fn(data, namebuf, NULL, kallsyms_addresses[i]);
3596 if (ret != OK)
3597 return ret;
3599 knames += strlen(knames) + 1;
3601 #endif /* LINUX_VERSION_CODE */
3602 return module_kallsyms_on_each_symbol(fn, data);
3605 /* kallsyms compression was added by 5648d78927ca65e74aadc88a2b1d6431e55e78ec
3606 * 2.6.10 was the first release after this commit
3608 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3609 extern u8 kallsyms_token_table[];
3610 extern u16 kallsyms_token_index[];
3612 static unsigned int kallsyms_expand_symbol(unsigned int off, char *result)
3614 long len, skipped_first = 0;
3615 const u8 *tptr, *data;
3617 data = &kallsyms_names[off];
3618 len = *data;
3619 data++;
3621 off += len + 1;
3623 while (len) {
3624 tptr = &kallsyms_token_table[kallsyms_token_index[*data]];
3625 data++;
3626 len--;
3628 while (*tptr) {
3629 if (skipped_first) {
3630 *result = *tptr;
3631 result++;
3632 } else
3633 skipped_first = 1;
3634 tptr++;
3638 *result = '\0';
3640 return off;
3642 #endif /* LINUX_VERSION_CODE */
3644 static int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
3645 struct module *,
3646 unsigned long),
3647 void *data)
3649 struct module *mod;
3650 unsigned int i;
3651 int ret;
3653 list_for_each_entry(mod, &modules, list) {
3654 for (i = 0; i < mod->num_symtab; i++) {
3655 ret = fn(data, mod->strtab + mod->symtab[i].st_name,
3656 mod, mod->symtab[i].st_value);
3657 if (ret != 0)
3658 return ret;
3661 return 0;
3663 #endif /* CONFIG_KALLSYMS */
3665 static struct module *find_module(const char *name)
3667 struct module *mod;
3669 list_for_each_entry(mod, &modules, list) {
3670 if (strcmp(mod->name, name) == 0)
3671 return mod;
3673 return NULL;
3676 #ifdef CONFIG_MODULE_UNLOAD
3677 struct module_use {
3678 struct list_head list;
3679 struct module *module_which_uses;
3682 /* I'm not yet certain whether we need the strong form of this. */
3683 static inline int strong_try_module_get(struct module *mod)
3685 if (mod && mod->state != MODULE_STATE_LIVE)
3686 return -EBUSY;
3687 if (try_module_get(mod))
3688 return 0;
3689 return -ENOENT;
3692 /* Does a already use b? */
3693 static int already_uses(struct module *a, struct module *b)
3695 struct module_use *use;
3696 list_for_each_entry(use, &b->modules_which_use_me, list) {
3697 if (use->module_which_uses == a)
3698 return 1;
3700 return 0;
3703 /* Make it so module a uses b. Must be holding module_mutex */
3704 static int use_module(struct module *a, struct module *b)
3706 struct module_use *use;
3707 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
3708 /* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
3709 int no_warn;
3710 #endif /* LINUX_VERSION_CODE */
3711 if (b == NULL || already_uses(a, b))
3712 return 1;
3714 if (strong_try_module_get(b) < 0)
3715 return 0;
3717 use = kmalloc(sizeof(*use), GFP_ATOMIC);
3718 if (!use) {
3719 module_put(b);
3720 return 0;
3721 }
3722 use->module_which_uses = a;
3723 list_add(&use->list, &b->modules_which_use_me);
3724 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
3725 /* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
3726 no_warn = sysfs_create_link(b->holders_dir, &a->mkobj.kobj, a->name);
3727 #endif /* LINUX_VERSION_CODE */
3728 return 1;
3729 }
3730 #else /* CONFIG_MODULE_UNLOAD */
3731 static int use_module(struct module *a, struct module *b)
3732 {
3733 return 1;
3734 }
3735 #endif /* CONFIG_MODULE_UNLOAD */
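/* symversion() yields a pointer to the CRC recorded for symbol idx when
 * CONFIG_MODVERSIONS is enabled, and NULL otherwise. */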
3737 #ifndef CONFIG_MODVERSIONS
3738 #define symversion(base, idx) NULL
3739 #else
3740 #define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
3741 #endif
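/* Call fn on each entry of each of the arrsize symbol tables in arr,
 * stopping as soon as fn returns true. */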
3743 static bool each_symbol_in_section(const struct symsearch *arr,
3744 unsigned int arrsize,
3745 struct module *owner,
3746 bool (*fn)(const struct symsearch *syms,
3747 struct module *owner,
3748 unsigned int symnum, void *data),
3749 void *data)
3750 {
3751 unsigned int i, j;
3753 for (j = 0; j < arrsize; j++) {
3754 for (i = 0; i < arr[j].stop - arr[j].start; i++)
3755 if (fn(&arr[j], owner, i, data))
3756 return true;
3757 }
3759 return false;
3760 }
3762 /* Returns true as soon as fn returns true, otherwise false. */
3763 static bool each_symbol(bool (*fn)(const struct symsearch *arr,
3764 struct module *owner,
3765 unsigned int symnum, void *data),
3766 void *data)
3767 {
3768 struct module *mod;
3769 const struct symsearch arr[] = {
3770 { __start___ksymtab, __stop___ksymtab, __start___kcrctab,
3771 NOT_GPL_ONLY, false },
3772 { __start___ksymtab_gpl, __stop___ksymtab_gpl,
3773 __start___kcrctab_gpl,
3774 GPL_ONLY, false },
3775 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
3776 { __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
3777 __start___kcrctab_gpl_future,
3778 WILL_BE_GPL_ONLY, false },
3779 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
3780 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
3781 { __start___ksymtab_unused, __stop___ksymtab_unused,
3782 __start___kcrctab_unused,
3783 NOT_GPL_ONLY, true },
3784 { __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
3785 __start___kcrctab_unused_gpl,
3786 GPL_ONLY, true },
3787 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
3788 };
3790 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
3791 return true;
3793 list_for_each_entry(mod, &modules, list) {
3794 struct symsearch module_arr[] = {
3795 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
3796 NOT_GPL_ONLY, false },
3797 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
3798 mod->gpl_crcs,
3799 GPL_ONLY, false },
3800 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
3801 { mod->gpl_future_syms,
3802 mod->gpl_future_syms + mod->num_gpl_future_syms,
3803 mod->gpl_future_crcs,
3804 WILL_BE_GPL_ONLY, false },
3805 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
3806 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
3807 { mod->unused_syms,
3808 mod->unused_syms + mod->num_unused_syms,
3809 mod->unused_crcs,
3810 NOT_GPL_ONLY, true },
3811 { mod->unused_gpl_syms,
3812 mod->unused_gpl_syms + mod->num_unused_gpl_syms,
3813 mod->unused_gpl_crcs,
3814 GPL_ONLY, true },
3815 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
3816 };
3818 if (each_symbol_in_section(module_arr, ARRAY_SIZE(module_arr),
3819 mod, fn, data))
3820 return true;
3821 }
3822 return false;
3823 }
3825 struct find_symbol_arg {
3826 /* Input */
3827 const char *name;
3828 bool gplok;
3829 bool warn;
3831 /* Output */
3832 struct module *owner;
3833 const unsigned long *crc;
3834 const struct kernel_symbol *sym;
3835 };
3837 static bool find_symbol_in_section(const struct symsearch *syms,
3838 struct module *owner,
3839 unsigned int symnum, void *data)
3840 {
3841 struct find_symbol_arg *fsa = data;
3843 if (strcmp(syms->start[symnum].name, fsa->name) != 0)
3844 return false;
3846 if (!fsa->gplok) {
3847 if (syms->licence == GPL_ONLY)
3848 return false;
3849 if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
3850 printk(KERN_WARNING "Symbol %s is being used "
3851 "by a non-GPL module, which will not "
3852 "be allowed in the future\n", fsa->name);
3853 printk(KERN_WARNING "Please see the file "
3854 "Documentation/feature-removal-schedule.txt "
3855 "in the kernel source tree for more details.\n");
3856 }
3857 }
3859 #ifdef CONFIG_UNUSED_SYMBOLS
3860 if (syms->unused && fsa->warn) {
3861 printk(KERN_WARNING "Symbol %s is marked as UNUSED, "
3862 "however this module is using it.\n", fsa->name);
3863 printk(KERN_WARNING
3864 "This symbol will go away in the future.\n");
3865 printk(KERN_WARNING
3866 "Please evaluate if this is the right API to use and, if "
3867 "it really is, submit a report to the linux kernel "
3868 "mailing list together with your code for "
3869 "inclusion.\n");
3870 }
3871 #endif
3873 fsa->owner = owner;
3874 fsa->crc = symversion(syms->crcs, symnum);
3875 fsa->sym = &syms->start[symnum];
3876 return true;
3877 }
3879 /* Find a symbol and return it, along with the (optional) crc and
3880 * the (optional) module which owns it */
3881 static const struct kernel_symbol *find_symbol(const char *name,
3882 struct module **owner,
3883 const unsigned long **crc,
3884 bool gplok, bool warn)
3885 {
3886 struct find_symbol_arg fsa;
3888 fsa.name = name;
3889 fsa.gplok = gplok;
3890 fsa.warn = warn;
3892 if (each_symbol(find_symbol_in_section, &fsa)) {
3893 if (owner)
3894 *owner = fsa.owner;
3895 if (crc)
3896 *crc = fsa.crc;
3897 return fsa.sym;
3898 }
3900 return NULL;
3901 }
3903 static inline int within_module_core(unsigned long addr, struct module *mod)
3904 {
3905 return (unsigned long)mod->module_core <= addr &&
3906 addr < (unsigned long)mod->module_core + mod->core_size;
3907 }
3909 static inline int within_module_init(unsigned long addr, struct module *mod)
3910 {
3911 return (unsigned long)mod->module_init <= addr &&
3912 addr < (unsigned long)mod->module_init + mod->init_size;
3913 }
3915 static struct module *__module_address(unsigned long addr)
3916 {
3917 struct module *mod;
3919 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
3920 list_for_each_entry_rcu(mod, &modules, list)
3921 #else
3922 /* d72b37513cdfbd3f53f3d485a8c403cc96d2c95f was after 2.6.27 */
3923 list_for_each_entry(mod, &modules, list)
3924 #endif
3925 if (within_module_core(addr, mod) ||
3926 within_module_init(addr, mod))
3927 return mod;
3928 return NULL;
3929 }
3930 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
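/* sysfs interface: each Ksplice update is exposed as a kobject whose
 * attributes carry show/store callbacks taking the containing struct
 * update, recovered via container_of() below. */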
3932 struct update_attribute {
3933 struct attribute attr;
3934 ssize_t (*show)(struct update *update, char *buf);
3935 ssize_t (*store)(struct update *update, const char *buf, size_t len);
3936 };
3938 static ssize_t update_attr_show(struct kobject *kobj, struct attribute *attr,
3939 char *buf)
3940 {
3941 struct update_attribute *attribute =
3942 container_of(attr, struct update_attribute, attr);
3943 struct update *update = container_of(kobj, struct update, kobj);
3944 if (attribute->show == NULL)
3945 return -EIO;
3946 return attribute->show(update, buf);
3947 }
3949 static ssize_t update_attr_store(struct kobject *kobj, struct attribute *attr,
3950 const char *buf, size_t len)
3951 {
3952 struct update_attribute *attribute =
3953 container_of(attr, struct update_attribute, attr);
3954 struct update *update = container_of(kobj, struct update, kobj);
3955 if (attribute->store == NULL)
3956 return -EIO;
3957 return attribute->store(update, buf, len);
3958 }
3960 static struct sysfs_ops update_sysfs_ops = {
3961 .show = update_attr_show,
3962 .store = update_attr_store,
3963 };
3965 static void update_release(struct kobject *kobj)
3966 {
3967 struct update *update;
3968 update = container_of(kobj, struct update, kobj);
3969 cleanup_ksplice_update(update);
3970 }
3972 static ssize_t stage_show(struct update *update, char *buf)
3973 {
3974 switch (update->stage) {
3975 case STAGE_PREPARING:
3976 return snprintf(buf, PAGE_SIZE, "preparing\n");
3977 case STAGE_APPLIED:
3978 return snprintf(buf, PAGE_SIZE, "applied\n");
3979 case STAGE_REVERSED:
3980 return snprintf(buf, PAGE_SIZE, "reversed\n");
3981 }
3982 return 0;
3983 }
3985 static ssize_t abort_cause_show(struct update *update, char *buf)
3986 {
3987 switch (update->abort_cause) {
3988 case OK:
3989 return snprintf(buf, PAGE_SIZE, "ok\n");
3990 case NO_MATCH:
3991 return snprintf(buf, PAGE_SIZE, "no_match\n");
3992 #ifdef KSPLICE_STANDALONE
3993 case BAD_SYSTEM_MAP:
3994 return snprintf(buf, PAGE_SIZE, "bad_system_map\n");
3995 #endif /* KSPLICE_STANDALONE */
3996 case CODE_BUSY:
3997 return snprintf(buf, PAGE_SIZE, "code_busy\n");
3998 case MODULE_BUSY:
3999 return snprintf(buf, PAGE_SIZE, "module_busy\n");
4000 case OUT_OF_MEMORY:
4001 return snprintf(buf, PAGE_SIZE, "out_of_memory\n");
4002 case FAILED_TO_FIND:
4003 return snprintf(buf, PAGE_SIZE, "failed_to_find\n");
4004 case ALREADY_REVERSED:
4005 return snprintf(buf, PAGE_SIZE, "already_reversed\n");
4006 case MISSING_EXPORT:
4007 return snprintf(buf, PAGE_SIZE, "missing_export\n");
4008 case UNEXPECTED_RUNNING_TASK:
4009 return snprintf(buf, PAGE_SIZE, "unexpected_running_task\n");
4010 case TARGET_NOT_LOADED:
4011 return snprintf(buf, PAGE_SIZE, "target_not_loaded\n");
4012 case CALL_FAILED:
4013 return snprintf(buf, PAGE_SIZE, "call_failed\n");
4014 case COLD_UPDATE_LOADED:
4015 return snprintf(buf, PAGE_SIZE, "cold_update_loaded\n");
4016 case UNEXPECTED:
4017 return snprintf(buf, PAGE_SIZE, "unexpected\n");
4018 default:
4019 return snprintf(buf, PAGE_SIZE, "unknown\n");
4020 }
4021 return 0;
4022 }
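/* conflicts lists the processes blocking a stage change, one per line:
 * the process name and pid, followed by the labels of the patched code
 * found on that process's stack. */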
4024 static ssize_t conflict_show(struct update *update, char *buf)
4025 {
4026 const struct conflict *conf;
4027 const struct conflict_addr *ca;
4028 int used = 0;
4029 mutex_lock(&module_mutex);
4030 list_for_each_entry(conf, &update->conflicts, list) {
4031 used += snprintf(buf + used, PAGE_SIZE - used, "%s %d",
4032 conf->process_name, conf->pid);
4033 list_for_each_entry(ca, &conf->stack, list) {
4034 if (!ca->has_conflict)
4035 continue;
4036 used += snprintf(buf + used, PAGE_SIZE - used, " %s",
4037 ca->label);
4038 }
4039 used += snprintf(buf + used, PAGE_SIZE - used, "\n");
4040 }
4041 mutex_unlock(&module_mutex);
4042 return used;
4043 }
4045 /* Used to pass maybe_cleanup_ksplice_update to kthread_run */
4046 static int maybe_cleanup_ksplice_update_wrapper(void *updateptr)
4047 {
4048 struct update *update = updateptr;
4049 mutex_lock(&module_mutex);
4050 maybe_cleanup_ksplice_update(update);
4051 mutex_unlock(&module_mutex);
4052 return 0;
4053 }
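/* Writing to the stage attribute drives the update through its state
 * machine; any other transition is ignored.  For example (assuming the
 * usual sysfs mount point and an update with ID KID):
 *   echo applied > /sys/kernel/ksplice/KID/stage    # preparing -> applied
 *   echo reversed > /sys/kernel/ksplice/KID/stage   # applied -> reversed
 *   echo cleanup > /sys/kernel/ksplice/KID/stage    # reversed -> freed
 */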
4055 static ssize_t stage_store(struct update *update, const char *buf, size_t len)
4056 {
4057 enum stage old_stage;
4058 mutex_lock(&module_mutex);
4059 old_stage = update->stage;
4060 if ((strncmp(buf, "applied", len) == 0 ||
4061 strncmp(buf, "applied\n", len) == 0) &&
4062 update->stage == STAGE_PREPARING)
4063 update->abort_cause = apply_update(update);
4064 else if ((strncmp(buf, "reversed", len) == 0 ||
4065 strncmp(buf, "reversed\n", len) == 0) &&
4066 update->stage == STAGE_APPLIED)
4067 update->abort_cause = reverse_update(update);
4068 else if ((strncmp(buf, "cleanup", len) == 0 ||
4069 strncmp(buf, "cleanup\n", len) == 0) &&
4070 update->stage == STAGE_REVERSED)
4071 kthread_run(maybe_cleanup_ksplice_update_wrapper, update,
4072 "ksplice_cleanup_%s", update->kid);
4074 mutex_unlock(&module_mutex);
4075 return len;
4076 }
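/* debug sets the debug level for this update; the value is parsed as a
 * base-10 integer, e.g. "echo 1 > debug" from the update's sysfs
 * directory. */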
4078 static ssize_t debug_show(struct update *update, char *buf)
4079 {
4080 return snprintf(buf, PAGE_SIZE, "%d\n", update->debug);
4081 }
4083 static ssize_t debug_store(struct update *update, const char *buf, size_t len)
4084 {
4085 unsigned long l;
4086 int ret = strict_strtoul(buf, 10, &l);
4087 if (ret != 0)
4088 return ret;
4089 update->debug = l;
4090 return len;
4091 }
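/* partial toggles partial-application mode, in which changes whose target
 * module is not loaded are skipped rather than aborting the whole
 * update. */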
4093 static ssize_t partial_show(struct update *update, char *buf)
4094 {
4095 return snprintf(buf, PAGE_SIZE, "%d\n", update->partial);
4096 }
4098 static ssize_t partial_store(struct update *update, const char *buf, size_t len)
4099 {
4100 unsigned long l;
4101 int ret = strict_strtoul(buf, 10, &l);
4102 if (ret != 0)
4103 return ret;
4104 update->partial = l;
4105 return len;
4106 }
4108 static struct update_attribute stage_attribute =
4109 __ATTR(stage, 0600, stage_show, stage_store);
4110 static struct update_attribute abort_cause_attribute =
4111 __ATTR(abort_cause, 0400, abort_cause_show, NULL);
4112 static struct update_attribute debug_attribute =
4113 __ATTR(debug, 0600, debug_show, debug_store);
4114 static struct update_attribute partial_attribute =
4115 __ATTR(partial, 0600, partial_show, partial_store);
4116 static struct update_attribute conflict_attribute =
4117 __ATTR(conflicts, 0400, conflict_show, NULL);
4119 static struct attribute *update_attrs[] = {
4120 &stage_attribute.attr,
4121 &abort_cause_attribute.attr,
4122 &debug_attribute.attr,
4123 &partial_attribute.attr,
4124 &conflict_attribute.attr,
4125 NULL
4126 };
4128 static struct kobj_type update_ktype = {
4129 .sysfs_ops = &update_sysfs_ops,
4130 .release = update_release,
4131 .default_attrs = update_attrs,
4132 };
4134 #ifdef KSPLICE_STANDALONE
4135 static int debug;
4136 module_param(debug, int, 0600);
4137 MODULE_PARM_DESC(debug, "Debug level");
4139 extern struct ksplice_system_map ksplice_system_map[], ksplice_system_map_end[];
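/* In standalone builds Ksplice bootstraps itself: this mod_change carries
 * the module's own System.map data so init_ksplice() can resolve the
 * kernel symbols that Ksplice itself needs. */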
4141 static struct ksplice_mod_change bootstrap_mod_change = {
4142 .name = "ksplice_" __stringify(KSPLICE_KID),
4143 .kid = "init_" __stringify(KSPLICE_KID),
4144 .target_name = NULL,
4145 .target = NULL,
4146 .map_printk = MAP_PRINTK,
4147 .new_code_mod = THIS_MODULE,
4148 .new_code.system_map = ksplice_system_map,
4149 .new_code.system_map_end = ksplice_system_map_end,
4150 };
4151 #endif /* KSPLICE_STANDALONE */
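/* Module init: in standalone builds, sort the embedded system map and
 * apply the bootstrap relocations; otherwise just create the top-level
 * ksplice kobject under /sys/kernel. */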
4153 static int init_ksplice(void)
4154 {
4155 #ifdef KSPLICE_STANDALONE
4156 struct ksplice_mod_change *change = &bootstrap_mod_change;
4157 change->update = init_ksplice_update(change->kid);
4158 sort(change->new_code.system_map,
4159 change->new_code.system_map_end - change->new_code.system_map,
4160 sizeof(struct ksplice_system_map), compare_system_map, NULL);
4161 if (change->update == NULL)
4162 return -ENOMEM;
4163 add_to_update(change, change->update);
4164 change->update->debug = debug;
4165 change->update->abort_cause =
4166 apply_relocs(change, ksplice_init_relocs, ksplice_init_relocs_end);
4167 if (change->update->abort_cause == OK)
4168 bootstrapped = true;
4169 cleanup_ksplice_update(bootstrap_mod_change.update);
4170 #else /* !KSPLICE_STANDALONE */
4171 ksplice_kobj = kobject_create_and_add("ksplice", kernel_kobj);
4172 if (ksplice_kobj == NULL)
4173 return -ENOMEM;
4174 #endif /* KSPLICE_STANDALONE */
4175 return 0;
4176 }
4178 static void cleanup_ksplice(void)
4179 {
4180 #ifndef KSPLICE_STANDALONE
4181 kobject_put(ksplice_kobj);
4182 #endif /* KSPLICE_STANDALONE */
4183 }
4185 module_init(init_ksplice);
4186 module_exit(cleanup_ksplice);
4188 MODULE_AUTHOR("Ksplice, Inc.");
4189 MODULE_DESCRIPTION("Ksplice rebootless update system");
4190 #ifdef KSPLICE_VERSION
4191 MODULE_VERSION(KSPLICE_VERSION);
4192 #endif
4193 MODULE_LICENSE("GPL v2");