Improve documentation of cleanup_ksplice_mod_change.
[ksplice.git] / kmodsrc / ksplice.c
blob: ab1a6dd7869eb4e9728bdfb9dd787b563f133e81
/* Copyright (C) 2007-2009 Ksplice, Inc.
 * Authors: Jeff Arnold, Anders Kaseorg, Tim Abbott
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/module.h>
#include <linux/version.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
#include <linux/bug.h>
#else /* LINUX_VERSION_CODE */
/* 7664c5a1da4711bb6383117f51b94c8dc8f3f1cd was after 2.6.19 */
#endif /* LINUX_VERSION_CODE */
#include <linux/ctype.h>
#if defined CONFIG_DEBUG_FS || LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
#include <linux/debugfs.h>
#else /* CONFIG_DEBUG_FS */
/* a7a76cefc4b12bb6508afa4c77f11c2752cc365d was after 2.6.11 */
#endif /* CONFIG_DEBUG_FS */
#include <linux/errno.h>
#include <linux/kallsyms.h>
#include <linux/kobject.h>
#include <linux/kthread.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
#include <linux/sort.h>
#else /* LINUX_VERSION_CODE < */
/* 8c63b6d337534a6b5fb111dc27d0850f535118c0 was after 2.6.11 */
#endif /* LINUX_VERSION_CODE */
#include <linux/stop_machine.h>
#include <linux/sysfs.h>
#include <linux/time.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
#include <linux/uaccess.h>
#else /* LINUX_VERSION_CODE < */
/* linux/uaccess.h doesn't exist in kernels before 2.6.18 */
#include <asm/uaccess.h>
#endif /* LINUX_VERSION_CODE */
#include <linux/vmalloc.h>
#ifdef KSPLICE_STANDALONE
#include "ksplice.h"
#else /* !KSPLICE_STANDALONE */
#include <linux/ksplice.h>
#endif /* KSPLICE_STANDALONE */
#ifdef KSPLICE_NEED_PARAINSTRUCTIONS
#include <asm/alternative.h>
#endif /* KSPLICE_NEED_PARAINSTRUCTIONS */

#if defined(KSPLICE_STANDALONE) && \
    !defined(CONFIG_KSPLICE) && !defined(CONFIG_KSPLICE_MODULE)
#define KSPLICE_NO_KERNEL_SUPPORT 1
#endif /* KSPLICE_STANDALONE && !CONFIG_KSPLICE && !CONFIG_KSPLICE_MODULE */

enum stage {
	STAGE_PREPARING,	/* the update is not yet applied */
	STAGE_APPLIED,		/* the update is applied */
	STAGE_REVERSED,		/* the update has been applied and reversed */
};

/* parameter to modify run-pre matching */
enum run_pre_mode {
	RUN_PRE_INITIAL,	/* dry run (only change temp_labelvals) */
	RUN_PRE_DEBUG,		/* dry run with byte-by-byte debugging */
	RUN_PRE_FINAL,		/* finalizes the matching */
#ifdef KSPLICE_STANDALONE
	RUN_PRE_SILENT,
#endif /* KSPLICE_STANDALONE */
};

enum { NOVAL, TEMP, VAL };

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
/* 5d7b32de9935c65ca8285ac6ec2382afdbb5d479 was after 2.6.8 */
#define __bitwise__
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
/* af4ca457eaf2d6682059c18463eb106e2ce58198 was after 2.6.14 */
#define __bitwise__ __bitwise
#endif

typedef int __bitwise__ abort_t;
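
/* abort_t is an "int __bitwise__" so that sparse treats it as a distinct
 * type: mixing it with plain integers is flagged at analysis time, and
 * the __force casts in the status codes below mark the few deliberate
 * conversions.  Illustrative only:
 *
 *	abort_t ret = OK;
 *	int n = ret;			// sparse: different base types
 *	int m = (__force int)ret;	// explicit, intentional conversion
 */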

#define OK ((__force abort_t) 0)
#define NO_MATCH ((__force abort_t) 1)
#define CODE_BUSY ((__force abort_t) 2)
#define MODULE_BUSY ((__force abort_t) 3)
#define OUT_OF_MEMORY ((__force abort_t) 4)
#define FAILED_TO_FIND ((__force abort_t) 5)
#define ALREADY_REVERSED ((__force abort_t) 6)
#define MISSING_EXPORT ((__force abort_t) 7)
#define UNEXPECTED_RUNNING_TASK ((__force abort_t) 8)
#define UNEXPECTED ((__force abort_t) 9)
#define TARGET_NOT_LOADED ((__force abort_t) 10)
#define CALL_FAILED ((__force abort_t) 11)
#define COLD_UPDATE_LOADED ((__force abort_t) 12)
#ifdef KSPLICE_STANDALONE
#define BAD_SYSTEM_MAP ((__force abort_t) 13)
#endif /* KSPLICE_STANDALONE */

struct update {
	const char *kid;
	const char *name;
	struct kobject kobj;
	enum stage stage;
	abort_t abort_cause;
	int debug;
#ifdef CONFIG_DEBUG_FS
	struct debugfs_blob_wrapper debug_blob;
	struct dentry *debugfs_dentry;
#else /* !CONFIG_DEBUG_FS */
	bool debug_continue_line;
#endif /* CONFIG_DEBUG_FS */
	bool partial;		/* is it OK if some target mods aren't loaded */
	struct list_head changes,	/* changes for loaded target mods */
	    unused_changes;		/* changes for non-loaded target mods */
	struct list_head conflicts;
	struct list_head list;
	struct list_head ksplice_module_list;
};

/* a process conflicting with an update */
struct conflict {
	const char *process_name;
	pid_t pid;
	struct list_head stack;
	struct list_head list;
};

/* an address on the stack of a conflict */
struct conflict_addr {
	unsigned long addr;	/* the address on the stack */
	bool has_conflict;	/* does this address in particular conflict? */
	const char *label;	/* the label of the conflicting safety_record */
	struct list_head list;
};

#if defined(CONFIG_DEBUG_FS) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
/* Old kernels don't have debugfs_create_blob */
struct debugfs_blob_wrapper {
	void *data;
	unsigned long size;
};
#endif /* CONFIG_DEBUG_FS && LINUX_VERSION_CODE */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
/* 930631edd4b1fe2781d9fe90edbe35d89dfc94cc was after 2.6.18 */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#endif
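
/* Illustration of the DIV_ROUND_UP fallback above; this is exactly the
 * page count map_writable computes later in this file:
 *
 *	int nr_pages = DIV_ROUND_UP(offset_in_page(addr) + len, PAGE_SIZE);
 *
 * e.g. one byte at the very end of a page still needs one page, and two
 * bytes there straddle two pages.
 */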

struct labelval {
	struct list_head list;
	struct ksplice_symbol *symbol;
	struct list_head *saved_vals;
};

/* region to be checked for conflicts in the stack check */
struct safety_record {
	struct list_head list;
	const char *label;
	unsigned long addr;	/* the address to be checked for conflicts
				 * (e.g. an obsolete function's starting addr)
				 */
	unsigned long size;	/* the size of the region to be checked */
};

/* possible value for a symbol */
struct candidate_val {
	struct list_head list;
	unsigned long val;
};

/* private struct used by init_symbol_array */
struct ksplice_lookup {
/* input */
	struct ksplice_mod_change *change;
	struct ksplice_symbol **arr;
	size_t size;
/* output */
	abort_t ret;
};

#ifdef KSPLICE_NO_KERNEL_SUPPORT
struct symsearch {
	const struct kernel_symbol *start, *stop;
	const unsigned long *crcs;
	enum {
		NOT_GPL_ONLY,
		GPL_ONLY,
		WILL_BE_GPL_ONLY,
	} licence;
	bool unused;
};
#endif /* KSPLICE_NO_KERNEL_SUPPORT */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
/* c33fa9f5609e918824446ef9a75319d4a802f1f4 was after 2.6.25 */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
/* 2fff0a48416af891dce38fd425246e337831e0bb was after 2.6.19 */
static bool virtual_address_mapped(unsigned long addr)
{
	char retval;
	return probe_kernel_address(addr, retval) != -EFAULT;
}
#else /* LINUX_VERSION_CODE < */
static bool virtual_address_mapped(unsigned long addr);
#endif /* LINUX_VERSION_CODE */

static long probe_kernel_read(void *dst, void *src, size_t size)
{
	if (size == 0)
		return 0;
	if (!virtual_address_mapped((unsigned long)src) ||
	    !virtual_address_mapped((unsigned long)src + size - 1))
		return -EFAULT;

	memcpy(dst, src, size);
	return 0;
}
#endif /* LINUX_VERSION_CODE */
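
/* Usage sketch for the probe_kernel_read fallback above (this mirrors
 * read_reloc_value later in this file): copy a possibly unmapped kernel
 * address into a local buffer instead of dereferencing it, and treat
 * -EFAULT as a mismatch rather than letting the kernel oops.
 *
 *	unsigned char bytes[sizeof(long)];
 *	if (probe_kernel_read(bytes, (void *)addr, howto->size) == -EFAULT)
 *		return NO_MATCH;
 */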

static LIST_HEAD(updates);
#ifdef KSPLICE_STANDALONE
#if defined(CONFIG_KSPLICE) || defined(CONFIG_KSPLICE_MODULE)
extern struct list_head ksplice_modules;
#else /* !CONFIG_KSPLICE */
LIST_HEAD(ksplice_modules);
#endif /* CONFIG_KSPLICE */
#else /* !KSPLICE_STANDALONE */
LIST_HEAD(ksplice_modules);
EXPORT_SYMBOL_GPL(ksplice_modules);
static struct kobject *ksplice_kobj;
#endif /* KSPLICE_STANDALONE */

static struct kobj_type update_ktype;

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
/* Old kernels do not have kcalloc
 * e629946abd0bb8266e9c3d0fd1bff2ef8dec5443 was after 2.6.8
 */
static void *kcalloc(size_t n, size_t size, typeof(GFP_KERNEL) flags)
{
	char *mem;
	if (n != 0 && size > ULONG_MAX / n)
		return NULL;
	mem = kmalloc(n * size, flags);
	if (mem)
		memset(mem, 0, n * size);
	return mem;
}
#endif /* LINUX_VERSION_CODE */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
/* 8c63b6d337534a6b5fb111dc27d0850f535118c0 was after 2.6.11 */
static void u32_swap(void *a, void *b, int size)
{
	u32 t = *(u32 *)a;
	*(u32 *)a = *(u32 *)b;
	*(u32 *)b = t;
}

static void generic_swap(void *a, void *b, int size)
{
	char t;

	do {
		t = *(char *)a;
		*(char *)a++ = *(char *)b;
		*(char *)b++ = t;
	} while (--size > 0);
}

/**
 * sort - sort an array of elements
 * @base: pointer to data to sort
 * @num: number of elements
 * @size: size of each element
 * @cmp: pointer to comparison function
 * @swap: pointer to swap function or NULL
 *
 * This function does a heapsort on the given array.  You may provide a
 * swap function optimized to your element type.
 *
 * Sorting time is O(n log n) both on average and worst-case.  While
 * qsort is about 20% faster on average, it suffers from exploitable
 * O(n*n) worst-case behavior and extra memory requirements that make
 * it less suitable for kernel use.
 */
void sort(void *base, size_t num, size_t size,
	  int (*cmp)(const void *, const void *),
	  void (*swap)(void *, void *, int size))
{
	/* pre-scale counters for performance */
	int i = (num / 2 - 1) * size, n = num * size, c, r;

	if (!swap)
		swap = (size == 4 ? u32_swap : generic_swap);

	/* heapify */
	for (; i >= 0; i -= size) {
		for (r = i; r * 2 + size < n; r = c) {
			c = r * 2 + size;
			if (c < n - size && cmp(base + c, base + c + size) < 0)
				c += size;
			if (cmp(base + r, base + c) >= 0)
				break;
			swap(base + r, base + c, size);
		}
	}

	/* sort */
	for (i = n - size; i > 0; i -= size) {
		swap(base, base + i, size);
		for (r = 0; r * 2 + size < i; r = c) {
			c = r * 2 + size;
			if (c < i - size && cmp(base + c, base + c + size) < 0)
				c += size;
			if (cmp(base + r, base + c) >= 0)
				break;
			swap(base + r, base + c, size);
		}
	}
}
#endif /* LINUX_VERSION_CODE < */
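
/* Usage sketch for the sort() fallback above: Ksplice sorts its
 * relocation tables with a custom comparator and no custom swap (see
 * init_ksplice_mod_change later in this file), e.g.:
 *
 *	sort(change->old_code.relocs,
 *	     change->old_code.relocs_end - change->old_code.relocs,
 *	     sizeof(*change->old_code.relocs), compare_relocs, NULL);
 *
 * Passing NULL for @swap selects u32_swap or generic_swap automatically.
 */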

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13)
/* Old kernels do not have kstrdup
 * 543537bd922692bc978e2e356fcd8bfc9c2ee7d5 was 2.6.13-rc4
 */
static char *kstrdup(const char *s, typeof(GFP_KERNEL) gfp)
{
	size_t len;
	char *buf;

	if (!s)
		return NULL;

	len = strlen(s) + 1;
	buf = kmalloc(len, gfp);
	if (buf)
		memcpy(buf, s, len);
	return buf;
}
#endif /* LINUX_VERSION_CODE */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
/* Old kernels use semaphore instead of mutex
 * 97d1f15b7ef52c1e9c28dc48b454024bb53a5fd2 was after 2.6.16
 */
#define mutex semaphore
#define mutex_lock down
#define mutex_unlock up
#endif /* LINUX_VERSION_CODE */

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
/* 11443ec7d9286dd25663516436a14edfb5f43857 was after 2.6.21 */
static char * __attribute_used__
kvasprintf(typeof(GFP_KERNEL) gfp, const char *fmt, va_list ap)
{
	unsigned int len;
	char *p, dummy[1];
	va_list aq;

	va_copy(aq, ap);
	len = vsnprintf(dummy, 0, fmt, aq);
	va_end(aq);

	p = kmalloc(len + 1, gfp);
	if (!p)
		return NULL;

	vsnprintf(p, len + 1, fmt, ap);

	return p;
}
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
/* e905914f96e11862b130dd229f73045dad9a34e8 was after 2.6.17 */
static char * __attribute__((format (printf, 2, 3)))
kasprintf(typeof(GFP_KERNEL) gfp, const char *fmt, ...)
{
	va_list ap;
	char *p;

	va_start(ap, fmt);
	p = kvasprintf(gfp, fmt, ap);
	va_end(ap);

	return p;
}
#endif
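
/* The two fallbacks above use the standard two-pass formatting idiom:
 * kvasprintf first runs a zero-length vsnprintf to measure the result,
 * then allocates and formats for real.  init_ksplice_update below
 * depends on them on old kernels:
 *
 *	update->name = kasprintf(GFP_KERNEL, "ksplice_%s", kid);
 */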

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
/* 06b2a76d25d3cfbd14680021c1d356c91be6904e was after 2.6.24 */
static int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
{
	char *tail;
	unsigned long val;
	size_t len;

	*res = 0;
	len = strlen(cp);
	if (len == 0)
		return -EINVAL;

	val = simple_strtoul(cp, &tail, base);
	if ((*tail == '\0') ||
	    ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
		*res = val;
		return 0;
	}

	return -EINVAL;
}
#endif
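
/* Usage sketch for the strict_strtoul fallback above (`buf' is a
 * hypothetical sysfs input buffer; Ksplice's sysfs store handlers,
 * outside this excerpt, parse numbers this way): unlike simple_strtoul
 * it rejects trailing garbage, allowing at most one trailing newline.
 *
 *	unsigned long val;
 *	if (strict_strtoul(buf, 10, &val) != 0)
 *		return -EINVAL;
 */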

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
/* 9b1a4d38373a5581a4e01032a3ccdd94cd93477b was after 2.6.26 */
/* Assume cpus == NULL. */
#define stop_machine(fn, data, cpus) stop_machine_run(fn, data, NR_CPUS);
#endif /* LINUX_VERSION_CODE */

#ifndef task_thread_info
#define task_thread_info(task) (task)->thread_info
#endif /* !task_thread_info */

#ifdef KSPLICE_STANDALONE

static bool bootstrapped = false;

#ifdef CONFIG_KALLSYMS
extern unsigned long kallsyms_addresses[], kallsyms_num_syms;
extern u8 kallsyms_names[];
#endif /* CONFIG_KALLSYMS */

/* defined by ksplice-create */
extern const struct ksplice_reloc ksplice_init_relocs[],
    ksplice_init_relocs_end[];

/* Obtained via System.map */
extern struct list_head modules;
extern struct mutex module_mutex;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) && defined(CONFIG_UNUSED_SYMBOLS)
/* f71d20e961474dde77e6558396efb93d6ac80a4b was after 2.6.17 */
#define KSPLICE_KSYMTAB_UNUSED_SUPPORT 1
#endif /* LINUX_VERSION_CODE */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
/* 9f28bb7e1d0188a993403ab39b774785892805e1 was after 2.6.16 */
#define KSPLICE_KSYMTAB_FUTURE_SUPPORT 1
#endif /* LINUX_VERSION_CODE */
extern const struct kernel_symbol __start___ksymtab[];
extern const struct kernel_symbol __stop___ksymtab[];
extern const unsigned long __start___kcrctab[];
extern const struct kernel_symbol __start___ksymtab_gpl[];
extern const struct kernel_symbol __stop___ksymtab_gpl[];
extern const unsigned long __start___kcrctab_gpl[];
#ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
extern const struct kernel_symbol __start___ksymtab_unused[];
extern const struct kernel_symbol __stop___ksymtab_unused[];
extern const unsigned long __start___kcrctab_unused[];
extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
extern const unsigned long __start___kcrctab_unused_gpl[];
#endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
#ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
extern const struct kernel_symbol __start___ksymtab_gpl_future[];
extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
extern const unsigned long __start___kcrctab_gpl_future[];
#endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */

#endif /* KSPLICE_STANDALONE */

static struct update *init_ksplice_update(const char *kid);
static void cleanup_ksplice_update(struct update *update);
static void maybe_cleanup_ksplice_update(struct update *update);
static void add_to_update(struct ksplice_mod_change *change,
			  struct update *update);
static int ksplice_sysfs_init(struct update *update);

/* Preparing the relocations and patches for application */
static abort_t apply_update(struct update *update);
static abort_t reverse_update(struct update *update);
static abort_t prepare_change(struct ksplice_mod_change *change);
static abort_t finalize_change(struct ksplice_mod_change *change);
static abort_t finalize_patches(struct ksplice_mod_change *change);
static abort_t add_dependency_on_address(struct ksplice_mod_change *change,
					 unsigned long addr);
static abort_t map_trampoline_pages(struct update *update);
static void unmap_trampoline_pages(struct update *update);
static void *map_writable(void *addr, size_t len);
static abort_t apply_relocs(struct ksplice_mod_change *change,
			    const struct ksplice_reloc *relocs,
			    const struct ksplice_reloc *relocs_end);
static abort_t apply_reloc(struct ksplice_mod_change *change,
			   const struct ksplice_reloc *r);
static abort_t apply_howto_reloc(struct ksplice_mod_change *change,
				 const struct ksplice_reloc *r);
static abort_t apply_howto_date(struct ksplice_mod_change *change,
				const struct ksplice_reloc *r);
static abort_t read_reloc_value(struct ksplice_mod_change *change,
				const struct ksplice_reloc *r,
				unsigned long addr, unsigned long *valp);
static abort_t write_reloc_value(struct ksplice_mod_change *change,
				 const struct ksplice_reloc *r,
				 unsigned long addr, unsigned long sym_addr);
static abort_t create_module_list_entry(struct ksplice_mod_change *change,
					bool to_be_applied);
static void cleanup_module_list_entries(struct update *update);
static void __attribute__((noreturn)) ksplice_deleted(void);

/* run-pre matching */
static abort_t match_change_sections(struct ksplice_mod_change *change,
				     bool consider_data_sections);
static abort_t find_section(struct ksplice_mod_change *change,
			    struct ksplice_section *sect);
static abort_t try_addr(struct ksplice_mod_change *change,
			struct ksplice_section *sect,
			unsigned long run_addr,
			struct list_head *safety_records,
			enum run_pre_mode mode);
static abort_t run_pre_cmp(struct ksplice_mod_change *change,
			   const struct ksplice_section *sect,
			   unsigned long run_addr,
			   struct list_head *safety_records,
			   enum run_pre_mode mode);
#ifndef CONFIG_FUNCTION_DATA_SECTIONS
/* defined in arch/ARCH/kernel/ksplice-arch.c */
static abort_t arch_run_pre_cmp(struct ksplice_mod_change *change,
				struct ksplice_section *sect,
				unsigned long run_addr,
				struct list_head *safety_records,
				enum run_pre_mode mode);
#endif /* CONFIG_FUNCTION_DATA_SECTIONS */
static void print_bytes(struct ksplice_mod_change *change,
			const unsigned char *run, int runc,
			const unsigned char *pre, int prec);
#if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
static abort_t brute_search(struct ksplice_mod_change *change,
			    struct ksplice_section *sect,
			    const void *start, unsigned long len,
			    struct list_head *vals);
static abort_t brute_search_all(struct ksplice_mod_change *change,
				struct ksplice_section *sect,
				struct list_head *vals);
#endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
static const struct ksplice_reloc *
init_reloc_search(struct ksplice_mod_change *change,
		  const struct ksplice_section *sect);
static const struct ksplice_reloc *find_reloc(const struct ksplice_reloc *start,
					      const struct ksplice_reloc *end,
					      unsigned long address,
					      unsigned long size);
static abort_t lookup_reloc(struct ksplice_mod_change *change,
			    const struct ksplice_reloc **fingerp,
			    unsigned long addr,
			    const struct ksplice_reloc **relocp);
static abort_t handle_reloc(struct ksplice_mod_change *change,
			    const struct ksplice_section *sect,
			    const struct ksplice_reloc *r,
			    unsigned long run_addr, enum run_pre_mode mode);
static abort_t handle_howto_date(struct ksplice_mod_change *change,
				 const struct ksplice_section *sect,
				 const struct ksplice_reloc *r,
				 unsigned long run_addr,
				 enum run_pre_mode mode);
static abort_t handle_howto_reloc(struct ksplice_mod_change *change,
				  const struct ksplice_section *sect,
				  const struct ksplice_reloc *r,
				  unsigned long run_addr,
				  enum run_pre_mode mode);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
#ifdef CONFIG_BUG
static abort_t handle_bug(struct ksplice_mod_change *change,
			  const struct ksplice_reloc *r,
			  unsigned long run_addr);
#endif /* CONFIG_BUG */
#else /* LINUX_VERSION_CODE < */
/* 7664c5a1da4711bb6383117f51b94c8dc8f3f1cd was after 2.6.19 */
#endif /* LINUX_VERSION_CODE */
static abort_t handle_extable(struct ksplice_mod_change *change,
			      const struct ksplice_reloc *r,
			      unsigned long run_addr);
static struct ksplice_section *symbol_section(struct ksplice_mod_change *change,
					      const struct ksplice_symbol *sym);
static int compare_section_labels(const void *va, const void *vb);
static int symbol_section_bsearch_compare(const void *a, const void *b);
static const struct ksplice_reloc *
patch_reloc(struct ksplice_mod_change *change,
	    const struct ksplice_patch *p);

/* Computing possible addresses for symbols */
static abort_t lookup_symbol(struct ksplice_mod_change *change,
			     const struct ksplice_symbol *ksym,
			     struct list_head *vals);
static void cleanup_symbol_arrays(struct ksplice_mod_change *change);
static abort_t init_symbol_arrays(struct ksplice_mod_change *change);
static abort_t init_symbol_array(struct ksplice_mod_change *change,
				 struct ksplice_symbol *start,
				 struct ksplice_symbol *end);
static abort_t uniquify_symbols(struct ksplice_mod_change *change);
static abort_t add_matching_values(struct ksplice_lookup *lookup,
				   const char *sym_name, unsigned long sym_val);
static bool add_export_values(const struct symsearch *syms,
			      struct module *owner,
			      unsigned int symnum, void *data);
static int symbolp_bsearch_compare(const void *key, const void *elt);
static int compare_symbolp_names(const void *a, const void *b);
static int compare_symbolp_labels(const void *a, const void *b);
#ifdef CONFIG_KALLSYMS
static int add_kallsyms_values(void *data, const char *name,
			       struct module *owner, unsigned long val);
#endif /* CONFIG_KALLSYMS */
#ifdef KSPLICE_STANDALONE
static abort_t
add_system_map_candidates(struct ksplice_mod_change *change,
			  const struct ksplice_system_map *start,
			  const struct ksplice_system_map *end,
			  const char *label, struct list_head *vals);
static int compare_system_map(const void *a, const void *b);
static int system_map_bsearch_compare(const void *key, const void *elt);
#endif /* KSPLICE_STANDALONE */
static abort_t new_export_lookup(struct ksplice_mod_change *ichange,
				 const char *name, struct list_head *vals);

/* Atomic update trampoline insertion and removal */
static abort_t patch_action(struct update *update, enum ksplice_action action);
static int __apply_patches(void *update);
static int __reverse_patches(void *update);
static abort_t check_each_task(struct update *update);
static abort_t check_task(struct update *update,
			  const struct task_struct *t, bool rerun);
static abort_t check_stack(struct update *update, struct conflict *conf,
			   const struct thread_info *tinfo,
			   const unsigned long *stack);
static abort_t check_address(struct update *update,
			     struct conflict *conf, unsigned long addr);
static abort_t check_record(struct conflict_addr *ca,
			    const struct safety_record *rec,
			    unsigned long addr);
static bool is_stop_machine(const struct task_struct *t);
static void cleanup_conflicts(struct update *update);
static void print_conflicts(struct update *update);
static void insert_trampoline(struct ksplice_patch *p);
static abort_t verify_trampoline(struct ksplice_mod_change *change,
				 const struct ksplice_patch *p);
static void remove_trampoline(const struct ksplice_patch *p);

static abort_t create_labelval(struct ksplice_mod_change *change,
			       struct ksplice_symbol *ksym,
			       unsigned long val, int status);
static abort_t create_safety_record(struct ksplice_mod_change *change,
				    const struct ksplice_section *sect,
				    struct list_head *record_list,
				    unsigned long run_addr,
				    unsigned long run_size);
static abort_t add_candidate_val(struct ksplice_mod_change *change,
				 struct list_head *vals, unsigned long val);
static void release_vals(struct list_head *vals);
static void set_temp_labelvals(struct ksplice_mod_change *change, int status);

static int contains_canary(struct ksplice_mod_change *change,
			   unsigned long blank_addr,
			   const struct ksplice_reloc_howto *howto);
static unsigned long follow_trampolines(struct ksplice_mod_change *change,
					unsigned long addr);
static bool patches_module(const struct module *a, const struct module *b);
static bool strstarts(const char *str, const char *prefix);
static bool singular(struct list_head *list);
static void *bsearch(const void *key, const void *base, size_t n,
		     size_t size, int (*cmp)(const void *key, const void *elt));
static int compare_relocs(const void *a, const void *b);
static int reloc_bsearch_compare(const void *key, const void *elt);

/* Debugging */
static abort_t init_debug_buf(struct update *update);
static void clear_debug_buf(struct update *update);
static int __attribute__((format(printf, 2, 3)))
_ksdebug(struct update *update, const char *fmt, ...);
#define ksdebug(change, fmt, ...) \
	_ksdebug(change->update, fmt, ## __VA_ARGS__)

#ifdef KSPLICE_NO_KERNEL_SUPPORT
/* Functions defined here that will be exported in later kernels */
#ifdef CONFIG_KALLSYMS
static int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
					     struct module *, unsigned long),
				   void *data);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
static unsigned int kallsyms_expand_symbol(unsigned int off, char *result);
#endif /* LINUX_VERSION_CODE */
static int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
						    struct module *,
						    unsigned long),
					  void *data);
#endif /* CONFIG_KALLSYMS */
static struct module *find_module(const char *name);
static int use_module(struct module *a, struct module *b);
static const struct kernel_symbol *find_symbol(const char *name,
					       struct module **owner,
					       const unsigned long **crc,
					       bool gplok, bool warn);
static bool each_symbol(bool (*fn)(const struct symsearch *arr,
				   struct module *owner,
				   unsigned int symnum, void *data),
			void *data);
static struct module *__module_address(unsigned long addr);
#endif /* KSPLICE_NO_KERNEL_SUPPORT */

/* Architecture-specific functions defined in arch/ARCH/kernel/ksplice-arch.c */

/* Prepare a trampoline for the given patch */
static abort_t prepare_trampoline(struct ksplice_mod_change *change,
				  struct ksplice_patch *p);
/* What address does the trampoline at addr jump to? */
static abort_t trampoline_target(struct ksplice_mod_change *change,
				 unsigned long addr, unsigned long *new_addr);
/* Hook to handle pc-relative jumps inserted by parainstructions */
static abort_t handle_paravirt(struct ksplice_mod_change *change,
			       unsigned long pre, unsigned long run,
			       int *matched);
/* Is address p on the stack of the given thread? */
static bool valid_stack_ptr(const struct thread_info *tinfo, const void *p);

#ifndef KSPLICE_STANDALONE
#include "ksplice-arch.c"
#elif defined CONFIG_X86
#include "x86/ksplice-arch.c"
#elif defined CONFIG_ARM
#include "arm/ksplice-arch.c"
#endif /* KSPLICE_STANDALONE */

#define clear_list(head, type, member)				\
	do {							\
		struct list_head *_pos, *_n;			\
		list_for_each_safe(_pos, _n, head) {		\
			list_del(_pos);				\
			kfree(list_entry(_pos, type, member));	\
		}						\
	} while (0)
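
/* clear_list expands to a list_for_each_safe walk that unlinks each node
 * and kfree()s its containing structure, leaving the head empty.  A
 * representative call from later in this file:
 *
 *	clear_list(&change->safety_records, struct safety_record, list);
 */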

/**
 * init_ksplice_mod_change() - Initializes a ksplice change
 * @change: The change to be initialized.  All of the public fields of the
 *          change and its associated data structures should be populated
 *          before this function is called.  The values of the private
 *          fields will be ignored.
 **/
int init_ksplice_mod_change(struct ksplice_mod_change *change)
{
	struct update *update;
	struct ksplice_patch *p;
	struct ksplice_section *s;
	int ret = 0;

#ifdef KSPLICE_STANDALONE
	if (!bootstrapped)
		return -1;
#endif /* KSPLICE_STANDALONE */

	INIT_LIST_HEAD(&change->temp_labelvals);
	INIT_LIST_HEAD(&change->safety_records);

	sort(change->old_code.relocs,
	     change->old_code.relocs_end - change->old_code.relocs,
	     sizeof(*change->old_code.relocs), compare_relocs, NULL);
	sort(change->new_code.relocs,
	     change->new_code.relocs_end - change->new_code.relocs,
	     sizeof(*change->new_code.relocs), compare_relocs, NULL);
	sort(change->old_code.sections,
	     change->old_code.sections_end - change->old_code.sections,
	     sizeof(*change->old_code.sections), compare_section_labels, NULL);
#ifdef KSPLICE_STANDALONE
	sort(change->new_code.system_map,
	     change->new_code.system_map_end - change->new_code.system_map,
	     sizeof(*change->new_code.system_map), compare_system_map, NULL);
	sort(change->old_code.system_map,
	     change->old_code.system_map_end - change->old_code.system_map,
	     sizeof(*change->old_code.system_map), compare_system_map, NULL);
#endif /* KSPLICE_STANDALONE */

	for (p = change->patches; p < change->patches_end; p++)
		p->vaddr = NULL;
	for (s = change->old_code.sections; s < change->old_code.sections_end;
	     s++)
		s->match_map = NULL;
	for (p = change->patches; p < change->patches_end; p++) {
		const struct ksplice_reloc *r = patch_reloc(change, p);
		if (r == NULL)
			return -ENOENT;
		if (p->type == KSPLICE_PATCH_DATA) {
			s = symbol_section(change, r->symbol);
			if (s == NULL)
				return -ENOENT;
			/* Ksplice creates KSPLICE_PATCH_DATA patches in order
			 * to modify rodata sections that have been explicitly
			 * marked for patching using the ksplice-patch.h macro
			 * ksplice_assume_rodata.  Here we modify the section
			 * flags appropriately.
			 */
			if (s->flags & KSPLICE_SECTION_DATA)
				s->flags = (s->flags & ~KSPLICE_SECTION_DATA) |
				    KSPLICE_SECTION_RODATA;
		}
	}

	mutex_lock(&module_mutex);
	list_for_each_entry(update, &updates, list) {
		if (strcmp(change->kid, update->kid) == 0) {
			if (update->stage != STAGE_PREPARING) {
				ret = -EPERM;
				goto out;
			}
			add_to_update(change, update);
			ret = 0;
			goto out;
		}
	}
	update = init_ksplice_update(change->kid);
	if (update == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	ret = ksplice_sysfs_init(update);
	if (ret != 0) {
		cleanup_ksplice_update(update);
		goto out;
	}
	add_to_update(change, update);
out:
	mutex_unlock(&module_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(init_ksplice_mod_change);

/**
 * cleanup_ksplice_mod_change() - Cleans up a change if appropriate
 * @change: The change to be cleaned up
 *
 * cleanup_ksplice_mod_change is currently called twice for each
 * Ksplice update; once when the old_code module is unloaded, and once
 * when the new_code module is unloaded.  The extra call is used to
 * avoid leaks if you unload the old_code without applying the update.
 **/
void cleanup_ksplice_mod_change(struct ksplice_mod_change *change)
{
	if (change->update == NULL)
		return;

	mutex_lock(&module_mutex);
	if (change->update->stage == STAGE_APPLIED) {
		/* If the change wasn't actually applied (because we
		 * only applied this update to loaded modules and this
		 * target was not loaded), then unregister the change
		 * from the list of unused changes.
		 */
		struct ksplice_mod_change *c;
		bool found = false;

		list_for_each_entry(c, &change->update->unused_changes, list) {
			if (c == change)
				found = true;
		}
		if (found)
			list_del(&change->list);
		mutex_unlock(&module_mutex);
		return;
	}
	list_del(&change->list);
	if (change->update->stage == STAGE_PREPARING)
		maybe_cleanup_ksplice_update(change->update);
	change->update = NULL;
	mutex_unlock(&module_mutex);
}
EXPORT_SYMBOL_GPL(cleanup_ksplice_mod_change);
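
/* Rough life-cycle sketch (illustrative; the actual callers are the
 * modules that ksplice-create generates, not anything in this file).
 * A change's new_code module registers it from module_init, and the
 * generated modules call cleanup_ksplice_mod_change from module_exit,
 * which is why the function above tolerates a second call.
 * `this_change' and the function names here are hypothetical.
 *
 *	static int __init helper_init(void)
 *	{
 *		return init_ksplice_mod_change(&this_change);
 *	}
 *
 *	static void __exit helper_exit(void)
 *	{
 *		cleanup_ksplice_mod_change(&this_change);
 *	}
 */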

static struct update *init_ksplice_update(const char *kid)
{
	struct update *update;
	update = kcalloc(1, sizeof(struct update), GFP_KERNEL);
	if (update == NULL)
		return NULL;
	update->name = kasprintf(GFP_KERNEL, "ksplice_%s", kid);
	if (update->name == NULL) {
		kfree(update);
		return NULL;
	}
	update->kid = kstrdup(kid, GFP_KERNEL);
	if (update->kid == NULL) {
		kfree(update->name);
		kfree(update);
		return NULL;
	}
	if (try_module_get(THIS_MODULE) != 1) {
		kfree(update->kid);
		kfree(update->name);
		kfree(update);
		return NULL;
	}
	INIT_LIST_HEAD(&update->changes);
	INIT_LIST_HEAD(&update->unused_changes);
	INIT_LIST_HEAD(&update->ksplice_module_list);
	if (init_debug_buf(update) != OK) {
		module_put(THIS_MODULE);
		kfree(update->kid);
		kfree(update->name);
		kfree(update);
		return NULL;
	}
	list_add(&update->list, &updates);
	update->stage = STAGE_PREPARING;
	update->abort_cause = OK;
	update->partial = 0;
	INIT_LIST_HEAD(&update->conflicts);
	return update;
}

static void cleanup_ksplice_update(struct update *update)
{
	list_del(&update->list);
	cleanup_conflicts(update);
	clear_debug_buf(update);
	cleanup_module_list_entries(update);
	kfree(update->kid);
	kfree(update->name);
	kfree(update);
	module_put(THIS_MODULE);
}

/* Clean up the update if it no longer has any changes */
static void maybe_cleanup_ksplice_update(struct update *update)
{
	if (list_empty(&update->changes) && list_empty(&update->unused_changes))
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
		kobject_put(&update->kobj);
#else /* LINUX_VERSION_CODE < */
/* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
		kobject_unregister(&update->kobj);
#endif /* LINUX_VERSION_CODE */
}

static void add_to_update(struct ksplice_mod_change *change,
			  struct update *update)
{
	change->update = update;
	list_add(&change->list, &update->unused_changes);
}

static int ksplice_sysfs_init(struct update *update)
{
	int ret = 0;
	memset(&update->kobj, 0, sizeof(update->kobj));
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
#ifndef KSPLICE_STANDALONE
	ret = kobject_init_and_add(&update->kobj, &update_ktype,
				   ksplice_kobj, "%s", update->kid);
#else /* KSPLICE_STANDALONE */
/* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
	ret = kobject_init_and_add(&update->kobj, &update_ktype,
				   &THIS_MODULE->mkobj.kobj, "ksplice");
#endif /* KSPLICE_STANDALONE */
#else /* LINUX_VERSION_CODE < */
	ret = kobject_set_name(&update->kobj, "%s", "ksplice");
	if (ret != 0)
		return ret;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
	update->kobj.parent = &THIS_MODULE->mkobj.kobj;
#else /* LINUX_VERSION_CODE < */
/* b86ab02803095190d6b72bcc18dcf620bf378df9 was after 2.6.10 */
	update->kobj.parent = &THIS_MODULE->mkobj->kobj;
#endif /* LINUX_VERSION_CODE */
	update->kobj.ktype = &update_ktype;
	ret = kobject_register(&update->kobj);
#endif /* LINUX_VERSION_CODE */
	if (ret != 0)
		return ret;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
	kobject_uevent(&update->kobj, KOBJ_ADD);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
/* 312c004d36ce6c739512bac83b452f4c20ab1f62 was after 2.6.14 */
/* 12025235884570ba7f02a6f427f973ac6be7ec54 was after 2.6.9 */
	kobject_uevent(&update->kobj, KOBJ_ADD, NULL);
#endif /* LINUX_VERSION_CODE */
	return 0;
}

static abort_t apply_update(struct update *update)
{
	struct ksplice_mod_change *change, *n;
	abort_t ret;
	int retval;

	list_for_each_entry(change, &update->changes, list) {
		ret = create_module_list_entry(change, true);
		if (ret != OK)
			goto out;
	}

	list_for_each_entry_safe(change, n, &update->unused_changes, list) {
		if (strcmp(change->target_name, "vmlinux") == 0) {
			change->target = NULL;
		} else if (change->target == NULL) {
			change->target = find_module(change->target_name);
			if (change->target == NULL ||
			    !module_is_live(change->target)) {
				if (!update->partial) {
					ret = TARGET_NOT_LOADED;
					goto out;
				}
				ret = create_module_list_entry(change, false);
				if (ret != OK)
					goto out;
				continue;
			}
			retval = use_module(change->new_code_mod,
					    change->target);
			if (retval != 1) {
				ret = UNEXPECTED;
				goto out;
			}
		}
		ret = create_module_list_entry(change, true);
		if (ret != OK)
			goto out;
		list_del(&change->list);
		list_add_tail(&change->list, &update->changes);

#ifdef KSPLICE_NEED_PARAINSTRUCTIONS
		if (change->target == NULL) {
			apply_paravirt(change->new_code.parainstructions,
				       change->new_code.parainstructions_end);
			apply_paravirt(change->old_code.parainstructions,
				       change->old_code.parainstructions_end);
		}
#endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
	}

	list_for_each_entry(change, &update->changes, list) {
		const struct ksplice_section *sect;
		for (sect = change->new_code.sections;
		     sect < change->new_code.sections_end; sect++) {
			struct safety_record *rec = kmalloc(sizeof(*rec),
							    GFP_KERNEL);
			if (rec == NULL) {
				ret = OUT_OF_MEMORY;
				goto out;
			}
			rec->addr = sect->address;
			rec->size = sect->size;
			rec->label = sect->symbol->label;
			list_add(&rec->list, &change->safety_records);
		}
	}

	list_for_each_entry(change, &update->changes, list) {
		ret = init_symbol_arrays(change);
		if (ret != OK) {
			cleanup_symbol_arrays(change);
			goto out;
		}
		ret = prepare_change(change);
		cleanup_symbol_arrays(change);
		if (ret != OK)
			goto out;
	}
	ret = patch_action(update, KS_APPLY);
out:
	list_for_each_entry(change, &update->changes, list) {
		struct ksplice_section *s;
		if (update->stage == STAGE_PREPARING)
			clear_list(&change->safety_records,
				   struct safety_record, list);
		for (s = change->old_code.sections;
		     s < change->old_code.sections_end; s++) {
			if (s->match_map != NULL) {
				vfree(s->match_map);
				s->match_map = NULL;
			}
		}
	}
	if (update->stage == STAGE_PREPARING)
		cleanup_module_list_entries(update);

	if (ret == OK)
		printk(KERN_INFO "ksplice: Update %s applied successfully\n",
		       update->kid);
	return ret;
}

static abort_t reverse_update(struct update *update)
{
	abort_t ret;
	struct ksplice_mod_change *change;

	clear_debug_buf(update);
	ret = init_debug_buf(update);
	if (ret != OK)
		return ret;

	_ksdebug(update, "Preparing to reverse %s\n", update->kid);

	ret = patch_action(update, KS_REVERSE);
	if (ret != OK)
		return ret;

	list_for_each_entry(change, &update->changes, list)
		clear_list(&change->safety_records, struct safety_record, list);

	printk(KERN_INFO "ksplice: Update %s reversed successfully\n",
	       update->kid);
	return OK;
}

static int compare_symbolp_names(const void *a, const void *b)
{
	const struct ksplice_symbol *const *sympa = a, *const *sympb = b;
	if ((*sympa)->name == NULL && (*sympb)->name == NULL)
		return 0;
	if ((*sympa)->name == NULL)
		return -1;
	if ((*sympb)->name == NULL)
		return 1;
	return strcmp((*sympa)->name, (*sympb)->name);
}

static int compare_symbolp_labels(const void *a, const void *b)
{
	const struct ksplice_symbol *const *sympa = a, *const *sympb = b;
	return strcmp((*sympa)->label, (*sympb)->label);
}

static int symbolp_bsearch_compare(const void *key, const void *elt)
{
	const char *name = key;
	const struct ksplice_symbol *const *symp = elt;
	const struct ksplice_symbol *sym = *symp;
	if (sym->name == NULL)
		return 1;
	return strcmp(name, sym->name);
}

static abort_t add_matching_values(struct ksplice_lookup *lookup,
				   const char *sym_name, unsigned long sym_val)
{
	struct ksplice_symbol **symp;
	abort_t ret;

	symp = bsearch(sym_name, lookup->arr, lookup->size,
		       sizeof(*lookup->arr), symbolp_bsearch_compare);
	if (symp == NULL)
		return OK;
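
	/* bsearch may return any element of a run of equal names; back up
	 * to the first match so the loop below visits the whole run. */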
	while (symp > lookup->arr &&
	       symbolp_bsearch_compare(sym_name, symp - 1) == 0)
		symp--;

	for (; symp < lookup->arr + lookup->size; symp++) {
		struct ksplice_symbol *sym = *symp;
		if (sym->name == NULL || strcmp(sym_name, sym->name) != 0)
			break;
		ret = add_candidate_val(lookup->change,
					sym->candidate_vals, sym_val);
		if (ret != OK)
			return ret;
	}
	return OK;
}

#ifdef CONFIG_KALLSYMS
static int add_kallsyms_values(void *data, const char *name,
			       struct module *owner, unsigned long val)
{
	struct ksplice_lookup *lookup = data;
	if (owner == lookup->change->new_code_mod ||
	    !patches_module(owner, lookup->change->target))
		return (__force int)OK;
	return (__force int)add_matching_values(lookup, name, val);
}
#endif /* CONFIG_KALLSYMS */

static bool add_export_values(const struct symsearch *syms,
			      struct module *owner,
			      unsigned int symnum, void *data)
{
	struct ksplice_lookup *lookup = data;
	abort_t ret;

	ret = add_matching_values(lookup, syms->start[symnum].name,
				  syms->start[symnum].value);
	if (ret != OK) {
		lookup->ret = ret;
		return true;
	}
	return false;
}

static void cleanup_symbol_arrays(struct ksplice_mod_change *change)
{
	struct ksplice_symbol *sym;
	for (sym = change->new_code.symbols; sym < change->new_code.symbols_end;
	     sym++) {
		if (sym->candidate_vals != NULL) {
			clear_list(sym->candidate_vals, struct candidate_val,
				   list);
			kfree(sym->candidate_vals);
			sym->candidate_vals = NULL;
		}
	}
	for (sym = change->old_code.symbols; sym < change->old_code.symbols_end;
	     sym++) {
		if (sym->candidate_vals != NULL) {
			clear_list(sym->candidate_vals, struct candidate_val,
				   list);
			kfree(sym->candidate_vals);
			sym->candidate_vals = NULL;
		}
	}
}

/*
 * The new_code and old_code modules each have their own independent
 * ksplice_symbol structures.  uniquify_symbols unifies these separate
 * pieces of kernel symbol information by replacing all references to
 * the old_code copy of symbols with references to the new_code copy.
 */
static abort_t uniquify_symbols(struct ksplice_mod_change *change)
{
	struct ksplice_reloc *r;
	struct ksplice_section *s;
	struct ksplice_symbol *sym, **sym_arr, **symp;
	size_t size = change->new_code.symbols_end - change->new_code.symbols;

	if (size == 0)
		return OK;

	sym_arr = vmalloc(sizeof(*sym_arr) * size);
	if (sym_arr == NULL)
		return OUT_OF_MEMORY;

	for (symp = sym_arr, sym = change->new_code.symbols;
	     symp < sym_arr + size && sym < change->new_code.symbols_end;
	     sym++, symp++)
		*symp = sym;

	sort(sym_arr, size, sizeof(*sym_arr), compare_symbolp_labels, NULL);

	for (r = change->old_code.relocs; r < change->old_code.relocs_end;
	     r++) {
		symp = bsearch(&r->symbol, sym_arr, size, sizeof(*sym_arr),
			       compare_symbolp_labels);
		if (symp != NULL) {
			if ((*symp)->name == NULL)
				(*symp)->name = r->symbol->name;
			r->symbol = *symp;
		}
	}

	for (s = change->old_code.sections; s < change->old_code.sections_end;
	     s++) {
		symp = bsearch(&s->symbol, sym_arr, size, sizeof(*sym_arr),
			       compare_symbolp_labels);
		if (symp != NULL) {
			if ((*symp)->name == NULL)
				(*symp)->name = s->symbol->name;
			s->symbol = *symp;
		}
	}

	vfree(sym_arr);
	return OK;
}

/*
 * Initialize the ksplice_symbol structures in the given array using
 * the kallsyms and exported symbol tables.
 */
static abort_t init_symbol_array(struct ksplice_mod_change *change,
				 struct ksplice_symbol *start,
				 struct ksplice_symbol *end)
{
	struct ksplice_symbol *sym, **sym_arr, **symp;
	struct ksplice_lookup lookup;
	size_t size = end - start;
	abort_t ret;

	if (size == 0)
		return OK;

	for (sym = start; sym < end; sym++) {
		if (strstarts(sym->label, "__ksymtab")) {
			const struct kernel_symbol *ksym;
			const char *colon = strchr(sym->label, ':');
			const char *name = colon + 1;
			if (colon == NULL)
				continue;
			ksym = find_symbol(name, NULL, NULL, true, false);
			if (ksym == NULL) {
				ksdebug(change, "Could not find kernel_symbol "
					"structure for %s\n", name);
				continue;
			}
			sym->value = (unsigned long)ksym;
			sym->candidate_vals = NULL;
			continue;
		}

		sym->candidate_vals = kmalloc(sizeof(*sym->candidate_vals),
					      GFP_KERNEL);
		if (sym->candidate_vals == NULL)
			return OUT_OF_MEMORY;
		INIT_LIST_HEAD(sym->candidate_vals);
		sym->value = 0;
	}

	sym_arr = vmalloc(sizeof(*sym_arr) * size);
	if (sym_arr == NULL)
		return OUT_OF_MEMORY;

	for (symp = sym_arr, sym = start; symp < sym_arr + size && sym < end;
	     sym++, symp++)
		*symp = sym;

	sort(sym_arr, size, sizeof(*sym_arr), compare_symbolp_names, NULL);

	lookup.change = change;
	lookup.arr = sym_arr;
	lookup.size = size;
	lookup.ret = OK;

	each_symbol(add_export_values, &lookup);
	ret = lookup.ret;
#ifdef CONFIG_KALLSYMS
	if (ret == OK)
		ret = (__force abort_t)
		    kallsyms_on_each_symbol(add_kallsyms_values, &lookup);
#endif /* CONFIG_KALLSYMS */
	vfree(sym_arr);
	return ret;
}

/* Prepare the change's ksplice_symbol structures for run-pre matching */
static abort_t init_symbol_arrays(struct ksplice_mod_change *change)
{
	abort_t ret;

	ret = uniquify_symbols(change);
	if (ret != OK)
		return ret;

	ret = init_symbol_array(change, change->old_code.symbols,
				change->old_code.symbols_end);
	if (ret != OK)
		return ret;

	ret = init_symbol_array(change, change->new_code.symbols,
				change->new_code.symbols_end);
	if (ret != OK)
		return ret;

	return OK;
}

static abort_t prepare_change(struct ksplice_mod_change *change)
{
	abort_t ret;

	ksdebug(change, "Preparing and checking %s\n", change->name);
	ret = match_change_sections(change, false);
	if (ret == NO_MATCH) {
		/* It is possible that by using relocations from .data sections
		 * we can successfully run-pre match the rest of the sections.
		 * To avoid using any symbols obtained from .data sections
		 * (which may be unreliable) in the post code, we first prepare
		 * the post code and then try to run-pre match the remaining
		 * sections with the help of .data sections.
		 */
		ksdebug(change, "Continuing without some sections; we might "
			"find them later.\n");
		ret = finalize_change(change);
		if (ret != OK) {
			ksdebug(change, "Aborted. Unable to continue without "
				"the unmatched sections.\n");
			return ret;
		}

		ksdebug(change, "run-pre: Considering .data sections to find "
			"the unmatched sections\n");
		ret = match_change_sections(change, true);
		if (ret != OK)
			return ret;

		ksdebug(change, "run-pre: Found all previously unmatched "
			"sections\n");
		return OK;
	} else if (ret != OK) {
		return ret;
	}

	return finalize_change(change);
}

/*
 * Finish preparing the change for insertion into the kernel.
 * Afterwards, the replacement code should be ready to run and the
 * ksplice_patches should all be ready for trampoline insertion.
 */
static abort_t finalize_change(struct ksplice_mod_change *change)
{
	abort_t ret;
	ret = apply_relocs(change, change->new_code.relocs,
			   change->new_code.relocs_end);
	if (ret != OK)
		return ret;

	ret = finalize_patches(change);
	if (ret != OK)
		return ret;

	return OK;
}

static abort_t finalize_patches(struct ksplice_mod_change *change)
{
	struct ksplice_patch *p;
	struct safety_record *rec;
	abort_t ret;

	for (p = change->patches; p < change->patches_end; p++) {
		bool found = false;
		list_for_each_entry(rec, &change->safety_records, list) {
			if (rec->addr <= p->oldaddr &&
			    p->oldaddr < rec->addr + rec->size) {
				found = true;
				break;
			}
		}
		if (!found && p->type != KSPLICE_PATCH_EXPORT) {
			const struct ksplice_reloc *r = patch_reloc(change, p);
			if (r == NULL) {
				ksdebug(change, "A patch with no reloc at its "
					"oldaddr has no safety record\n");
				return NO_MATCH;
			}
			ksdebug(change, "No safety record for patch with "
				"oldaddr %s+%lx\n", r->symbol->label,
				r->target_addend);
			return NO_MATCH;
		}

		if (p->type == KSPLICE_PATCH_TEXT) {
			ret = prepare_trampoline(change, p);
			if (ret != OK)
				return ret;
		}

		if (found && rec->addr + rec->size < p->oldaddr + p->size) {
			ksdebug(change, "Safety record %s is too short for "
				"patch\n", rec->label);
			return UNEXPECTED;
		}

		if (p->type == KSPLICE_PATCH_TEXT) {
			if (p->repladdr == 0)
				p->repladdr = (unsigned long)ksplice_deleted;
		}
	}
	return OK;
}

/* noinline to prevent garbage on the stack from confusing check_stack */
static noinline abort_t map_trampoline_pages(struct update *update)
{
	struct ksplice_mod_change *change;
	list_for_each_entry(change, &update->changes, list) {
		struct ksplice_patch *p;
		for (p = change->patches; p < change->patches_end; p++) {
			p->vaddr = map_writable((void *)p->oldaddr, p->size);
			if (p->vaddr == NULL) {
				ksdebug(change,
					"Unable to map oldaddr read/write\n");
				unmap_trampoline_pages(update);
				return UNEXPECTED;
			}
		}
	}
	return OK;
}

static void unmap_trampoline_pages(struct update *update)
{
	struct ksplice_mod_change *change;
	list_for_each_entry(change, &update->changes, list) {
		struct ksplice_patch *p;
		for (p = change->patches; p < change->patches_end; p++) {
			vunmap((void *)((unsigned long)p->vaddr & PAGE_MASK));
			p->vaddr = NULL;
		}
	}
}

/*
 * map_writable creates a shadow page mapping of the range
 * [addr, addr + len) so that we can write to code mapped read-only.
 *
 * It is similar to a generalized version of x86's text_poke.  But
 * because one cannot use vmalloc/vfree() inside stop_machine, we use
 * map_writable to map the pages before stop_machine, then use the
 * mapping inside stop_machine, and unmap the pages afterwards.
 */
static void *map_writable(void *addr, size_t len)
{
	void *vaddr;
	int nr_pages = DIV_ROUND_UP(offset_in_page(addr) + len, PAGE_SIZE);
	struct page **pages = kmalloc(nr_pages * sizeof(*pages), GFP_KERNEL);
	void *page_addr = (void *)((unsigned long)addr & PAGE_MASK);
	int i;

	if (pages == NULL)
		return NULL;

	for (i = 0; i < nr_pages; i++) {
		if (__module_address((unsigned long)page_addr) == NULL) {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) || !defined(CONFIG_X86_64)
			pages[i] = virt_to_page(page_addr);
#else /* LINUX_VERSION_CODE < && CONFIG_X86_64 */
/* e3ebadd95cb621e2c7436f3d3646447ac9d5c16d was after 2.6.21 */
			pages[i] =
			    pfn_to_page(__pa_symbol(page_addr) >> PAGE_SHIFT);
#endif /* LINUX_VERSION_CODE || !CONFIG_X86_64 */
			WARN_ON(!PageReserved(pages[i]));
		} else {
			pages[i] = vmalloc_to_page(page_addr);
		}
		if (pages[i] == NULL) {
			kfree(pages);
			return NULL;
		}
		page_addr += PAGE_SIZE;
	}
	vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
	kfree(pages);
	if (vaddr == NULL)
		return NULL;
	return vaddr + offset_in_page(addr);
}
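
/* Usage sketch for map_writable (condensed from map_trampoline_pages
 * above and the trampoline insertion code later in this file; `p' is a
 * struct ksplice_patch, with field names per ksplice.h): map before
 * stop_machine, poke through the alias inside stop_machine, and unmap
 * afterwards.
 *
 *	p->vaddr = map_writable((void *)p->oldaddr, p->size);
 *	...
 *	memcpy(p->vaddr, p->contents, p->size);   // inside stop_machine
 *	...
 *	vunmap((void *)((unsigned long)p->vaddr & PAGE_MASK));
 */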

/*
 * Ksplice adds a dependency on any symbol address used to resolve
 * relocations in the new_code module.
 *
 * Be careful to follow_trampolines so that we always depend on the
 * latest version of the target function, since that's the code that
 * will run if we call addr.
 */
static abort_t add_dependency_on_address(struct ksplice_mod_change *change,
					 unsigned long addr)
{
	struct ksplice_mod_change *c;
	struct module *m =
	    __module_text_address(follow_trampolines(change, addr));
	if (m == NULL)
		return OK;
	list_for_each_entry(c, &change->update->changes, list) {
		if (m == c->new_code_mod)
			return OK;
	}
	if (use_module(change->new_code_mod, m) != 1)
		return MODULE_BUSY;
	return OK;
}

static abort_t apply_relocs(struct ksplice_mod_change *change,
			    const struct ksplice_reloc *relocs,
			    const struct ksplice_reloc *relocs_end)
{
	const struct ksplice_reloc *r;
	for (r = relocs; r < relocs_end; r++) {
		abort_t ret = apply_reloc(change, r);
		if (ret != OK)
			return ret;
	}
	return OK;
}

static abort_t apply_reloc(struct ksplice_mod_change *change,
			   const struct ksplice_reloc *r)
{
	switch (r->howto->type) {
	case KSPLICE_HOWTO_RELOC:
	case KSPLICE_HOWTO_RELOC_PATCH:
		return apply_howto_reloc(change, r);
	case KSPLICE_HOWTO_DATE:
	case KSPLICE_HOWTO_TIME:
		return apply_howto_date(change, r);
	default:
		ksdebug(change, "Unexpected howto type %d\n", r->howto->type);
		return UNEXPECTED;
	}
}

/*
 * Applies a relocation.  Aborts if the symbol referenced in it has
 * not been uniquely resolved.
 */
static abort_t apply_howto_reloc(struct ksplice_mod_change *change,
				 const struct ksplice_reloc *r)
{
	abort_t ret;
	int canary_ret;
	unsigned long sym_addr;
	LIST_HEAD(vals);

	canary_ret = contains_canary(change, r->blank_addr, r->howto);
	if (canary_ret < 0)
		return UNEXPECTED;
	if (canary_ret == 0) {
		ksdebug(change, "reloc: skipped %lx to %s+%lx (altinstr)\n",
			r->blank_addr, r->symbol->label, r->target_addend);
		return OK;
	}

#ifdef KSPLICE_STANDALONE
	if (!bootstrapped) {
		ret = add_system_map_candidates(change,
						change->new_code.system_map,
						change->new_code.system_map_end,
						r->symbol->label, &vals);
		if (ret != OK) {
			release_vals(&vals);
			return ret;
		}
	}
#endif /* KSPLICE_STANDALONE */
	ret = lookup_symbol(change, r->symbol, &vals);
	if (ret != OK) {
		release_vals(&vals);
		return ret;
	}
	/*
	 * Relocations for the oldaddr fields of patches must have
	 * been resolved via run-pre matching.
	 */
	if (!singular(&vals) || (r->symbol->candidate_vals != NULL &&
				 r->howto->type == KSPLICE_HOWTO_RELOC_PATCH)) {
		release_vals(&vals);
		ksdebug(change, "Failed to find %s for reloc\n",
			r->symbol->label);
		return FAILED_TO_FIND;
	}
	sym_addr = list_entry(vals.next, struct candidate_val, list)->val;
	release_vals(&vals);

	ret = write_reloc_value(change, r, r->blank_addr,
				r->howto->pcrel ? sym_addr - r->blank_addr :
				sym_addr);
	if (ret != OK)
		return ret;

	ksdebug(change, "reloc: %lx to %s+%lx (S=%lx ", r->blank_addr,
		r->symbol->label, r->target_addend, sym_addr);
	switch (r->howto->size) {
	case 1:
		ksdebug(change, "aft=%02x)\n", *(uint8_t *)r->blank_addr);
		break;
	case 2:
		ksdebug(change, "aft=%04x)\n", *(uint16_t *)r->blank_addr);
		break;
	case 4:
		ksdebug(change, "aft=%08x)\n", *(uint32_t *)r->blank_addr);
		break;
#if BITS_PER_LONG >= 64
	case 8:
		ksdebug(change, "aft=%016llx)\n", *(uint64_t *)r->blank_addr);
		break;
#endif /* BITS_PER_LONG */
	default:
		ksdebug(change, "Aborted. Invalid relocation size.\n");
		return UNEXPECTED;
	}
#ifdef KSPLICE_STANDALONE
	if (!bootstrapped)
		return OK;
#endif /* KSPLICE_STANDALONE */

	/*
	 * Create labelvals so that we can verify our choices in the
	 * second round of run-pre matching that considers data sections.
	 */
	ret = create_labelval(change, r->symbol, sym_addr, VAL);
	if (ret != OK)
		return ret;

	return add_dependency_on_address(change, sym_addr);
}

/*
 * Date relocations are created wherever __DATE__ or __TIME__ is used
 * in the kernel; we resolve them by simply copying in the date/time
 * obtained from run-pre matching the relevant compilation unit.
 */
static abort_t apply_howto_date(struct ksplice_mod_change *change,
				const struct ksplice_reloc *r)
{
	if (r->symbol->candidate_vals != NULL) {
		ksdebug(change, "Failed to find %s for date\n",
			r->symbol->label);
		return FAILED_TO_FIND;
	}
	memcpy((unsigned char *)r->blank_addr,
	       (const unsigned char *)r->symbol->value, r->howto->size);
	return OK;
}

/*
 * Given a relocation and its run address, compute the address of the
 * symbol the relocation referenced, and store it in *valp.
 */
static abort_t read_reloc_value(struct ksplice_mod_change *change,
				const struct ksplice_reloc *r,
				unsigned long addr, unsigned long *valp)
{
	unsigned char bytes[sizeof(long)];
	unsigned long val;
	const struct ksplice_reloc_howto *howto = r->howto;

	if (howto->size <= 0 || howto->size > sizeof(long)) {
		ksdebug(change, "Aborted. Invalid relocation size.\n");
		return UNEXPECTED;
	}

	if (probe_kernel_read(bytes, (void *)addr, howto->size) == -EFAULT)
		return NO_MATCH;

	switch (howto->size) {
	case 1:
		val = *(uint8_t *)bytes;
		break;
	case 2:
		val = *(uint16_t *)bytes;
		break;
	case 4:
		val = *(uint32_t *)bytes;
		break;
#if BITS_PER_LONG >= 64
	case 8:
		val = *(uint64_t *)bytes;
		break;
#endif /* BITS_PER_LONG */
	default:
		ksdebug(change, "Aborted. Invalid relocation size.\n");
		return UNEXPECTED;
	}

	val &= howto->dst_mask;
1767 if (howto->signed_addend)
1768 val |= -(val & (howto->dst_mask & ~(howto->dst_mask >> 1)));
1769 val <<= howto->rightshift;
1770 val -= r->insn_addend + r->target_addend;
1771 *valp = val;
1772 return OK;
1776 * Given a relocation, the address of its storage unit, and the
1777 * address of the symbol the relocation references, write the
1778 * relocation's final value into the storage unit.
1780 static abort_t write_reloc_value(struct ksplice_mod_change *change,
1781 const struct ksplice_reloc *r,
1782 unsigned long addr, unsigned long sym_addr)
1784 unsigned long val = sym_addr + r->target_addend + r->insn_addend;
1785 const struct ksplice_reloc_howto *howto = r->howto;
1786 val >>= howto->rightshift;
1787 switch (howto->size) {
1788 case 1:
1789 *(uint8_t *)addr = (*(uint8_t *)addr & ~howto->dst_mask) |
1790 (val & howto->dst_mask);
1791 break;
1792 case 2:
1793 *(uint16_t *)addr = (*(uint16_t *)addr & ~howto->dst_mask) |
1794 (val & howto->dst_mask);
1795 break;
1796 case 4:
1797 *(uint32_t *)addr = (*(uint32_t *)addr & ~howto->dst_mask) |
1798 (val & howto->dst_mask);
1799 break;
1800 #if BITS_PER_LONG >= 64
1801 case 8:
1802 *(uint64_t *)addr = (*(uint64_t *)addr & ~howto->dst_mask) |
1803 (val & howto->dst_mask);
1804 break;
1805 #endif /* BITS_PER_LONG */
1806 default:
1807 ksdebug(change, "Aborted. Invalid relocation size.\n");
1808 return UNEXPECTED;
1811 if (read_reloc_value(change, r, addr, &val) != OK || val != sym_addr) {
1812 ksdebug(change, "Aborted. Relocation overflow.\n");
1813 return UNEXPECTED;
1816 return OK;
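/*
 * Editor's note: a worked example of the howto arithmetic above, added
 * for illustration and not part of the original source.  The concrete
 * numbers are hypothetical; the #if 0 guard keeps it out of the build.
 */
#if 0
/* A 4-byte absolute reloc with dst_mask 0xffffffff, rightshift 0 and
 * signed_addend set, mirroring write_reloc_value/read_reloc_value. */
static unsigned long example_store_load(unsigned long sym_addr)
{
	const unsigned long dst_mask = 0xffffffff;
	uint32_t word = 0;
	unsigned long val;

	/* store, as in write_reloc_value (addends and shift are 0) */
	word = (word & ~dst_mask) | (sym_addr & dst_mask);

	/* load, as in read_reloc_value: mask, then sign-extend */
	val = word & dst_mask;
	val |= -(val & (dst_mask & ~(dst_mask >> 1)));

	/* For a pcrel reloc the caller would have passed
	 * sym_addr - blank_addr instead, so the stored field is a
	 * displacement; read_reloc_value's caller adds run_addr back. */
	return val;	/* equals sym_addr when it fits in 32 bits */
}
#endif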
1819 static abort_t create_module_list_entry(struct ksplice_mod_change *change,
1820 bool to_be_applied)
1822 struct ksplice_module_list_entry *entry =
1823 kmalloc(sizeof(*entry), GFP_KERNEL);
1824 if (entry == NULL)
1825 return OUT_OF_MEMORY;
1826 entry->new_code_mod_name =
1827 kstrdup(change->new_code_mod->name, GFP_KERNEL);
1828 if (entry->new_code_mod_name == NULL) {
1829 kfree(entry);
1830 return OUT_OF_MEMORY;
1832 entry->target_mod_name = kstrdup(change->target_name, GFP_KERNEL);
1833 if (entry->target_mod_name == NULL) {
1834 kfree(entry->new_code_mod_name);
1835 kfree(entry);
1836 return OUT_OF_MEMORY;
1838 /* The update's kid is guaranteed to outlast the module_list_entry */
1839 entry->kid = change->update->kid;
1840 entry->applied = to_be_applied;
1841 list_add(&entry->update_list, &change->update->ksplice_module_list);
1842 return OK;
1845 static void cleanup_module_list_entries(struct update *update)
1847 struct ksplice_module_list_entry *entry;
1848 list_for_each_entry(entry, &update->ksplice_module_list, update_list) {
1849 kfree(entry->target_mod_name);
1850 kfree(entry->new_code_mod_name);
1852 clear_list(&update->ksplice_module_list,
1853 struct ksplice_module_list_entry, update_list);
1856 /* Replacement address used for functions deleted by the patch */
1857 static void __attribute__((noreturn)) ksplice_deleted(void)
1859 printk(KERN_CRIT "Called a kernel function deleted by Ksplice!\n");
1860 BUG();
1861 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
1862 /* 91768d6c2bad0d2766a166f13f2f57e197de3458 was after 2.6.19 */
1863 for (;;);
1864 #endif
1867 /* Floodfill to run-pre match the sections within a change. */
1868 static abort_t match_change_sections(struct ksplice_mod_change *change,
1869 bool consider_data_sections)
1871 struct ksplice_section *sect;
1872 abort_t ret;
1873 int remaining = 0;
1874 bool progress;
1876 for (sect = change->old_code.sections;
1877 sect < change->old_code.sections_end; sect++) {
1878 if ((sect->flags & KSPLICE_SECTION_DATA) == 0 &&
1879 (sect->flags & KSPLICE_SECTION_STRING) == 0 &&
1880 (sect->flags & KSPLICE_SECTION_MATCHED) == 0)
1881 remaining++;
1884 while (remaining > 0) {
1885 progress = false;
1886 for (sect = change->old_code.sections;
1887 sect < change->old_code.sections_end; sect++) {
1888 if ((sect->flags & KSPLICE_SECTION_MATCHED) != 0)
1889 continue;
1890 if ((!consider_data_sections &&
1891 (sect->flags & KSPLICE_SECTION_DATA) != 0) ||
1892 (sect->flags & KSPLICE_SECTION_STRING) != 0)
1893 continue;
1894 ret = find_section(change, sect);
1895 if (ret == OK) {
1896 sect->flags |= KSPLICE_SECTION_MATCHED;
1897 if ((sect->flags & KSPLICE_SECTION_DATA) == 0)
1898 remaining--;
1899 progress = true;
1900 } else if (ret != NO_MATCH) {
1901 return ret;
1905 if (progress)
1906 continue;
1908 for (sect = change->old_code.sections;
1909 sect < change->old_code.sections_end; sect++) {
1910 if ((sect->flags & KSPLICE_SECTION_MATCHED) != 0 ||
1911 (sect->flags & KSPLICE_SECTION_STRING) != 0)
1912 continue;
1913 ksdebug(change, "run-pre: could not match %s "
1914 "section %s\n",
1915 (sect->flags & KSPLICE_SECTION_DATA) != 0 ?
1916 "data" :
1917 (sect->flags & KSPLICE_SECTION_RODATA) != 0 ?
1918 "rodata" : "text", sect->symbol->label);
1920 ksdebug(change, "Aborted. run-pre: could not match some "
1921 "sections.\n");
1922 return NO_MATCH;
1924 return OK;
1928 * Search for the section in the running kernel. Returns OK if and
1929 * only if it finds precisely one address in the kernel matching the
1930 * section.
1932 static abort_t find_section(struct ksplice_mod_change *change,
1933 struct ksplice_section *sect)
1935 int i;
1936 abort_t ret;
1937 unsigned long run_addr;
1938 LIST_HEAD(vals);
1939 struct candidate_val *v, *n;
1941 #ifdef KSPLICE_STANDALONE
1942 ret = add_system_map_candidates(change, change->old_code.system_map,
1943 change->old_code.system_map_end,
1944 sect->symbol->label, &vals);
1945 if (ret != OK) {
1946 release_vals(&vals);
1947 return ret;
1949 #endif /* KSPLICE_STANDALONE */
1950 ret = lookup_symbol(change, sect->symbol, &vals);
1951 if (ret != OK) {
1952 release_vals(&vals);
1953 return ret;
1956 ksdebug(change, "run-pre: starting sect search for %s\n",
1957 sect->symbol->label);
1959 list_for_each_entry_safe(v, n, &vals, list) {
1960 run_addr = v->val;
1962 yield();
1963 ret = try_addr(change, sect, run_addr, NULL, RUN_PRE_INITIAL);
1964 if (ret == NO_MATCH) {
1965 list_del(&v->list);
1966 kfree(v);
1967 } else if (ret != OK) {
1968 release_vals(&vals);
1969 return ret;
1973 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
1974 if (list_empty(&vals) && (sect->flags & KSPLICE_SECTION_DATA) == 0) {
1975 ret = brute_search_all(change, sect, &vals);
1976 if (ret != OK) {
1977 release_vals(&vals);
1978 return ret;
1981 * Make sure run-pre matching output is displayed if
1982 * brute_search succeeds.
1984 if (singular(&vals)) {
1985 run_addr = list_entry(vals.next, struct candidate_val,
1986 list)->val;
1987 ret = try_addr(change, sect, run_addr, NULL,
1988 RUN_PRE_INITIAL);
1989 if (ret != OK) {
1990 ksdebug(change, "run-pre: Debug run failed for "
1991 "sect %s:\n", sect->symbol->label);
1992 release_vals(&vals);
1993 return ret;
1997 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
1999 if (singular(&vals)) {
2000 LIST_HEAD(safety_records);
2001 run_addr = list_entry(vals.next, struct candidate_val,
2002 list)->val;
2003 ret = try_addr(change, sect, run_addr, &safety_records,
2004 RUN_PRE_FINAL);
2005 release_vals(&vals);
2006 if (ret != OK) {
2007 clear_list(&safety_records, struct safety_record, list);
2008 ksdebug(change, "run-pre: Final run failed for sect "
2009 "%s:\n", sect->symbol->label);
2010 } else {
2011 list_splice(&safety_records, &change->safety_records);
2013 return ret;
2014 } else if (!list_empty(&vals)) {
2015 struct candidate_val *val;
2016 ksdebug(change, "run-pre: multiple candidates for sect %s:\n",
2017 sect->symbol->label);
2018 i = 0;
2019 list_for_each_entry(val, &vals, list) {
2020 i++;
2021 ksdebug(change, "%lx\n", val->val);
2022 if (i > 5) {
2023 ksdebug(change, "...\n");
2024 break;
2027 release_vals(&vals);
2028 return NO_MATCH;
2030 release_vals(&vals);
2031 return NO_MATCH;
2035 * try_addr is the interface to run-pre matching. Its primary
2036 * purpose is to manage debugging information for run-pre matching;
2037 * all the hard work is in run_pre_cmp.
2039 static abort_t try_addr(struct ksplice_mod_change *change,
2040 struct ksplice_section *sect,
2041 unsigned long run_addr,
2042 struct list_head *safety_records,
2043 enum run_pre_mode mode)
2045 abort_t ret;
2046 const struct module *run_module = __module_address(run_addr);
2048 if (run_module == change->new_code_mod) {
2049 ksdebug(change, "run-pre: unexpected address %lx in new_code "
2050 "module %s for sect %s\n", run_addr, run_module->name,
2051 sect->symbol->label);
2052 return UNEXPECTED;
2054 if (!patches_module(run_module, change->target)) {
2055 ksdebug(change, "run-pre: ignoring address %lx in other module "
2056 "%s for sect %s\n", run_addr, run_module == NULL ?
2057 "vmlinux" : run_module->name, sect->symbol->label);
2058 return NO_MATCH;
2061 ret = create_labelval(change, sect->symbol, run_addr, TEMP);
2062 if (ret != OK)
2063 return ret;
2065 #ifdef CONFIG_FUNCTION_DATA_SECTIONS
2066 ret = run_pre_cmp(change, sect, run_addr, safety_records, mode);
2067 #else /* !CONFIG_FUNCTION_DATA_SECTIONS */
2068 if ((sect->flags & KSPLICE_SECTION_TEXT) != 0)
2069 ret = arch_run_pre_cmp(change, sect, run_addr, safety_records,
2070 mode);
2071 else
2072 ret = run_pre_cmp(change, sect, run_addr, safety_records, mode);
2073 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
2074 if (ret == NO_MATCH && mode != RUN_PRE_FINAL) {
2075 set_temp_labelvals(change, NOVAL);
2076 ksdebug(change, "run-pre: %s sect %s does not match (r_a=%lx "
2077 "p_a=%lx s=%lx)\n",
2078 (sect->flags & KSPLICE_SECTION_RODATA) != 0 ? "rodata" :
2079 (sect->flags & KSPLICE_SECTION_DATA) != 0 ? "data" :
2080 "text", sect->symbol->label, run_addr, sect->address,
2081 sect->size);
2082 ksdebug(change, "run-pre: ");
2083 if (change->update->debug >= 1) {
2084 #ifdef CONFIG_FUNCTION_DATA_SECTIONS
2085 ret = run_pre_cmp(change, sect, run_addr,
2086 safety_records, RUN_PRE_DEBUG);
2087 #else /* !CONFIG_FUNCTION_DATA_SECTIONS */
2088 if ((sect->flags & KSPLICE_SECTION_TEXT) != 0)
2089 ret = arch_run_pre_cmp(change, sect, run_addr,
2090 safety_records,
2091 RUN_PRE_DEBUG);
2092 else
2093 ret = run_pre_cmp(change, sect, run_addr,
2094 safety_records,
2095 RUN_PRE_DEBUG);
2096 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
2097 set_temp_labelvals(change, NOVAL);
2099 ksdebug(change, "\n");
2100 return ret;
2101 } else if (ret != OK) {
2102 set_temp_labelvals(change, NOVAL);
2103 return ret;
2106 if (mode != RUN_PRE_FINAL) {
2107 set_temp_labelvals(change, NOVAL);
2108 ksdebug(change, "run-pre: candidate for sect %s=%lx\n",
2109 sect->symbol->label, run_addr);
2110 return OK;
2113 set_temp_labelvals(change, VAL);
2114 ksdebug(change, "run-pre: found sect %s=%lx\n", sect->symbol->label,
2115 run_addr);
2116 return OK;
2120 * run_pre_cmp is the primary run-pre matching function; it determines
2121 * whether the given ksplice_section matches the code or data in the
2122 * running kernel starting at run_addr.
2124 * If run_pre_mode is RUN_PRE_FINAL, a safety record for the matched
2125 * section is created.
2127 * The run_pre_mode is also used to determine what debugging
2128 * information to display.
2130 static abort_t run_pre_cmp(struct ksplice_mod_change *change,
2131 const struct ksplice_section *sect,
2132 unsigned long run_addr,
2133 struct list_head *safety_records,
2134 enum run_pre_mode mode)
2136 int matched = 0;
2137 abort_t ret;
2138 const struct ksplice_reloc *r, *finger;
2139 const unsigned char *pre, *run, *pre_start, *run_start;
2140 unsigned char runval;
2142 pre_start = (const unsigned char *)sect->address;
2143 run_start = (const unsigned char *)run_addr;
2145 finger = init_reloc_search(change, sect);
2147 pre = pre_start;
2148 run = run_start;
2149 while (pre < pre_start + sect->size) {
2150 unsigned long offset = pre - pre_start;
2151 ret = lookup_reloc(change, &finger, (unsigned long)pre, &r);
2152 if (ret == OK) {
2153 ret = handle_reloc(change, sect, r, (unsigned long)run,
2154 mode);
2155 if (ret != OK) {
2156 if (mode == RUN_PRE_INITIAL)
2157 ksdebug(change, "reloc in sect does "
2158 "not match after %lx/%lx "
2159 "bytes\n", offset, sect->size);
2160 return ret;
2162 if (mode == RUN_PRE_DEBUG)
2163 print_bytes(change, run, r->howto->size, pre,
2164 r->howto->size);
2165 pre += r->howto->size;
2166 run += r->howto->size;
2167 finger++;
2168 continue;
2169 } else if (ret != NO_MATCH) {
2170 return ret;
2173 if ((sect->flags & KSPLICE_SECTION_TEXT) != 0) {
2174 ret = handle_paravirt(change, (unsigned long)pre,
2175 (unsigned long)run, &matched);
2176 if (ret != OK)
2177 return ret;
2178 if (matched != 0) {
2179 if (mode == RUN_PRE_DEBUG)
2180 print_bytes(change, run, matched, pre,
2181 matched);
2182 pre += matched;
2183 run += matched;
2184 continue;
2188 if (probe_kernel_read(&runval, (void *)run, 1) == -EFAULT) {
2189 if (mode == RUN_PRE_INITIAL)
2190 ksdebug(change, "sect unmapped after %lx/%lx "
2191 "bytes\n", offset, sect->size);
2192 return NO_MATCH;
2195 if (runval != *pre &&
2196 (sect->flags & KSPLICE_SECTION_DATA) == 0) {
2197 if (mode == RUN_PRE_INITIAL)
2198 ksdebug(change, "sect does not match after "
2199 "%lx/%lx bytes\n", offset, sect->size);
2200 if (mode == RUN_PRE_DEBUG) {
2201 print_bytes(change, run, 1, pre, 1);
2202 ksdebug(change, "[p_o=%lx] ! ", offset);
2203 print_bytes(change, run + 1, 2, pre + 1, 2);
2205 return NO_MATCH;
2207 if (mode == RUN_PRE_DEBUG)
2208 print_bytes(change, run, 1, pre, 1);
2209 pre++;
2210 run++;
2212 return create_safety_record(change, sect, safety_records, run_addr,
2213 run - run_start);
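/*
 * Editor's note (summary added for clarity; not in the original
 * source): each iteration above consumes one unit of the pre section:
 * a Ksplice relocation (howto->size bytes, checked via handle_reloc),
 * a paravirt instruction sequence (matched bytes), or a single byte,
 * which must equal the run byte for text and rodata but may differ for
 * data sections, whose values are captured through relocations
 * instead.  A safety record is recorded only when the caller supplies
 * a record list, i.e. on the RUN_PRE_FINAL pass.
 */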
2216 static void print_bytes(struct ksplice_mod_change *change,
2217 const unsigned char *run, int runc,
2218 const unsigned char *pre, int prec)
2220 int o;
2221 int matched = min(runc, prec);
2222 for (o = 0; o < matched; o++) {
2223 if (run[o] == pre[o])
2224 ksdebug(change, "%02x ", run[o]);
2225 else
2226 ksdebug(change, "%02x/%02x ", run[o], pre[o]);
2228 for (o = matched; o < runc; o++)
2229 ksdebug(change, "%02x/ ", run[o]);
2230 for (o = matched; o < prec; o++)
2231 ksdebug(change, "/%02x ", pre[o]);
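/*
 * Editor's note (sample output, added for illustration): for
 * runc == prec == 3, run = 55 89 e5 and pre = 55 90 e5, the debug log
 * reads "55 89/90 e5 "; unmatched trailing bytes print as "xx/ "
 * (run side only) or "/xx " (pre side only).
 */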
2234 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
2235 static abort_t brute_search(struct ksplice_mod_change *change,
2236 struct ksplice_section *sect,
2237 const void *start, unsigned long len,
2238 struct list_head *vals)
2240 unsigned long addr;
2241 char run, pre;
2242 abort_t ret;
2244 for (addr = (unsigned long)start; addr < (unsigned long)start + len;
2245 addr++) {
2246 if (addr % 100000 == 0)
2247 yield();
2249 if (probe_kernel_read(&run, (void *)addr, 1) == -EFAULT)
2250 return OK;
2252 pre = *(const unsigned char *)(sect->address);
2254 if (run != pre)
2255 continue;
2257 ret = try_addr(change, sect, addr, NULL, RUN_PRE_INITIAL);
2258 if (ret == OK) {
2259 ret = add_candidate_val(change, vals, addr);
2260 if (ret != OK)
2261 return ret;
2262 } else if (ret != NO_MATCH) {
2263 return ret;
2267 return OK;
2270 static abort_t brute_search_all(struct ksplice_mod_change *change,
2271 struct ksplice_section *sect,
2272 struct list_head *vals)
2274 struct module *m;
2275 abort_t ret = OK;
2276 int saved_debug;
2278 ksdebug(change, "brute_search: searching for %s\n",
2279 sect->symbol->label);
2280 saved_debug = change->update->debug;
2281 change->update->debug = 0;
2283 list_for_each_entry(m, &modules, list) {
2284 if (!patches_module(m, change->target) ||
2285 m == change->new_code_mod)
2286 continue;
2287 ret = brute_search(change, sect, m->module_core, m->core_size,
2288 vals);
2289 if (ret != OK)
2290 goto out;
2291 ret = brute_search(change, sect, m->module_init, m->init_size,
2292 vals);
2293 if (ret != OK)
2294 goto out;
2297 ret = brute_search(change, sect, (const void *)init_mm.start_code,
2298 init_mm.end_code - init_mm.start_code, vals);
2300 out:
2301 change->update->debug = saved_debug;
2302 return ret;
2304 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
2306 struct range {
2307 unsigned long address;
2308 unsigned long size;
2311 static int reloc_bsearch_compare(const void *key, const void *elt)
2313 const struct range *range = key;
2314 const struct ksplice_reloc *r = elt;
2315 if (range->address + range->size <= r->blank_addr)
2316 return -1;
2317 if (range->address > r->blank_addr)
2318 return 1;
2319 return 0;
2322 static const struct ksplice_reloc *find_reloc(const struct ksplice_reloc *start,
2323 const struct ksplice_reloc *end,
2324 unsigned long address,
2325 unsigned long size)
2327 const struct ksplice_reloc *r;
2328 struct range range = { address, size };
2329 r = bsearch((void *)&range, start, end - start, sizeof(*r),
2330 reloc_bsearch_compare);
2331 if (r == NULL)
2332 return NULL;
2333 while (r > start && (r - 1)->blank_addr >= address)
2334 r--;
2335 return r;
2338 static const struct ksplice_reloc *
2339 init_reloc_search(struct ksplice_mod_change *change,
2340 const struct ksplice_section *sect)
2342 const struct ksplice_reloc *r;
2343 r = find_reloc(change->old_code.relocs, change->old_code.relocs_end,
2344 sect->address, sect->size);
2345 if (r == NULL)
2346 return change->old_code.relocs_end;
2347 return r;
2351 * lookup_reloc implements an amortized O(1) lookup for the next
2352 * old_code relocation. It must be called with a strictly increasing
2353 * sequence of addresses.
2355 * The fingerp is private data for lookup_reloc, and needs to have
2356 * been initialized as a pointer to the result of find_reloc (or
2357 * init_reloc_search).
2359 static abort_t lookup_reloc(struct ksplice_mod_change *change,
2360 const struct ksplice_reloc **fingerp,
2361 unsigned long addr,
2362 const struct ksplice_reloc **relocp)
2364 const struct ksplice_reloc *r = *fingerp;
2365 int canary_ret;
2367 while (r < change->old_code.relocs_end &&
2368 addr >= r->blank_addr + r->howto->size &&
2369 !(addr == r->blank_addr && r->howto->size == 0))
2370 r++;
2371 *fingerp = r;
2372 if (r == change->old_code.relocs_end)
2373 return NO_MATCH;
2374 if (addr < r->blank_addr)
2375 return NO_MATCH;
2376 *relocp = r;
2377 if (r->howto->type != KSPLICE_HOWTO_RELOC)
2378 return OK;
2380 canary_ret = contains_canary(change, r->blank_addr, r->howto);
2381 if (canary_ret < 0)
2382 return UNEXPECTED;
2383 if (canary_ret == 0) {
2384 ksdebug(change, "run-pre: reloc skipped at p_a=%lx to %s+%lx "
2385 "(altinstr)\n", r->blank_addr, r->symbol->label,
2386 r->target_addend);
2387 return NO_MATCH;
2389 if (addr != r->blank_addr) {
2390 ksdebug(change, "Invalid nonzero relocation offset\n");
2391 return UNEXPECTED;
2393 return OK;
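/*
 * Editor's note: an illustrative (hypothetical, uncompiled) sketch of
 * the finger-search contract described above; run_pre_cmp uses the
 * same pattern with the pre section's blank addresses.
 */
#if 0
static void example_reloc_walk(struct ksplice_mod_change *change,
			       const struct ksplice_section *sect)
{
	const struct ksplice_reloc *finger = init_reloc_search(change, sect);
	const struct ksplice_reloc *r;
	unsigned long addr;

	/* lookup_reloc requires strictly increasing addresses. */
	for (addr = sect->address; addr < sect->address + sect->size; addr++) {
		if (lookup_reloc(change, &finger, addr, &r) == OK) {
			/* addr is the start of relocation r */
		}
	}
}
#endif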
2396 static abort_t handle_reloc(struct ksplice_mod_change *change,
2397 const struct ksplice_section *sect,
2398 const struct ksplice_reloc *r,
2399 unsigned long run_addr, enum run_pre_mode mode)
2401 switch (r->howto->type) {
2402 case KSPLICE_HOWTO_RELOC:
2403 return handle_howto_reloc(change, sect, r, run_addr, mode);
2404 case KSPLICE_HOWTO_DATE:
2405 case KSPLICE_HOWTO_TIME:
2406 return handle_howto_date(change, sect, r, run_addr, mode);
2407 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
2408 #ifdef CONFIG_GENERIC_BUG
2409 case KSPLICE_HOWTO_BUG:
2410 return handle_bug(change, r, run_addr);
2411 #endif /* CONFIG_GENERIC_BUG */
2412 #else /* LINUX_VERSION_CODE < */
2413 /* 7664c5a1da4711bb6383117f51b94c8dc8f3f1cd was after 2.6.19 */
2414 #endif /* LINUX_VERSION_CODE */
2415 case KSPLICE_HOWTO_EXTABLE:
2416 return handle_extable(change, r, run_addr);
2417 default:
2418 ksdebug(change, "Unexpected howto type %d\n", r->howto->type);
2419 return UNEXPECTED;
2424 * For date/time relocations, we check that the sequence of bytes
2425 * matches the format of a date or time.
2427 static abort_t handle_howto_date(struct ksplice_mod_change *change,
2428 const struct ksplice_section *sect,
2429 const struct ksplice_reloc *r,
2430 unsigned long run_addr, enum run_pre_mode mode)
2432 abort_t ret;
2433 char *buf = kmalloc(r->howto->size, GFP_KERNEL);
2435 if (buf == NULL)
2436 return OUT_OF_MEMORY;
2437 if (probe_kernel_read(buf, (void *)run_addr, r->howto->size) == -EFAULT) {
2438 ret = NO_MATCH;
2439 goto out;
2442 switch (r->howto->type) {
2443 case KSPLICE_HOWTO_TIME:
2444 if (isdigit(buf[0]) && isdigit(buf[1]) && buf[2] == ':' &&
2445 isdigit(buf[3]) && isdigit(buf[4]) && buf[5] == ':' &&
2446 isdigit(buf[6]) && isdigit(buf[7]))
2447 ret = OK;
2448 else
2449 ret = NO_MATCH;
2450 break;
2451 case KSPLICE_HOWTO_DATE:
2452 if (isalpha(buf[0]) && isalpha(buf[1]) && isalpha(buf[2]) &&
2453 buf[3] == ' ' && (buf[4] == ' ' || isdigit(buf[4])) &&
2454 isdigit(buf[5]) && buf[6] == ' ' && isdigit(buf[7]) &&
2455 isdigit(buf[8]) && isdigit(buf[9]) && isdigit(buf[10]))
2456 ret = OK;
2457 else
2458 ret = NO_MATCH;
2459 break;
2460 default:
2461 ret = UNEXPECTED;
2463 if (ret == NO_MATCH && mode == RUN_PRE_INITIAL)
2464 ksdebug(change, "%s string: \"%.*s\" does not match format\n",
2465 r->howto->type == KSPLICE_HOWTO_DATE ? "date" : "time",
2466 r->howto->size, buf);
2468 if (ret != OK)
2469 goto out;
2470 ret = create_labelval(change, r->symbol, run_addr, TEMP);
2471 out:
2472 kfree(buf);
2473 return ret;
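/*
 * Editor's note (examples added for illustration): the checks above
 * accept exactly the formats GCC emits, e.g. "12:34:56" for __TIME__
 * (8 bytes) and "Jun  8 2009" or "Jun 18 2009" for __DATE__ (11
 * bytes, with the day of the month padded with a space).
 */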
2477 * Extract the value of a symbol used in a relocation in the pre code
2478 * during run-pre matching, giving an error if it conflicts with a
2479 * previously found value of that symbol
2481 static abort_t handle_howto_reloc(struct ksplice_mod_change *change,
2482 const struct ksplice_section *sect,
2483 const struct ksplice_reloc *r,
2484 unsigned long run_addr,
2485 enum run_pre_mode mode)
2487 struct ksplice_section *sym_sect = symbol_section(change, r->symbol);
2488 unsigned long offset = r->target_addend;
2489 unsigned long val;
2490 abort_t ret;
2492 ret = read_reloc_value(change, r, run_addr, &val);
2493 if (ret != OK)
2494 return ret;
2495 if (r->howto->pcrel)
2496 val += run_addr;
2498 #ifdef KSPLICE_STANDALONE
2499 /* The match_map is only used in KSPLICE_STANDALONE */
2500 if (sym_sect == NULL || sym_sect->match_map == NULL || offset == 0) {
2502 } else if (offset < 0 || offset >= sym_sect->size) {
2503 ksdebug(change, "Out of range relocation: %s+%lx -> %s+%lx",
2504 sect->symbol->label, r->blank_addr - sect->address,
2505 r->symbol->label, offset);
2506 return NO_MATCH;
2507 } else if (sect == sym_sect && sect->match_map[offset] == NULL) {
2508 sym_sect->match_map[offset] =
2509 (const unsigned char *)r->symbol->value + offset;
2510 } else if (sect == sym_sect && (unsigned long)sect->match_map[offset] ==
2511 r->symbol->value + offset) {
2513 } else if (sect == sym_sect) {
2514 ksdebug(change, "Relocations to nonmatching locations within "
2515 "section %s: %lx does not match %lx\n",
2516 sect->symbol->label, offset,
2517 (unsigned long)sect->match_map[offset] -
2518 r->symbol->value);
2519 return NO_MATCH;
2520 } else if ((sym_sect->flags & KSPLICE_SECTION_MATCHED) == 0) {
2521 if (mode == RUN_PRE_INITIAL)
2522 ksdebug(change, "Delaying matching of %s due to reloc "
2523 "from to unmatching section: %s+%lx\n",
2524 sect->symbol->label, r->symbol->label, offset);
2525 return NO_MATCH;
2526 } else if (sym_sect->match_map[offset] == NULL) {
2527 if (mode == RUN_PRE_INITIAL)
2528 ksdebug(change, "Relocation not to instruction "
2529 "boundary: %s+%lx -> %s+%lx",
2530 sect->symbol->label, r->blank_addr -
2531 sect->address, r->symbol->label, offset);
2532 return NO_MATCH;
2533 } else if ((unsigned long)sym_sect->match_map[offset] !=
2534 r->symbol->value + offset) {
2535 if (mode == RUN_PRE_INITIAL)
2536 ksdebug(change, "Match map shift %s+%lx: %lx != %lx\n",
2537 r->symbol->label, offset,
2538 r->symbol->value + offset,
2539 (unsigned long)sym_sect->match_map[offset]);
2540 val += r->symbol->value + offset -
2541 (unsigned long)sym_sect->match_map[offset];
2543 #endif /* KSPLICE_STANDALONE */
2545 if (mode == RUN_PRE_INITIAL)
2546 ksdebug(change, "run-pre: reloc at r_a=%lx p_a=%lx to %s+%lx: "
2547 "found %s = %lx\n", run_addr, r->blank_addr,
2548 r->symbol->label, offset, r->symbol->label, val);
2550 if (contains_canary(change, run_addr, r->howto) != 0) {
2551 ksdebug(change, "Aborted. Unexpected canary in run code at %lx"
2552 "\n", run_addr);
2553 return UNEXPECTED;
2556 if ((sect->flags & KSPLICE_SECTION_DATA) != 0 &&
2557 sect->symbol == r->symbol)
2558 return OK;
2559 ret = create_labelval(change, r->symbol, val, TEMP);
2560 if (ret == NO_MATCH && mode == RUN_PRE_INITIAL)
2561 ksdebug(change, "run-pre: reloc at r_a=%lx p_a=%lx: labelval "
2562 "%s = %lx does not match expected %lx\n", run_addr,
2563 r->blank_addr, r->symbol->label, r->symbol->value, val);
2565 if (ret != OK)
2566 return ret;
2567 if (sym_sect != NULL && (sym_sect->flags & KSPLICE_SECTION_MATCHED) == 0
2568 && (sym_sect->flags & KSPLICE_SECTION_STRING) != 0) {
2569 if (mode == RUN_PRE_INITIAL)
2570 ksdebug(change, "Recursively comparing string section "
2571 "%s\n", sym_sect->symbol->label);
2572 else if (mode == RUN_PRE_DEBUG)
2573 ksdebug(change, "[str start] ");
2574 ret = run_pre_cmp(change, sym_sect, val, NULL, mode);
2575 if (mode == RUN_PRE_DEBUG)
2576 ksdebug(change, "[str end] ");
2577 if (ret == OK && mode == RUN_PRE_INITIAL)
2578 ksdebug(change, "Successfully matched string section %s"
2579 "\n", sym_sect->symbol->label);
2580 else if (mode == RUN_PRE_INITIAL)
2581 ksdebug(change, "Failed to match string section %s\n",
2582 sym_sect->symbol->label);
2584 return ret;
2587 #ifdef CONFIG_GENERIC_BUG
2588 static abort_t handle_bug(struct ksplice_mod_change *change,
2589 const struct ksplice_reloc *r, unsigned long run_addr)
2591 const struct bug_entry *run_bug = find_bug(run_addr);
2592 struct ksplice_section *bug_sect = symbol_section(change, r->symbol);
2593 if (run_bug == NULL)
2594 return NO_MATCH;
2595 if (bug_sect == NULL)
2596 return UNEXPECTED;
2597 return create_labelval(change, bug_sect->symbol, (unsigned long)run_bug,
2598 TEMP);
2600 #endif /* CONFIG_GENERIC_BUG */
2602 static abort_t handle_extable(struct ksplice_mod_change *change,
2603 const struct ksplice_reloc *r,
2604 unsigned long run_addr)
2606 const struct exception_table_entry *run_ent =
2607 search_exception_tables(run_addr);
2608 struct ksplice_section *ex_sect = symbol_section(change, r->symbol);
2609 if (run_ent == NULL)
2610 return NO_MATCH;
2611 if (ex_sect == NULL)
2612 return UNEXPECTED;
2613 return create_labelval(change, ex_sect->symbol, (unsigned long)run_ent,
2614 TEMP);
2617 static int symbol_section_bsearch_compare(const void *a, const void *b)
2619 const struct ksplice_symbol *sym = a;
2620 const struct ksplice_section *sect = b;
2621 return strcmp(sym->label, sect->symbol->label);
2624 static int compare_section_labels(const void *va, const void *vb)
2626 const struct ksplice_section *a = va, *b = vb;
2627 return strcmp(a->symbol->label, b->symbol->label);
2630 static struct ksplice_section *symbol_section(struct ksplice_mod_change *change,
2631 const struct ksplice_symbol *sym)
2633 return bsearch(sym, change->old_code.sections,
2634 change->old_code.sections_end -
2635 change->old_code.sections,
2636 sizeof(struct ksplice_section),
2637 symbol_section_bsearch_compare);
2640 /* Find the relocation for the oldaddr of a ksplice_patch */
2641 static const struct ksplice_reloc *
2642 patch_reloc(struct ksplice_mod_change *change,
2643 const struct ksplice_patch *p)
2645 unsigned long addr = (unsigned long)&p->oldaddr;
2646 const struct ksplice_reloc *r =
2647 find_reloc(change->new_code.relocs, change->new_code.relocs_end,
2648 addr, sizeof(addr));
2649 if (r == NULL || r->blank_addr < addr ||
2650 r->blank_addr >= addr + sizeof(addr))
2651 return NULL;
2652 return r;
2656 * Populates vals with the possible values for ksym from the various
2657 * sources Ksplice uses to resolve symbols
2659 static abort_t lookup_symbol(struct ksplice_mod_change *change,
2660 const struct ksplice_symbol *ksym,
2661 struct list_head *vals)
2663 abort_t ret;
2665 #ifdef KSPLICE_STANDALONE
2666 if (!bootstrapped)
2667 return OK;
2668 #endif /* KSPLICE_STANDALONE */
2670 if (ksym->candidate_vals == NULL) {
2671 release_vals(vals);
2672 ksdebug(change, "using detected sym %s=%lx\n", ksym->label,
2673 ksym->value);
2674 return add_candidate_val(change, vals, ksym->value);
2677 #ifdef CONFIG_MODULE_UNLOAD
2678 if (strcmp(ksym->label, "cleanup_module") == 0 && change->target != NULL
2679 && change->target->exit != NULL) {
2680 ret = add_candidate_val(change, vals,
2681 (unsigned long)change->target->exit);
2682 if (ret != OK)
2683 return ret;
2685 #endif
2687 if (ksym->name != NULL) {
2688 struct candidate_val *val;
2689 list_for_each_entry(val, ksym->candidate_vals, list) {
2690 ret = add_candidate_val(change, vals, val->val);
2691 if (ret != OK)
2692 return ret;
2695 ret = new_export_lookup(change, ksym->name, vals);
2696 if (ret != OK)
2697 return ret;
2700 return OK;
2703 #ifdef KSPLICE_STANDALONE
2704 static abort_t
2705 add_system_map_candidates(struct ksplice_mod_change *change,
2706 const struct ksplice_system_map *start,
2707 const struct ksplice_system_map *end,
2708 const char *label, struct list_head *vals)
2710 abort_t ret;
2711 long off;
2712 int i;
2713 const struct ksplice_system_map *smap;
2715 /* Some Fedora kernel releases have System.map files whose symbol
2716 * addresses disagree with the running kernel by a constant address
2717 * offset because of the CONFIG_PHYSICAL_START and CONFIG_PHYSICAL_ALIGN
2718 * values used to compile these kernels. This constant address offset
2719 * is always a multiple of 0x100000.
2721 * If we observe an offset that is NOT a multiple of 0x100000, then the
2722 * user provided us with an incorrect System.map file, and we should
2723 * abort.
2724 * If we observe an offset that is a multiple of 0x100000, then we can
2725 * adjust the System.map address values accordingly and proceed.
2727 off = (unsigned long)printk - change->map_printk;
2728 if (off & 0xfffff) {
2729 ksdebug(change,
2730 "Aborted. System.map does not match kernel.\n");
2731 return BAD_SYSTEM_MAP;
2734 smap = bsearch(label, start, end - start, sizeof(*smap),
2735 system_map_bsearch_compare);
2736 if (smap == NULL)
2737 return OK;
2739 for (i = 0; i < smap->nr_candidates; i++) {
2740 ret = add_candidate_val(change, vals,
2741 smap->candidates[i] + off);
2742 if (ret != OK)
2743 return ret;
2745 return OK;
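/*
 * Editor's worked example (not in the original source; addresses are
 * hypothetical): if System.map lists printk at 0xc0123450 but the
 * running kernel's printk is at 0xc0223450, then off = 0x100000, a
 * multiple of 0x100000, so every System.map candidate is shifted up
 * by 0x100000 before being added to vals.  An offset such as 0x1230
 * would instead fail the (off & 0xfffff) test and abort with
 * BAD_SYSTEM_MAP.
 */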
2748 static int system_map_bsearch_compare(const void *key, const void *elt)
2750 const struct ksplice_system_map *map = elt;
2751 const char *label = key;
2752 return strcmp(label, map->label);
2754 #endif /* KSPLICE_STANDALONE */
2757 * An update could cause one module to export a symbol and at the same
2758 * time change another module to use that symbol. This violates the
2759 * normal assumption that the changes can be handled independently.
2761 * new_export_lookup obtains symbol values from the changes to the
2762 * exported symbol table made by other changes.
2764 static abort_t new_export_lookup(struct ksplice_mod_change *ichange,
2765 const char *name, struct list_head *vals)
2767 struct ksplice_mod_change *change;
2768 struct ksplice_patch *p;
2769 list_for_each_entry(change, &ichange->update->changes, list) {
2770 for (p = change->patches; p < change->patches_end; p++) {
2771 const struct kernel_symbol *sym;
2772 const struct ksplice_reloc *r;
2773 if (p->type != KSPLICE_PATCH_EXPORT ||
2774 strcmp(name, *(const char **)p->contents) != 0)
2775 continue;
2777 /* Check that the p->oldaddr reloc has been resolved. */
2778 r = patch_reloc(change, p);
2779 if (r == NULL ||
2780 contains_canary(change, r->blank_addr,
2781 r->howto) != 0)
2782 continue;
2783 sym = (const struct kernel_symbol *)r->symbol->value;
2786 * Check that the sym->value reloc has been resolved,
2787 * if there is a Ksplice relocation there.
2789 r = find_reloc(change->new_code.relocs,
2790 change->new_code.relocs_end,
2791 (unsigned long)&sym->value,
2792 sizeof(&sym->value));
2793 if (r != NULL &&
2794 r->blank_addr == (unsigned long)&sym->value &&
2795 contains_canary(change, r->blank_addr,
2796 r->howto) != 0)
2797 continue;
2798 return add_candidate_val(ichange, vals, sym->value);
2801 return OK;
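/*
 * Editor's worked example (symbol and module names hypothetical): if
 * one change in the update exports foo() via a KSPLICE_PATCH_EXPORT
 * patch and another change's module references foo, the loop above
 * finds the export patch whose contents name "foo", follows its
 * resolved oldaddr reloc to the new kernel_symbol, and offers
 * sym->value as a candidate, provided any Ksplice reloc on sym->value
 * itself has already been resolved.
 */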
2805 * When patch_action is called, the update should be fully prepared.
2806 * patch_action will try to actually insert or remove trampolines for
2807 * the update.
2809 static abort_t patch_action(struct update *update, enum ksplice_action action)
2811 static int (*const __patch_actions[KS_ACTIONS])(void *) = {
2812 [KS_APPLY] = __apply_patches,
2813 [KS_REVERSE] = __reverse_patches,
2815 int i;
2816 abort_t ret;
2817 struct ksplice_mod_change *change;
2819 ret = map_trampoline_pages(update);
2820 if (ret != OK)
2821 return ret;
2823 list_for_each_entry(change, &update->changes, list) {
2824 const typeof(int (*)(void)) *f;
2825 for (f = change->hooks[action].pre;
2826 f < change->hooks[action].pre_end; f++) {
2827 if ((*f)() != 0) {
2828 ret = CALL_FAILED;
2829 goto out;
2834 for (i = 0; i < 5; i++) {
2835 cleanup_conflicts(update);
2836 #ifdef KSPLICE_STANDALONE
2837 bust_spinlocks(1);
2838 #endif /* KSPLICE_STANDALONE */
2839 ret = (__force abort_t)stop_machine(__patch_actions[action],
2840 update, NULL);
2841 #ifdef KSPLICE_STANDALONE
2842 bust_spinlocks(0);
2843 #endif /* KSPLICE_STANDALONE */
2844 if (ret != CODE_BUSY)
2845 break;
2846 set_current_state(TASK_INTERRUPTIBLE);
2847 schedule_timeout(msecs_to_jiffies(1000));
2849 out:
2850 unmap_trampoline_pages(update);
2852 if (ret == CODE_BUSY) {
2853 print_conflicts(update);
2854 _ksdebug(update, "Aborted %s. stack check: to-be-%s "
2855 "code is busy.\n", update->kid,
2856 action == KS_APPLY ? "replaced" : "reversed");
2857 } else if (ret == ALREADY_REVERSED) {
2858 _ksdebug(update, "Aborted %s. Ksplice update %s is already "
2859 "reversed.\n", update->kid, update->kid);
2860 } else if (ret == MODULE_BUSY) {
2861 _ksdebug(update, "Update %s is in use by another module\n",
2862 update->kid);
2865 if (ret != OK) {
2866 list_for_each_entry(change, &update->changes, list) {
2867 const typeof(void (*)(void)) *f;
2868 for (f = change->hooks[action].fail;
2869 f < change->hooks[action].fail_end; f++)
2870 (*f)();
2873 return ret;
2876 list_for_each_entry(change, &update->changes, list) {
2877 const typeof(void (*)(void)) *f;
2878 for (f = change->hooks[action].post;
2879 f < change->hooks[action].post_end; f++)
2880 (*f)();
2883 _ksdebug(update, "Atomic patch %s for %s complete\n",
2884 action == KS_APPLY ? "insertion" : "removal", update->kid);
2885 return OK;
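/*
 * Editor's note (timeline summary, added for clarity): for either
 * action the hooks fire in the order pre (before stop_machine) ->
 * check (inside stop_machine, may still veto) -> intra (inside
 * stop_machine, after the commit point) -> post (after a successful
 * return); on any failure the fail hooks run instead of post.
 */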
2888 /* Atomically insert the update; run from within stop_machine */
2889 static int __apply_patches(void *updateptr)
2891 struct update *update = updateptr;
2892 struct ksplice_mod_change *change;
2893 struct ksplice_module_list_entry *entry;
2894 struct ksplice_patch *p;
2895 abort_t ret;
2897 if (update->stage == STAGE_APPLIED)
2898 return (__force int)OK;
2900 if (update->stage != STAGE_PREPARING)
2901 return (__force int)UNEXPECTED;
2903 ret = check_each_task(update);
2904 if (ret != OK)
2905 return (__force int)ret;
2907 list_for_each_entry(change, &update->changes, list) {
2908 if (try_module_get(change->new_code_mod) != 1) {
2909 struct ksplice_mod_change *change1;
2910 list_for_each_entry(change1, &update->changes, list) {
2911 if (change1 == change)
2912 break;
2913 module_put(change1->new_code_mod);
2915 module_put(THIS_MODULE);
2916 return (__force int)UNEXPECTED;
2920 list_for_each_entry(change, &update->changes, list) {
2921 const typeof(int (*)(void)) *f;
2922 for (f = change->hooks[KS_APPLY].check;
2923 f < change->hooks[KS_APPLY].check_end; f++) {
2924 if ((*f)() != 0)
2925 return (__force int)CALL_FAILED;
2929 /* Commit point: the update application will succeed. */
2931 update->stage = STAGE_APPLIED;
2932 #ifdef TAINT_KSPLICE
2933 add_taint(TAINT_KSPLICE);
2934 #endif
2936 list_for_each_entry(entry, &update->ksplice_module_list, update_list)
2937 list_add(&entry->list, &ksplice_modules);
2939 list_for_each_entry(change, &update->changes, list) {
2940 for (p = change->patches; p < change->patches_end; p++)
2941 insert_trampoline(p);
2944 list_for_each_entry(change, &update->changes, list) {
2945 const typeof(void (*)(void)) *f;
2946 for (f = change->hooks[KS_APPLY].intra;
2947 f < change->hooks[KS_APPLY].intra_end; f++)
2948 (*f)();
2951 return (__force int)OK;
2954 /* Atomically remove the update; run from within stop_machine */
2955 static int __reverse_patches(void *updateptr)
2957 struct update *update = updateptr;
2958 struct ksplice_mod_change *change;
2959 struct ksplice_module_list_entry *entry;
2960 const struct ksplice_patch *p;
2961 abort_t ret;
2963 if (update->stage != STAGE_APPLIED)
2964 return (__force int)OK;
2966 #ifdef CONFIG_MODULE_UNLOAD
2967 list_for_each_entry(change, &update->changes, list) {
2968 if (module_refcount(change->new_code_mod) != 1)
2969 return (__force int)MODULE_BUSY;
2971 #endif /* CONFIG_MODULE_UNLOAD */
2973 list_for_each_entry(entry, &update->ksplice_module_list, update_list) {
2974 if (!entry->applied &&
2975 find_module(entry->target_mod_name) != NULL)
2976 return (__force int)COLD_UPDATE_LOADED;
2979 ret = check_each_task(update);
2980 if (ret != OK)
2981 return (__force int)ret;
2983 list_for_each_entry(change, &update->changes, list) {
2984 for (p = change->patches; p < change->patches_end; p++) {
2985 ret = verify_trampoline(change, p);
2986 if (ret != OK)
2987 return (__force int)ret;
2991 list_for_each_entry(change, &update->changes, list) {
2992 const typeof(int (*)(void)) *f;
2993 for (f = change->hooks[KS_REVERSE].check;
2994 f < change->hooks[KS_REVERSE].check_end; f++) {
2995 if ((*f)() != 0)
2996 return (__force int)CALL_FAILED;
3000 /* Commit point: the update reversal will succeed. */
3002 update->stage = STAGE_REVERSED;
3004 list_for_each_entry(change, &update->changes, list)
3005 module_put(change->new_code_mod);
3007 list_for_each_entry(entry, &update->ksplice_module_list, update_list)
3008 list_del(&entry->list);
3010 list_for_each_entry(change, &update->changes, list) {
3011 const typeof(void (*)(void)) *f;
3012 for (f = change->hooks[KS_REVERSE].intra;
3013 f < change->hooks[KS_REVERSE].intra_end; f++)
3014 (*f)();
3017 list_for_each_entry(change, &update->changes, list) {
3018 for (p = change->patches; p < change->patches_end; p++)
3019 remove_trampoline(p);
3022 return (__force int)OK;
3026 * Check whether any thread's instruction pointer or any address on
3027 * its stack is contained in one of the safety_records associated with
3028 * the update.
3030 * check_each_task must be called from inside stop_machine, because it
3031 * does not take tasklist_lock (which cannot be held by anyone else
3032 * during stop_machine).
3034 static abort_t check_each_task(struct update *update)
3036 const struct task_struct *g, *p;
3037 abort_t status = OK, ret;
3038 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
3039 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
3040 read_lock(&tasklist_lock);
3041 #endif /* LINUX_VERSION_CODE */
3042 do_each_thread(g, p) {
3043 /* do_each_thread is a double loop! */
3044 ret = check_task(update, p, false);
3045 if (ret != OK) {
3046 check_task(update, p, true);
3047 status = ret;
3049 if (ret != OK && ret != CODE_BUSY)
3050 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
3051 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
3052 goto out;
3053 #else /* LINUX_VERSION_CODE < */
3054 return ret;
3055 #endif /* LINUX_VERSION_CODE */
3056 } while_each_thread(g, p);
3057 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
3058 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
3059 out:
3060 read_unlock(&tasklist_lock);
3061 #endif /* LINUX_VERSION_CODE */
3062 return status;
3065 static abort_t check_task(struct update *update,
3066 const struct task_struct *t, bool rerun)
3068 abort_t status, ret;
3069 struct conflict *conf = NULL;
3071 if (rerun) {
3072 conf = kmalloc(sizeof(*conf), GFP_ATOMIC);
3073 if (conf == NULL)
3074 return OUT_OF_MEMORY;
3075 conf->process_name = kstrdup(t->comm, GFP_ATOMIC);
3076 if (conf->process_name == NULL) {
3077 kfree(conf);
3078 return OUT_OF_MEMORY;
3080 conf->pid = t->pid;
3081 INIT_LIST_HEAD(&conf->stack);
3082 list_add(&conf->list, &update->conflicts);
3085 status = check_address(update, conf, KSPLICE_IP(t));
3086 if (t == current) {
3087 ret = check_stack(update, conf, task_thread_info(t),
3088 (unsigned long *)__builtin_frame_address(0));
3089 if (status == OK)
3090 status = ret;
3091 } else if (!task_curr(t)) {
3092 ret = check_stack(update, conf, task_thread_info(t),
3093 (unsigned long *)KSPLICE_SP(t));
3094 if (status == OK)
3095 status = ret;
3096 } else if (!is_stop_machine(t)) {
3097 status = UNEXPECTED_RUNNING_TASK;
3099 return status;
3102 static abort_t check_stack(struct update *update, struct conflict *conf,
3103 const struct thread_info *tinfo,
3104 const unsigned long *stack)
3106 abort_t status = OK, ret;
3107 unsigned long addr;
3109 while (valid_stack_ptr(tinfo, stack)) {
3110 addr = *stack++;
3111 ret = check_address(update, conf, addr);
3112 if (ret != OK)
3113 status = ret;
3115 return status;
3118 static abort_t check_address(struct update *update,
3119 struct conflict *conf, unsigned long addr)
3121 abort_t status = OK, ret;
3122 const struct safety_record *rec;
3123 struct ksplice_mod_change *change;
3124 struct conflict_addr *ca = NULL;
3126 if (conf != NULL) {
3127 ca = kmalloc(sizeof(*ca), GFP_ATOMIC);
3128 if (ca == NULL)
3129 return OUT_OF_MEMORY;
3130 ca->addr = addr;
3131 ca->has_conflict = false;
3132 ca->label = NULL;
3133 list_add(&ca->list, &conf->stack);
3136 list_for_each_entry(change, &update->changes, list) {
3137 unsigned long tramp_addr = follow_trampolines(change, addr);
3138 list_for_each_entry(rec, &change->safety_records, list) {
3139 ret = check_record(ca, rec, tramp_addr);
3140 if (ret != OK)
3141 status = ret;
3144 return status;
3147 static abort_t check_record(struct conflict_addr *ca,
3148 const struct safety_record *rec, unsigned long addr)
3150 if (addr >= rec->addr && addr < rec->addr + rec->size) {
3151 if (ca != NULL) {
3152 ca->label = rec->label;
3153 ca->has_conflict = true;
3155 return CODE_BUSY;
3157 return OK;
3160 /* Is the task one of the stop_machine tasks? */
3161 static bool is_stop_machine(const struct task_struct *t)
3163 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
3164 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
3165 const char *kstop_prefix = "kstop/";
3166 #else /* LINUX_VERSION_CODE < */
3167 /* c9583e55fa2b08a230c549bd1e3c0bde6c50d9cc was after 2.6.27 */
3168 const char *kstop_prefix = "kstop";
3169 #endif /* LINUX_VERSION_CODE */
3170 const char *num;
3171 if (!strstarts(t->comm, kstop_prefix))
3172 return false;
3173 num = t->comm + strlen(kstop_prefix);
3174 return num[strspn(num, "0123456789")] == '\0';
3175 #else /* LINUX_VERSION_CODE < */
3176 /* ffdb5976c47609c862917d4c186ecbb5706d2dda was after 2.6.26 */
3177 return strcmp(t->comm, "kstopmachine") == 0;
3178 #endif /* LINUX_VERSION_CODE */
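/*
 * Editor's note (examples added for illustration): this matches
 * "kstop/3" on >= 2.6.28, "kstop3" on 2.6.27, and the single
 * "kstopmachine" thread on earlier kernels.
 */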
3181 static void cleanup_conflicts(struct update *update)
3183 struct conflict *conf;
3184 list_for_each_entry(conf, &update->conflicts, list) {
3185 clear_list(&conf->stack, struct conflict_addr, list);
3186 kfree(conf->process_name);
3188 clear_list(&update->conflicts, struct conflict, list);
3191 static void print_conflicts(struct update *update)
3193 const struct conflict *conf;
3194 const struct conflict_addr *ca;
3195 list_for_each_entry(conf, &update->conflicts, list) {
3196 _ksdebug(update, "stack check: pid %d (%s):", conf->pid,
3197 conf->process_name);
3198 list_for_each_entry(ca, &conf->stack, list) {
3199 _ksdebug(update, " %lx", ca->addr);
3200 if (ca->has_conflict)
3201 _ksdebug(update, " [<-CONFLICT]");
3203 _ksdebug(update, "\n");
3207 static void insert_trampoline(struct ksplice_patch *p)
3209 mm_segment_t old_fs = get_fs();
3210 set_fs(KERNEL_DS);
3211 memcpy(p->saved, p->vaddr, p->size);
3212 memcpy(p->vaddr, p->contents, p->size);
3213 flush_icache_range(p->oldaddr, p->oldaddr + p->size);
3214 set_fs(old_fs);
3217 static abort_t verify_trampoline(struct ksplice_mod_change *change,
3218 const struct ksplice_patch *p)
3220 if (memcmp(p->vaddr, p->contents, p->size) != 0) {
3221 ksdebug(change, "Aborted. Trampoline at %lx has been "
3222 "overwritten.\n", p->oldaddr);
3223 return CODE_BUSY;
3225 return OK;
3228 static void remove_trampoline(const struct ksplice_patch *p)
3230 mm_segment_t old_fs = get_fs();
3231 set_fs(KERNEL_DS);
3232 memcpy(p->vaddr, p->saved, p->size);
3233 flush_icache_range(p->oldaddr, p->oldaddr + p->size);
3234 set_fs(old_fs);
3237 /* Returns NO_MATCH if there's already a labelval with a different value */
3238 static abort_t create_labelval(struct ksplice_mod_change *change,
3239 struct ksplice_symbol *ksym,
3240 unsigned long val, int status)
3242 val = follow_trampolines(change, val);
3243 if (ksym->candidate_vals == NULL)
3244 return ksym->value == val ? OK : NO_MATCH;
3246 ksym->value = val;
3247 if (status == TEMP) {
3248 struct labelval *lv = kmalloc(sizeof(*lv), GFP_KERNEL);
3249 if (lv == NULL)
3250 return OUT_OF_MEMORY;
3251 lv->symbol = ksym;
3252 lv->saved_vals = ksym->candidate_vals;
3253 list_add(&lv->list, &change->temp_labelvals);
3255 ksym->candidate_vals = NULL;
3256 return OK;
3260 * Creates a new safety_record for an old_code section based on its
3261 * ksplice_section and run-pre matching information.
3263 static abort_t create_safety_record(struct ksplice_mod_change *change,
3264 const struct ksplice_section *sect,
3265 struct list_head *record_list,
3266 unsigned long run_addr,
3267 unsigned long run_size)
3269 struct safety_record *rec;
3270 struct ksplice_patch *p;
3272 if (record_list == NULL)
3273 return OK;
3275 for (p = change->patches; p < change->patches_end; p++) {
3276 const struct ksplice_reloc *r = patch_reloc(change, p);
3277 if (r != NULL && strcmp(sect->symbol->label, r->symbol->label) == 0)
3278 break;
3280 if (p >= change->patches_end)
3281 return OK;
3283 rec = kmalloc(sizeof(*rec), GFP_KERNEL);
3284 if (rec == NULL)
3285 return OUT_OF_MEMORY;
3287 * The old_code might be unloaded when checking reversing
3288 * patches, so we need to kstrdup the label here.
3290 rec->label = kstrdup(sect->symbol->label, GFP_KERNEL);
3291 if (rec->label == NULL) {
3292 kfree(rec);
3293 return OUT_OF_MEMORY;
3295 rec->addr = run_addr;
3296 rec->size = run_size;
3298 list_add(&rec->list, record_list);
3299 return OK;
3302 static abort_t add_candidate_val(struct ksplice_mod_change *change,
3303 struct list_head *vals, unsigned long val)
3305 struct candidate_val *tmp, *new;
3308 * Careful: follow trampolines before comparing values so that we do
3309 * not mistake the obsolete function for another copy of the function.
3311 val = follow_trampolines(change, val);
3313 list_for_each_entry(tmp, vals, list) {
3314 if (tmp->val == val)
3315 return OK;
3317 new = kmalloc(sizeof(*new), GFP_KERNEL);
3318 if (new == NULL)
3319 return OUT_OF_MEMORY;
3320 new->val = val;
3321 list_add(&new->list, vals);
3322 return OK;
3325 static void release_vals(struct list_head *vals)
3327 clear_list(vals, struct candidate_val, list);
3331 * The temp_labelvals list is used to cache those temporary labelvals
3332 * that have been created to cross-check the symbol values obtained
3333 * from different relocations within a single section being matched.
3335 * If status is VAL, commit the temp_labelvals as final values.
3337 * If status is NOVAL, restore the list of possible values to the
3338 * ksplice_symbol, so that it no longer has a known value.
3340 static void set_temp_labelvals(struct ksplice_mod_change *change, int status)
3342 struct labelval *lv, *n;
3343 list_for_each_entry_safe(lv, n, &change->temp_labelvals, list) {
3344 if (status == NOVAL) {
3345 lv->symbol->candidate_vals = lv->saved_vals;
3346 } else {
3347 release_vals(lv->saved_vals);
3348 kfree(lv->saved_vals);
3350 list_del(&lv->list);
3351 kfree(lv);
3355 /* Is there a Ksplice canary with given howto at blank_addr? */
3356 static int contains_canary(struct ksplice_mod_change *change,
3357 unsigned long blank_addr,
3358 const struct ksplice_reloc_howto *howto)
3360 switch (howto->size) {
3361 case 1:
3362 return (*(uint8_t *)blank_addr & howto->dst_mask) ==
3363 (KSPLICE_CANARY & howto->dst_mask);
3364 case 2:
3365 return (*(uint16_t *)blank_addr & howto->dst_mask) ==
3366 (KSPLICE_CANARY & howto->dst_mask);
3367 case 4:
3368 return (*(uint32_t *)blank_addr & howto->dst_mask) ==
3369 (KSPLICE_CANARY & howto->dst_mask);
3370 #if BITS_PER_LONG >= 64
3371 case 8:
3372 return (*(uint64_t *)blank_addr & howto->dst_mask) ==
3373 (KSPLICE_CANARY & howto->dst_mask);
3374 #endif /* BITS_PER_LONG */
3375 default:
3376 ksdebug(change, "Aborted. Invalid relocation size.\n");
3377 return -1;
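/*
 * Editor's note (added for illustration): for a 4-byte howto with
 * dst_mask 0xffffffff, the blank word in the pre code still holds the
 * KSPLICE_CANARY placeholder until the relocation is resolved, so
 * contains_canary returns 1 before write_reloc_value fills in the
 * real value, 0 afterwards, and -1 only for an unsupported size.
 */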
3382 * Compute the address of the code you would actually run if you were
3383 * to call the function at addr (i.e., follow the sequence of jumps
3384 * starting at addr)
3386 static unsigned long follow_trampolines(struct ksplice_mod_change *change,
3387 unsigned long addr)
3389 unsigned long new_addr;
3390 struct module *m;
3392 while (1) {
3393 #ifdef KSPLICE_STANDALONE
3394 if (!bootstrapped)
3395 return addr;
3396 #endif /* KSPLICE_STANDALONE */
3397 if (!__kernel_text_address(addr) ||
3398 trampoline_target(change, addr, &new_addr) != OK)
3399 return addr;
3400 m = __module_text_address(new_addr);
3401 if (m == NULL || m == change->target ||
3402 !strstarts(m->name, "ksplice"))
3403 return addr;
3404 addr = new_addr;
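/*
 * Editor's note (hypothetical example, not in the original source):
 * if addr is an obsolete function whose trampoline jumps to new_fn
 * inside a module named "ksplice_xyz_new", the loop returns the
 * address of new_fn; the walk stops as soon as a jump target lies
 * outside a Ksplice module, so ordinary kernel jumps are never
 * followed.
 */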
3408 /* Does module a patch module b? */
3409 static bool patches_module(const struct module *a, const struct module *b)
3411 #ifdef KSPLICE_NO_KERNEL_SUPPORT
3412 const char *name;
3413 const char *modname = b == NULL ? "vmlinux" : b->name;
3414 if (a == b)
3415 return true;
3416 if (a == NULL || !strstarts(a->name, "ksplice_"))
3417 return false;
3418 name = a->name + strlen("ksplice_");
3419 name += strcspn(name, "_");
3420 if (name[0] != '_')
3421 return false;
3422 name++;
3423 return strstarts(name, modname) &&
3424 strcmp(name + strlen(modname), "_new") == 0;
3425 #else /* !KSPLICE_NO_KERNEL_SUPPORT */
3426 struct ksplice_module_list_entry *entry;
3427 if (a == b)
3428 return true;
3429 list_for_each_entry(entry, &ksplice_modules, list) {
3430 if (strcmp(entry->target_mod_name, b == NULL ? "vmlinux" : b->name) == 0 &&
3431 strcmp(entry->new_code_mod_name, a->name) == 0)
3432 return true;
3434 return false;
3435 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
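/*
 * Editor's worked example (module names hypothetical): without kernel
 * support, a new-code module named "ksplice_8g3q_isdn_new" parses as
 * kid "8g3q" and target "isdn", so patches_module returns true when b
 * is the isdn module; a vmlinux target is encoded as
 * "ksplice_<kid>_vmlinux_new" and matched against b == NULL.
 */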
3438 #ifdef KSPLICE_NO_KERNEL_SUPPORT
3439 static bool strstarts(const char *str, const char *prefix)
3441 return strncmp(str, prefix, strlen(prefix)) == 0;
3443 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
3445 static bool singular(struct list_head *list)
3447 return !list_empty(list) && list->next->next == list;
3450 static void *bsearch(const void *key, const void *base, size_t n,
3451 size_t size, int (*cmp)(const void *key, const void *elt))
3453 int start = 0, end = n - 1, mid, result;
3454 if (n == 0)
3455 return NULL;
3456 while (start <= end) {
3457 mid = (start + end) / 2;
3458 result = cmp(key, base + mid * size);
3459 if (result < 0)
3460 end = mid - 1;
3461 else if (result > 0)
3462 start = mid + 1;
3463 else
3464 return (void *)base + mid * size;
3466 return NULL;
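/*
 * Editor's note: a minimal (hypothetical, uncompiled) use of the
 * bsearch fallback above.
 */
#if 0
static int cmp_long(const void *key, const void *elt)
{
	const long *k = key, *e = elt;
	return *k < *e ? -1 : (*k > *e ? 1 : 0);
}

static void example_bsearch(void)
{
	static const long sorted[] = { 2, 3, 5, 8, 13 };
	long key = 8;
	const long *found = bsearch(&key, sorted, 5, sizeof(long), cmp_long);
	/* found now points at sorted[3]; NULL would mean "absent". */
}
#endif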
3469 static int compare_relocs(const void *a, const void *b)
3471 const struct ksplice_reloc *ra = a, *rb = b;
3472 if (ra->blank_addr > rb->blank_addr)
3473 return 1;
3474 else if (ra->blank_addr < rb->blank_addr)
3475 return -1;
3476 else
3477 return ra->howto->size - rb->howto->size;
3480 #ifdef KSPLICE_STANDALONE
3481 static int compare_system_map(const void *a, const void *b)
3483 const struct ksplice_system_map *sa = a, *sb = b;
3484 return strcmp(sa->label, sb->label);
3486 #endif /* KSPLICE_STANDALONE */
3488 #ifdef CONFIG_DEBUG_FS
3489 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
3490 /* Old kernels don't have debugfs_create_blob */
3491 static ssize_t read_file_blob(struct file *file, char __user *user_buf,
3492 size_t count, loff_t *ppos)
3494 struct debugfs_blob_wrapper *blob = file->private_data;
3495 return simple_read_from_buffer(user_buf, count, ppos, blob->data,
3496 blob->size);
3499 static int blob_open(struct inode *inode, struct file *file)
3501 if (inode->i_private)
3502 file->private_data = inode->i_private;
3503 return 0;
3506 static struct file_operations fops_blob = {
3507 .read = read_file_blob,
3508 .open = blob_open,
3511 static struct dentry *debugfs_create_blob(const char *name, mode_t mode,
3512 struct dentry *parent,
3513 struct debugfs_blob_wrapper *blob)
3515 return debugfs_create_file(name, mode, parent, blob, &fops_blob);
3517 #endif /* LINUX_VERSION_CODE */
3519 static abort_t init_debug_buf(struct update *update)
3521 update->debug_blob.size = 0;
3522 update->debug_blob.data = NULL;
3523 update->debugfs_dentry =
3524 debugfs_create_blob(update->name, S_IFREG | S_IRUSR, NULL,
3525 &update->debug_blob);
3526 if (update->debugfs_dentry == NULL)
3527 return OUT_OF_MEMORY;
3528 return OK;
3531 static void clear_debug_buf(struct update *update)
3533 if (update->debugfs_dentry == NULL)
3534 return;
3535 debugfs_remove(update->debugfs_dentry);
3536 update->debugfs_dentry = NULL;
3537 update->debug_blob.size = 0;
3538 vfree(update->debug_blob.data);
3539 update->debug_blob.data = NULL;
3542 static int _ksdebug(struct update *update, const char *fmt, ...)
3544 va_list args;
3545 unsigned long size, old_size, new_size;
3547 if (update->debug == 0)
3548 return 0;
3550 /* size includes the trailing '\0' */
3551 va_start(args, fmt);
3552 size = 1 + vsnprintf(update->debug_blob.data, 0, fmt, args);
3553 va_end(args);
3554 old_size = update->debug_blob.size == 0 ? 0 :
3555 max(PAGE_SIZE, roundup_pow_of_two(update->debug_blob.size));
3556 new_size = update->debug_blob.size + size == 0 ? 0 :
3557 max(PAGE_SIZE, roundup_pow_of_two(update->debug_blob.size + size));
3558 if (new_size > old_size) {
3559 char *buf = vmalloc(new_size);
3560 if (buf == NULL)
3561 return -ENOMEM;
3562 memcpy(buf, update->debug_blob.data, update->debug_blob.size);
3563 vfree(update->debug_blob.data);
3564 update->debug_blob.data = buf;
3566 va_start(args, fmt);
3567 update->debug_blob.size += vsnprintf(update->debug_blob.data +
3568 update->debug_blob.size,
3569 size, fmt, args);
3570 va_end(args);
3571 return 0;
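/*
 * Editor's worked example (not in the original source): with
 * debug_blob.size == 3000 and a 200-byte message, old_size and
 * new_size both round up to PAGE_SIZE (4096 here, by assumption), so
 * no reallocation occurs; once the blob outgrows 4096 bytes the
 * buffer doubles to 8192, giving amortized O(1) appends.
 */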
3573 #else /* CONFIG_DEBUG_FS */
3574 static abort_t init_debug_buf(struct update *update)
3576 return OK;
3579 static void clear_debug_buf(struct update *update)
3581 return;
3584 static int _ksdebug(struct update *update, const char *fmt, ...)
3586 va_list args;
3588 if (update->debug == 0)
3589 return 0;
3591 if (!update->debug_continue_line)
3592 printk(KERN_DEBUG "ksplice: ");
3594 va_start(args, fmt);
3595 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
3596 vprintk(fmt, args);
3597 #else /* LINUX_VERSION_CODE < */
3598 /* 683b229286b429244f35726b3c18caec429233bd was after 2.6.8 */
3600 char *buf = kvasprintf(GFP_KERNEL, fmt, args);
3601 printk("%s", buf);
3602 kfree(buf);
3604 #endif /* LINUX_VERSION_CODE */
3605 va_end(args);
3607 update->debug_continue_line =
3608 fmt[0] == '\0' || fmt[strlen(fmt) - 1] != '\n';
3609 return 0;
3611 #endif /* CONFIG_DEBUG_FS */
3613 #ifdef KSPLICE_NO_KERNEL_SUPPORT
3614 #ifdef CONFIG_KALLSYMS
3615 static int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
3616 struct module *, unsigned long),
3617 void *data)
3619 char namebuf[KSYM_NAME_LEN];
3620 unsigned long i;
3621 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3622 unsigned int off;
3623 #endif /* LINUX_VERSION_CODE */
3624 int ret;
3626 /* kallsyms compression was added by 5648d78927ca65e74aadc88a2b1d6431e55e78ec
3627 * 2.6.10 was the first release after this commit
3629 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3630 for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
3631 off = kallsyms_expand_symbol(off, namebuf);
3632 ret = fn(data, namebuf, NULL, kallsyms_addresses[i]);
3633 if (ret != 0)
3634 return ret;
3636 #else /* LINUX_VERSION_CODE < */
3637 char *knames;
3639 for (i = 0, knames = kallsyms_names; i < kallsyms_num_syms; i++) {
3640 unsigned prefix = *knames++;
3642 strlcpy(namebuf + prefix, knames, KSYM_NAME_LEN - prefix);
3644 ret = fn(data, namebuf, NULL, kallsyms_addresses[i]);
3645 if (ret != 0)
3646 return ret;
3648 knames += strlen(knames) + 1;
3650 #endif /* LINUX_VERSION_CODE */
3651 return module_kallsyms_on_each_symbol(fn, data);
3654 /* kallsyms compression was added by 5648d78927ca65e74aadc88a2b1d6431e55e78ec
3655 * 2.6.10 was the first release after this commit
3657 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3658 extern u8 kallsyms_token_table[];
3659 extern u16 kallsyms_token_index[];
3661 static unsigned int kallsyms_expand_symbol(unsigned int off, char *result)
3663 long len, skipped_first = 0;
3664 const u8 *tptr, *data;
3666 data = &kallsyms_names[off];
3667 len = *data;
3668 data++;
3670 off += len + 1;
3672 while (len) {
3673 tptr = &kallsyms_token_table[kallsyms_token_index[*data]];
3674 data++;
3675 len--;
3677 while (*tptr) {
3678 if (skipped_first) {
3679 *result = *tptr;
3680 result++;
3681 } else
3682 skipped_first = 1;
3683 tptr++;
3687 *result = '\0';
3689 return off;
3691 #endif /* LINUX_VERSION_CODE */
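/*
 * Worked example (hypothetical token contents; the real token table
 * is generated at kernel build time): each kallsyms_names entry is a
 * length byte followed by that many token indices, and the first
 * expanded character is the symbol's type marker, which is why
 * skipped_first drops it.  Given an entry
 *
 *   { 0x02, <index of token "tpr">, <index of token "intk"> }
 *
 * kallsyms_expand_symbol() writes "printk" into result (the leading
 * 't' type character is skipped) and returns off + len + 1, i.e. the
 * offset of the next compressed entry, here off + 3.
 */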
3693 static int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
3694 struct module *,
3695 unsigned long),
3696 void *data)
3698 struct module *mod;
3699 unsigned int i;
3700 int ret;
3702 list_for_each_entry(mod, &modules, list) {
3703 for (i = 0; i < mod->num_symtab; i++) {
3704 ret = fn(data, mod->strtab + mod->symtab[i].st_name,
3705 mod, mod->symtab[i].st_value);
3706 if (ret != 0)
3707 return ret;
3710 return 0;
3712 #endif /* CONFIG_KALLSYMS */
3714 static struct module *find_module(const char *name)
3716 struct module *mod;
3718 list_for_each_entry(mod, &modules, list) {
3719 if (strcmp(mod->name, name) == 0)
3720 return mod;
3722 return NULL;
3725 #ifdef CONFIG_MODULE_UNLOAD
3726 struct module_use {
3727 struct list_head list;
3728 struct module *module_which_uses;
3731 /* I'm not yet certain whether we need the strong form of this. */
3732 static inline int strong_try_module_get(struct module *mod)
3734 if (mod && mod->state != MODULE_STATE_LIVE)
3735 return -EBUSY;
3736 if (try_module_get(mod))
3737 return 0;
3738 return -ENOENT;
3741 /* Does a already use b? */
3742 static int already_uses(struct module *a, struct module *b)
3744 struct module_use *use;
3745 list_for_each_entry(use, &b->modules_which_use_me, list) {
3746 if (use->module_which_uses == a)
3747 return 1;
3749 return 0;
3752 /* Make it so module a uses b. Must be holding module_mutex */
3753 static int use_module(struct module *a, struct module *b)
3755 struct module_use *use;
3756 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
3757 /* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
3758 int no_warn;
3759 #endif /* LINUX_VERSION_CODE */
3760 if (b == NULL || already_uses(a, b))
3761 return 1;
3763 if (strong_try_module_get(b) < 0)
3764 return 0;
3766 use = kmalloc(sizeof(*use), GFP_ATOMIC);
3767 if (!use) {
3768 module_put(b);
3769 return 0;
3771 use->module_which_uses = a;
3772 list_add(&use->list, &b->modules_which_use_me);
3773 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
3774 /* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
3775 no_warn = sysfs_create_link(b->holders_dir, &a->mkobj.kobj, a->name);
3776 #endif /* LINUX_VERSION_CODE */
3777 return 1;
3779 #else /* CONFIG_MODULE_UNLOAD */
3780 static int use_module(struct module *a, struct module *b)
3782 return 1;
3784 #endif /* CONFIG_MODULE_UNLOAD */
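/*
 * Usage note (illustrative): unlike most kernel APIs, this copy of
 * use_module() returns 1 on success and 0 on failure.  A hypothetical
 * caller pinning a patch target would therefore be written as:
 *
 *   if (!use_module(change->new_code_mod, target))
 *           return MODULE_BUSY;     (hypothetical error choice)
 *
 * On success the target's reference count is raised and the
 * dependency is recorded, so "rmmod target" is refused while the new
 * code module still uses it.
 */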
3786 #ifndef CONFIG_MODVERSIONS
3787 #define symversion(base, idx) NULL
3788 #else
3789 #define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
3790 #endif
3792 static bool each_symbol_in_section(const struct symsearch *arr,
3793 unsigned int arrsize,
3794 struct module *owner,
3795 bool (*fn)(const struct symsearch *syms,
3796 struct module *owner,
3797 unsigned int symnum, void *data),
3798 void *data)
3800 unsigned int i, j;
3802 for (j = 0; j < arrsize; j++) {
3803 for (i = 0; i < arr[j].stop - arr[j].start; i++)
3804 if (fn(&arr[j], owner, i, data))
3805 return true;
3808 return false;
3811 /* Returns true as soon as fn returns true, otherwise false. */
3812 static bool each_symbol(bool (*fn)(const struct symsearch *arr,
3813 struct module *owner,
3814 unsigned int symnum, void *data),
3815 void *data)
3817 struct module *mod;
3818 const struct symsearch arr[] = {
3819 { __start___ksymtab, __stop___ksymtab, __start___kcrctab,
3820 NOT_GPL_ONLY, false },
3821 { __start___ksymtab_gpl, __stop___ksymtab_gpl,
3822 __start___kcrctab_gpl,
3823 GPL_ONLY, false },
3824 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
3825 { __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
3826 __start___kcrctab_gpl_future,
3827 WILL_BE_GPL_ONLY, false },
3828 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
3829 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
3830 { __start___ksymtab_unused, __stop___ksymtab_unused,
3831 __start___kcrctab_unused,
3832 NOT_GPL_ONLY, true },
3833 { __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
3834 __start___kcrctab_unused_gpl,
3835 GPL_ONLY, true },
3836 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
3839 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
3840 return true;
3842 list_for_each_entry(mod, &modules, list) {
3843 struct symsearch module_arr[] = {
3844 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
3845 NOT_GPL_ONLY, false },
3846 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
3847 mod->gpl_crcs,
3848 GPL_ONLY, false },
3849 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
3850 { mod->gpl_future_syms,
3851 mod->gpl_future_syms + mod->num_gpl_future_syms,
3852 mod->gpl_future_crcs,
3853 WILL_BE_GPL_ONLY, false },
3854 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
3855 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
3856 { mod->unused_syms,
3857 mod->unused_syms + mod->num_unused_syms,
3858 mod->unused_crcs,
3859 NOT_GPL_ONLY, true },
3860 { mod->unused_gpl_syms,
3861 mod->unused_gpl_syms + mod->num_unused_gpl_syms,
3862 mod->unused_gpl_crcs,
3863 GPL_ONLY, true },
3864 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
3867 if (each_symbol_in_section(module_arr, ARRAY_SIZE(module_arr),
3868 mod, fn, data))
3869 return true;
3871 return false;
3874 struct find_symbol_arg {
3875 /* Input */
3876 const char *name;
3877 bool gplok;
3878 bool warn;
3880 /* Output */
3881 struct module *owner;
3882 const unsigned long *crc;
3883 const struct kernel_symbol *sym;
3886 static bool find_symbol_in_section(const struct symsearch *syms,
3887 struct module *owner,
3888 unsigned int symnum, void *data)
3890 struct find_symbol_arg *fsa = data;
3892 if (strcmp(syms->start[symnum].name, fsa->name) != 0)
3893 return false;
3895 if (!fsa->gplok) {
3896 if (syms->licence == GPL_ONLY)
3897 return false;
3898 if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
3899 printk(KERN_WARNING "Symbol %s is being used "
3900 "by a non-GPL module, which will not "
3901 "be allowed in the future\n", fsa->name);
3902 printk(KERN_WARNING "Please see the file "
3903 "Documentation/feature-removal-schedule.txt "
3904 "in the kernel source tree for more details.\n");
3908 #ifdef CONFIG_UNUSED_SYMBOLS
3909 if (syms->unused && fsa->warn) {
3910 printk(KERN_WARNING "Symbol %s is marked as UNUSED, "
3911 "however this module is using it.\n", fsa->name);
3912 printk(KERN_WARNING
3913 "This symbol will go away in the future.\n");
3914 printk(KERN_WARNING
3915 "Please evalute if this is the right api to use and if "
3916 "it really is, submit a report the linux kernel "
3917 "mailinglist together with submitting your code for "
3918 "inclusion.\n");
3920 #endif
3922 fsa->owner = owner;
3923 fsa->crc = symversion(syms->crcs, symnum);
3924 fsa->sym = &syms->start[symnum];
3925 return true;
3928 /* Find a symbol and return it, along with its (optional) crc and
3929 * the (optional) module which owns it. */
3930 static const struct kernel_symbol *find_symbol(const char *name,
3931 struct module **owner,
3932 const unsigned long **crc,
3933 bool gplok, bool warn)
3935 struct find_symbol_arg fsa;
3937 fsa.name = name;
3938 fsa.gplok = gplok;
3939 fsa.warn = warn;
3941 if (each_symbol(find_symbol_in_section, &fsa)) {
3942 if (owner)
3943 *owner = fsa.owner;
3944 if (crc)
3945 *crc = fsa.crc;
3946 return fsa.sym;
3949 return NULL;
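/*
 * Example call (illustrative): resolving an exported symbol while
 * honoring GPL-only restrictions and modversion crcs:
 *
 *   struct module *owner;
 *   const unsigned long *crc;
 *   const struct kernel_symbol *sym =
 *           find_symbol("printk", &owner, &crc, true, false);
 *   if (sym != NULL)
 *           use_address(sym->value);        (hypothetical consumer)
 *
 * owner comes back NULL for vmlinux exports, and crc is NULL unless
 * CONFIG_MODVERSIONS provided a crc table for the matching section.
 */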
3952 static inline int within_module_core(unsigned long addr, struct module *mod)
3954 return (unsigned long)mod->module_core <= addr &&
3955 addr < (unsigned long)mod->module_core + mod->core_size;
3958 static inline int within_module_init(unsigned long addr, struct module *mod)
3960 return (unsigned long)mod->module_init <= addr &&
3961 addr < (unsigned long)mod->module_init + mod->init_size;
3964 static struct module *__module_address(unsigned long addr)
3966 struct module *mod;
3968 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
3969 list_for_each_entry_rcu(mod, &modules, list)
3970 #else
3971 /* d72b37513cdfbd3f53f3d485a8c403cc96d2c95f was after 2.6.27 */
3972 list_for_each_entry(mod, &modules, list)
3973 #endif
3974 if (within_module_core(addr, mod) ||
3975 within_module_init(addr, mod))
3976 return mod;
3977 return NULL;
3979 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
3981 struct update_attribute {
3982 struct attribute attr;
3983 ssize_t (*show)(struct update *update, char *buf);
3984 ssize_t (*store)(struct update *update, const char *buf, size_t len);
3987 static ssize_t update_attr_show(struct kobject *kobj, struct attribute *attr,
3988 char *buf)
3990 struct update_attribute *attribute =
3991 container_of(attr, struct update_attribute, attr);
3992 struct update *update = container_of(kobj, struct update, kobj);
3993 if (attribute->show == NULL)
3994 return -EIO;
3995 return attribute->show(update, buf);
3998 static ssize_t update_attr_store(struct kobject *kobj, struct attribute *attr,
3999 const char *buf, size_t len)
4001 struct update_attribute *attribute =
4002 container_of(attr, struct update_attribute, attr);
4003 struct update *update = container_of(kobj, struct update, kobj);
4004 if (attribute->store == NULL)
4005 return -EIO;
4006 return attribute->store(update, buf, len);
4009 static struct sysfs_ops update_sysfs_ops = {
4010 .show = update_attr_show,
4011 .store = update_attr_store,
4014 static void update_release(struct kobject *kobj)
4016 struct update *update;
4017 update = container_of(kobj, struct update, kobj);
4018 cleanup_ksplice_update(update);
4021 static ssize_t stage_show(struct update *update, char *buf)
4023 switch (update->stage) {
4024 case STAGE_PREPARING:
4025 return snprintf(buf, PAGE_SIZE, "preparing\n");
4026 case STAGE_APPLIED:
4027 return snprintf(buf, PAGE_SIZE, "applied\n");
4028 case STAGE_REVERSED:
4029 return snprintf(buf, PAGE_SIZE, "reversed\n");
4031 return 0;
4034 static ssize_t abort_cause_show(struct update *update, char *buf)
4036 switch (update->abort_cause) {
4037 case OK:
4038 return snprintf(buf, PAGE_SIZE, "ok\n");
4039 case NO_MATCH:
4040 return snprintf(buf, PAGE_SIZE, "no_match\n");
4041 #ifdef KSPLICE_STANDALONE
4042 case BAD_SYSTEM_MAP:
4043 return snprintf(buf, PAGE_SIZE, "bad_system_map\n");
4044 #endif /* KSPLICE_STANDALONE */
4045 case CODE_BUSY:
4046 return snprintf(buf, PAGE_SIZE, "code_busy\n");
4047 case MODULE_BUSY:
4048 return snprintf(buf, PAGE_SIZE, "module_busy\n");
4049 case OUT_OF_MEMORY:
4050 return snprintf(buf, PAGE_SIZE, "out_of_memory\n");
4051 case FAILED_TO_FIND:
4052 return snprintf(buf, PAGE_SIZE, "failed_to_find\n");
4053 case ALREADY_REVERSED:
4054 return snprintf(buf, PAGE_SIZE, "already_reversed\n");
4055 case MISSING_EXPORT:
4056 return snprintf(buf, PAGE_SIZE, "missing_export\n");
4057 case UNEXPECTED_RUNNING_TASK:
4058 return snprintf(buf, PAGE_SIZE, "unexpected_running_task\n");
4059 case TARGET_NOT_LOADED:
4060 return snprintf(buf, PAGE_SIZE, "target_not_loaded\n");
4061 case CALL_FAILED:
4062 return snprintf(buf, PAGE_SIZE, "call_failed\n");
4063 case COLD_UPDATE_LOADED:
4064 return snprintf(buf, PAGE_SIZE, "cold_update_loaded\n");
4065 case UNEXPECTED:
4066 return snprintf(buf, PAGE_SIZE, "unexpected\n");
4067 default:
4068 return snprintf(buf, PAGE_SIZE, "unknown\n");
4070 return 0;
4073 static ssize_t conflict_show(struct update *update, char *buf)
4075 const struct conflict *conf;
4076 const struct conflict_addr *ca;
4077 int used = 0;
4078 mutex_lock(&module_mutex);
4079 list_for_each_entry(conf, &update->conflicts, list) {
4080 used += scnprintf(buf + used, PAGE_SIZE - used, "%s %d",
4081 conf->process_name, conf->pid);
4082 list_for_each_entry(ca, &conf->stack, list) {
4083 if (!ca->has_conflict)
4084 continue;
4085 used += scnprintf(buf + used, PAGE_SIZE - used, " %s",
4086 ca->label);
4088 used += scnprintf(buf + used, PAGE_SIZE - used, "\n");
4090 mutex_unlock(&module_mutex);
4091 return used;
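/*
 * Sample output (illustrative): one line per conflicting process,
 * "<comm> <pid>" followed by the label of each patched code region
 * the blocked thread was found executing, e.g.
 *
 *   apache2 4210 example_function_label
 *
 * (hypothetical process and label).  Only stack entries with
 * has_conflict set contribute a label, and because scnprintf returns
 * the bytes actually written, output stops cleanly at PAGE_SIZE.
 */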
4094 /* Used to pass maybe_cleanup_ksplice_update to kthread_run */
4095 static int maybe_cleanup_ksplice_update_wrapper(void *updateptr)
4097 struct update *update = updateptr;
4098 mutex_lock(&module_mutex);
4099 maybe_cleanup_ksplice_update(update);
4100 mutex_unlock(&module_mutex);
4101 return 0;
4104 static ssize_t stage_store(struct update *update, const char *buf, size_t len)
4106 enum stage old_stage;
4107 mutex_lock(&module_mutex);
4108 old_stage = update->stage;
4109 if ((strncmp(buf, "applied", len) == 0 ||
4110 strncmp(buf, "applied\n", len) == 0) &&
4111 update->stage == STAGE_PREPARING)
4112 update->abort_cause = apply_update(update);
4113 else if ((strncmp(buf, "reversed", len) == 0 ||
4114 strncmp(buf, "reversed\n", len) == 0) &&
4115 update->stage == STAGE_APPLIED)
4116 update->abort_cause = reverse_update(update);
4117 else if ((strncmp(buf, "cleanup", len) == 0 ||
4118 strncmp(buf, "cleanup\n", len) == 0) &&
4119 update->stage == STAGE_REVERSED)
4120 kthread_run(maybe_cleanup_ksplice_update_wrapper, update,
4121 "ksplice_cleanup_%s", update->kid);
4123 mutex_unlock(&module_mutex);
4124 return len;
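/*
 * Example of driving the update state machine from userspace
 * (illustrative; the exact sysfs path depends on where the update
 * kobject is registered, which happens outside this section):
 *
 *   # echo applied  > /sys/kernel/ksplice/<kid>/stage   (preparing -> applied)
 *   # echo reversed > /sys/kernel/ksplice/<kid>/stage   (applied -> reversed)
 *   # echo cleanup  > /sys/kernel/ksplice/<kid>/stage   (reversed; spawns the
 *                                                        cleanup kthread)
 *
 * A write that does not match the current stage is silently ignored,
 * and len is returned either way, so userspace always sees success.
 * Note also that the strncmp(, , len) comparisons accept any prefix
 * of "applied"/"reversed"/"cleanup" as equivalent to the full word.
 */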
4127 static ssize_t debug_show(struct update *update, char *buf)
4129 return snprintf(buf, PAGE_SIZE, "%d\n", update->debug);
4132 static ssize_t debug_store(struct update *update, const char *buf, size_t len)
4134 unsigned long l;
4135 int ret = strict_strtoul(buf, 10, &l);
4136 if (ret != 0)
4137 return ret;
4138 update->debug = l;
4139 return len;
4142 static ssize_t partial_show(struct update *update, char *buf)
4144 return snprintf(buf, PAGE_SIZE, "%d\n", update->partial);
4147 static ssize_t partial_store(struct update *update, const char *buf, size_t len)
4149 unsigned long l;
4150 int ret = strict_strtoul(buf, 10, &l);
4151 if (ret != 0)
4152 return ret;
4153 update->partial = l;
4154 return len;
4157 static struct update_attribute stage_attribute =
4158 __ATTR(stage, 0600, stage_show, stage_store);
4159 static struct update_attribute abort_cause_attribute =
4160 __ATTR(abort_cause, 0400, abort_cause_show, NULL);
4161 static struct update_attribute debug_attribute =
4162 __ATTR(debug, 0600, debug_show, debug_store);
4163 static struct update_attribute partial_attribute =
4164 __ATTR(partial, 0600, partial_show, partial_store);
4165 static struct update_attribute conflict_attribute =
4166 __ATTR(conflicts, 0400, conflict_show, NULL);
4168 static struct attribute *update_attrs[] = {
4169 &stage_attribute.attr,
4170 &abort_cause_attribute.attr,
4171 &debug_attribute.attr,
4172 &partial_attribute.attr,
4173 &conflict_attribute.attr,
4174 NULL
4177 static struct kobj_type update_ktype = {
4178 .sysfs_ops = &update_sysfs_ops,
4179 .release = update_release,
4180 .default_attrs = update_attrs,
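/*
 * Resulting sysfs layout (illustrative, assuming the update kobject
 * ends up under the "ksplice" kobject created in init_ksplice below):
 *
 *   /sys/kernel/ksplice/<kid>/stage        0600  preparing/applied/reversed
 *   /sys/kernel/ksplice/<kid>/abort_cause  0400  e.g. "ok" or "code_busy"
 *   /sys/kernel/ksplice/<kid>/debug        0600  debug verbosity level
 *   /sys/kernel/ksplice/<kid>/partial      0600  allow partially loaded targets
 *   /sys/kernel/ksplice/<kid>/conflicts    0400  processes blocking the update
 */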
4183 #ifdef KSPLICE_STANDALONE
4184 static int debug;
4185 module_param(debug, int, 0600);
4186 MODULE_PARM_DESC(debug, "Debug level");
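/*
 * Example (illustrative; module and file names shown schematically):
 * in the standalone build the debug level can be set at load time or
 * adjusted later through the 0600 parameter file:
 *
 *   # insmod ksplice-<KID>.ko debug=1
 *   # echo 2 > /sys/module/ksplice_<KID>/parameters/debug
 */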
4188 extern struct ksplice_system_map ksplice_system_map[], ksplice_system_map_end[];
4190 static struct ksplice_mod_change bootstrap_mod_change = {
4191 .name = "ksplice_" __stringify(KSPLICE_KID),
4192 .kid = "init_" __stringify(KSPLICE_KID),
4193 .target_name = NULL,
4194 .target = NULL,
4195 .map_printk = MAP_PRINTK,
4196 .new_code_mod = THIS_MODULE,
4197 .new_code.system_map = ksplice_system_map,
4198 .new_code.system_map_end = ksplice_system_map_end,
4200 #endif /* KSPLICE_STANDALONE */
4202 static int init_ksplice(void)
4204 #ifdef KSPLICE_STANDALONE
4205 struct ksplice_mod_change *change = &bootstrap_mod_change;
4206 change->update = init_ksplice_update(change->kid);
4207 sort(change->new_code.system_map,
4208 change->new_code.system_map_end - change->new_code.system_map,
4209 sizeof(struct ksplice_system_map), compare_system_map, NULL);
4210 if (change->update == NULL)
4211 return -ENOMEM;
4212 add_to_update(change, change->update);
4213 change->update->debug = debug;
4214 change->update->abort_cause =
4215 apply_relocs(change, ksplice_init_relocs, ksplice_init_relocs_end);
4216 if (change->update->abort_cause == OK)
4217 bootstrapped = true;
4218 cleanup_ksplice_update(bootstrap_mod_change.update);
4219 #else /* !KSPLICE_STANDALONE */
4220 ksplice_kobj = kobject_create_and_add("ksplice", kernel_kobj);
4221 if (ksplice_kobj == NULL)
4222 return -ENOMEM;
4223 #endif /* KSPLICE_STANDALONE */
4224 return 0;
4227 static void cleanup_ksplice(void)
4229 #ifndef KSPLICE_STANDALONE
4230 kobject_put(ksplice_kobj);
4231 #endif /* !KSPLICE_STANDALONE */
4234 module_init(init_ksplice);
4235 module_exit(cleanup_ksplice);
4237 MODULE_AUTHOR("Ksplice, Inc.");
4238 MODULE_DESCRIPTION("Ksplice rebootless update system");
4239 #ifdef KSPLICE_VERSION
4240 MODULE_VERSION(KSPLICE_VERSION);
4241 #endif
4242 MODULE_LICENSE("GPL v2");