Remove unnecessary clear_list(&update->conflicts) from reverse_patches().
kmodsrc/ksplice.c
1 /* Copyright (C) 2007-2009 Ksplice, Inc.
2 * Authors: Jeff Arnold, Anders Kaseorg, Tim Abbott
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License, version 2.
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
15 * 02110-1301, USA.
18 #include <linux/module.h>
19 #include <linux/version.h>
20 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
21 #include <linux/bug.h>
22 #else /* LINUX_VERSION_CODE */
23 /* 7664c5a1da4711bb6383117f51b94c8dc8f3f1cd was after 2.6.19 */
24 #endif /* LINUX_VERSION_CODE */
25 #include <linux/ctype.h>
26 #if defined CONFIG_DEBUG_FS || LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
27 #include <linux/debugfs.h>
28 #else /* CONFIG_DEBUG_FS */
29 /* a7a76cefc4b12bb6508afa4c77f11c2752cc365d was after 2.6.11 */
30 #endif /* CONFIG_DEBUG_FS */
31 #include <linux/errno.h>
32 #include <linux/kallsyms.h>
33 #include <linux/kobject.h>
34 #include <linux/kthread.h>
35 #include <linux/pagemap.h>
36 #include <linux/sched.h>
37 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
38 #include <linux/sort.h>
39 #else /* LINUX_VERSION_CODE < */
40 /* 8c63b6d337534a6b5fb111dc27d0850f535118c0 was after 2.6.11 */
41 #endif /* LINUX_VERSION_CODE */
42 #include <linux/stop_machine.h>
43 #include <linux/sysfs.h>
44 #include <linux/time.h>
45 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
46 #include <linux/uaccess.h>
47 #else /* LINUX_VERSION_CODE < */
48 /* linux/uaccess.h doesn't exist in kernels before 2.6.18 */
49 #include <asm/uaccess.h>
50 #endif /* LINUX_VERSION_CODE */
51 #include <linux/vmalloc.h>
52 #ifdef KSPLICE_STANDALONE
53 #include "ksplice.h"
54 #else /* !KSPLICE_STANDALONE */
55 #include <linux/ksplice.h>
56 #endif /* KSPLICE_STANDALONE */
57 #ifdef KSPLICE_NEED_PARAINSTRUCTIONS
58 #include <asm/alternative.h>
59 #endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
61 #if defined(KSPLICE_STANDALONE) && \
62 !defined(CONFIG_KSPLICE) && !defined(CONFIG_KSPLICE_MODULE)
63 #define KSPLICE_NO_KERNEL_SUPPORT 1
64 #endif /* KSPLICE_STANDALONE && !CONFIG_KSPLICE && !CONFIG_KSPLICE_MODULE */
66 enum stage {
67 STAGE_PREPARING, /* the update is not yet applied */
68 STAGE_APPLIED, /* the update is applied */
69 STAGE_REVERSED, /* the update has been applied and reversed */
72 /* parameter to modify run-pre matching */
73 enum run_pre_mode {
74 RUN_PRE_INITIAL, /* dry run (only change temp_labelvals) */
75 RUN_PRE_DEBUG, /* dry run with byte-by-byte debugging */
76 RUN_PRE_FINAL, /* finalizes the matching */
77 #ifdef KSPLICE_STANDALONE
78 RUN_PRE_SILENT,
79 #endif /* KSPLICE_STANDALONE */
82 enum { NOVAL, TEMP, VAL };
84 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
85 /* 5d7b32de9935c65ca8285ac6ec2382afdbb5d479 was after 2.6.8 */
86 #define __bitwise__
87 #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
88 /* af4ca457eaf2d6682059c18463eb106e2ce58198 was after 2.6.14 */
89 #define __bitwise__ __bitwise
90 #endif
92 typedef int __bitwise__ abort_t;
94 #define OK ((__force abort_t) 0)
95 #define NO_MATCH ((__force abort_t) 1)
96 #define CODE_BUSY ((__force abort_t) 2)
97 #define MODULE_BUSY ((__force abort_t) 3)
98 #define OUT_OF_MEMORY ((__force abort_t) 4)
99 #define FAILED_TO_FIND ((__force abort_t) 5)
100 #define ALREADY_REVERSED ((__force abort_t) 6)
101 #define MISSING_EXPORT ((__force abort_t) 7)
102 #define UNEXPECTED_RUNNING_TASK ((__force abort_t) 8)
103 #define UNEXPECTED ((__force abort_t) 9)
104 #define TARGET_NOT_LOADED ((__force abort_t) 10)
105 #define CALL_FAILED ((__force abort_t) 11)
106 #define COLD_UPDATE_LOADED ((__force abort_t) 12)
107 #ifdef KSPLICE_STANDALONE
108 #define BAD_SYSTEM_MAP ((__force abort_t) 13)
109 #endif /* KSPLICE_STANDALONE */
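/* Example (editorial sketch, not in the original file): abort_t is a
 * sparse __bitwise type, so status codes cannot be mixed with plain
 * integers without an explicit __force cast. A typical caller compares
 * against the named constants:
 *
 *	abort_t ret = apply_update(update);
 *	if (ret != OK)
 *		update->abort_cause = ret;
 */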
111 struct update {
112 const char *kid;
113 const char *name;
114 struct kobject kobj;
115 enum stage stage;
116 abort_t abort_cause;
117 int debug;
118 #ifdef CONFIG_DEBUG_FS
119 struct debugfs_blob_wrapper debug_blob;
120 struct dentry *debugfs_dentry;
121 #else /* !CONFIG_DEBUG_FS */
122 bool debug_continue_line;
123 #endif /* CONFIG_DEBUG_FS */
124 bool partial; /* is it OK if some target mods aren't loaded */
125 struct list_head changes, /* changes for loaded target mods */
126 unused_changes; /* changes for non-loaded target mods */
127 struct list_head conflicts;
128 struct list_head list;
129 struct list_head ksplice_module_list;
132 /* a process conflicting with an update */
133 struct conflict {
134 const char *process_name;
135 pid_t pid;
136 struct list_head stack;
137 struct list_head list;
140 /* an address on the stack of a conflict */
141 struct conflict_addr {
142 unsigned long addr; /* the address on the stack */
143 bool has_conflict; /* does this address in particular conflict? */
144 const char *label; /* the label of the conflicting safety_record */
145 struct list_head list;
148 #if defined(CONFIG_DEBUG_FS) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
149 /* Old kernels don't have debugfs_create_blob */
150 struct debugfs_blob_wrapper {
151 void *data;
152 unsigned long size;
154 #endif /* CONFIG_DEBUG_FS && LINUX_VERSION_CODE */
156 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
157 /* 930631edd4b1fe2781d9fe90edbe35d89dfc94cc was after 2.6.18 */
158 #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
159 #endif
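/* Example (editorial note): DIV_ROUND_UP() rounds an integer quotient
 * up, so DIV_ROUND_UP(10, 4) == 3 while 10 / 4 == 2. map_writable()
 * below uses it to count the pages spanning an arbitrary
 * [addr, addr + len) range.
 */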
161 struct labelval {
162 struct list_head list;
163 struct ksplice_symbol *symbol;
164 struct list_head *saved_vals;
167 /* region to be checked for conflicts in the stack check */
168 struct safety_record {
169 struct list_head list;
170 const char *label;
171 unsigned long addr; /* the address to be checked for conflicts
172 * (e.g. an obsolete function's starting addr)
174 unsigned long size; /* the size of the region to be checked */
177 /* possible value for a symbol */
178 struct candidate_val {
179 struct list_head list;
180 unsigned long val;
183 /* private struct used by init_symbol_array */
184 struct ksplice_lookup {
185 /* input */
186 struct ksplice_mod_change *change;
187 struct ksplice_symbol **arr;
188 size_t size;
189 /* output */
190 abort_t ret;
193 #ifdef KSPLICE_NO_KERNEL_SUPPORT
194 struct symsearch {
195 const struct kernel_symbol *start, *stop;
196 const unsigned long *crcs;
197 enum {
198 NOT_GPL_ONLY,
199 GPL_ONLY,
200 WILL_BE_GPL_ONLY,
201 } licence;
202 bool unused;
204 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
206 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
207 /* c33fa9f5609e918824446ef9a75319d4a802f1f4 was after 2.6.25 */
209 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
210 /* 2fff0a48416af891dce38fd425246e337831e0bb was after 2.6.19 */
211 static bool virtual_address_mapped(unsigned long addr)
213 char retval;
214 return probe_kernel_address(addr, retval) != -EFAULT;
216 #else /* LINUX_VERSION_CODE < */
217 static bool virtual_address_mapped(unsigned long addr);
218 #endif /* LINUX_VERSION_CODE */
220 static long probe_kernel_read(void *dst, void *src, size_t size)
222 if (size == 0)
223 return 0;
224 if (!virtual_address_mapped((unsigned long)src) ||
225 !virtual_address_mapped((unsigned long)src + size - 1))
226 return -EFAULT;
228 memcpy(dst, src, size);
229 return 0;
231 #endif /* LINUX_VERSION_CODE */
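/* Example (editorial note): with this fallback, callers can probe
 * addresses that may not be mapped without faulting, as
 * read_reloc_value() below does:
 *
 *	if (probe_kernel_read(bytes, (void *)addr, howto->size) == -EFAULT)
 *		return NO_MATCH;
 */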
233 static LIST_HEAD(updates);
234 #ifdef KSPLICE_STANDALONE
235 #if defined(CONFIG_KSPLICE) || defined(CONFIG_KSPLICE_MODULE)
236 extern struct list_head ksplice_modules;
237 #else /* !CONFIG_KSPLICE */
238 LIST_HEAD(ksplice_modules);
239 #endif /* CONFIG_KSPLICE */
240 #else /* !KSPLICE_STANDALONE */
241 LIST_HEAD(ksplice_modules);
242 EXPORT_SYMBOL_GPL(ksplice_modules);
243 static struct kobject *ksplice_kobj;
244 #endif /* KSPLICE_STANDALONE */
246 static struct kobj_type update_ktype;
248 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
249 /* Old kernels do not have kcalloc
250 * e629946abd0bb8266e9c3d0fd1bff2ef8dec5443 was after 2.6.8
252 static void *kcalloc(size_t n, size_t size, typeof(GFP_KERNEL) flags)
254 char *mem;
255 if (n != 0 && size > ULONG_MAX / n)
256 return NULL;
257 mem = kmalloc(n * size, flags);
258 if (mem)
259 memset(mem, 0, n * size);
260 return mem;
262 #endif /* LINUX_VERSION_CODE */
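/* Editorial note: the n != 0 && size > ULONG_MAX / n test above rejects
 * products that would overflow; e.g. on a 32-bit kernel,
 * kcalloc(0x20000000, 16, GFP_KERNEL) returns NULL rather than
 * allocating a wrapped-around, undersized buffer.
 */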
264 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
265 /* 8c63b6d337534a6b5fb111dc27d0850f535118c0 was after 2.6.11 */
266 static void u32_swap(void *a, void *b, int size)
268 u32 t = *(u32 *)a;
269 *(u32 *)a = *(u32 *)b;
270 *(u32 *)b = t;
273 static void generic_swap(void *a, void *b, int size)
275 char t;
277 do {
278 t = *(char *)a;
279 *(char *)a++ = *(char *)b;
280 *(char *)b++ = t;
281 } while (--size > 0);
285 * sort - sort an array of elements
286 * @base: pointer to data to sort
287 * @num: number of elements
288 * @size: size of each element
289 * @cmp: pointer to comparison function
290 * @swap: pointer to swap function or NULL
292 * This function does a heapsort on the given array. You may provide a
293 * swap function optimized to your element type.
295 * Sorting time is O(n log n) both on average and worst-case. While
296 * qsort is about 20% faster on average, it suffers from exploitable
297 * O(n*n) worst-case behavior and extra memory requirements that make
298 * it less suitable for kernel use.
301 void sort(void *base, size_t num, size_t size,
302 int (*cmp)(const void *, const void *),
303 void (*swap)(void *, void *, int size))
305 /* pre-scale counters for performance */
306 int i = (num / 2 - 1) * size, n = num * size, c, r;
308 if (!swap)
309 swap = (size == 4 ? u32_swap : generic_swap);
311 /* heapify */
312 for (; i >= 0; i -= size) {
313 for (r = i; r * 2 + size < n; r = c) {
314 c = r * 2 + size;
315 if (c < n - size && cmp(base + c, base + c + size) < 0)
316 c += size;
317 if (cmp(base + r, base + c) >= 0)
318 break;
319 swap(base + r, base + c, size);
323 /* sort */
324 for (i = n - size; i > 0; i -= size) {
325 swap(base, base + i, size);
326 for (r = 0; r * 2 + size < i; r = c) {
327 c = r * 2 + size;
328 if (c < i - size && cmp(base + c, base + c + size) < 0)
329 c += size;
330 if (cmp(base + r, base + c) >= 0)
331 break;
332 swap(base + r, base + c, size);
336 #endif /* LINUX_VERSION_CODE < */
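/* Example (editorial note): init_ksplice_mod_change() below relies on
 * sort() to order the relocation and section tables for later
 * bsearch() lookups:
 *
 *	sort(change->old_code.relocs,
 *	     change->old_code.relocs_end - change->old_code.relocs,
 *	     sizeof(*change->old_code.relocs), compare_relocs, NULL);
 *
 * Passing NULL for @swap selects u32_swap() or generic_swap() above.
 */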
338 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13)
339 /* Old kernels do not have kstrdup
340 * 543537bd922692bc978e2e356fcd8bfc9c2ee7d5 was 2.6.13-rc4
342 static char *kstrdup(const char *s, typeof(GFP_KERNEL) gfp)
344 size_t len;
345 char *buf;
347 if (!s)
348 return NULL;
350 len = strlen(s) + 1;
351 buf = kmalloc(len, gfp);
352 if (buf)
353 memcpy(buf, s, len);
354 return buf;
356 #endif /* LINUX_VERSION_CODE */
358 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
359 /* Old kernels use semaphore instead of mutex
360 * 97d1f15b7ef52c1e9c28dc48b454024bb53a5fd2 was after 2.6.16
362 #define mutex semaphore
363 #define mutex_lock down
364 #define mutex_unlock up
365 #endif /* LINUX_VERSION_CODE */
367 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
368 /* 11443ec7d9286dd25663516436a14edfb5f43857 was after 2.6.21 */
369 static char * __attribute_used__
370 kvasprintf(typeof(GFP_KERNEL) gfp, const char *fmt, va_list ap)
372 unsigned int len;
373 char *p, dummy[1];
374 va_list aq;
376 va_copy(aq, ap);
377 len = vsnprintf(dummy, 0, fmt, aq);
378 va_end(aq);
380 p = kmalloc(len + 1, gfp);
381 if (!p)
382 return NULL;
384 vsnprintf(p, len + 1, fmt, ap);
386 return p;
388 #endif
390 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
391 /* e905914f96e11862b130dd229f73045dad9a34e8 was after 2.6.17 */
392 static char * __attribute__((format (printf, 2, 3)))
393 kasprintf(typeof(GFP_KERNEL) gfp, const char *fmt, ...)
395 va_list ap;
396 char *p;
398 va_start(ap, fmt);
399 p = kvasprintf(gfp, fmt, ap);
400 va_end(ap);
402 return p;
404 #endif
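/* Example (editorial note): kasprintf() sizes the allocation to fit
 * the formatted string exactly; init_ksplice_update() below uses it to
 * build the update's sysfs name:
 *
 *	update->name = kasprintf(GFP_KERNEL, "ksplice_%s", kid);
 */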
406 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
407 /* 06b2a76d25d3cfbd14680021c1d356c91be6904e was after 2.6.24 */
408 static int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
410 char *tail;
411 unsigned long val;
412 size_t len;
414 *res = 0;
415 len = strlen(cp);
416 if (len == 0)
417 return -EINVAL;
419 val = simple_strtoul(cp, &tail, base);
420 if ((*tail == '\0') ||
421 ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
422 *res = val;
423 return 0;
426 return -EINVAL;
428 #endif
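/* Example (editorial note): unlike simple_strtoul(), strict_strtoul()
 * rejects trailing garbage: "42" and "42\n" both parse to 42, but
 * "42x" yields -EINVAL, which makes it suitable for validating
 * user-supplied sysfs input.
 */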
430 #ifndef task_thread_info
431 #define task_thread_info(task) (task)->thread_info
432 #endif /* !task_thread_info */
434 #ifdef KSPLICE_STANDALONE
436 static bool bootstrapped = false;
438 #ifdef CONFIG_KALLSYMS
439 extern unsigned long kallsyms_addresses[], kallsyms_num_syms;
440 extern u8 kallsyms_names[];
441 #endif /* CONFIG_KALLSYMS */
443 /* defined by ksplice-create */
444 extern const struct ksplice_reloc ksplice_init_relocs[],
445 ksplice_init_relocs_end[];
447 /* Obtained via System.map */
448 extern struct list_head modules;
449 extern struct mutex module_mutex;
450 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) && defined(CONFIG_UNUSED_SYMBOLS)
451 /* f71d20e961474dde77e6558396efb93d6ac80a4b was after 2.6.17 */
452 #define KSPLICE_KSYMTAB_UNUSED_SUPPORT 1
453 #endif /* LINUX_VERSION_CODE */
454 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
455 /* 9f28bb7e1d0188a993403ab39b774785892805e1 was after 2.6.16 */
456 #define KSPLICE_KSYMTAB_FUTURE_SUPPORT 1
457 #endif /* LINUX_VERSION_CODE */
458 extern const struct kernel_symbol __start___ksymtab[];
459 extern const struct kernel_symbol __stop___ksymtab[];
460 extern const unsigned long __start___kcrctab[];
461 extern const struct kernel_symbol __start___ksymtab_gpl[];
462 extern const struct kernel_symbol __stop___ksymtab_gpl[];
463 extern const unsigned long __start___kcrctab_gpl[];
464 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
465 extern const struct kernel_symbol __start___ksymtab_unused[];
466 extern const struct kernel_symbol __stop___ksymtab_unused[];
467 extern const unsigned long __start___kcrctab_unused[];
468 extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
469 extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
470 extern const unsigned long __start___kcrctab_unused_gpl[];
471 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
472 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
473 extern const struct kernel_symbol __start___ksymtab_gpl_future[];
474 extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
475 extern const unsigned long __start___kcrctab_gpl_future[];
476 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
478 #endif /* KSPLICE_STANDALONE */
480 static struct update *init_ksplice_update(const char *kid);
481 static void cleanup_ksplice_update(struct update *update);
482 static void maybe_cleanup_ksplice_update(struct update *update);
483 static void add_to_update(struct ksplice_mod_change *change,
484 struct update *update);
485 static int ksplice_sysfs_init(struct update *update);
487 /* Preparing the relocations and patches for application */
488 static abort_t apply_update(struct update *update);
489 static abort_t prepare_change(struct ksplice_mod_change *change);
490 static abort_t finalize_change(struct ksplice_mod_change *change);
491 static abort_t finalize_patches(struct ksplice_mod_change *change);
492 static abort_t add_dependency_on_address(struct ksplice_mod_change *change,
493 unsigned long addr);
494 static abort_t map_trampoline_pages(struct update *update);
495 static void unmap_trampoline_pages(struct update *update);
496 static void *map_writable(void *addr, size_t len);
497 static abort_t apply_relocs(struct ksplice_mod_change *change,
498 const struct ksplice_reloc *relocs,
499 const struct ksplice_reloc *relocs_end);
500 static abort_t apply_reloc(struct ksplice_mod_change *change,
501 const struct ksplice_reloc *r);
502 static abort_t apply_howto_reloc(struct ksplice_mod_change *change,
503 const struct ksplice_reloc *r);
504 static abort_t apply_howto_date(struct ksplice_mod_change *change,
505 const struct ksplice_reloc *r);
506 static abort_t read_reloc_value(struct ksplice_mod_change *change,
507 const struct ksplice_reloc *r,
508 unsigned long addr, unsigned long *valp);
509 static abort_t write_reloc_value(struct ksplice_mod_change *change,
510 const struct ksplice_reloc *r,
511 unsigned long addr, unsigned long sym_addr);
512 static abort_t create_module_list_entry(struct ksplice_mod_change *change,
513 bool to_be_applied);
514 static void cleanup_module_list_entries(struct update *update);
515 static void __attribute__((noreturn)) ksplice_deleted(void);
517 /* run-pre matching */
518 static abort_t match_change_sections(struct ksplice_mod_change *change,
519 bool consider_data_sections);
520 static abort_t find_section(struct ksplice_mod_change *change,
521 struct ksplice_section *sect);
522 static abort_t try_addr(struct ksplice_mod_change *change,
523 struct ksplice_section *sect,
524 unsigned long run_addr,
525 struct list_head *safety_records,
526 enum run_pre_mode mode);
527 static abort_t run_pre_cmp(struct ksplice_mod_change *change,
528 const struct ksplice_section *sect,
529 unsigned long run_addr,
530 struct list_head *safety_records,
531 enum run_pre_mode mode);
532 #ifndef CONFIG_FUNCTION_DATA_SECTIONS
533 /* defined in arch/ARCH/kernel/ksplice-arch.c */
534 static abort_t arch_run_pre_cmp(struct ksplice_mod_change *change,
535 struct ksplice_section *sect,
536 unsigned long run_addr,
537 struct list_head *safety_records,
538 enum run_pre_mode mode);
539 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
540 static void print_bytes(struct ksplice_mod_change *change,
541 const unsigned char *run, int runc,
542 const unsigned char *pre, int prec);
543 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
544 static abort_t brute_search(struct ksplice_mod_change *change,
545 struct ksplice_section *sect,
546 const void *start, unsigned long len,
547 struct list_head *vals);
548 static abort_t brute_search_all(struct ksplice_mod_change *change,
549 struct ksplice_section *sect,
550 struct list_head *vals);
551 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
552 static const struct ksplice_reloc *
553 init_reloc_search(struct ksplice_mod_change *change,
554 const struct ksplice_section *sect);
555 static const struct ksplice_reloc *find_reloc(const struct ksplice_reloc *start,
556 const struct ksplice_reloc *end,
557 unsigned long address,
558 unsigned long size);
559 static abort_t lookup_reloc(struct ksplice_mod_change *change,
560 const struct ksplice_reloc **fingerp,
561 unsigned long addr,
562 const struct ksplice_reloc **relocp);
563 static abort_t handle_reloc(struct ksplice_mod_change *change,
564 const struct ksplice_section *sect,
565 const struct ksplice_reloc *r,
566 unsigned long run_addr, enum run_pre_mode mode);
567 static abort_t handle_howto_date(struct ksplice_mod_change *change,
568 const struct ksplice_section *sect,
569 const struct ksplice_reloc *r,
570 unsigned long run_addr,
571 enum run_pre_mode mode);
572 static abort_t handle_howto_reloc(struct ksplice_mod_change *change,
573 const struct ksplice_section *sect,
574 const struct ksplice_reloc *r,
575 unsigned long run_addr,
576 enum run_pre_mode mode);
577 static struct ksplice_section *symbol_section(struct ksplice_mod_change *change,
578 const struct ksplice_symbol *sym);
579 static int compare_section_labels(const void *va, const void *vb);
580 static int symbol_section_bsearch_compare(const void *a, const void *b);
581 static const struct ksplice_reloc *
582 patch_reloc(struct ksplice_mod_change *change,
583 const struct ksplice_patch *p);
585 /* Computing possible addresses for symbols */
586 static abort_t lookup_symbol(struct ksplice_mod_change *change,
587 const struct ksplice_symbol *ksym,
588 struct list_head *vals);
589 static void cleanup_symbol_arrays(struct ksplice_mod_change *change);
590 static abort_t init_symbol_arrays(struct ksplice_mod_change *change);
591 static abort_t init_symbol_array(struct ksplice_mod_change *change,
592 struct ksplice_symbol *start,
593 struct ksplice_symbol *end);
594 static abort_t uniquify_symbols(struct ksplice_mod_change *change);
595 static abort_t add_matching_values(struct ksplice_lookup *lookup,
596 const char *sym_name, unsigned long sym_val);
597 static bool add_export_values(const struct symsearch *syms,
598 struct module *owner,
599 unsigned int symnum, void *data);
600 static int symbolp_bsearch_compare(const void *key, const void *elt);
601 static int compare_symbolp_names(const void *a, const void *b);
602 static int compare_symbolp_labels(const void *a, const void *b);
603 #ifdef CONFIG_KALLSYMS
604 static int add_kallsyms_values(void *data, const char *name,
605 struct module *owner, unsigned long val);
606 #endif /* CONFIG_KALLSYMS */
607 #ifdef KSPLICE_STANDALONE
608 static abort_t
609 add_system_map_candidates(struct ksplice_mod_change *change,
610 const struct ksplice_system_map *start,
611 const struct ksplice_system_map *end,
612 const char *label, struct list_head *vals);
613 static int compare_system_map(const void *a, const void *b);
614 static int system_map_bsearch_compare(const void *key, const void *elt);
615 #endif /* KSPLICE_STANDALONE */
616 static abort_t new_export_lookup(struct ksplice_mod_change *ichange,
617 const char *name, struct list_head *vals);
619 /* Atomic update trampoline insertion and removal */
620 static abort_t apply_patches(struct update *update);
621 static abort_t reverse_patches(struct update *update);
622 static int __apply_patches(void *update);
623 static int __reverse_patches(void *update);
624 static abort_t check_each_task(struct update *update);
625 static abort_t check_task(struct update *update,
626 const struct task_struct *t, bool rerun);
627 static abort_t check_stack(struct update *update, struct conflict *conf,
628 const struct thread_info *tinfo,
629 const unsigned long *stack);
630 static abort_t check_address(struct update *update,
631 struct conflict *conf, unsigned long addr);
632 static abort_t check_record(struct conflict_addr *ca,
633 const struct safety_record *rec,
634 unsigned long addr);
635 static bool is_stop_machine(const struct task_struct *t);
636 static void cleanup_conflicts(struct update *update);
637 static void print_conflicts(struct update *update);
638 static void insert_trampoline(struct ksplice_patch *p);
639 static abort_t verify_trampoline(struct ksplice_mod_change *change,
640 const struct ksplice_patch *p);
641 static void remove_trampoline(const struct ksplice_patch *p);
643 static abort_t create_labelval(struct ksplice_mod_change *change,
644 struct ksplice_symbol *ksym,
645 unsigned long val, int status);
646 static abort_t create_safety_record(struct ksplice_mod_change *change,
647 const struct ksplice_section *sect,
648 struct list_head *record_list,
649 unsigned long run_addr,
650 unsigned long run_size);
651 static abort_t add_candidate_val(struct ksplice_mod_change *change,
652 struct list_head *vals, unsigned long val);
653 static void release_vals(struct list_head *vals);
654 static void set_temp_labelvals(struct ksplice_mod_change *change, int status);
656 static int contains_canary(struct ksplice_mod_change *change,
657 unsigned long blank_addr,
658 const struct ksplice_reloc_howto *howto);
659 static unsigned long follow_trampolines(struct ksplice_mod_change *change,
660 unsigned long addr);
661 static bool patches_module(const struct module *a, const struct module *b);
662 static bool strstarts(const char *str, const char *prefix);
663 static bool singular(struct list_head *list);
664 static void *bsearch(const void *key, const void *base, size_t n,
665 size_t size, int (*cmp)(const void *key, const void *elt));
666 static int compare_relocs(const void *a, const void *b);
667 static int reloc_bsearch_compare(const void *key, const void *elt);
669 /* Debugging */
670 static abort_t init_debug_buf(struct update *update);
671 static void clear_debug_buf(struct update *update);
672 static int __attribute__((format(printf, 2, 3)))
673 _ksdebug(struct update *update, const char *fmt, ...);
674 #define ksdebug(change, fmt, ...) \
675 _ksdebug(change->update, fmt, ## __VA_ARGS__)
677 #ifdef KSPLICE_NO_KERNEL_SUPPORT
678 /* Functions defined here that will be exported in later kernels */
679 #ifdef CONFIG_KALLSYMS
680 static int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
681 struct module *, unsigned long),
682 void *data);
683 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
684 static unsigned int kallsyms_expand_symbol(unsigned int off, char *result);
685 #endif /* LINUX_VERSION_CODE */
686 static int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
687 struct module *,
688 unsigned long),
689 void *data);
690 #endif /* CONFIG_KALLSYMS */
691 static struct module *find_module(const char *name);
692 static int use_module(struct module *a, struct module *b);
693 static const struct kernel_symbol *find_symbol(const char *name,
694 struct module **owner,
695 const unsigned long **crc,
696 bool gplok, bool warn);
697 static bool each_symbol(bool (*fn)(const struct symsearch *arr,
698 struct module *owner,
699 unsigned int symnum, void *data),
700 void *data);
701 static struct module *__module_address(unsigned long addr);
702 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
704 /* Architecture-specific functions defined in arch/ARCH/kernel/ksplice-arch.c */
706 /* Prepare a trampoline for the given patch */
707 static abort_t prepare_trampoline(struct ksplice_mod_change *change,
708 struct ksplice_patch *p);
709 /* What address does the trampoline at addr jump to? */
710 static abort_t trampoline_target(struct ksplice_mod_change *change,
711 unsigned long addr, unsigned long *new_addr);
712 /* Hook to handle pc-relative jumps inserted by parainstructions */
713 static abort_t handle_paravirt(struct ksplice_mod_change *change,
714 unsigned long pre, unsigned long run,
715 int *matched);
716 /* Called for relocations of type KSPLICE_HOWTO_BUG */
717 static abort_t handle_bug(struct ksplice_mod_change *change,
718 const struct ksplice_reloc *r,
719 unsigned long run_addr);
720 /* Called for relocations of type KSPLICE_HOWTO_EXTABLE */
721 static abort_t handle_extable(struct ksplice_mod_change *change,
722 const struct ksplice_reloc *r,
723 unsigned long run_addr);
724 /* Is address p on the stack of the given thread? */
725 static bool valid_stack_ptr(const struct thread_info *tinfo, const void *p);
727 #ifndef KSPLICE_STANDALONE
728 #include "ksplice-arch.c"
729 #elif defined CONFIG_X86
730 #include "x86/ksplice-arch.c"
731 #elif defined CONFIG_ARM
732 #include "arm/ksplice-arch.c"
733 #endif /* KSPLICE_STANDALONE */
735 #define clear_list(head, type, member) \
736 do { \
737 struct list_head *_pos, *_n; \
738 list_for_each_safe(_pos, _n, head) { \
739 list_del(_pos); \
740 kfree(list_entry(_pos, type, member)); \
742 } while (0)
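/* Example (editorial note): clear_list() unlinks and frees every node
 * of a list in one pass; list_for_each_safe() is required because each
 * entry is freed during iteration. cleanup_symbol_arrays() below
 * invokes it as:
 *
 *	clear_list(sym->vals, struct candidate_val, list);
 */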
745 * init_ksplice_mod_change() - Initializes a ksplice change
746 * @change: The change to be initialized. All of the public fields of the
747 * change and its associated data structures should be populated
748 * before this function is called. The values of the private
749 * fields will be ignored.
751 int init_ksplice_mod_change(struct ksplice_mod_change *change)
753 struct update *update;
754 struct ksplice_patch *p;
755 struct ksplice_section *s;
756 int ret = 0;
758 #ifdef KSPLICE_STANDALONE
759 if (!bootstrapped)
760 return -1;
761 #endif /* KSPLICE_STANDALONE */
763 INIT_LIST_HEAD(&change->temp_labelvals);
764 INIT_LIST_HEAD(&change->safety_records);
766 sort(change->old_code.relocs,
767 change->old_code.relocs_end - change->old_code.relocs,
768 sizeof(*change->old_code.relocs), compare_relocs, NULL);
769 sort(change->new_code.relocs,
770 change->new_code.relocs_end - change->new_code.relocs,
771 sizeof(*change->new_code.relocs), compare_relocs, NULL);
772 sort(change->old_code.sections,
773 change->old_code.sections_end - change->old_code.sections,
774 sizeof(*change->old_code.sections), compare_section_labels, NULL);
775 #ifdef KSPLICE_STANDALONE
776 sort(change->new_code.system_map,
777 change->new_code.system_map_end - change->new_code.system_map,
778 sizeof(*change->new_code.system_map), compare_system_map, NULL);
779 sort(change->old_code.system_map,
780 change->old_code.system_map_end - change->old_code.system_map,
781 sizeof(*change->old_code.system_map), compare_system_map, NULL);
782 #endif /* KSPLICE_STANDALONE */
784 for (p = change->patches; p < change->patches_end; p++)
785 p->vaddr = NULL;
786 for (s = change->old_code.sections; s < change->old_code.sections_end;
787 s++)
788 s->match_map = NULL;
789 for (p = change->patches; p < change->patches_end; p++) {
790 const struct ksplice_reloc *r = patch_reloc(change, p);
791 if (r == NULL)
792 return -ENOENT;
793 if (p->type == KSPLICE_PATCH_DATA) {
794 s = symbol_section(change, r->symbol);
795 if (s == NULL)
796 return -ENOENT;
797 /* Ksplice creates KSPLICE_PATCH_DATA patches in order
798 * to modify rodata sections that have been explicitly
799 * marked for patching using the ksplice-patch.h macro
800 * ksplice_assume_rodata. Here we modify the section
801 * flags appropriately.
803 if (s->flags & KSPLICE_SECTION_DATA)
804 s->flags = (s->flags & ~KSPLICE_SECTION_DATA) |
805 KSPLICE_SECTION_RODATA;
809 mutex_lock(&module_mutex);
810 list_for_each_entry(update, &updates, list) {
811 if (strcmp(change->kid, update->kid) == 0) {
812 if (update->stage != STAGE_PREPARING) {
813 ret = -EPERM;
814 goto out;
816 add_to_update(change, update);
817 ret = 0;
818 goto out;
821 update = init_ksplice_update(change->kid);
822 if (update == NULL) {
823 ret = -ENOMEM;
824 goto out;
826 ret = ksplice_sysfs_init(update);
827 if (ret != 0) {
828 cleanup_ksplice_update(update);
829 goto out;
831 add_to_update(change, update);
832 out:
833 mutex_unlock(&module_mutex);
834 return ret;
836 EXPORT_SYMBOL_GPL(init_ksplice_mod_change);
839 * cleanup_ksplice_mod_change() - Cleans up a change
840 * @change: The change to be cleaned up
842 void cleanup_ksplice_mod_change(struct ksplice_mod_change *change)
844 if (change->update == NULL)
845 return;
847 mutex_lock(&module_mutex);
848 if (change->update->stage == STAGE_APPLIED) {
849 /* If the change wasn't actually applied (because we
850 * only applied this update to loaded modules and this
851 * target was not loaded), then unregister the change
852 * from the list of unused changes.
854 struct ksplice_mod_change *c;
855 bool found = false;
857 list_for_each_entry(c, &change->update->unused_changes, list) {
858 if (c == change)
859 found = true;
861 if (found)
862 list_del(&change->list);
863 mutex_unlock(&module_mutex);
864 return;
866 list_del(&change->list);
867 if (change->update->stage == STAGE_PREPARING)
868 maybe_cleanup_ksplice_update(change->update);
869 change->update = NULL;
870 mutex_unlock(&module_mutex);
872 EXPORT_SYMBOL_GPL(cleanup_ksplice_mod_change);
874 static struct update *init_ksplice_update(const char *kid)
876 struct update *update;
877 update = kcalloc(1, sizeof(struct update), GFP_KERNEL);
878 if (update == NULL)
879 return NULL;
880 update->name = kasprintf(GFP_KERNEL, "ksplice_%s", kid);
881 if (update->name == NULL) {
882 kfree(update);
883 return NULL;
885 update->kid = kstrdup(kid, GFP_KERNEL);
886 if (update->kid == NULL) {
887 kfree(update->name);
888 kfree(update);
889 return NULL;
891 if (try_module_get(THIS_MODULE) != 1) {
892 kfree(update->kid);
893 kfree(update->name);
894 kfree(update);
895 return NULL;
897 INIT_LIST_HEAD(&update->changes);
898 INIT_LIST_HEAD(&update->unused_changes);
899 INIT_LIST_HEAD(&update->ksplice_module_list);
900 if (init_debug_buf(update) != OK) {
901 module_put(THIS_MODULE);
902 kfree(update->kid);
903 kfree(update->name);
904 kfree(update);
905 return NULL;
907 list_add(&update->list, &updates);
908 update->stage = STAGE_PREPARING;
909 update->abort_cause = OK;
 910 	update->partial = false;
911 INIT_LIST_HEAD(&update->conflicts);
912 return update;
915 static void cleanup_ksplice_update(struct update *update)
917 list_del(&update->list);
918 cleanup_conflicts(update);
919 clear_debug_buf(update);
920 cleanup_module_list_entries(update);
921 kfree(update->kid);
922 kfree(update->name);
923 kfree(update);
924 module_put(THIS_MODULE);
927 /* Clean up the update if it no longer has any changes */
928 static void maybe_cleanup_ksplice_update(struct update *update)
930 if (list_empty(&update->changes) && list_empty(&update->unused_changes))
931 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
932 kobject_put(&update->kobj);
933 #else /* LINUX_VERSION_CODE < */
934 /* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
935 kobject_unregister(&update->kobj);
936 #endif /* LINUX_VERSION_CODE */
939 static void add_to_update(struct ksplice_mod_change *change,
940 struct update *update)
942 change->update = update;
943 list_add(&change->list, &update->unused_changes);
946 static int ksplice_sysfs_init(struct update *update)
948 int ret = 0;
949 memset(&update->kobj, 0, sizeof(update->kobj));
950 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
951 #ifndef KSPLICE_STANDALONE
952 ret = kobject_init_and_add(&update->kobj, &update_ktype,
953 ksplice_kobj, "%s", update->kid);
954 #else /* KSPLICE_STANDALONE */
955 /* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
956 ret = kobject_init_and_add(&update->kobj, &update_ktype,
957 &THIS_MODULE->mkobj.kobj, "ksplice");
958 #endif /* KSPLICE_STANDALONE */
959 #else /* LINUX_VERSION_CODE < */
960 ret = kobject_set_name(&update->kobj, "%s", "ksplice");
961 if (ret != 0)
962 return ret;
963 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
964 update->kobj.parent = &THIS_MODULE->mkobj.kobj;
965 #else /* LINUX_VERSION_CODE < */
966 /* b86ab02803095190d6b72bcc18dcf620bf378df9 was after 2.6.10 */
967 update->kobj.parent = &THIS_MODULE->mkobj->kobj;
968 #endif /* LINUX_VERSION_CODE */
969 update->kobj.ktype = &update_ktype;
970 ret = kobject_register(&update->kobj);
971 #endif /* LINUX_VERSION_CODE */
972 if (ret != 0)
973 return ret;
974 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
975 kobject_uevent(&update->kobj, KOBJ_ADD);
976 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
977 /* 312c004d36ce6c739512bac83b452f4c20ab1f62 was after 2.6.14 */
978 /* 12025235884570ba7f02a6f427f973ac6be7ec54 was after 2.6.9 */
979 kobject_uevent(&update->kobj, KOBJ_ADD, NULL);
980 #endif /* LINUX_VERSION_CODE */
981 return 0;
984 static abort_t apply_update(struct update *update)
986 struct ksplice_mod_change *change, *n;
987 abort_t ret;
988 int retval;
990 list_for_each_entry(change, &update->changes, list) {
991 ret = create_module_list_entry(change, true);
992 if (ret != OK)
993 goto out;
996 list_for_each_entry_safe(change, n, &update->unused_changes, list) {
997 if (strcmp(change->target_name, "vmlinux") == 0) {
998 change->target = NULL;
999 } else if (change->target == NULL) {
1000 change->target = find_module(change->target_name);
1001 if (change->target == NULL ||
1002 !module_is_live(change->target)) {
1003 if (!update->partial) {
1004 ret = TARGET_NOT_LOADED;
1005 goto out;
1007 ret = create_module_list_entry(change, false);
1008 if (ret != OK)
1009 goto out;
1010 continue;
1012 retval = use_module(change->new_code_mod,
1013 change->target);
1014 if (retval != 1) {
1015 ret = UNEXPECTED;
1016 goto out;
1019 ret = create_module_list_entry(change, true);
1020 if (ret != OK)
1021 goto out;
1022 list_del(&change->list);
1023 list_add_tail(&change->list, &update->changes);
1025 #ifdef KSPLICE_NEED_PARAINSTRUCTIONS
1026 if (change->target == NULL) {
1027 apply_paravirt(change->new_code.parainstructions,
1028 change->new_code.parainstructions_end);
1029 apply_paravirt(change->old_code.parainstructions,
1030 change->old_code.parainstructions_end);
1032 #endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
1035 list_for_each_entry(change, &update->changes, list) {
1036 const struct ksplice_section *sect;
1037 for (sect = change->new_code.sections;
1038 sect < change->new_code.sections_end; sect++) {
1039 struct safety_record *rec = kmalloc(sizeof(*rec),
1040 GFP_KERNEL);
1041 if (rec == NULL) {
1042 ret = OUT_OF_MEMORY;
1043 goto out;
1045 rec->addr = sect->address;
1046 rec->size = sect->size;
1047 rec->label = sect->symbol->label;
1048 list_add(&rec->list, &change->safety_records);
1052 list_for_each_entry(change, &update->changes, list) {
1053 ret = init_symbol_arrays(change);
1054 if (ret != OK) {
1055 cleanup_symbol_arrays(change);
1056 goto out;
1058 ret = prepare_change(change);
1059 cleanup_symbol_arrays(change);
1060 if (ret != OK)
1061 goto out;
1063 ret = apply_patches(update);
1064 out:
1065 list_for_each_entry(change, &update->changes, list) {
1066 struct ksplice_section *s;
1067 if (update->stage == STAGE_PREPARING)
1068 clear_list(&change->safety_records,
1069 struct safety_record, list);
1070 for (s = change->old_code.sections;
1071 s < change->old_code.sections_end; s++) {
1072 if (s->match_map != NULL) {
1073 vfree(s->match_map);
1074 s->match_map = NULL;
1078 if (update->stage == STAGE_PREPARING)
1079 cleanup_module_list_entries(update);
1080 return ret;
1083 static int compare_symbolp_names(const void *a, const void *b)
1085 const struct ksplice_symbol *const *sympa = a, *const *sympb = b;
1086 if ((*sympa)->name == NULL && (*sympb)->name == NULL)
1087 return 0;
1088 if ((*sympa)->name == NULL)
1089 return -1;
1090 if ((*sympb)->name == NULL)
1091 return 1;
1092 return strcmp((*sympa)->name, (*sympb)->name);
1095 static int compare_symbolp_labels(const void *a, const void *b)
1097 const struct ksplice_symbol *const *sympa = a, *const *sympb = b;
1098 return strcmp((*sympa)->label, (*sympb)->label);
1101 static int symbolp_bsearch_compare(const void *key, const void *elt)
1103 const char *name = key;
1104 const struct ksplice_symbol *const *symp = elt;
1105 const struct ksplice_symbol *sym = *symp;
1106 if (sym->name == NULL)
1107 return 1;
1108 return strcmp(name, sym->name);
1111 static abort_t add_matching_values(struct ksplice_lookup *lookup,
1112 const char *sym_name, unsigned long sym_val)
1114 struct ksplice_symbol **symp;
1115 abort_t ret;
1117 symp = bsearch(sym_name, lookup->arr, lookup->size,
1118 sizeof(*lookup->arr), symbolp_bsearch_compare);
1119 if (symp == NULL)
1120 return OK;
1122 while (symp > lookup->arr &&
1123 symbolp_bsearch_compare(sym_name, symp - 1) == 0)
1124 symp--;
1126 for (; symp < lookup->arr + lookup->size; symp++) {
1127 struct ksplice_symbol *sym = *symp;
1128 if (sym->name == NULL || strcmp(sym_name, sym->name) != 0)
1129 break;
1130 ret = add_candidate_val(lookup->change, sym->vals, sym_val);
1131 if (ret != OK)
1132 return ret;
1134 return OK;
1137 #ifdef CONFIG_KALLSYMS
1138 static int add_kallsyms_values(void *data, const char *name,
1139 struct module *owner, unsigned long val)
1141 struct ksplice_lookup *lookup = data;
1142 if (owner == lookup->change->new_code_mod ||
1143 !patches_module(owner, lookup->change->target))
1144 return (__force int)OK;
1145 return (__force int)add_matching_values(lookup, name, val);
1147 #endif /* CONFIG_KALLSYMS */
1149 static bool add_export_values(const struct symsearch *syms,
1150 struct module *owner,
1151 unsigned int symnum, void *data)
1153 struct ksplice_lookup *lookup = data;
1154 abort_t ret;
1156 ret = add_matching_values(lookup, syms->start[symnum].name,
1157 syms->start[symnum].value);
1158 if (ret != OK) {
1159 lookup->ret = ret;
1160 return true;
1162 return false;
1165 static void cleanup_symbol_arrays(struct ksplice_mod_change *change)
1167 struct ksplice_symbol *sym;
1168 for (sym = change->new_code.symbols; sym < change->new_code.symbols_end;
1169 sym++) {
1170 if (sym->vals != NULL) {
1171 clear_list(sym->vals, struct candidate_val, list);
1172 kfree(sym->vals);
1173 sym->vals = NULL;
1176 for (sym = change->old_code.symbols; sym < change->old_code.symbols_end;
1177 sym++) {
1178 if (sym->vals != NULL) {
1179 clear_list(sym->vals, struct candidate_val, list);
1180 kfree(sym->vals);
1181 sym->vals = NULL;
1187 * The new_code and old_code modules each have their own independent
1188 * ksplice_symbol structures. uniquify_symbols unifies these separate
1189 * pieces of kernel symbol information by replacing all references to
1190 * the old_code copy of symbols with references to the new_code copy.
1192 static abort_t uniquify_symbols(struct ksplice_mod_change *change)
1194 struct ksplice_reloc *r;
1195 struct ksplice_section *s;
1196 struct ksplice_symbol *sym, **sym_arr, **symp;
1197 size_t size = change->new_code.symbols_end - change->new_code.symbols;
1199 if (size == 0)
1200 return OK;
1202 sym_arr = vmalloc(sizeof(*sym_arr) * size);
1203 if (sym_arr == NULL)
1204 return OUT_OF_MEMORY;
1206 for (symp = sym_arr, sym = change->new_code.symbols;
1207 symp < sym_arr + size && sym < change->new_code.symbols_end;
1208 sym++, symp++)
1209 *symp = sym;
1211 sort(sym_arr, size, sizeof(*sym_arr), compare_symbolp_labels, NULL);
1213 for (r = change->old_code.relocs; r < change->old_code.relocs_end;
1214 r++) {
1215 symp = bsearch(&r->symbol, sym_arr, size, sizeof(*sym_arr),
1216 compare_symbolp_labels);
1217 if (symp != NULL) {
1218 if ((*symp)->name == NULL)
1219 (*symp)->name = r->symbol->name;
1220 r->symbol = *symp;
1224 for (s = change->old_code.sections; s < change->old_code.sections_end;
1225 s++) {
1226 symp = bsearch(&s->symbol, sym_arr, size, sizeof(*sym_arr),
1227 compare_symbolp_labels);
1228 if (symp != NULL) {
1229 if ((*symp)->name == NULL)
1230 (*symp)->name = s->symbol->name;
1231 s->symbol = *symp;
1235 vfree(sym_arr);
1236 return OK;
1240 * Initialize the ksplice_symbol structures in the given array using
1241 * the kallsyms and exported symbol tables.
1243 static abort_t init_symbol_array(struct ksplice_mod_change *change,
1244 struct ksplice_symbol *start,
1245 struct ksplice_symbol *end)
1247 struct ksplice_symbol *sym, **sym_arr, **symp;
1248 struct ksplice_lookup lookup;
1249 size_t size = end - start;
1250 abort_t ret;
1252 if (size == 0)
1253 return OK;
1255 for (sym = start; sym < end; sym++) {
1256 if (strstarts(sym->label, "__ksymtab")) {
1257 const struct kernel_symbol *ksym;
1258 const char *colon = strchr(sym->label, ':');
1259 			const char *name = colon == NULL ? NULL : colon + 1;
1260 if (colon == NULL)
1261 continue;
1262 ksym = find_symbol(name, NULL, NULL, true, false);
1263 if (ksym == NULL) {
1264 ksdebug(change, "Could not find kernel_symbol "
1265 "structure for %s\n", name);
1266 continue;
1268 sym->value = (unsigned long)ksym;
1269 sym->vals = NULL;
1270 continue;
1273 sym->vals = kmalloc(sizeof(*sym->vals), GFP_KERNEL);
1274 if (sym->vals == NULL)
1275 return OUT_OF_MEMORY;
1276 INIT_LIST_HEAD(sym->vals);
1277 sym->value = 0;
1280 sym_arr = vmalloc(sizeof(*sym_arr) * size);
1281 if (sym_arr == NULL)
1282 return OUT_OF_MEMORY;
1284 for (symp = sym_arr, sym = start; symp < sym_arr + size && sym < end;
1285 sym++, symp++)
1286 *symp = sym;
1288 sort(sym_arr, size, sizeof(*sym_arr), compare_symbolp_names, NULL);
1290 lookup.change = change;
1291 lookup.arr = sym_arr;
1292 lookup.size = size;
1293 lookup.ret = OK;
1295 each_symbol(add_export_values, &lookup);
1296 ret = lookup.ret;
1297 #ifdef CONFIG_KALLSYMS
1298 if (ret == OK)
1299 ret = (__force abort_t)
1300 kallsyms_on_each_symbol(add_kallsyms_values, &lookup);
1301 #endif /* CONFIG_KALLSYMS */
1302 vfree(sym_arr);
1303 return ret;
1306 /* Prepare the change's ksplice_symbol structures for run-pre matching */
1307 static abort_t init_symbol_arrays(struct ksplice_mod_change *change)
1309 abort_t ret;
1311 ret = uniquify_symbols(change);
1312 if (ret != OK)
1313 return ret;
1315 ret = init_symbol_array(change, change->old_code.symbols,
1316 change->old_code.symbols_end);
1317 if (ret != OK)
1318 return ret;
1320 ret = init_symbol_array(change, change->new_code.symbols,
1321 change->new_code.symbols_end);
1322 if (ret != OK)
1323 return ret;
1325 return OK;
1328 static abort_t prepare_change(struct ksplice_mod_change *change)
1330 abort_t ret;
1332 ksdebug(change, "Preparing and checking %s\n", change->name);
1333 ret = match_change_sections(change, false);
1334 if (ret == NO_MATCH) {
1335 /* It is possible that by using relocations from .data sections
1336 * we can successfully run-pre match the rest of the sections.
1337 * To avoid using any symbols obtained from .data sections
1338 * (which may be unreliable) in the post code, we first prepare
1339 * the post code and then try to run-pre match the remaining
1340 * sections with the help of .data sections.
1342 ksdebug(change, "Continuing without some sections; we might "
1343 "find them later.\n");
1344 ret = finalize_change(change);
1345 if (ret != OK) {
1346 ksdebug(change, "Aborted. Unable to continue without "
1347 "the unmatched sections.\n");
1348 return ret;
1351 ksdebug(change, "run-pre: Considering .data sections to find "
1352 "the unmatched sections\n");
1353 ret = match_change_sections(change, true);
1354 if (ret != OK)
1355 return ret;
1357 ksdebug(change, "run-pre: Found all previously unmatched "
1358 "sections\n");
1359 return OK;
1360 } else if (ret != OK) {
1361 return ret;
1364 return finalize_change(change);
1368 * Finish preparing the change for insertion into the kernel.
1369 * Afterwards, the replacement code should be ready to run and the
1370 * ksplice_patches should all be ready for trampoline insertion.
1372 static abort_t finalize_change(struct ksplice_mod_change *change)
1374 abort_t ret;
1375 ret = apply_relocs(change, change->new_code.relocs,
1376 change->new_code.relocs_end);
1377 if (ret != OK)
1378 return ret;
1380 ret = finalize_patches(change);
1381 if (ret != OK)
1382 return ret;
1384 return OK;
1387 static abort_t finalize_patches(struct ksplice_mod_change *change)
1389 struct ksplice_patch *p;
1390 struct safety_record *rec;
1391 abort_t ret;
1393 for (p = change->patches; p < change->patches_end; p++) {
1394 bool found = false;
1395 list_for_each_entry(rec, &change->safety_records, list) {
1396 if (rec->addr <= p->oldaddr &&
1397 p->oldaddr < rec->addr + rec->size) {
1398 found = true;
1399 break;
1402 if (!found && p->type != KSPLICE_PATCH_EXPORT) {
1403 const struct ksplice_reloc *r = patch_reloc(change, p);
1404 if (r == NULL) {
1405 ksdebug(change, "A patch with no reloc at its "
1406 "oldaddr has no safety record\n");
1407 return NO_MATCH;
1409 			ksdebug(change, "No safety record for patch with "
1410 				"oldaddr %s+%lx\n", r->symbol->label,
1411 r->target_addend);
1412 return NO_MATCH;
1415 if (p->type == KSPLICE_PATCH_TEXT) {
1416 ret = prepare_trampoline(change, p);
1417 if (ret != OK)
1418 return ret;
1421 if (found && rec->addr + rec->size < p->oldaddr + p->size) {
1422 ksdebug(change, "Safety record %s is too short for "
1423 "patch\n", rec->label);
1424 return UNEXPECTED;
1427 if (p->type == KSPLICE_PATCH_TEXT) {
1428 if (p->repladdr == 0)
1429 p->repladdr = (unsigned long)ksplice_deleted;
1432 return OK;
1435 static abort_t map_trampoline_pages(struct update *update)
1437 struct ksplice_mod_change *change;
1438 list_for_each_entry(change, &update->changes, list) {
1439 struct ksplice_patch *p;
1440 for (p = change->patches; p < change->patches_end; p++) {
1441 p->vaddr = map_writable((void *)p->oldaddr, p->size);
1442 if (p->vaddr == NULL) {
1443 ksdebug(change,
1444 "Unable to map oldaddr read/write\n");
1445 unmap_trampoline_pages(update);
1446 return UNEXPECTED;
1450 return OK;
1453 static void unmap_trampoline_pages(struct update *update)
1455 struct ksplice_mod_change *change;
1456 list_for_each_entry(change, &update->changes, list) {
1457 struct ksplice_patch *p;
1458 for (p = change->patches; p < change->patches_end; p++) {
1459 vunmap((void *)((unsigned long)p->vaddr & PAGE_MASK));
1460 p->vaddr = NULL;
1466 * map_writable creates a shadow page mapping of the range
1467 * [addr, addr + len) so that we can write to code mapped read-only.
1469 * It is similar to a generalized version of x86's text_poke. But
1470 * because one cannot use vmalloc/vfree() inside stop_machine, we use
1471 * map_writable to map the pages before stop_machine, then use the
1472 * mapping inside stop_machine, and unmap the pages afterwards.
1474 static void *map_writable(void *addr, size_t len)
1476 void *vaddr;
1477 int nr_pages = DIV_ROUND_UP(offset_in_page(addr) + len, PAGE_SIZE);
1478 struct page **pages = kmalloc(nr_pages * sizeof(*pages), GFP_KERNEL);
1479 void *page_addr = (void *)((unsigned long)addr & PAGE_MASK);
1480 int i;
1482 if (pages == NULL)
1483 return NULL;
1485 for (i = 0; i < nr_pages; i++) {
1486 if (__module_address((unsigned long)page_addr) == NULL) {
1487 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) || !defined(CONFIG_X86_64)
1488 pages[i] = virt_to_page(page_addr);
1489 #else /* LINUX_VERSION_CODE < && CONFIG_X86_64 */
1490 /* e3ebadd95cb621e2c7436f3d3646447ac9d5c16d was after 2.6.21 */
1491 pages[i] =
1492 pfn_to_page(__pa_symbol(page_addr) >> PAGE_SHIFT);
1493 #endif /* LINUX_VERSION_CODE || !CONFIG_X86_64 */
1494 WARN_ON(!PageReserved(pages[i]));
1495 } else {
1496 			pages[i] = vmalloc_to_page(page_addr);
1498 if (pages[i] == NULL) {
1499 kfree(pages);
1500 return NULL;
1502 page_addr += PAGE_SIZE;
1504 vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
1505 kfree(pages);
1506 if (vaddr == NULL)
1507 return NULL;
1508 return vaddr + offset_in_page(addr);
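/* Editorial note: callers must write through the returned shadow
 * address rather than the original one. map_trampoline_pages() above
 * stores the result in p->vaddr so that trampolines can later be
 * memcpy()d into otherwise read-only kernel text.
 */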
1512 * Ksplice adds a dependency on any symbol address used to resolve
1513 * relocations in the new_code module.
1515 * Be careful to follow_trampolines so that we always depend on the
1516 * latest version of the target function, since that's the code that
1517 * will run if we call addr.
1519 static abort_t add_dependency_on_address(struct ksplice_mod_change *change,
1520 unsigned long addr)
1522 struct ksplice_mod_change *c;
1523 struct module *m =
1524 __module_text_address(follow_trampolines(change, addr));
1525 if (m == NULL)
1526 return OK;
1527 list_for_each_entry(c, &change->update->changes, list) {
1528 if (m == c->new_code_mod)
1529 return OK;
1531 if (use_module(change->new_code_mod, m) != 1)
1532 return MODULE_BUSY;
1533 return OK;
1536 static abort_t apply_relocs(struct ksplice_mod_change *change,
1537 const struct ksplice_reloc *relocs,
1538 const struct ksplice_reloc *relocs_end)
1540 const struct ksplice_reloc *r;
1541 for (r = relocs; r < relocs_end; r++) {
1542 abort_t ret = apply_reloc(change, r);
1543 if (ret != OK)
1544 return ret;
1546 return OK;
1549 static abort_t apply_reloc(struct ksplice_mod_change *change,
1550 const struct ksplice_reloc *r)
1552 switch (r->howto->type) {
1553 case KSPLICE_HOWTO_RELOC:
1554 case KSPLICE_HOWTO_RELOC_PATCH:
1555 return apply_howto_reloc(change, r);
1556 case KSPLICE_HOWTO_DATE:
1557 case KSPLICE_HOWTO_TIME:
1558 return apply_howto_date(change, r);
1559 default:
1560 ksdebug(change, "Unexpected howto type %d\n", r->howto->type);
1561 return UNEXPECTED;
1566 * Applies a relocation. Aborts if the symbol referenced in it has
1567 * not been uniquely resolved.
1569 static abort_t apply_howto_reloc(struct ksplice_mod_change *change,
1570 const struct ksplice_reloc *r)
1572 abort_t ret;
1573 int canary_ret;
1574 unsigned long sym_addr;
1575 LIST_HEAD(vals);
1577 canary_ret = contains_canary(change, r->blank_addr, r->howto);
1578 if (canary_ret < 0)
1579 return UNEXPECTED;
1580 if (canary_ret == 0) {
1581 ksdebug(change, "reloc: skipped %lx to %s+%lx (altinstr)\n",
1582 r->blank_addr, r->symbol->label, r->target_addend);
1583 return OK;
1586 #ifdef KSPLICE_STANDALONE
1587 if (!bootstrapped) {
1588 ret = add_system_map_candidates(change,
1589 change->new_code.system_map,
1590 change->new_code.system_map_end,
1591 r->symbol->label, &vals);
1592 if (ret != OK) {
1593 release_vals(&vals);
1594 return ret;
1597 #endif /* KSPLICE_STANDALONE */
1598 ret = lookup_symbol(change, r->symbol, &vals);
1599 if (ret != OK) {
1600 release_vals(&vals);
1601 return ret;
1604 * Relocations for the oldaddr fields of patches must have
1605 * been resolved via run-pre matching.
1607 if (!singular(&vals) || (r->symbol->vals != NULL &&
1608 r->howto->type == KSPLICE_HOWTO_RELOC_PATCH)) {
1609 release_vals(&vals);
1610 ksdebug(change, "Failed to find %s for reloc\n",
1611 r->symbol->label);
1612 return FAILED_TO_FIND;
1614 sym_addr = list_entry(vals.next, struct candidate_val, list)->val;
1615 release_vals(&vals);
1617 ret = write_reloc_value(change, r, r->blank_addr,
1618 r->howto->pcrel ? sym_addr - r->blank_addr :
1619 sym_addr);
1620 if (ret != OK)
1621 return ret;
1623 ksdebug(change, "reloc: %lx to %s+%lx (S=%lx ", r->blank_addr,
1624 r->symbol->label, r->target_addend, sym_addr);
1625 switch (r->howto->size) {
1626 case 1:
1627 ksdebug(change, "aft=%02x)\n", *(uint8_t *)r->blank_addr);
1628 break;
1629 case 2:
1630 ksdebug(change, "aft=%04x)\n", *(uint16_t *)r->blank_addr);
1631 break;
1632 case 4:
1633 ksdebug(change, "aft=%08x)\n", *(uint32_t *)r->blank_addr);
1634 break;
1635 #if BITS_PER_LONG >= 64
1636 case 8:
1637 ksdebug(change, "aft=%016llx)\n", *(uint64_t *)r->blank_addr);
1638 break;
1639 #endif /* BITS_PER_LONG */
1640 default:
1641 ksdebug(change, "Aborted. Invalid relocation size.\n");
1642 return UNEXPECTED;
1644 #ifdef KSPLICE_STANDALONE
1645 if (!bootstrapped)
1646 return OK;
1647 #endif /* KSPLICE_STANDALONE */
1650 * Create labelvals so that we can verify our choices in the
1651 * second round of run-pre matching that considers data sections.
1653 ret = create_labelval(change, r->symbol, sym_addr, VAL);
1654 if (ret != OK)
1655 return ret;
1657 return add_dependency_on_address(change, sym_addr);
1661 * Date relocations are created wherever __DATE__ or __TIME__ is used
1662 * in the kernel; we resolve them by simply copying in the date/time
1663 * obtained from run-pre matching the relevant compilation unit.
1665 static abort_t apply_howto_date(struct ksplice_mod_change *change,
1666 const struct ksplice_reloc *r)
1668 if (r->symbol->vals != NULL) {
1669 ksdebug(change, "Failed to find %s for date\n",
1670 r->symbol->label);
1671 return FAILED_TO_FIND;
1673 memcpy((unsigned char *)r->blank_addr,
1674 (const unsigned char *)r->symbol->value, r->howto->size);
1675 return OK;
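/* Editorial note: __DATE__ always expands to 11 characters
 * ("Mmm dd yyyy") and __TIME__ to 8 ("hh:mm:ss"), so howto->size here
 * is a fixed string length and the memcpy() above reproduces the
 * running kernel's compile timestamp byte for byte.
 */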
1679 * Given a relocation and its run address, compute the address of the
1680 * symbol the relocation referenced, and store it in *valp.
1682 static abort_t read_reloc_value(struct ksplice_mod_change *change,
1683 const struct ksplice_reloc *r,
1684 unsigned long addr, unsigned long *valp)
1686 unsigned char bytes[sizeof(long)];
1687 unsigned long val;
1688 const struct ksplice_reloc_howto *howto = r->howto;
1690 if (howto->size <= 0 || howto->size > sizeof(long)) {
1691 ksdebug(change, "Aborted. Invalid relocation size.\n");
1692 return UNEXPECTED;
1695 if (probe_kernel_read(bytes, (void *)addr, howto->size) == -EFAULT)
1696 return NO_MATCH;
1698 switch (howto->size) {
1699 case 1:
1700 val = *(uint8_t *)bytes;
1701 break;
1702 case 2:
1703 val = *(uint16_t *)bytes;
1704 break;
1705 case 4:
1706 val = *(uint32_t *)bytes;
1707 break;
1708 #if BITS_PER_LONG >= 64
1709 case 8:
1710 val = *(uint64_t *)bytes;
1711 break;
1712 #endif /* BITS_PER_LONG */
1713 default:
1714 ksdebug(change, "Aborted. Invalid relocation size.\n");
1715 return UNEXPECTED;
1718 val &= howto->dst_mask;
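	/* Editorial note: the test below sign-extends val when the addend
	 * is signed: dst_mask & ~(dst_mask >> 1) isolates the top bit of a
	 * contiguous mask, and OR-ing in its negation propagates that bit
	 * upward (e.g. 0x80 under an 8-bit mask becomes ...fffff80).
	 */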
1719 if (howto->signed_addend)
1720 val |= -(val & (howto->dst_mask & ~(howto->dst_mask >> 1)));
1721 val <<= howto->rightshift;
1722 val -= r->insn_addend + r->target_addend;
1723 *valp = val;
1724 return OK;
1728 * Given a relocation, the address of its storage unit, and the
1729 * address of the symbol the relocation references, write the
1730 * relocation's final value into the storage unit.
1732 static abort_t write_reloc_value(struct ksplice_mod_change *change,
1733 const struct ksplice_reloc *r,
1734 unsigned long addr, unsigned long sym_addr)
1736 unsigned long val = sym_addr + r->target_addend + r->insn_addend;
1737 const struct ksplice_reloc_howto *howto = r->howto;
1738 val >>= howto->rightshift;
1739 switch (howto->size) {
1740 case 1:
1741 *(uint8_t *)addr = (*(uint8_t *)addr & ~howto->dst_mask) |
1742 (val & howto->dst_mask);
1743 break;
1744 case 2:
1745 *(uint16_t *)addr = (*(uint16_t *)addr & ~howto->dst_mask) |
1746 (val & howto->dst_mask);
1747 break;
1748 case 4:
1749 *(uint32_t *)addr = (*(uint32_t *)addr & ~howto->dst_mask) |
1750 (val & howto->dst_mask);
1751 break;
1752 #if BITS_PER_LONG >= 64
1753 case 8:
1754 *(uint64_t *)addr = (*(uint64_t *)addr & ~howto->dst_mask) |
1755 (val & howto->dst_mask);
1756 break;
1757 #endif /* BITS_PER_LONG */
1758 default:
1759 ksdebug(change, "Aborted. Invalid relocation size.\n");
1760 return UNEXPECTED;
1763 if (read_reloc_value(change, r, addr, &val) != OK || val != sym_addr) {
1764 ksdebug(change, "Aborted. Relocation overflow.\n");
1765 return UNEXPECTED;
1768 return OK;
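/*
 * Illustrative sketch (not part of the original file): why
 * write_reloc_value reads its own result back.  This simplified check
 * ignores sign extension and addends; it shows only the mask-and-shift
 * round trip that detects relocation overflow.
 */
#if 0
static int example_reloc_fits(unsigned long val, unsigned long dst_mask,
			      unsigned int rightshift)
{
	unsigned long stored = (val >> rightshift) & dst_mask;
	/* e.g. val 0x1234 with an 8-bit dst_mask stores 0x34 and fails
	 * the round trip, so the write would be rejected */
	return (stored << rightshift) == val;
}
#endif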
1771 static abort_t create_module_list_entry(struct ksplice_mod_change *change,
1772 bool to_be_applied)
1774 struct ksplice_module_list_entry *entry =
1775 kmalloc(sizeof(*entry), GFP_KERNEL);
1776 if (entry == NULL)
1777 return OUT_OF_MEMORY;
1778 entry->new_code_mod_name =
1779 kstrdup(change->new_code_mod->name, GFP_KERNEL);
1780 if (entry->new_code_mod_name == NULL) {
1781 kfree(entry);
1782 return OUT_OF_MEMORY;
1784 entry->target_mod_name = kstrdup(change->target_name, GFP_KERNEL);
1785 if (entry->target_mod_name == NULL) {
1786 kfree(entry->new_code_mod_name);
1787 kfree(entry);
1788 return OUT_OF_MEMORY;
1790 /* The update's kid is guaranteed to outlast the module_list_entry */
1791 entry->kid = change->update->kid;
1792 entry->applied = to_be_applied;
1793 list_add(&entry->update_list, &change->update->ksplice_module_list);
1794 return OK;
1797 static void cleanup_module_list_entries(struct update *update)
1799 struct ksplice_module_list_entry *entry;
1800 list_for_each_entry(entry, &update->ksplice_module_list, update_list) {
1801 kfree(entry->target_mod_name);
1802 kfree(entry->new_code_mod_name);
1804 clear_list(&update->ksplice_module_list,
1805 struct ksplice_module_list_entry, update_list);
1808 /* Replacement address used for functions deleted by the patch */
1809 static void __attribute__((noreturn)) ksplice_deleted(void)
1811 printk(KERN_CRIT "Called a kernel function deleted by Ksplice!\n");
1812 BUG();
1813 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
1814 /* 91768d6c2bad0d2766a166f13f2f57e197de3458 was after 2.6.19 */
1815 for (;;);
1816 #endif
1819 /* Floodfill to run-pre match the sections within a change. */
1820 static abort_t match_change_sections(struct ksplice_mod_change *change,
1821 bool consider_data_sections)
1823 struct ksplice_section *sect;
1824 abort_t ret;
1825 int remaining = 0;
1826 bool progress;
1828 for (sect = change->old_code.sections;
1829 sect < change->old_code.sections_end; sect++) {
1830 if ((sect->flags & KSPLICE_SECTION_DATA) == 0 &&
1831 (sect->flags & KSPLICE_SECTION_STRING) == 0 &&
1832 (sect->flags & KSPLICE_SECTION_MATCHED) == 0)
1833 remaining++;
1836 while (remaining > 0) {
1837 progress = false;
1838 for (sect = change->old_code.sections;
1839 sect < change->old_code.sections_end; sect++) {
1840 if ((sect->flags & KSPLICE_SECTION_MATCHED) != 0)
1841 continue;
1842 if ((!consider_data_sections &&
1843 (sect->flags & KSPLICE_SECTION_DATA) != 0) ||
1844 (sect->flags & KSPLICE_SECTION_STRING) != 0)
1845 continue;
1846 ret = find_section(change, sect);
1847 if (ret == OK) {
1848 sect->flags |= KSPLICE_SECTION_MATCHED;
1849 if ((sect->flags & KSPLICE_SECTION_DATA) == 0)
1850 remaining--;
1851 progress = true;
1852 } else if (ret != NO_MATCH) {
1853 return ret;
1857 if (progress)
1858 continue;
1860 for (sect = change->old_code.sections;
1861 sect < change->old_code.sections_end; sect++) {
1862 if ((sect->flags & KSPLICE_SECTION_MATCHED) != 0 ||
1863 (sect->flags & KSPLICE_SECTION_STRING) != 0)
1864 continue;
1865 ksdebug(change, "run-pre: could not match %s "
1866 "section %s\n",
1867 (sect->flags & KSPLICE_SECTION_DATA) != 0 ?
1868 "data" :
1869 (sect->flags & KSPLICE_SECTION_RODATA) != 0 ?
1870 "rodata" : "text", sect->symbol->label);
1872 ksdebug(change, "Aborted. run-pre: could not match some "
1873 "sections.\n");
1874 return NO_MATCH;
1876 return OK;
1880 * Search for the section in the running kernel. Returns OK if and
1881 * only if it finds precisely one address in the kernel matching the
1882 * section.
1884 static abort_t find_section(struct ksplice_mod_change *change,
1885 struct ksplice_section *sect)
1887 int i;
1888 abort_t ret;
1889 unsigned long run_addr;
1890 LIST_HEAD(vals);
1891 struct candidate_val *v, *n;
1893 #ifdef KSPLICE_STANDALONE
1894 ret = add_system_map_candidates(change, change->old_code.system_map,
1895 change->old_code.system_map_end,
1896 sect->symbol->label, &vals);
1897 if (ret != OK) {
1898 release_vals(&vals);
1899 return ret;
1901 #endif /* KSPLICE_STANDALONE */
1902 ret = lookup_symbol(change, sect->symbol, &vals);
1903 if (ret != OK) {
1904 release_vals(&vals);
1905 return ret;
1908 ksdebug(change, "run-pre: starting sect search for %s\n",
1909 sect->symbol->label);
1911 list_for_each_entry_safe(v, n, &vals, list) {
1912 run_addr = v->val;
1914 yield();
1915 ret = try_addr(change, sect, run_addr, NULL, RUN_PRE_INITIAL);
1916 if (ret == NO_MATCH) {
1917 list_del(&v->list);
1918 kfree(v);
1919 } else if (ret != OK) {
1920 release_vals(&vals);
1921 return ret;
1925 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
1926 if (list_empty(&vals) && (sect->flags & KSPLICE_SECTION_DATA) == 0) {
1927 ret = brute_search_all(change, sect, &vals);
1928 if (ret != OK) {
1929 release_vals(&vals);
1930 return ret;
1933 * Make sure run-pre matching output is displayed if
1934 * brute_search succeeds.
1936 if (singular(&vals)) {
1937 run_addr = list_entry(vals.next, struct candidate_val,
1938 list)->val;
1939 ret = try_addr(change, sect, run_addr, NULL,
1940 RUN_PRE_INITIAL);
1941 if (ret != OK) {
1942 ksdebug(change, "run-pre: Debug run failed for "
1943 "sect %s:\n", sect->symbol->label);
1944 release_vals(&vals);
1945 return ret;
1949 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
1951 if (singular(&vals)) {
1952 LIST_HEAD(safety_records);
1953 run_addr = list_entry(vals.next, struct candidate_val,
1954 list)->val;
1955 ret = try_addr(change, sect, run_addr, &safety_records,
1956 RUN_PRE_FINAL);
1957 release_vals(&vals);
1958 if (ret != OK) {
1959 clear_list(&safety_records, struct safety_record, list);
1960 ksdebug(change, "run-pre: Final run failed for sect "
1961 "%s:\n", sect->symbol->label);
1962 } else {
1963 list_splice(&safety_records, &change->safety_records);
1965 return ret;
1966 } else if (!list_empty(&vals)) {
1967 struct candidate_val *val;
1968 ksdebug(change, "run-pre: multiple candidates for sect %s:\n",
1969 sect->symbol->label);
1970 i = 0;
1971 list_for_each_entry(val, &vals, list) {
1972 i++;
1973 ksdebug(change, "%lx\n", val->val);
1974 if (i > 5) {
1975 ksdebug(change, "...\n");
1976 break;
1979 release_vals(&vals);
1980 return NO_MATCH;
1982 release_vals(&vals);
1983 return NO_MATCH;
1987 * try_addr is the interface to run-pre matching. Its primary
1988 * purpose is to manage debugging information for run-pre matching;
1989 * all the hard work is in run_pre_cmp.
1991 static abort_t try_addr(struct ksplice_mod_change *change,
1992 struct ksplice_section *sect,
1993 unsigned long run_addr,
1994 struct list_head *safety_records,
1995 enum run_pre_mode mode)
1997 abort_t ret;
1998 const struct module *run_module = __module_address(run_addr);
2000 if (run_module == change->new_code_mod) {
2001 ksdebug(change, "run-pre: unexpected address %lx in new_code "
2002 "module %s for sect %s\n", run_addr, run_module->name,
2003 sect->symbol->label);
2004 return UNEXPECTED;
2006 if (!patches_module(run_module, change->target)) {
2007 ksdebug(change, "run-pre: ignoring address %lx in other module "
2008 "%s for sect %s\n", run_addr, run_module == NULL ?
2009 "vmlinux" : run_module->name, sect->symbol->label);
2010 return NO_MATCH;
2013 ret = create_labelval(change, sect->symbol, run_addr, TEMP);
2014 if (ret != OK)
2015 return ret;
2017 #ifdef CONFIG_FUNCTION_DATA_SECTIONS
2018 ret = run_pre_cmp(change, sect, run_addr, safety_records, mode);
2019 #else /* !CONFIG_FUNCTION_DATA_SECTIONS */
2020 if ((sect->flags & KSPLICE_SECTION_TEXT) != 0)
2021 ret = arch_run_pre_cmp(change, sect, run_addr, safety_records,
2022 mode);
2023 else
2024 ret = run_pre_cmp(change, sect, run_addr, safety_records, mode);
2025 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
2026 if (ret == NO_MATCH && mode != RUN_PRE_FINAL) {
2027 set_temp_labelvals(change, NOVAL);
2028 ksdebug(change, "run-pre: %s sect %s does not match (r_a=%lx "
2029 "p_a=%lx s=%lx)\n",
2030 (sect->flags & KSPLICE_SECTION_RODATA) != 0 ? "rodata" :
2031 (sect->flags & KSPLICE_SECTION_DATA) != 0 ? "data" :
2032 "text", sect->symbol->label, run_addr, sect->address,
2033 sect->size);
2034 ksdebug(change, "run-pre: ");
2035 if (change->update->debug >= 1) {
2036 #ifdef CONFIG_FUNCTION_DATA_SECTIONS
2037 ret = run_pre_cmp(change, sect, run_addr,
2038 safety_records, RUN_PRE_DEBUG);
2039 #else /* !CONFIG_FUNCTION_DATA_SECTIONS */
2040 if ((sect->flags & KSPLICE_SECTION_TEXT) != 0)
2041 ret = arch_run_pre_cmp(change, sect, run_addr,
2042 safety_records,
2043 RUN_PRE_DEBUG);
2044 else
2045 ret = run_pre_cmp(change, sect, run_addr,
2046 safety_records,
2047 RUN_PRE_DEBUG);
2048 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
2049 set_temp_labelvals(change, NOVAL);
2051 ksdebug(change, "\n");
2052 return ret;
2053 } else if (ret != OK) {
2054 set_temp_labelvals(change, NOVAL);
2055 return ret;
2058 if (mode != RUN_PRE_FINAL) {
2059 set_temp_labelvals(change, NOVAL);
2060 ksdebug(change, "run-pre: candidate for sect %s=%lx\n",
2061 sect->symbol->label, run_addr);
2062 return OK;
2065 set_temp_labelvals(change, VAL);
2066 ksdebug(change, "run-pre: found sect %s=%lx\n", sect->symbol->label,
2067 run_addr);
2068 return OK;
2072 * run_pre_cmp is the primary run-pre matching function; it determines
2073 * whether the given ksplice_section matches the code or data in the
2074 * running kernel starting at run_addr.
2076 * If run_pre_mode is RUN_PRE_FINAL, a safety record for the matched
2077 * section is created.
2079 * The run_pre_mode is also used to determine what debugging
2080 * information to display.
2082 static abort_t run_pre_cmp(struct ksplice_mod_change *change,
2083 const struct ksplice_section *sect,
2084 unsigned long run_addr,
2085 struct list_head *safety_records,
2086 enum run_pre_mode mode)
2088 int matched = 0;
2089 abort_t ret;
2090 const struct ksplice_reloc *r, *finger;
2091 const unsigned char *pre, *run, *pre_start, *run_start;
2092 unsigned char runval;
2094 pre_start = (const unsigned char *)sect->address;
2095 run_start = (const unsigned char *)run_addr;
2097 finger = init_reloc_search(change, sect);
2099 pre = pre_start;
2100 run = run_start;
2101 while (pre < pre_start + sect->size) {
2102 unsigned long offset = pre - pre_start;
2103 ret = lookup_reloc(change, &finger, (unsigned long)pre, &r);
2104 if (ret == OK) {
2105 ret = handle_reloc(change, sect, r, (unsigned long)run,
2106 mode);
2107 if (ret != OK) {
2108 if (mode == RUN_PRE_INITIAL)
2109 ksdebug(change, "reloc in sect does "
2110 "not match after %lx/%lx "
2111 "bytes\n", offset, sect->size);
2112 return ret;
2114 if (mode == RUN_PRE_DEBUG)
2115 print_bytes(change, run, r->howto->size, pre,
2116 r->howto->size);
2117 pre += r->howto->size;
2118 run += r->howto->size;
2119 finger++;
2120 continue;
2121 } else if (ret != NO_MATCH) {
2122 return ret;
2125 if ((sect->flags & KSPLICE_SECTION_TEXT) != 0) {
2126 ret = handle_paravirt(change, (unsigned long)pre,
2127 (unsigned long)run, &matched);
2128 if (ret != OK)
2129 return ret;
2130 if (matched != 0) {
2131 if (mode == RUN_PRE_DEBUG)
2132 print_bytes(change, run, matched, pre,
2133 matched);
2134 pre += matched;
2135 run += matched;
2136 continue;
2140 if (probe_kernel_read(&runval, (void *)run, 1) == -EFAULT) {
2141 if (mode == RUN_PRE_INITIAL)
2142 ksdebug(change, "sect unmapped after %lx/%lx "
2143 "bytes\n", offset, sect->size);
2144 return NO_MATCH;
2147 if (runval != *pre &&
2148 (sect->flags & KSPLICE_SECTION_DATA) == 0) {
2149 if (mode == RUN_PRE_INITIAL)
2150 ksdebug(change, "sect does not match after "
2151 "%lx/%lx bytes\n", offset, sect->size);
2152 if (mode == RUN_PRE_DEBUG) {
2153 print_bytes(change, run, 1, pre, 1);
2154 ksdebug(change, "[p_o=%lx] ! ", offset);
2155 print_bytes(change, run + 1, 2, pre + 1, 2);
2157 return NO_MATCH;
2159 if (mode == RUN_PRE_DEBUG)
2160 print_bytes(change, run, 1, pre, 1);
2161 pre++;
2162 run++;
2164 return create_safety_record(change, sect, safety_records, run_addr,
2165 run - run_start);
2168 static void print_bytes(struct ksplice_mod_change *change,
2169 const unsigned char *run, int runc,
2170 const unsigned char *pre, int prec)
2172 int o;
2173 int matched = min(runc, prec);
2174 for (o = 0; o < matched; o++) {
2175 if (run[o] == pre[o])
2176 ksdebug(change, "%02x ", run[o]);
2177 else
2178 ksdebug(change, "%02x/%02x ", run[o], pre[o]);
2180 for (o = matched; o < runc; o++)
2181 ksdebug(change, "%02x/ ", run[o]);
2182 for (o = matched; o < prec; o++)
2183 ksdebug(change, "/%02x ", pre[o]);
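/*
 * Illustrative sketch (not part of the original file): how print_bytes
 * renders a mismatch.  Matching bytes print as "xx ", differing bytes
 * as "run/pre ", and unpaired tail bytes one-sided.
 */
#if 0
static void example_print_bytes(struct ksplice_mod_change *change)
{
	const unsigned char run[] = { 0x55, 0x48 };
	const unsigned char pre[] = { 0x55, 0x49, 0x90 };
	/* emits "55 48/49 /90 " to the debug buffer */
	print_bytes(change, run, 2, pre, 3);
}
#endif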
2186 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
2187 static abort_t brute_search(struct ksplice_mod_change *change,
2188 struct ksplice_section *sect,
2189 const void *start, unsigned long len,
2190 struct list_head *vals)
2192 unsigned long addr;
2193 char run, pre;
2194 abort_t ret;
2196 for (addr = (unsigned long)start; addr < (unsigned long)start + len;
2197 addr++) {
2198 if (addr % 100000 == 0)
2199 yield();
2201 if (probe_kernel_read(&run, (void *)addr, 1) == -EFAULT)
2202 return OK;
2204 pre = *(const unsigned char *)(sect->address);
2206 if (run != pre)
2207 continue;
2209 ret = try_addr(change, sect, addr, NULL, RUN_PRE_INITIAL);
2210 if (ret == OK) {
2211 ret = add_candidate_val(change, vals, addr);
2212 if (ret != OK)
2213 return ret;
2214 } else if (ret != NO_MATCH) {
2215 return ret;
2219 return OK;
2222 static abort_t brute_search_all(struct ksplice_mod_change *change,
2223 struct ksplice_section *sect,
2224 struct list_head *vals)
2226 struct module *m;
2227 abort_t ret = OK;
2228 int saved_debug;
2230 ksdebug(change, "brute_search: searching for %s\n",
2231 sect->symbol->label);
2232 saved_debug = change->update->debug;
2233 change->update->debug = 0;
2235 list_for_each_entry(m, &modules, list) {
2236 if (!patches_module(m, change->target) ||
2237 m == change->new_code_mod)
2238 continue;
2239 ret = brute_search(change, sect, m->module_core, m->core_size,
2240 vals);
2241 if (ret != OK)
2242 goto out;
2243 ret = brute_search(change, sect, m->module_init, m->init_size,
2244 vals);
2245 if (ret != OK)
2246 goto out;
2249 ret = brute_search(change, sect, (const void *)init_mm.start_code,
2250 init_mm.end_code - init_mm.start_code, vals);
2252 out:
2253 change->update->debug = saved_debug;
2254 return ret;
2256 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
2258 struct range {
2259 unsigned long address;
2260 unsigned long size;
2263 static int reloc_bsearch_compare(const void *key, const void *elt)
2265 const struct range *range = key;
2266 const struct ksplice_reloc *r = elt;
2267 if (range->address + range->size <= r->blank_addr)
2268 return -1;
2269 if (range->address > r->blank_addr)
2270 return 1;
2271 return 0;
2274 static const struct ksplice_reloc *find_reloc(const struct ksplice_reloc *start,
2275 const struct ksplice_reloc *end,
2276 unsigned long address,
2277 unsigned long size)
2279 const struct ksplice_reloc *r;
2280 struct range range = { address, size };
2281 r = bsearch((void *)&range, start, end - start, sizeof(*r),
2282 reloc_bsearch_compare);
2283 if (r == NULL)
2284 return NULL;
2285 while (r > start && (r - 1)->blank_addr >= address)
2286 r--;
2287 return r;
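/*
 * Illustrative sketch (not part of the original file): the rewind loop
 * above matters because bsearch may land on any relocation inside the
 * queried range; find_reloc always returns the lowest-addressed one.
 * The addresses here are hypothetical.
 */
#if 0
static const struct ksplice_reloc *
example_first_reloc(const struct ksplice_reloc *start,
		    const struct ksplice_reloc *end)
{
	/* with relocs at blank_addr 0x10, 0x14 and 0x18, this returns
	 * the 0x10 entry even if bsearch initially lands on 0x14 */
	return find_reloc(start, end, 0x10, 0x10);
}
#endif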
2290 static const struct ksplice_reloc *
2291 init_reloc_search(struct ksplice_mod_change *change,
2292 const struct ksplice_section *sect)
2294 const struct ksplice_reloc *r;
2295 r = find_reloc(change->old_code.relocs, change->old_code.relocs_end,
2296 sect->address, sect->size);
2297 if (r == NULL)
2298 return change->old_code.relocs_end;
2299 return r;
2303 * lookup_reloc implements an amortized O(1) lookup for the next
2304 * old_code relocation. It must be called with a strictly increasing
2305 * sequence of addresses.
2307 * The fingerp is private data for lookup_reloc, and needs to have
2308 * been initialized as a pointer to the result of find_reloc (or
2309 * init_reloc_search).
2311 static abort_t lookup_reloc(struct ksplice_mod_change *change,
2312 const struct ksplice_reloc **fingerp,
2313 unsigned long addr,
2314 const struct ksplice_reloc **relocp)
2316 const struct ksplice_reloc *r = *fingerp;
2317 int canary_ret;
2319 while (r < change->old_code.relocs_end &&
2320 addr >= r->blank_addr + r->howto->size &&
2321 !(addr == r->blank_addr && r->howto->size == 0))
2322 r++;
2323 *fingerp = r;
2324 if (r == change->old_code.relocs_end)
2325 return NO_MATCH;
2326 if (addr < r->blank_addr)
2327 return NO_MATCH;
2328 *relocp = r;
2329 if (r->howto->type != KSPLICE_HOWTO_RELOC)
2330 return OK;
2332 canary_ret = contains_canary(change, r->blank_addr, r->howto);
2333 if (canary_ret < 0)
2334 return UNEXPECTED;
2335 if (canary_ret == 0) {
2336 ksdebug(change, "run-pre: reloc skipped at p_a=%lx to %s+%lx "
2337 "(altinstr)\n", r->blank_addr, r->symbol->label,
2338 r->target_addend);
2339 return NO_MATCH;
2341 if (addr != r->blank_addr) {
2342 ksdebug(change, "Invalid nonzero relocation offset\n");
2343 return UNEXPECTED;
2345 return OK;
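/*
 * Illustrative sketch (not part of the original file): the calling
 * pattern lookup_reloc expects, mirroring run_pre_cmp above.  Error
 * handling is elided; non-OK results are simply treated as "no
 * relocation at this address".
 */
#if 0
static void example_scan_relocs(struct ksplice_mod_change *change,
				const struct ksplice_section *sect)
{
	const struct ksplice_reloc *finger = init_reloc_search(change, sect);
	const struct ksplice_reloc *r;
	unsigned long addr = sect->address;

	while (addr < sect->address + sect->size) {
		if (lookup_reloc(change, &finger, addr, &r) == OK) {
			/* skip the relocation field and advance the
			 * finger past it, as run_pre_cmp does */
			addr += r->howto->size;
			finger++;
		} else {
			addr++;
		}
	}
}
#endif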
2348 static abort_t handle_reloc(struct ksplice_mod_change *change,
2349 const struct ksplice_section *sect,
2350 const struct ksplice_reloc *r,
2351 unsigned long run_addr, enum run_pre_mode mode)
2353 switch (r->howto->type) {
2354 case KSPLICE_HOWTO_RELOC:
2355 return handle_howto_reloc(change, sect, r, run_addr, mode);
2356 case KSPLICE_HOWTO_DATE:
2357 case KSPLICE_HOWTO_TIME:
2358 return handle_howto_date(change, sect, r, run_addr, mode);
2359 case KSPLICE_HOWTO_BUG:
2360 return handle_bug(change, r, run_addr);
2361 case KSPLICE_HOWTO_EXTABLE:
2362 return handle_extable(change, r, run_addr);
2363 default:
2364 ksdebug(change, "Unexpected howto type %d\n", r->howto->type);
2365 return UNEXPECTED;
2370 * For date/time relocations, we check that the sequence of bytes
2371 * matches the format of a date or time.
2373 static abort_t handle_howto_date(struct ksplice_mod_change *change,
2374 const struct ksplice_section *sect,
2375 const struct ksplice_reloc *r,
2376 unsigned long run_addr, enum run_pre_mode mode)
2378 abort_t ret;
2379 char *buf = kmalloc(r->howto->size, GFP_KERNEL);
2381 if (buf == NULL)
2382 return OUT_OF_MEMORY;
2383 if (probe_kernel_read(buf, (void *)run_addr, r->howto->size) == -EFAULT) {
2384 ret = NO_MATCH;
2385 goto out;
2388 switch (r->howto->type) {
2389 case KSPLICE_HOWTO_TIME:
2390 if (isdigit(buf[0]) && isdigit(buf[1]) && buf[2] == ':' &&
2391 isdigit(buf[3]) && isdigit(buf[4]) && buf[5] == ':' &&
2392 isdigit(buf[6]) && isdigit(buf[7]))
2393 ret = OK;
2394 else
2395 ret = NO_MATCH;
2396 break;
2397 case KSPLICE_HOWTO_DATE:
2398 if (isalpha(buf[0]) && isalpha(buf[1]) && isalpha(buf[2]) &&
2399 buf[3] == ' ' && (buf[4] == ' ' || isdigit(buf[4])) &&
2400 isdigit(buf[5]) && buf[6] == ' ' && isdigit(buf[7]) &&
2401 isdigit(buf[8]) && isdigit(buf[9]) && isdigit(buf[10]))
2402 ret = OK;
2403 else
2404 ret = NO_MATCH;
2405 break;
2406 default:
2407 ret = UNEXPECTED;
2409 if (ret == NO_MATCH && mode == RUN_PRE_INITIAL)
2410 ksdebug(change, "%s string: \"%.*s\" does not match format\n",
2411 r->howto->type == KSPLICE_HOWTO_DATE ? "date" : "time",
2412 r->howto->size, buf);
2414 if (ret != OK)
2415 goto out;
2416 ret = create_labelval(change, r->symbol, run_addr, TEMP);
2417 out:
2418 kfree(buf);
2419 return ret;
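/*
 * Illustrative sketch (not part of the original file): byte sequences
 * as judged by the format checks above.  __TIME__ expands to
 * "HH:MM:SS" (8 bytes) and __DATE__ to "Mmm dd yyyy" (11 bytes, with
 * the day space-padded).
 */
#if 0
static const char example_time[] = "13:37:00";		/* matches */
static const char example_date[] = "Jan  1 2009";	/* matches */
static const char example_bad[] = "9:05 am ";		/* NO_MATCH: buf[1]
							   is not a digit */
#endif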
2423 * Extract the value of a symbol used in a relocation in the pre code
2424 * during run-pre matching, giving an error if it conflicts with a
2425 * previously found value of that symbol
2427 static abort_t handle_howto_reloc(struct ksplice_mod_change *change,
2428 const struct ksplice_section *sect,
2429 const struct ksplice_reloc *r,
2430 unsigned long run_addr,
2431 enum run_pre_mode mode)
2433 struct ksplice_section *sym_sect = symbol_section(change, r->symbol);
2434 unsigned long offset = r->target_addend;
2435 unsigned long val;
2436 abort_t ret;
2438 ret = read_reloc_value(change, r, run_addr, &val);
2439 if (ret != OK)
2440 return ret;
2441 if (r->howto->pcrel)
2442 val += run_addr;
2444 #ifdef KSPLICE_STANDALONE
2445 /* The match_map is only used in KSPLICE_STANDALONE */
2446 if (sym_sect == NULL || sym_sect->match_map == NULL || offset == 0) {
2448 } else if (offset >= sym_sect->size) {
2449 ksdebug(change, "Out of range relocation: %s+%lx -> %s+%lx",
2450 sect->symbol->label, r->blank_addr - sect->address,
2451 r->symbol->label, offset);
2452 return NO_MATCH;
2453 } else if (sect == sym_sect && sect->match_map[offset] == NULL) {
2454 sym_sect->match_map[offset] =
2455 (const unsigned char *)r->symbol->value + offset;
2456 } else if (sect == sym_sect && (unsigned long)sect->match_map[offset] ==
2457 r->symbol->value + offset) {
2459 } else if (sect == sym_sect) {
2460 ksdebug(change, "Relocations to nonmatching locations within "
2461 "section %s: %lx does not match %lx\n",
2462 sect->symbol->label, offset,
2463 (unsigned long)sect->match_map[offset] -
2464 r->symbol->value);
2465 return NO_MATCH;
2466 } else if ((sym_sect->flags & KSPLICE_SECTION_MATCHED) == 0) {
2467 if (mode == RUN_PRE_INITIAL)
2468 ksdebug(change, "Delaying matching of %s due to reloc "
2469 "from to unmatching section: %s+%lx\n",
2470 sect->symbol->label, r->symbol->label, offset);
2471 return NO_MATCH;
2472 } else if (sym_sect->match_map[offset] == NULL) {
2473 if (mode == RUN_PRE_INITIAL)
2474 ksdebug(change, "Relocation not to instruction "
2475 "boundary: %s+%lx -> %s+%lx",
2476 sect->symbol->label, r->blank_addr -
2477 sect->address, r->symbol->label, offset);
2478 return NO_MATCH;
2479 } else if ((unsigned long)sym_sect->match_map[offset] !=
2480 r->symbol->value + offset) {
2481 if (mode == RUN_PRE_INITIAL)
2482 ksdebug(change, "Match map shift %s+%lx: %lx != %lx\n",
2483 r->symbol->label, offset,
2484 r->symbol->value + offset,
2485 (unsigned long)sym_sect->match_map[offset]);
2486 val += r->symbol->value + offset -
2487 (unsigned long)sym_sect->match_map[offset];
2489 #endif /* KSPLICE_STANDALONE */
2491 if (mode == RUN_PRE_INITIAL)
2492 ksdebug(change, "run-pre: reloc at r_a=%lx p_a=%lx to %s+%lx: "
2493 "found %s = %lx\n", run_addr, r->blank_addr,
2494 r->symbol->label, offset, r->symbol->label, val);
2496 if (contains_canary(change, run_addr, r->howto) != 0) {
2497 ksdebug(change, "Aborted. Unexpected canary in run code at %lx"
2498 "\n", run_addr);
2499 return UNEXPECTED;
2502 if ((sect->flags & KSPLICE_SECTION_DATA) != 0 &&
2503 sect->symbol == r->symbol)
2504 return OK;
2505 ret = create_labelval(change, r->symbol, val, TEMP);
2506 if (ret == NO_MATCH && mode == RUN_PRE_INITIAL)
2507 ksdebug(change, "run-pre: reloc at r_a=%lx p_a=%lx: labelval "
2508 "%s = %lx does not match expected %lx\n", run_addr,
2509 r->blank_addr, r->symbol->label, r->symbol->value, val);
2511 if (ret != OK)
2512 return ret;
2513 if (sym_sect != NULL && (sym_sect->flags & KSPLICE_SECTION_MATCHED) == 0
2514 && (sym_sect->flags & KSPLICE_SECTION_STRING) != 0) {
2515 if (mode == RUN_PRE_INITIAL)
2516 ksdebug(change, "Recursively comparing string section "
2517 "%s\n", sym_sect->symbol->label);
2518 else if (mode == RUN_PRE_DEBUG)
2519 ksdebug(change, "[str start] ");
2520 ret = run_pre_cmp(change, sym_sect, val, NULL, mode);
2521 if (mode == RUN_PRE_DEBUG)
2522 ksdebug(change, "[str end] ");
2523 if (ret == OK && mode == RUN_PRE_INITIAL)
2524 ksdebug(change, "Successfully matched string section %s"
2525 "\n", sym_sect->symbol->label);
2526 else if (mode == RUN_PRE_INITIAL)
2527 ksdebug(change, "Failed to match string section %s\n",
2528 sym_sect->symbol->label);
2530 return ret;
2533 static int symbol_section_bsearch_compare(const void *a, const void *b)
2535 const struct ksplice_symbol *sym = a;
2536 const struct ksplice_section *sect = b;
2537 return strcmp(sym->label, sect->symbol->label);
2540 static int compare_section_labels(const void *va, const void *vb)
2542 const struct ksplice_section *a = va, *b = vb;
2543 return strcmp(a->symbol->label, b->symbol->label);
2546 static struct ksplice_section *symbol_section(struct ksplice_mod_change *change,
2547 const struct ksplice_symbol *sym)
2549 return bsearch(sym, change->old_code.sections,
2550 change->old_code.sections_end -
2551 change->old_code.sections,
2552 sizeof(struct ksplice_section),
2553 symbol_section_bsearch_compare);
2556 /* Find the relocation for the oldaddr of a ksplice_patch */
2557 static const struct ksplice_reloc *
2558 patch_reloc(struct ksplice_mod_change *change,
2559 const struct ksplice_patch *p)
2561 unsigned long addr = (unsigned long)&p->oldaddr;
2562 const struct ksplice_reloc *r =
2563 find_reloc(change->new_code.relocs, change->new_code.relocs_end,
2564 addr, sizeof(addr));
2565 if (r == NULL || r->blank_addr < addr ||
2566 r->blank_addr >= addr + sizeof(addr))
2567 return NULL;
2568 return r;
2572 * Populates vals with the possible values for ksym from the various
2573 * sources Ksplice uses to resolve symbols
2575 static abort_t lookup_symbol(struct ksplice_mod_change *change,
2576 const struct ksplice_symbol *ksym,
2577 struct list_head *vals)
2579 abort_t ret;
2581 #ifdef KSPLICE_STANDALONE
2582 if (!bootstrapped)
2583 return OK;
2584 #endif /* KSPLICE_STANDALONE */
2586 if (ksym->vals == NULL) {
2587 release_vals(vals);
2588 ksdebug(change, "using detected sym %s=%lx\n", ksym->label,
2589 ksym->value);
2590 return add_candidate_val(change, vals, ksym->value);
2593 #ifdef CONFIG_MODULE_UNLOAD
2594 if (strcmp(ksym->label, "cleanup_module") == 0 && change->target != NULL
2595 && change->target->exit != NULL) {
2596 ret = add_candidate_val(change, vals,
2597 (unsigned long)change->target->exit);
2598 if (ret != OK)
2599 return ret;
2601 #endif
2603 if (ksym->name != NULL) {
2604 struct candidate_val *val;
2605 list_for_each_entry(val, ksym->vals, list) {
2606 ret = add_candidate_val(change, vals, val->val);
2607 if (ret != OK)
2608 return ret;
2611 ret = new_export_lookup(change, ksym->name, vals);
2612 if (ret != OK)
2613 return ret;
2616 return OK;
2619 #ifdef KSPLICE_STANDALONE
2620 static abort_t
2621 add_system_map_candidates(struct ksplice_mod_change *change,
2622 const struct ksplice_system_map *start,
2623 const struct ksplice_system_map *end,
2624 const char *label, struct list_head *vals)
2626 abort_t ret;
2627 long off;
2628 int i;
2629 const struct ksplice_system_map *smap;
2631 /* Some Fedora kernel releases have System.map files whose symbol
2632 * addresses disagree with the running kernel by a constant address
2633 * offset because of the CONFIG_PHYSICAL_START and CONFIG_PHYSICAL_ALIGN
2634 * values used to compile these kernels. This constant address offset
2635 * is always a multiple of 0x100000.
2637 * If we observe an offset that is NOT a multiple of 0x100000, then the
2638 * user provided us with an incorrect System.map file, and we should
2639 * abort.
2640 * If we observe an offset that is a multiple of 0x100000, then we can
2641 * adjust the System.map address values accordingly and proceed.
2643 off = (unsigned long)printk - change->map_printk;
2644 if (off & 0xfffff) {
2645 ksdebug(change,
2646 "Aborted. System.map does not match kernel.\n");
2647 return BAD_SYSTEM_MAP;
2650 smap = bsearch(label, start, end - start, sizeof(*smap),
2651 system_map_bsearch_compare);
2652 if (smap == NULL)
2653 return OK;
2655 for (i = 0; i < smap->nr_candidates; i++) {
2656 ret = add_candidate_val(change, vals,
2657 smap->candidates[i] + off);
2658 if (ret != OK)
2659 return ret;
2661 return OK;
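/*
 * Illustrative sketch (not part of the original file): the System.map
 * offset test above in isolation.  A kernel relocated via
 * CONFIG_PHYSICAL_START shifts every symbol by the same multiple of
 * 0x100000, so any other offset means the wrong System.map was given.
 */
#if 0
static int example_system_map_ok(unsigned long run_printk,
				 unsigned long map_printk)
{
	long off = run_printk - map_printk;
	/* off == 0x200000 is acceptable; off == 0x1234 is not */
	return (off & 0xfffff) == 0;
}
#endif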
2664 static int system_map_bsearch_compare(const void *key, const void *elt)
2666 const struct ksplice_system_map *map = elt;
2667 const char *label = key;
2668 return strcmp(label, map->label);
2670 #endif /* KSPLICE_STANDALONE */
2673 * An update could cause one module to export a symbol and at the same time
2674 * change another module to use that symbol. This violates the normal
2675 * situation where the changes can be handled independently.
2677 * new_export_lookup obtains symbol values from the changes to the
2678 * exported symbol table made by other changes.
2680 static abort_t new_export_lookup(struct ksplice_mod_change *ichange,
2681 const char *name, struct list_head *vals)
2683 struct ksplice_mod_change *change;
2684 struct ksplice_patch *p;
2685 list_for_each_entry(change, &ichange->update->changes, list) {
2686 for (p = change->patches; p < change->patches_end; p++) {
2687 const struct kernel_symbol *sym;
2688 const struct ksplice_reloc *r;
2689 if (p->type != KSPLICE_PATCH_EXPORT ||
2690 strcmp(name, *(const char **)p->contents) != 0)
2691 continue;
2693 /* Check that the p->oldaddr reloc has been resolved. */
2694 r = patch_reloc(change, p);
2695 if (r == NULL ||
2696 contains_canary(change, r->blank_addr,
2697 r->howto) != 0)
2698 continue;
2699 sym = (const struct kernel_symbol *)r->symbol->value;
2702 * Check that the sym->value reloc has been resolved,
2703 * if there is a Ksplice relocation there.
2705 r = find_reloc(change->new_code.relocs,
2706 change->new_code.relocs_end,
2707 (unsigned long)&sym->value,
2708 sizeof(sym->value));
2709 if (r != NULL &&
2710 r->blank_addr == (unsigned long)&sym->value &&
2711 contains_canary(change, r->blank_addr,
2712 r->howto) != 0)
2713 continue;
2714 return add_candidate_val(ichange, vals, sym->value);
2717 return OK;
2721 * When apply_patches is called, the update should be fully prepared.
2722 * apply_patches will try to actually insert trampolines for the
2723 * update.
2725 static abort_t apply_patches(struct update *update)
2727 int i;
2728 abort_t ret;
2729 struct ksplice_mod_change *change;
2731 ret = map_trampoline_pages(update);
2732 if (ret != OK)
2733 return ret;
2735 list_for_each_entry(change, &update->changes, list) {
2736 const typeof(int (*)(void)) *f;
2737 for (f = change->pre_apply; f < change->pre_apply_end; f++) {
2738 if ((*f)() != 0) {
2739 ret = CALL_FAILED;
2740 goto out;
2745 for (i = 0; i < 5; i++) {
2746 cleanup_conflicts(update);
2747 #ifdef KSPLICE_STANDALONE
2748 bust_spinlocks(1);
2749 #endif /* KSPLICE_STANDALONE */
2750 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
2751 ret = (__force abort_t)stop_machine(__apply_patches, update,
2752 NULL);
2753 #else /* LINUX_VERSION_CODE < */
2754 /* 9b1a4d38373a5581a4e01032a3ccdd94cd93477b was after 2.6.26 */
2755 ret = (__force abort_t)stop_machine_run(__apply_patches, update,
2756 NR_CPUS);
2757 #endif /* LINUX_VERSION_CODE */
2758 #ifdef KSPLICE_STANDALONE
2759 bust_spinlocks(0);
2760 #endif /* KSPLICE_STANDALONE */
2761 if (ret != CODE_BUSY)
2762 break;
2763 set_current_state(TASK_INTERRUPTIBLE);
2764 schedule_timeout(msecs_to_jiffies(1000));
2766 out:
2767 unmap_trampoline_pages(update);
2769 if (ret == CODE_BUSY) {
2770 print_conflicts(update);
2771 _ksdebug(update, "Aborted %s. stack check: to-be-replaced "
2772 "code is busy.\n", update->kid);
2773 } else if (ret == ALREADY_REVERSED) {
2774 _ksdebug(update, "Aborted %s. Ksplice update %s is already "
2775 "reversed.\n", update->kid, update->kid);
2778 if (ret != OK) {
2779 list_for_each_entry(change, &update->changes, list) {
2780 const typeof(void (*)(void)) *f;
2781 for (f = change->fail_apply; f < change->fail_apply_end;
2782 f++)
2783 (*f)();
2786 return ret;
2789 list_for_each_entry(change, &update->changes, list) {
2790 const typeof(void (*)(void)) *f;
2791 for (f = change->post_apply; f < change->post_apply_end; f++)
2792 (*f)();
2795 _ksdebug(update, "Atomic patch insertion for %s complete\n",
2796 update->kid);
2797 return OK;
2800 static abort_t reverse_patches(struct update *update)
2802 int i;
2803 abort_t ret;
2804 struct ksplice_mod_change *change;
2806 clear_debug_buf(update);
2807 ret = init_debug_buf(update);
2808 if (ret != OK)
2809 return ret;
2811 _ksdebug(update, "Preparing to reverse %s\n", update->kid);
2813 ret = map_trampoline_pages(update);
2814 if (ret != OK)
2815 return ret;
2817 list_for_each_entry(change, &update->changes, list) {
2818 const typeof(int (*)(void)) *f;
2819 for (f = change->pre_reverse; f < change->pre_reverse_end; f++) {
2820 if ((*f)() != 0) {
2821 ret = CALL_FAILED;
2822 goto out;
2827 for (i = 0; i < 5; i++) {
2828 cleanup_conflicts(update);
2829 #ifdef KSPLICE_STANDALONE
2830 bust_spinlocks(1);
2831 #endif /* KSPLICE_STANDALONE */
2832 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
2833 ret = (__force abort_t)stop_machine(__reverse_patches, update,
2834 NULL);
2835 #else /* LINUX_VERSION_CODE < */
2836 /* 9b1a4d38373a5581a4e01032a3ccdd94cd93477b was after 2.6.26 */
2837 ret = (__force abort_t)stop_machine_run(__reverse_patches,
2838 update, NR_CPUS);
2839 #endif /* LINUX_VERSION_CODE */
2840 #ifdef KSPLICE_STANDALONE
2841 bust_spinlocks(0);
2842 #endif /* KSPLICE_STANDALONE */
2843 if (ret != CODE_BUSY)
2844 break;
2845 set_current_state(TASK_INTERRUPTIBLE);
2846 schedule_timeout(msecs_to_jiffies(1000));
2848 out:
2849 unmap_trampoline_pages(update);
2851 if (ret == CODE_BUSY) {
2852 print_conflicts(update);
2853 _ksdebug(update, "Aborted %s. stack check: to-be-reversed "
2854 "code is busy.\n", update->kid);
2855 } else if (ret == MODULE_BUSY) {
2856 _ksdebug(update, "Update %s is in use by another module\n",
2857 update->kid);
2860 if (ret != OK) {
2861 list_for_each_entry(change, &update->changes, list) {
2862 const typeof(void (*)(void)) *f;
2863 for (f = change->fail_reverse;
2864 f < change->fail_reverse_end; f++)
2865 (*f)();
2868 return ret;
2871 list_for_each_entry(change, &update->changes, list) {
2872 const typeof(void (*)(void)) *f;
2873 for (f = change->post_reverse; f < change->post_reverse_end;
2874 f++)
2875 (*f)();
2878 list_for_each_entry(change, &update->changes, list)
2879 clear_list(&change->safety_records, struct safety_record, list);
2881 _ksdebug(update, "Atomic patch removal for %s complete\n", update->kid);
2882 return OK;
2885 /* Atomically insert the update; run from within stop_machine */
2886 static int __apply_patches(void *updateptr)
2888 struct update *update = updateptr;
2889 struct ksplice_mod_change *change;
2890 struct ksplice_module_list_entry *entry;
2891 struct ksplice_patch *p;
2892 abort_t ret;
2894 if (update->stage == STAGE_APPLIED)
2895 return (__force int)OK;
2897 if (update->stage != STAGE_PREPARING)
2898 return (__force int)UNEXPECTED;
2900 ret = check_each_task(update);
2901 if (ret != OK)
2902 return (__force int)ret;
2904 list_for_each_entry(change, &update->changes, list) {
2905 if (try_module_get(change->new_code_mod) != 1) {
2906 struct ksplice_mod_change *change1;
2907 list_for_each_entry(change1, &update->changes, list) {
2908 if (change1 == change)
2909 break;
2910 module_put(change1->new_code_mod);
2912 module_put(THIS_MODULE);
2913 return (__force int)UNEXPECTED;
2917 list_for_each_entry(change, &update->changes, list) {
2918 const typeof(int (*)(void)) *f;
2919 for (f = change->check_apply; f < change->check_apply_end; f++)
2920 if ((*f)() != 0)
2921 return (__force int)CALL_FAILED;
2924 /* Commit point: the update application will succeed. */
2926 update->stage = STAGE_APPLIED;
2927 #ifdef TAINT_KSPLICE
2928 add_taint(TAINT_KSPLICE);
2929 #endif
2931 list_for_each_entry(entry, &update->ksplice_module_list, update_list)
2932 list_add(&entry->list, &ksplice_modules);
2934 list_for_each_entry(change, &update->changes, list) {
2935 for (p = change->patches; p < change->patches_end; p++)
2936 insert_trampoline(p);
2939 list_for_each_entry(change, &update->changes, list) {
2940 const typeof(void (*)(void)) *f;
2941 for (f = change->apply; f < change->apply_end; f++)
2942 (*f)();
2945 return (__force int)OK;
2948 /* Atomically remove the update; run from within stop_machine */
2949 static int __reverse_patches(void *updateptr)
2951 struct update *update = updateptr;
2952 struct ksplice_mod_change *change;
2953 struct ksplice_module_list_entry *entry;
2954 const struct ksplice_patch *p;
2955 abort_t ret;
2957 if (update->stage != STAGE_APPLIED)
2958 return (__force int)OK;
2960 #ifdef CONFIG_MODULE_UNLOAD
2961 list_for_each_entry(change, &update->changes, list) {
2962 if (module_refcount(change->new_code_mod) != 1)
2963 return (__force int)MODULE_BUSY;
2965 #endif /* CONFIG_MODULE_UNLOAD */
2967 list_for_each_entry(entry, &update->ksplice_module_list, update_list) {
2968 if (!entry->applied &&
2969 find_module(entry->target_mod_name) != NULL)
2970 return (__force int)COLD_UPDATE_LOADED;
2973 ret = check_each_task(update);
2974 if (ret != OK)
2975 return (__force int)ret;
2977 list_for_each_entry(change, &update->changes, list) {
2978 for (p = change->patches; p < change->patches_end; p++) {
2979 ret = verify_trampoline(change, p);
2980 if (ret != OK)
2981 return (__force int)ret;
2985 list_for_each_entry(change, &update->changes, list) {
2986 const typeof(int (*)(void)) *f;
2987 for (f = change->check_reverse; f < change->check_reverse_end;
2988 f++) {
2989 if ((*f)() != 0)
2990 return (__force int)CALL_FAILED;
2994 /* Commit point: the update reversal will succeed. */
2996 update->stage = STAGE_REVERSED;
2998 list_for_each_entry(change, &update->changes, list)
2999 module_put(change->new_code_mod);
3001 list_for_each_entry(entry, &update->ksplice_module_list, update_list)
3002 list_del(&entry->list);
3004 list_for_each_entry(change, &update->changes, list) {
3005 const typeof(void (*)(void)) *f;
3006 for (f = change->reverse; f < change->reverse_end; f++)
3007 (*f)();
3010 list_for_each_entry(change, &update->changes, list) {
3011 for (p = change->patches; p < change->patches_end; p++)
3012 remove_trampoline(p);
3015 return (__force int)OK;
3019 * Check whether any thread's instruction pointer or any address of
3020 * its stack is contained in one of the safety_records associated with
3021 * the update.
3023 * check_each_task must be called from inside stop_machine, because it
3024 * does not take tasklist_lock (which cannot be held by anyone else
3025 * during stop_machine).
3027 static abort_t check_each_task(struct update *update)
3029 const struct task_struct *g, *p;
3030 abort_t status = OK, ret;
3031 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
3032 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
3033 read_lock(&tasklist_lock);
3034 #endif /* LINUX_VERSION_CODE */
3035 do_each_thread(g, p) {
3036 /* do_each_thread is a double loop! */
3037 ret = check_task(update, p, false);
3038 if (ret != OK) {
3039 check_task(update, p, true);
3040 status = ret;
3042 if (ret != OK && ret != CODE_BUSY)
3043 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
3044 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
3045 goto out;
3046 #else /* LINUX_VERSION_CODE < */
3047 return ret;
3048 #endif /* LINUX_VERSION_CODE */
3049 } while_each_thread(g, p);
3050 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
3051 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
3052 out:
3053 read_unlock(&tasklist_lock);
3054 #endif /* LINUX_VERSION_CODE */
3055 return status;
3058 static abort_t check_task(struct update *update,
3059 const struct task_struct *t, bool rerun)
3061 abort_t status, ret;
3062 struct conflict *conf = NULL;
3064 if (rerun) {
3065 conf = kmalloc(sizeof(*conf), GFP_ATOMIC);
3066 if (conf == NULL)
3067 return OUT_OF_MEMORY;
3068 conf->process_name = kstrdup(t->comm, GFP_ATOMIC);
3069 if (conf->process_name == NULL) {
3070 kfree(conf);
3071 return OUT_OF_MEMORY;
3073 conf->pid = t->pid;
3074 INIT_LIST_HEAD(&conf->stack);
3075 list_add(&conf->list, &update->conflicts);
3078 status = check_address(update, conf, KSPLICE_IP(t));
3079 if (t == current) {
3080 ret = check_stack(update, conf, task_thread_info(t),
3081 (unsigned long *)__builtin_frame_address(0));
3082 if (status == OK)
3083 status = ret;
3084 } else if (!task_curr(t)) {
3085 ret = check_stack(update, conf, task_thread_info(t),
3086 (unsigned long *)KSPLICE_SP(t));
3087 if (status == OK)
3088 status = ret;
3089 } else if (!is_stop_machine(t)) {
3090 status = UNEXPECTED_RUNNING_TASK;
3092 return status;
3095 static abort_t check_stack(struct update *update, struct conflict *conf,
3096 const struct thread_info *tinfo,
3097 const unsigned long *stack)
3099 abort_t status = OK, ret;
3100 unsigned long addr;
3102 while (valid_stack_ptr(tinfo, stack)) {
3103 addr = *stack++;
3104 ret = check_address(update, conf, addr);
3105 if (ret != OK)
3106 status = ret;
3108 return status;
3111 static abort_t check_address(struct update *update,
3112 struct conflict *conf, unsigned long addr)
3114 abort_t status = OK, ret;
3115 const struct safety_record *rec;
3116 struct ksplice_mod_change *change;
3117 struct conflict_addr *ca = NULL;
3119 if (conf != NULL) {
3120 ca = kmalloc(sizeof(*ca), GFP_ATOMIC);
3121 if (ca == NULL)
3122 return OUT_OF_MEMORY;
3123 ca->addr = addr;
3124 ca->has_conflict = false;
3125 ca->label = NULL;
3126 list_add(&ca->list, &conf->stack);
3129 list_for_each_entry(change, &update->changes, list) {
3130 unsigned long tramp_addr = follow_trampolines(change, addr);
3131 list_for_each_entry(rec, &change->safety_records, list) {
3132 ret = check_record(ca, rec, tramp_addr);
3133 if (ret != OK)
3134 status = ret;
3137 return status;
3140 static abort_t check_record(struct conflict_addr *ca,
3141 const struct safety_record *rec, unsigned long addr)
3143 if (addr >= rec->addr && addr < rec->addr + rec->size) {
3144 if (ca != NULL) {
3145 ca->label = rec->label;
3146 ca->has_conflict = true;
3148 return CODE_BUSY;
3150 return OK;
3153 /* Is the task one of the stop_machine tasks? */
3154 static bool is_stop_machine(const struct task_struct *t)
3156 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
3157 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
3158 const char *kstop_prefix = "kstop/";
3159 #else /* LINUX_VERSION_CODE < */
3160 /* c9583e55fa2b08a230c549bd1e3c0bde6c50d9cc was after 2.6.27 */
3161 const char *kstop_prefix = "kstop";
3162 #endif /* LINUX_VERSION_CODE */
3163 const char *num;
3164 if (!strstarts(t->comm, kstop_prefix))
3165 return false;
3166 num = t->comm + strlen(kstop_prefix);
3167 return num[strspn(num, "0123456789")] == '\0';
3168 #else /* LINUX_VERSION_CODE < */
3169 /* ffdb5976c47609c862917d4c186ecbb5706d2dda was after 2.6.26 */
3170 return strcmp(t->comm, "kstopmachine") == 0;
3171 #endif /* LINUX_VERSION_CODE */
3174 static void cleanup_conflicts(struct update *update)
3176 struct conflict *conf;
3177 list_for_each_entry(conf, &update->conflicts, list) {
3178 clear_list(&conf->stack, struct conflict_addr, list);
3179 kfree(conf->process_name);
3181 clear_list(&update->conflicts, struct conflict, list);
3184 static void print_conflicts(struct update *update)
3186 const struct conflict *conf;
3187 const struct conflict_addr *ca;
3188 list_for_each_entry(conf, &update->conflicts, list) {
3189 _ksdebug(update, "stack check: pid %d (%s):", conf->pid,
3190 conf->process_name);
3191 list_for_each_entry(ca, &conf->stack, list) {
3192 _ksdebug(update, " %lx", ca->addr);
3193 if (ca->has_conflict)
3194 _ksdebug(update, " [<-CONFLICT]");
3196 _ksdebug(update, "\n");
3200 static void insert_trampoline(struct ksplice_patch *p)
3202 mm_segment_t old_fs = get_fs();
3203 set_fs(KERNEL_DS);
3204 memcpy(p->saved, p->vaddr, p->size);
3205 memcpy(p->vaddr, p->contents, p->size);
3206 flush_icache_range(p->oldaddr, p->oldaddr + p->size);
3207 set_fs(old_fs);
3210 static abort_t verify_trampoline(struct ksplice_mod_change *change,
3211 const struct ksplice_patch *p)
3213 if (memcmp(p->vaddr, p->contents, p->size) != 0) {
3214 ksdebug(change, "Aborted. Trampoline at %lx has been "
3215 "overwritten.\n", p->oldaddr);
3216 return CODE_BUSY;
3218 return OK;
3221 static void remove_trampoline(const struct ksplice_patch *p)
3223 mm_segment_t old_fs = get_fs();
3224 set_fs(KERNEL_DS);
3225 memcpy(p->vaddr, p->saved, p->size);
3226 flush_icache_range(p->oldaddr, p->oldaddr + p->size);
3227 set_fs(old_fs);
3230 /* Returns NO_MATCH if there's already a labelval with a different value */
3231 static abort_t create_labelval(struct ksplice_mod_change *change,
3232 struct ksplice_symbol *ksym,
3233 unsigned long val, int status)
3235 val = follow_trampolines(change, val);
3236 if (ksym->vals == NULL)
3237 return ksym->value == val ? OK : NO_MATCH;
3239 ksym->value = val;
3240 if (status == TEMP) {
3241 struct labelval *lv = kmalloc(sizeof(*lv), GFP_KERNEL);
3242 if (lv == NULL)
3243 return OUT_OF_MEMORY;
3244 lv->symbol = ksym;
3245 lv->saved_vals = ksym->vals;
3246 list_add(&lv->list, &change->temp_labelvals);
3248 ksym->vals = NULL;
3249 return OK;
3253 * Creates a new safety_record for an old_code section based on its
3254 * ksplice_section and run-pre matching information.
3256 static abort_t create_safety_record(struct ksplice_mod_change *change,
3257 const struct ksplice_section *sect,
3258 struct list_head *record_list,
3259 unsigned long run_addr,
3260 unsigned long run_size)
3262 struct safety_record *rec;
3263 struct ksplice_patch *p;
3265 if (record_list == NULL)
3266 return OK;
3268 for (p = change->patches; p < change->patches_end; p++) {
3269 const struct ksplice_reloc *r = patch_reloc(change, p);
3270 if (strcmp(sect->symbol->label, r->symbol->label) == 0)
3271 break;
3273 if (p >= change->patches_end)
3274 return OK;
3276 rec = kmalloc(sizeof(*rec), GFP_KERNEL);
3277 if (rec == NULL)
3278 return OUT_OF_MEMORY;
3280 * The old_code might be unloaded by the time we check whether the
3281 * patches can be reversed, so we need to kstrdup the label here.
3283 rec->label = kstrdup(sect->symbol->label, GFP_KERNEL);
3284 if (rec->label == NULL) {
3285 kfree(rec);
3286 return OUT_OF_MEMORY;
3288 rec->addr = run_addr;
3289 rec->size = run_size;
3291 list_add(&rec->list, record_list);
3292 return OK;
3295 static abort_t add_candidate_val(struct ksplice_mod_change *change,
3296 struct list_head *vals, unsigned long val)
3298 struct candidate_val *tmp, *new;
3301 * Careful: follow trampolines before comparing values so that we do
3302 * not mistake the obsolete function for another copy of the function.
3304 val = follow_trampolines(change, val);
3306 list_for_each_entry(tmp, vals, list) {
3307 if (tmp->val == val)
3308 return OK;
3310 new = kmalloc(sizeof(*new), GFP_KERNEL);
3311 if (new == NULL)
3312 return OUT_OF_MEMORY;
3313 new->val = val;
3314 list_add(&new->list, vals);
3315 return OK;
3318 static void release_vals(struct list_head *vals)
3320 clear_list(vals, struct candidate_val, list);
3324 * The temp_labelvals list is used to cache those temporary labelvals
3325 * that have been created to cross-check the symbol values obtained
3326 * from different relocations within a single section being matched.
3328 * If status is VAL, commit the temp_labelvals as final values.
3330 * If status is NOVAL, restore the list of possible values to the
3331 * ksplice_symbol, so that it no longer has a known value.
3333 static void set_temp_labelvals(struct ksplice_mod_change *change, int status)
3335 struct labelval *lv, *n;
3336 list_for_each_entry_safe(lv, n, &change->temp_labelvals, list) {
3337 if (status == NOVAL) {
3338 lv->symbol->vals = lv->saved_vals;
3339 } else {
3340 release_vals(lv->saved_vals);
3341 kfree(lv->saved_vals);
3343 list_del(&lv->list);
3344 kfree(lv);
3348 /* Is there a Ksplice canary with given howto at blank_addr? */
3349 static int contains_canary(struct ksplice_mod_change *change,
3350 unsigned long blank_addr,
3351 const struct ksplice_reloc_howto *howto)
3353 switch (howto->size) {
3354 case 1:
3355 return (*(uint8_t *)blank_addr & howto->dst_mask) ==
3356 (KSPLICE_CANARY & howto->dst_mask);
3357 case 2:
3358 return (*(uint16_t *)blank_addr & howto->dst_mask) ==
3359 (KSPLICE_CANARY & howto->dst_mask);
3360 case 4:
3361 return (*(uint32_t *)blank_addr & howto->dst_mask) ==
3362 (KSPLICE_CANARY & howto->dst_mask);
3363 #if BITS_PER_LONG >= 64
3364 case 8:
3365 return (*(uint64_t *)blank_addr & howto->dst_mask) ==
3366 (KSPLICE_CANARY & howto->dst_mask);
3367 #endif /* BITS_PER_LONG */
3368 default:
3369 ksdebug(change, "Aborted. Invalid relocation size.\n");
3370 return -1;
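/*
 * Illustrative sketch (not part of the original file): the 4-byte case
 * of the canary test above.  A field still holding the canary has not
 * yet been resolved by the matching code (or by altinstructions).
 */
#if 0
static int example_canary_hit(uint32_t stored, uint32_t dst_mask)
{
	return (stored & dst_mask) == (KSPLICE_CANARY & dst_mask);
}
#endif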
3375 * Compute the address of the code you would actually run if you were
3376 * to call the function at addr (i.e., follow the sequence of jumps
3377 * starting at addr)
3379 static unsigned long follow_trampolines(struct ksplice_mod_change *change,
3380 unsigned long addr)
3382 unsigned long new_addr;
3383 struct module *m;
3385 while (1) {
3386 #ifdef KSPLICE_STANDALONE
3387 if (!bootstrapped)
3388 return addr;
3389 #endif /* KSPLICE_STANDALONE */
3390 if (!__kernel_text_address(addr) ||
3391 trampoline_target(change, addr, &new_addr) != OK)
3392 return addr;
3393 m = __module_text_address(new_addr);
3394 if (m == NULL || m == change->target ||
3395 !strstarts(m->name, "ksplice"))
3396 return addr;
3397 addr = new_addr;
3401 /* Does module a patch module b? */
3402 static bool patches_module(const struct module *a, const struct module *b)
3404 #ifdef KSPLICE_NO_KERNEL_SUPPORT
3405 const char *name;
3406 const char *modname = b == NULL ? "vmlinux" : b->name;
3407 if (a == b)
3408 return true;
3409 if (a == NULL || !strstarts(a->name, "ksplice_"))
3410 return false;
3411 name = a->name + strlen("ksplice_");
3412 name += strcspn(name, "_");
3413 if (name[0] != '_')
3414 return false;
3415 name++;
3416 return strstarts(name, modname) &&
3417 strcmp(name + strlen(modname), "_new") == 0;
3418 #else /* !KSPLICE_NO_KERNEL_SUPPORT */
3419 struct ksplice_module_list_entry *entry;
3420 if (a == b)
3421 return true;
3422 list_for_each_entry(entry, &ksplice_modules, list) {
3423 if (strcmp(entry->target_mod_name, b->name) == 0 &&
3424 strcmp(entry->new_code_mod_name, a->name) == 0)
3425 return true;
3427 return false;
3428 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
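/*
 * Illustrative sketch (not part of the original file): a standalone
 * re-implementation of the name-based fallback above.  The update kid
 * "8f4q" in the example is hypothetical.
 */
#if 0
static bool example_patches_module(const char *a_name, const char *modname)
{
	const char *name;
	if (strncmp(a_name, "ksplice_", strlen("ksplice_")) != 0)
		return false;
	name = a_name + strlen("ksplice_");
	name += strcspn(name, "_");		/* skip the update kid */
	if (name[0] != '_')
		return false;
	name++;
	/* example_patches_module("ksplice_8f4q_isdn_new", "isdn") is true */
	return strncmp(name, modname, strlen(modname)) == 0 &&
	    strcmp(name + strlen(modname), "_new") == 0;
}
#endif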
3431 #ifdef KSPLICE_NO_KERNEL_SUPPORT
3432 static bool strstarts(const char *str, const char *prefix)
3434 return strncmp(str, prefix, strlen(prefix)) == 0;
3436 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
3438 static bool singular(struct list_head *list)
3440 return !list_empty(list) && list->next->next == list;
3443 static void *bsearch(const void *key, const void *base, size_t n,
3444 size_t size, int (*cmp)(const void *key, const void *elt))
3446 int start = 0, end = n - 1, mid, result;
3447 if (n == 0)
3448 return NULL;
3449 while (start <= end) {
3450 mid = (start + end) / 2;
3451 result = cmp(key, base + mid * size);
3452 if (result < 0)
3453 end = mid - 1;
3454 else if (result > 0)
3455 start = mid + 1;
3456 else
3457 return (void *)base + mid * size;
3459 return NULL;
3462 static int compare_relocs(const void *a, const void *b)
3464 const struct ksplice_reloc *ra = a, *rb = b;
3465 if (ra->blank_addr > rb->blank_addr)
3466 return 1;
3467 else if (ra->blank_addr < rb->blank_addr)
3468 return -1;
3469 else
3470 return ra->howto->size - rb->howto->size;
3473 #ifdef KSPLICE_STANDALONE
3474 static int compare_system_map(const void *a, const void *b)
3476 const struct ksplice_system_map *sa = a, *sb = b;
3477 return strcmp(sa->label, sb->label);
3479 #endif /* KSPLICE_STANDALONE */
3481 #ifdef CONFIG_DEBUG_FS
3482 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
3483 /* Old kernels don't have debugfs_create_blob */
3484 static ssize_t read_file_blob(struct file *file, char __user *user_buf,
3485 size_t count, loff_t *ppos)
3487 struct debugfs_blob_wrapper *blob = file->private_data;
3488 return simple_read_from_buffer(user_buf, count, ppos, blob->data,
3489 blob->size);
3492 static int blob_open(struct inode *inode, struct file *file)
3494 if (inode->i_private)
3495 file->private_data = inode->i_private;
3496 return 0;
3499 static struct file_operations fops_blob = {
3500 .read = read_file_blob,
3501 .open = blob_open,
3504 static struct dentry *debugfs_create_blob(const char *name, mode_t mode,
3505 struct dentry *parent,
3506 struct debugfs_blob_wrapper *blob)
3508 return debugfs_create_file(name, mode, parent, blob, &fops_blob);
3510 #endif /* LINUX_VERSION_CODE */
3512 static abort_t init_debug_buf(struct update *update)
3514 update->debug_blob.size = 0;
3515 update->debug_blob.data = NULL;
3516 update->debugfs_dentry =
3517 debugfs_create_blob(update->name, S_IFREG | S_IRUSR, NULL,
3518 &update->debug_blob);
3519 if (update->debugfs_dentry == NULL)
3520 return OUT_OF_MEMORY;
3521 return OK;
3524 static void clear_debug_buf(struct update *update)
3526 if (update->debugfs_dentry == NULL)
3527 return;
3528 debugfs_remove(update->debugfs_dentry);
3529 update->debugfs_dentry = NULL;
3530 update->debug_blob.size = 0;
3531 vfree(update->debug_blob.data);
3532 update->debug_blob.data = NULL;
3535 static int _ksdebug(struct update *update, const char *fmt, ...)
3537 va_list args;
3538 unsigned long size, old_size, new_size;
3540 if (update->debug == 0)
3541 return 0;
3543 /* size includes the trailing '\0' */
3544 va_start(args, fmt);
3545 size = 1 + vsnprintf(update->debug_blob.data, 0, fmt, args);
3546 va_end(args);
3547 old_size = update->debug_blob.size == 0 ? 0 :
3548 max(PAGE_SIZE, roundup_pow_of_two(update->debug_blob.size));
3549 new_size = update->debug_blob.size + size == 0 ? 0 :
3550 max(PAGE_SIZE, roundup_pow_of_two(update->debug_blob.size + size));
3551 if (new_size > old_size) {
3552 char *buf = vmalloc(new_size);
3553 if (buf == NULL)
3554 return -ENOMEM;
3555 memcpy(buf, update->debug_blob.data, update->debug_blob.size);
3556 vfree(update->debug_blob.data);
3557 update->debug_blob.data = buf;
3559 va_start(args, fmt);
3560 update->debug_blob.size += vsnprintf(update->debug_blob.data +
3561 update->debug_blob.size,
3562 size, fmt, args);
3563 va_end(args);
3564 return 0;
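/*
 * Illustrative sketch (not part of the original file): the buffer
 * sizing policy used by _ksdebug above.  Capacity grows in powers of
 * two (never below PAGE_SIZE), so the vmalloc-and-copy step runs only
 * O(log n) times over the life of the buffer.
 */
#if 0
static unsigned long example_debug_capacity(unsigned long size)
{
	return size == 0 ? 0 : max(PAGE_SIZE, roundup_pow_of_two(size));
}
#endif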
3566 #else /* CONFIG_DEBUG_FS */
3567 static abort_t init_debug_buf(struct update *update)
3569 return OK;
3572 static void clear_debug_buf(struct update *update)
3574 return;
3577 static int _ksdebug(struct update *update, const char *fmt, ...)
3579 va_list args;
3581 if (update->debug == 0)
3582 return 0;
3584 if (!update->debug_continue_line)
3585 printk(KERN_DEBUG "ksplice: ");
3587 va_start(args, fmt);
3588 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
3589 vprintk(fmt, args);
3590 #else /* LINUX_VERSION_CODE < */
3591 /* 683b229286b429244f35726b3c18caec429233bd was after 2.6.8 */
3593 char *buf = kvasprintf(GFP_KERNEL, fmt, args);
3594 printk("%s", buf);
3595 kfree(buf);
3597 #endif /* LINUX_VERSION_CODE */
3598 va_end(args);
3600 update->debug_continue_line =
3601 fmt[0] == '\0' || fmt[strlen(fmt) - 1] != '\n';
3602 return 0;
3604 #endif /* CONFIG_DEBUG_FS */
3606 #ifdef KSPLICE_NO_KERNEL_SUPPORT
3607 #ifdef CONFIG_KALLSYMS
3608 static int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
3609 struct module *, unsigned long),
3610 void *data)
3612 char namebuf[KSYM_NAME_LEN];
3613 unsigned long i;
3614 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3615 unsigned int off;
3616 #endif /* LINUX_VERSION_CODE */
3617 int ret;
3619 /* kallsyms compression was added by 5648d78927ca65e74aadc88a2b1d6431e55e78ec
3620 * 2.6.10 was the first release after this commit
3622 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3623 for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
3624 off = kallsyms_expand_symbol(off, namebuf);
3625 ret = fn(data, namebuf, NULL, kallsyms_addresses[i]);
3626 if (ret != 0)
3627 return ret;
3629 #else /* LINUX_VERSION_CODE < */
3630 char *knames;
3632 for (i = 0, knames = kallsyms_names; i < kallsyms_num_syms; i++) {
3633 unsigned prefix = *knames++;
3635 strlcpy(namebuf + prefix, knames, KSYM_NAME_LEN - prefix);
3637 ret = fn(data, namebuf, NULL, kallsyms_addresses[i]);
3638 if (ret != 0)
3639 return ret;
3641 knames += strlen(knames) + 1;
3643 #endif /* LINUX_VERSION_CODE */
3644 return module_kallsyms_on_each_symbol(fn, data);
3647 /* kallsyms compression was added by 5648d78927ca65e74aadc88a2b1d6431e55e78ec
3648 * 2.6.10 was the first release after this commit
3650 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3651 extern u8 kallsyms_token_table[];
3652 extern u16 kallsyms_token_index[];
3654 static unsigned int kallsyms_expand_symbol(unsigned int off, char *result)
3656 long len, skipped_first = 0;
3657 const u8 *tptr, *data;
3659 data = &kallsyms_names[off];
3660 len = *data;
3661 data++;
3663 off += len + 1;
3665 while (len) {
3666 tptr = &kallsyms_token_table[kallsyms_token_index[*data]];
3667 data++;
3668 len--;
3670 while (*tptr) {
3671 if (skipped_first) {
3672 *result = *tptr;
3673 result++;
3674 } else
3675 skipped_first = 1;
3676 tptr++;
3680 *result = '\0';
3682 return off;
3684 #endif /* LINUX_VERSION_CODE */
3686 static int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
3687 struct module *,
3688 unsigned long),
3689 void *data)
3691 struct module *mod;
3692 unsigned int i;
3693 int ret;
3695 list_for_each_entry(mod, &modules, list) {
3696 for (i = 0; i < mod->num_symtab; i++) {
3697 ret = fn(data, mod->strtab + mod->symtab[i].st_name,
3698 mod, mod->symtab[i].st_value);
3699 if (ret != 0)
3700 return ret;
3703 return 0;
3705 #endif /* CONFIG_KALLSYMS */
static struct module *find_module(const char *name)
{
	struct module *mod;

	list_for_each_entry(mod, &modules, list) {
		if (strcmp(mod->name, name) == 0)
			return mod;
	}
	return NULL;
}
#ifdef CONFIG_MODULE_UNLOAD
struct module_use {
	struct list_head list;
	struct module *module_which_uses;
};

/* I'm not yet certain whether we need the strong form of this. */
static inline int strong_try_module_get(struct module *mod)
{
	if (mod && mod->state != MODULE_STATE_LIVE)
		return -EBUSY;
	if (try_module_get(mod))
		return 0;
	return -ENOENT;
}

/* Does a already use b? */
static int already_uses(struct module *a, struct module *b)
{
	struct module_use *use;
	list_for_each_entry(use, &b->modules_which_use_me, list) {
		if (use->module_which_uses == a)
			return 1;
	}
	return 0;
}

/* Make it so module a uses b.  Must be holding module_mutex.
 * Returns 1 on success (or if the dependency already exists) and 0 on
 * failure, unlike the errno convention of strong_try_module_get. */
static int use_module(struct module *a, struct module *b)
{
	struct module_use *use;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
	/* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
	int no_warn;
#endif /* LINUX_VERSION_CODE */
	if (b == NULL || already_uses(a, b))
		return 1;

	if (strong_try_module_get(b) < 0)
		return 0;

	use = kmalloc(sizeof(*use), GFP_ATOMIC);
	if (!use) {
		module_put(b);
		return 0;
	}
	use->module_which_uses = a;
	list_add(&use->list, &b->modules_which_use_me);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
	/* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
	no_warn = sysfs_create_link(b->holders_dir, &a->mkobj.kobj, a->name);
#endif /* LINUX_VERSION_CODE */
	return 1;
}
#else /* CONFIG_MODULE_UNLOAD */
static int use_module(struct module *a, struct module *b)
{
	return 1;
}
#endif /* CONFIG_MODULE_UNLOAD */
#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
#else
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif
static bool each_symbol_in_section(const struct symsearch *arr,
				   unsigned int arrsize,
				   struct module *owner,
				   bool (*fn)(const struct symsearch *syms,
					      struct module *owner,
					      unsigned int symnum, void *data),
				   void *data)
{
	unsigned int i, j;

	for (j = 0; j < arrsize; j++) {
		for (i = 0; i < arr[j].stop - arr[j].start; i++)
			if (fn(&arr[j], owner, i, data))
				return true;
	}

	return false;
}
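/* each_symbol() below mirrors the kernel's own exported-symbol walk: it
 * first scans the kernel's built-in export tables (__ksymtab and friends),
 * then the export tables of every loaded module, invoking fn on each entry
 * until fn returns true. */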
/* Returns true as soon as fn returns true, otherwise false. */
static bool each_symbol(bool (*fn)(const struct symsearch *arr,
				   struct module *owner,
				   unsigned int symnum, void *data),
			void *data)
{
	struct module *mod;
	const struct symsearch arr[] = {
		{ __start___ksymtab, __stop___ksymtab, __start___kcrctab,
		  NOT_GPL_ONLY, false },
		{ __start___ksymtab_gpl, __stop___ksymtab_gpl,
		  __start___kcrctab_gpl,
		  GPL_ONLY, false },
#ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
		{ __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
		  __start___kcrctab_gpl_future,
		  WILL_BE_GPL_ONLY, false },
#endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
#ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
		{ __start___ksymtab_unused, __stop___ksymtab_unused,
		  __start___kcrctab_unused,
		  NOT_GPL_ONLY, true },
		{ __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
		  __start___kcrctab_unused_gpl,
		  GPL_ONLY, true },
#endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
	};

	if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
		return true;

	list_for_each_entry(mod, &modules, list) {
		struct symsearch module_arr[] = {
			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
			  NOT_GPL_ONLY, false },
			{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
			  mod->gpl_crcs,
			  GPL_ONLY, false },
#ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
			{ mod->gpl_future_syms,
			  mod->gpl_future_syms + mod->num_gpl_future_syms,
			  mod->gpl_future_crcs,
			  WILL_BE_GPL_ONLY, false },
#endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
#ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
			{ mod->unused_syms,
			  mod->unused_syms + mod->num_unused_syms,
			  mod->unused_crcs,
			  NOT_GPL_ONLY, true },
			{ mod->unused_gpl_syms,
			  mod->unused_gpl_syms + mod->num_unused_gpl_syms,
			  mod->unused_gpl_crcs,
			  GPL_ONLY, true },
#endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
		};

		if (each_symbol_in_section(module_arr, ARRAY_SIZE(module_arr),
					   mod, fn, data))
			return true;
	}
	return false;
}
struct find_symbol_arg {
	/* Input */
	const char *name;
	bool gplok;
	bool warn;

	/* Output */
	struct module *owner;
	const unsigned long *crc;
	const struct kernel_symbol *sym;
};
static bool find_symbol_in_section(const struct symsearch *syms,
				   struct module *owner,
				   unsigned int symnum, void *data)
{
	struct find_symbol_arg *fsa = data;

	if (strcmp(syms->start[symnum].name, fsa->name) != 0)
		return false;

	if (!fsa->gplok) {
		if (syms->licence == GPL_ONLY)
			return false;
		if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
			printk(KERN_WARNING "Symbol %s is being used "
			       "by a non-GPL module, which will not "
			       "be allowed in the future\n", fsa->name);
			printk(KERN_WARNING "Please see the file "
			       "Documentation/feature-removal-schedule.txt "
			       "in the kernel source tree for more details.\n");
		}
	}

#ifdef CONFIG_UNUSED_SYMBOLS
	if (syms->unused && fsa->warn) {
		printk(KERN_WARNING "Symbol %s is marked as UNUSED, "
		       "however this module is using it.\n", fsa->name);
		printk(KERN_WARNING
		       "This symbol will go away in the future.\n");
		printk(KERN_WARNING
		       "Please evaluate whether this is the right API to use "
		       "and, if it really is, submit a report to the linux "
		       "kernel mailing list together with submitting your "
		       "code for inclusion.\n");
	}
#endif

	fsa->owner = owner;
	fsa->crc = symversion(syms->crcs, symnum);
	fsa->sym = &syms->start[symnum];
	return true;
}
/* Find a symbol and return it, along with (optionally) its crc and
 * (optionally) the module which owns it */
static const struct kernel_symbol *find_symbol(const char *name,
					       struct module **owner,
					       const unsigned long **crc,
					       bool gplok, bool warn)
{
	struct find_symbol_arg fsa;

	fsa.name = name;
	fsa.gplok = gplok;
	fsa.warn = warn;

	if (each_symbol(find_symbol_in_section, &fsa)) {
		if (owner)
			*owner = fsa.owner;
		if (crc)
			*crc = fsa.crc;
		return fsa.sym;
	}

	return NULL;
}
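/* A minimal usage sketch (hypothetical caller; "printk" is just an example
 * of an exported symbol):
 *
 *	struct module *owner;
 *	const unsigned long *crc;
 *	const struct kernel_symbol *sym =
 *	    find_symbol("printk", &owner, &crc, true, false);
 *	if (sym != NULL)
 *		... sym->value holds the symbol's address ...
 */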
static inline int within_module_core(unsigned long addr, struct module *mod)
{
	return (unsigned long)mod->module_core <= addr &&
	    addr < (unsigned long)mod->module_core + mod->core_size;
}

static inline int within_module_init(unsigned long addr, struct module *mod)
{
	return (unsigned long)mod->module_init <= addr &&
	    addr < (unsigned long)mod->module_init + mod->init_size;
}
static struct module *__module_address(unsigned long addr)
{
	struct module *mod;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
	list_for_each_entry_rcu(mod, &modules, list)
#else
	/* d72b37513cdfbd3f53f3d485a8c403cc96d2c95f was after 2.6.27 */
	list_for_each_entry(mod, &modules, list)
#endif
		if (within_module_core(addr, mod) ||
		    within_module_init(addr, mod))
			return mod;
	return NULL;
}
#endif /* KSPLICE_NO_KERNEL_SUPPORT */
struct update_attribute {
	struct attribute attr;
	ssize_t (*show)(struct update *update, char *buf);
	ssize_t (*store)(struct update *update, const char *buf, size_t len);
};
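/* Standard sysfs dispatch pattern: each struct update embeds a kobject, so
 * the generic show/store callbacks recover the update and its attribute via
 * container_of() and forward to the attribute-specific handlers below. */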
static ssize_t update_attr_show(struct kobject *kobj, struct attribute *attr,
				char *buf)
{
	struct update_attribute *attribute =
	    container_of(attr, struct update_attribute, attr);
	struct update *update = container_of(kobj, struct update, kobj);
	if (attribute->show == NULL)
		return -EIO;
	return attribute->show(update, buf);
}

static ssize_t update_attr_store(struct kobject *kobj, struct attribute *attr,
				 const char *buf, size_t len)
{
	struct update_attribute *attribute =
	    container_of(attr, struct update_attribute, attr);
	struct update *update = container_of(kobj, struct update, kobj);
	if (attribute->store == NULL)
		return -EIO;
	return attribute->store(update, buf, len);
}
static struct sysfs_ops update_sysfs_ops = {
	.show = update_attr_show,
	.store = update_attr_store,
};

static void update_release(struct kobject *kobj)
{
	struct update *update;
	update = container_of(kobj, struct update, kobj);
	cleanup_ksplice_update(update);
}
static ssize_t stage_show(struct update *update, char *buf)
{
	switch (update->stage) {
	case STAGE_PREPARING:
		return snprintf(buf, PAGE_SIZE, "preparing\n");
	case STAGE_APPLIED:
		return snprintf(buf, PAGE_SIZE, "applied\n");
	case STAGE_REVERSED:
		return snprintf(buf, PAGE_SIZE, "reversed\n");
	}
	return 0;
}
static ssize_t abort_cause_show(struct update *update, char *buf)
{
	switch (update->abort_cause) {
	case OK:
		return snprintf(buf, PAGE_SIZE, "ok\n");
	case NO_MATCH:
		return snprintf(buf, PAGE_SIZE, "no_match\n");
#ifdef KSPLICE_STANDALONE
	case BAD_SYSTEM_MAP:
		return snprintf(buf, PAGE_SIZE, "bad_system_map\n");
#endif /* KSPLICE_STANDALONE */
	case CODE_BUSY:
		return snprintf(buf, PAGE_SIZE, "code_busy\n");
	case MODULE_BUSY:
		return snprintf(buf, PAGE_SIZE, "module_busy\n");
	case OUT_OF_MEMORY:
		return snprintf(buf, PAGE_SIZE, "out_of_memory\n");
	case FAILED_TO_FIND:
		return snprintf(buf, PAGE_SIZE, "failed_to_find\n");
	case ALREADY_REVERSED:
		return snprintf(buf, PAGE_SIZE, "already_reversed\n");
	case MISSING_EXPORT:
		return snprintf(buf, PAGE_SIZE, "missing_export\n");
	case UNEXPECTED_RUNNING_TASK:
		return snprintf(buf, PAGE_SIZE, "unexpected_running_task\n");
	case TARGET_NOT_LOADED:
		return snprintf(buf, PAGE_SIZE, "target_not_loaded\n");
	case CALL_FAILED:
		return snprintf(buf, PAGE_SIZE, "call_failed\n");
	case COLD_UPDATE_LOADED:
		return snprintf(buf, PAGE_SIZE, "cold_update_loaded\n");
	case UNEXPECTED:
		return snprintf(buf, PAGE_SIZE, "unexpected\n");
	default:
		return snprintf(buf, PAGE_SIZE, "unknown\n");
	}
	return 0;
}
static ssize_t conflict_show(struct update *update, char *buf)
{
	const struct conflict *conf;
	const struct conflict_addr *ca;
	int used = 0;
	mutex_lock(&module_mutex);
	list_for_each_entry(conf, &update->conflicts, list) {
		used += snprintf(buf + used, PAGE_SIZE - used, "%s %d",
				 conf->process_name, conf->pid);
		list_for_each_entry(ca, &conf->stack, list) {
			if (!ca->has_conflict)
				continue;
			used += snprintf(buf + used, PAGE_SIZE - used, " %s",
					 ca->label);
		}
		used += snprintf(buf + used, PAGE_SIZE - used, "\n");
	}
	mutex_unlock(&module_mutex);
	return used;
}
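/* The conflicts file thus emits one line per conflicting process: the
 * process name and pid, followed by the labels of the to-be-patched code
 * found on that process's stack.  A hypothetical line might look like:
 *
 *	cron 4211 schedule do_wait
 */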
/* Used to pass maybe_cleanup_ksplice_update to kthread_run */
static int maybe_cleanup_ksplice_update_wrapper(void *updateptr)
{
	struct update *update = updateptr;
	mutex_lock(&module_mutex);
	maybe_cleanup_ksplice_update(update);
	mutex_unlock(&module_mutex);
	return 0;
}
static ssize_t stage_store(struct update *update, const char *buf, size_t len)
{
	enum stage old_stage;
	mutex_lock(&module_mutex);
	old_stage = update->stage;
	if ((strncmp(buf, "applied", len) == 0 ||
	     strncmp(buf, "applied\n", len) == 0) &&
	    update->stage == STAGE_PREPARING)
		update->abort_cause = apply_update(update);
	else if ((strncmp(buf, "reversed", len) == 0 ||
		  strncmp(buf, "reversed\n", len) == 0) &&
		 update->stage == STAGE_APPLIED)
		update->abort_cause = reverse_patches(update);
	else if ((strncmp(buf, "cleanup", len) == 0 ||
		  strncmp(buf, "cleanup\n", len) == 0) &&
		 update->stage == STAGE_REVERSED)
		kthread_run(maybe_cleanup_ksplice_update_wrapper, update,
			    "ksplice_cleanup_%s", update->kid);

	if (old_stage != STAGE_REVERSED && update->abort_cause == OK)
		printk(KERN_INFO "ksplice: Update %s %s successfully\n",
		       update->kid,
		       update->stage == STAGE_APPLIED ? "applied" : "reversed");
	mutex_unlock(&module_mutex);
	return len;
}
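/* stage_store() implements the update lifecycle as a small state machine:
 * preparing -> applied -> reversed -> (cleanup).  User space drives it by
 * writing the target stage to the update's sysfs stage file, e.g. (path
 * illustrative, assuming the update's kobject is registered under the
 * "ksplice" directory created in init_ksplice() below):
 *
 *	echo applied > /sys/kernel/ksplice/<kid>/stage
 *	echo reversed > /sys/kernel/ksplice/<kid>/stage
 *	echo cleanup > /sys/kernel/ksplice/<kid>/stage
 */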
static ssize_t debug_show(struct update *update, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", update->debug);
}

static ssize_t debug_store(struct update *update, const char *buf, size_t len)
{
	unsigned long l;
	int ret = strict_strtoul(buf, 10, &l);
	if (ret != 0)
		return ret;
	update->debug = l;
	return len;
}
static ssize_t partial_show(struct update *update, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", update->partial);
}

static ssize_t partial_store(struct update *update, const char *buf, size_t len)
{
	unsigned long l;
	int ret = strict_strtoul(buf, 10, &l);
	if (ret != 0)
		return ret;
	update->partial = l;
	return len;
}
static struct update_attribute stage_attribute =
	__ATTR(stage, 0600, stage_show, stage_store);
static struct update_attribute abort_cause_attribute =
	__ATTR(abort_cause, 0400, abort_cause_show, NULL);
static struct update_attribute debug_attribute =
	__ATTR(debug, 0600, debug_show, debug_store);
static struct update_attribute partial_attribute =
	__ATTR(partial, 0600, partial_show, partial_store);
static struct update_attribute conflict_attribute =
	__ATTR(conflicts, 0400, conflict_show, NULL);
static struct attribute *update_attrs[] = {
	&stage_attribute.attr,
	&abort_cause_attribute.attr,
	&debug_attribute.attr,
	&partial_attribute.attr,
	&conflict_attribute.attr,
	NULL
};

static struct kobj_type update_ktype = {
	.sysfs_ops = &update_sysfs_ops,
	.release = update_release,
	.default_attrs = update_attrs,
};
#ifdef KSPLICE_STANDALONE
static int debug;
module_param(debug, int, 0600);
MODULE_PARM_DESC(debug, "Debug level");

extern struct ksplice_system_map ksplice_system_map[], ksplice_system_map_end[];

static struct ksplice_mod_change bootstrap_mod_change = {
	.name = "ksplice_" __stringify(KSPLICE_KID),
	.kid = "init_" __stringify(KSPLICE_KID),
	.target_name = NULL,
	.target = NULL,
	.map_printk = MAP_PRINTK,
	.new_code_mod = THIS_MODULE,
	.new_code.system_map = ksplice_system_map,
	.new_code.system_map_end = ksplice_system_map_end,
};
#endif /* KSPLICE_STANDALONE */
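/* In the standalone configuration, init_ksplice() bootstraps Ksplice itself:
 * it sorts the embedded System.map extract so later lookups can search it
 * efficiently, then applies ksplice_init_relocs to resolve this module's own
 * references to unexported kernel symbols; only if that succeeds is
 * `bootstrapped` set and the module usable for real updates. */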
static int init_ksplice(void)
{
#ifdef KSPLICE_STANDALONE
	struct ksplice_mod_change *change = &bootstrap_mod_change;
	change->update = init_ksplice_update(change->kid);
	sort(change->new_code.system_map,
	     change->new_code.system_map_end - change->new_code.system_map,
	     sizeof(struct ksplice_system_map), compare_system_map, NULL);
	if (change->update == NULL)
		return -ENOMEM;
	add_to_update(change, change->update);
	change->update->debug = debug;
	change->update->abort_cause =
	    apply_relocs(change, ksplice_init_relocs, ksplice_init_relocs_end);
	if (change->update->abort_cause == OK)
		bootstrapped = true;
	cleanup_ksplice_update(bootstrap_mod_change.update);
#else /* !KSPLICE_STANDALONE */
	ksplice_kobj = kobject_create_and_add("ksplice", kernel_kobj);
	if (ksplice_kobj == NULL)
		return -ENOMEM;
#endif /* KSPLICE_STANDALONE */
	return 0;
}
static void cleanup_ksplice(void)
{
#ifndef KSPLICE_STANDALONE
	kobject_put(ksplice_kobj);
#endif /* KSPLICE_STANDALONE */
}
module_init(init_ksplice);
module_exit(cleanup_ksplice);

MODULE_AUTHOR("Ksplice, Inc.");
MODULE_DESCRIPTION("Ksplice rebootless update system");
#ifdef KSPLICE_VERSION
MODULE_VERSION(KSPLICE_VERSION);
#endif
MODULE_LICENSE("GPL v2");