EXTRACT_SYMBOL(search_exception_tables)
[ksplice.git] / kmodsrc / ksplice.c
blobf18a9d3d6c60af566e4ab96759471b040f5aaf28
1 /* Copyright (C) 2007-2009 Ksplice, Inc.
2 * Authors: Jeff Arnold, Anders Kaseorg, Tim Abbott
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License, version 2.
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
15 * 02110-1301, USA.
18 #include <linux/module.h>
19 #include <linux/version.h>
20 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
21 #include <linux/bug.h>
22 #else /* LINUX_VERSION_CODE */
23 /* 7664c5a1da4711bb6383117f51b94c8dc8f3f1cd was after 2.6.19 */
24 #endif /* LINUX_VERSION_CODE */
25 #include <linux/ctype.h>
26 #if defined CONFIG_DEBUG_FS || LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
27 #include <linux/debugfs.h>
28 #else /* CONFIG_DEBUG_FS */
29 /* a7a76cefc4b12bb6508afa4c77f11c2752cc365d was after 2.6.11 */
30 #endif /* CONFIG_DEBUG_FS */
31 #include <linux/errno.h>
32 #include <linux/kallsyms.h>
33 #include <linux/kobject.h>
34 #include <linux/kthread.h>
35 #include <linux/pagemap.h>
36 #include <linux/sched.h>
37 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
38 #include <linux/sort.h>
39 #else /* LINUX_VERSION_CODE < */
40 /* 8c63b6d337534a6b5fb111dc27d0850f535118c0 was after 2.6.11 */
41 #endif /* LINUX_VERSION_CODE */
42 #include <linux/stop_machine.h>
43 #include <linux/sysfs.h>
44 #include <linux/time.h>
45 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
46 #include <linux/uaccess.h>
47 #else /* LINUX_VERSION_CODE < */
48 /* linux/uaccess.h doesn't exist in kernels before 2.6.18 */
49 #include <asm/uaccess.h>
50 #endif /* LINUX_VERSION_CODE */
51 #include <linux/vmalloc.h>
52 #ifdef KSPLICE_STANDALONE
53 #include "ksplice.h"
54 #else /* !KSPLICE_STANDALONE */
55 #include <linux/ksplice.h>
56 #endif /* KSPLICE_STANDALONE */
57 #ifdef KSPLICE_NEED_PARAINSTRUCTIONS
58 #include <asm/alternative.h>
59 #endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
61 #ifdef KSPLICE_STANDALONE
62 #if !defined(CONFIG_KSPLICE) && !defined(CONFIG_KSPLICE_MODULE)
63 #define KSPLICE_NO_KERNEL_SUPPORT 1
64 #endif /* !CONFIG_KSPLICE && !CONFIG_KSPLICE_MODULE */
66 #ifndef __used
67 #define __used __attribute_used__
68 #endif
70 #define EXTRACT_SYMBOL(sym) \
71 static const typeof(&sym) PASTE(__ksplice_extract_, __LINE__) \
72 __used __attribute__((section(".ksplice_extract"))) = &sym
73 #endif /* KSPLICE_STANDALONE */
/* Lifecycle stage of an update, as exposed through sysfs. */
enum stage {
	STAGE_PREPARING,	/* the update is not yet applied */
	STAGE_APPLIED,		/* the update is applied */
	STAGE_REVERSED,		/* the update has been applied and reversed */
};
/* parameter to modify run-pre matching */
enum run_pre_mode {
	RUN_PRE_INITIAL,	/* dry run (only change temp_labelvals) */
	RUN_PRE_DEBUG,		/* dry run with byte-by-byte debugging */
	RUN_PRE_FINAL,		/* finalizes the matching */
#ifndef CONFIG_FUNCTION_DATA_SECTIONS
	RUN_PRE_SILENT,		/* dry run with no debug output */
#endif /* !CONFIG_FUNCTION_DATA_SECTIONS */
};

/* Status of a label→value binding: absent, tentative, or committed. */
enum { NOVAL, TEMP, VAL };
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
/* 5d7b32de9935c65ca8285ac6ec2382afdbb5d479 was after 2.6.8 */
#define __bitwise__
#elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
/* af4ca457eaf2d6682059c18463eb106e2ce58198 was after 2.6.14 */
#define __bitwise__ __bitwise
#endif

/* Status code returned by most internal Ksplice operations; the
 * __bitwise__ annotation lets sparse catch mixing with plain ints. */
typedef int __bitwise__ abort_t;

#define OK ((__force abort_t) 0)
#define NO_MATCH ((__force abort_t) 1)
#define CODE_BUSY ((__force abort_t) 2)
#define MODULE_BUSY ((__force abort_t) 3)
#define OUT_OF_MEMORY ((__force abort_t) 4)
#define FAILED_TO_FIND ((__force abort_t) 5)
#define ALREADY_REVERSED ((__force abort_t) 6)
#define MISSING_EXPORT ((__force abort_t) 7)
#define UNEXPECTED_RUNNING_TASK ((__force abort_t) 8)
#define UNEXPECTED ((__force abort_t) 9)
#define TARGET_NOT_LOADED ((__force abort_t) 10)
#define CALL_FAILED ((__force abort_t) 11)
#define COLD_UPDATE_LOADED ((__force abort_t) 12)
#ifdef KSPLICE_STANDALONE
#define BAD_SYSTEM_MAP ((__force abort_t) 13)
#endif /* KSPLICE_STANDALONE */
/* One Ksplice update: a set of mod changes sharing a kernel id (kid),
 * exposed to userspace as a kobject under the ksplice sysfs directory. */
struct update {
	const char *kid;	/* unique id of this update */
	const char *name;	/* "ksplice_<kid>", used for the kobject */
	struct kobject kobj;
	enum stage stage;	/* PREPARING / APPLIED / REVERSED */
	abort_t abort_cause;	/* why the last operation failed, or OK */
	int debug;		/* debug verbosity level */
#ifdef CONFIG_DEBUG_FS
	struct debugfs_blob_wrapper debug_blob;
	struct dentry *debugfs_dentry;
#else /* !CONFIG_DEBUG_FS */
	bool debug_continue_line;
#endif /* CONFIG_DEBUG_FS */
	bool partial;		/* is it OK if some target mods aren't loaded */
	struct list_head changes,	/* changes for loaded target mods */
	    unused_changes;		/* changes for non-loaded target mods */
	struct list_head conflicts;	/* conflicting tasks (struct conflict) */
	struct list_head list;		/* linked into the global updates list */
	struct list_head ksplice_module_list;
};
/* a process conflicting with an update */
struct conflict {
	const char *process_name;	/* command name of the task */
	pid_t pid;			/* pid of the task */
	struct list_head stack;		/* conflict_addr entries on its stack */
	struct list_head list;		/* linked into update->conflicts */
};
/* an address on the stack of a conflict */
struct conflict_addr {
	unsigned long addr;	/* the address on the stack */
	bool has_conflict;	/* does this address in particular conflict? */
	const char *label;	/* the label of the conflicting safety_record */
	struct list_head list;	/* linked into conflict->stack */
};
#if defined(CONFIG_DEBUG_FS) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
/* Old kernels don't have debugfs_create_blob */
struct debugfs_blob_wrapper {
	void *data;		/* blob contents */
	unsigned long size;	/* blob length in bytes */
};
#endif /* CONFIG_DEBUG_FS && LINUX_VERSION_CODE */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
/* 930631edd4b1fe2781d9fe90edbe35d89dfc94cc was after 2.6.18 */
/* Integer ceiling division; assumes n + d - 1 does not overflow. */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
#endif
/* A binding between a symbol and the list of values saved for it
 * (used to undo tentative bindings made during run-pre matching). */
struct labelval {
	struct list_head list;
	struct ksplice_symbol *symbol;
	struct list_head *saved_vals;
};
/* region to be checked for conflicts in the stack check */
struct safety_record {
	struct list_head list;
	const char *label;
	unsigned long addr;	/* the address to be checked for conflicts
				 * (e.g. an obsolete function's starting addr)
				 */
	unsigned long size;	/* the size of the region to be checked */
};
/* possible value for a symbol */
struct candidate_val {
	struct list_head list;	/* linked into a candidate-value list */
	unsigned long val;	/* one candidate address for the symbol */
};
/* private struct used by init_symbol_array */
struct ksplice_lookup {
/* input */
	struct ksplice_mod_change *change;	/* change being resolved */
	struct ksplice_symbol **arr;		/* symbols to look up */
	size_t size;				/* number of entries in arr */
/* output */
	abort_t ret;				/* first error encountered */
};
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
/* c6b37801911d7f4663c99cad8aa230bc934cea82 was after 2.6.29 */
/* One exported-symbol table (ksymtab section) to search. */
struct symsearch {
	const struct kernel_symbol *start, *stop;
	const unsigned long *crcs;	/* matching kcrctab entries */
	enum {
		NOT_GPL_ONLY,
		GPL_ONLY,
		WILL_BE_GPL_ONLY,
	} licence;
	bool unused;			/* from a __ksymtab_unused* section */
};
#endif /* LINUX_VERSION_CODE */
216 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
217 /* c33fa9f5609e918824446ef9a75319d4a802f1f4 was after 2.6.25 */
219 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
220 /* 2fff0a48416af891dce38fd425246e337831e0bb was after 2.6.19 */
221 static bool virtual_address_mapped(unsigned long addr)
223 char retval;
224 return probe_kernel_address(addr, retval) != -EFAULT;
226 #else /* LINUX_VERSION_CODE < */
227 static bool virtual_address_mapped(unsigned long addr);
228 #endif /* LINUX_VERSION_CODE */
230 static long probe_kernel_read(void *dst, void *src, size_t size)
232 if (size == 0)
233 return 0;
234 if (!virtual_address_mapped((unsigned long)src) ||
235 !virtual_address_mapped((unsigned long)src + size - 1))
236 return -EFAULT;
238 memcpy(dst, src, size);
239 return 0;
241 #endif /* LINUX_VERSION_CODE */
243 static LIST_HEAD(updates);
244 #ifdef KSPLICE_STANDALONE
245 #if defined(CONFIG_KSPLICE) || defined(CONFIG_KSPLICE_MODULE)
246 extern struct list_head ksplice_modules;
247 #else /* !CONFIG_KSPLICE */
248 LIST_HEAD(ksplice_modules);
249 #endif /* CONFIG_KSPLICE */
250 #else /* !KSPLICE_STANDALONE */
251 LIST_HEAD(ksplice_modules);
252 EXPORT_SYMBOL_GPL(ksplice_modules);
253 static struct kobject *ksplice_kobj;
254 #endif /* KSPLICE_STANDALONE */
256 static struct kobj_type update_ktype;
258 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
259 /* Old kernels do not have kcalloc
260 * e629946abd0bb8266e9c3d0fd1bff2ef8dec5443 was after 2.6.8
262 static void *kcalloc(size_t n, size_t size, typeof(GFP_KERNEL) flags)
264 char *mem;
265 if (n != 0 && size > ULONG_MAX / n)
266 return NULL;
267 mem = kmalloc(n * size, flags);
268 if (mem)
269 memset(mem, 0, n * size);
270 return mem;
272 #endif /* LINUX_VERSION_CODE */
274 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
275 /* 8c63b6d337534a6b5fb111dc27d0850f535118c0 was after 2.6.11 */
276 static void u32_swap(void *a, void *b, int size)
278 u32 t = *(u32 *)a;
279 *(u32 *)a = *(u32 *)b;
280 *(u32 *)b = t;
/* Generic byte-by-byte swap of two @size-byte elements for sort().
 * Note: this is a do/while loop, so a single byte is swapped even
 * when size == 0, matching the historical kernel implementation. */
static void generic_swap(void *a, void *b, int size)
{
	char *x = a, *y = b;

	do {
		char tmp = *x;

		*x++ = *y;
		*y++ = tmp;
	} while (--size > 0);
}
295 * sort - sort an array of elements
296 * @base: pointer to data to sort
297 * @num: number of elements
298 * @size: size of each element
299 * @cmp: pointer to comparison function
300 * @swap: pointer to swap function or NULL
302 * This function does a heapsort on the given array. You may provide a
303 * swap function optimized to your element type.
305 * Sorting time is O(n log n) both on average and worst-case. While
306 * qsort is about 20% faster on average, it suffers from exploitable
307 * O(n*n) worst-case behavior and extra memory requirements that make
308 * it less suitable for kernel use.
311 void sort(void *base, size_t num, size_t size,
312 int (*cmp)(const void *, const void *),
313 void (*swap)(void *, void *, int size))
315 /* pre-scale counters for performance */
316 int i = (num / 2 - 1) * size, n = num * size, c, r;
318 if (!swap)
319 swap = (size == 4 ? u32_swap : generic_swap);
321 /* heapify */
322 for (; i >= 0; i -= size) {
323 for (r = i; r * 2 + size < n; r = c) {
324 c = r * 2 + size;
325 if (c < n - size && cmp(base + c, base + c + size) < 0)
326 c += size;
327 if (cmp(base + r, base + c) >= 0)
328 break;
329 swap(base + r, base + c, size);
333 /* sort */
334 for (i = n - size; i > 0; i -= size) {
335 swap(base, base + i, size);
336 for (r = 0; r * 2 + size < i; r = c) {
337 c = r * 2 + size;
338 if (c < i - size && cmp(base + c, base + c + size) < 0)
339 c += size;
340 if (cmp(base + r, base + c) >= 0)
341 break;
342 swap(base + r, base + c, size);
346 #endif /* LINUX_VERSION_CODE < */
348 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13)
349 /* Old kernels do not have kstrdup
350 * 543537bd922692bc978e2e356fcd8bfc9c2ee7d5 was after 2.6.12
352 #define kstrdup ksplice_kstrdup
353 static char *kstrdup(const char *s, typeof(GFP_KERNEL) gfp)
355 size_t len;
356 char *buf;
358 if (!s)
359 return NULL;
361 len = strlen(s) + 1;
362 buf = kmalloc(len, gfp);
363 if (buf)
364 memcpy(buf, s, len);
365 return buf;
367 #endif /* LINUX_VERSION_CODE */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
/* Old kernels use semaphore instead of mutex
 * 97d1f15b7ef52c1e9c28dc48b454024bb53a5fd2 was after 2.6.16
 */
/* Map the mutex API onto the older semaphore API. */
#define mutex semaphore
#define mutex_lock down
#define mutex_unlock up
#endif /* LINUX_VERSION_CODE */
378 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
379 /* 11443ec7d9286dd25663516436a14edfb5f43857 was after 2.6.21 */
380 static char * __attribute_used__
381 kvasprintf(typeof(GFP_KERNEL) gfp, const char *fmt, va_list ap)
383 unsigned int len;
384 char *p, dummy[1];
385 va_list aq;
387 va_copy(aq, ap);
388 len = vsnprintf(dummy, 0, fmt, aq);
389 va_end(aq);
391 p = kmalloc(len + 1, gfp);
392 if (!p)
393 return NULL;
395 vsnprintf(p, len + 1, fmt, ap);
397 return p;
399 #endif
401 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
402 /* e905914f96e11862b130dd229f73045dad9a34e8 was after 2.6.17 */
403 static char * __attribute__((format (printf, 2, 3)))
404 kasprintf(typeof(GFP_KERNEL) gfp, const char *fmt, ...)
406 va_list ap;
407 char *p;
409 va_start(ap, fmt);
410 p = kvasprintf(gfp, fmt, ap);
411 va_end(ap);
413 return p;
415 #endif
417 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
418 /* 06b2a76d25d3cfbd14680021c1d356c91be6904e was after 2.6.24 */
419 static int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
421 char *tail;
422 unsigned long val;
423 size_t len;
425 *res = 0;
426 len = strlen(cp);
427 if (len == 0)
428 return -EINVAL;
430 val = simple_strtoul(cp, &tail, base);
431 if ((*tail == '\0') ||
432 ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
433 *res = val;
434 return 0;
437 return -EINVAL;
439 #endif
441 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
442 /* 9b1a4d38373a5581a4e01032a3ccdd94cd93477b was after 2.6.26 */
443 /* Assume cpus == NULL. */
444 #define stop_machine(fn, data, cpus) stop_machine_run(fn, data, NR_CPUS);
445 #endif /* LINUX_VERSION_CODE */
447 #ifndef task_thread_info
448 #define task_thread_info(task) (task)->thread_info
449 #endif /* !task_thread_info */
451 #ifdef KSPLICE_STANDALONE
453 #ifdef do_each_thread_ve /* OpenVZ kernels define this */
454 #define do_each_thread do_each_thread_all
455 #define while_each_thread while_each_thread_all
456 #endif
458 static bool bootstrapped = false;
460 /* defined by ksplice-create */
461 extern const struct ksplice_reloc ksplice_init_relocs[],
462 ksplice_init_relocs_end[];
464 #endif /* KSPLICE_STANDALONE */
466 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
467 /* c6b37801911d7f4663c99cad8aa230bc934cea82 was after 2.6.29 */
468 extern struct list_head modules;
469 extern struct mutex module_mutex;
470 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) && defined(CONFIG_UNUSED_SYMBOLS)
471 /* f71d20e961474dde77e6558396efb93d6ac80a4b was after 2.6.17 */
472 #define KSPLICE_KSYMTAB_UNUSED_SUPPORT 1
473 #endif /* LINUX_VERSION_CODE */
474 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
475 /* 9f28bb7e1d0188a993403ab39b774785892805e1 was after 2.6.16 */
476 #define KSPLICE_KSYMTAB_FUTURE_SUPPORT 1
477 #endif /* LINUX_VERSION_CODE */
478 extern const struct kernel_symbol __start___ksymtab[];
479 extern const struct kernel_symbol __stop___ksymtab[];
480 extern const unsigned long __start___kcrctab[];
481 extern const struct kernel_symbol __start___ksymtab_gpl[];
482 extern const struct kernel_symbol __stop___ksymtab_gpl[];
483 extern const unsigned long __start___kcrctab_gpl[];
484 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
485 extern const struct kernel_symbol __start___ksymtab_unused[];
486 extern const struct kernel_symbol __stop___ksymtab_unused[];
487 extern const unsigned long __start___kcrctab_unused[];
488 extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
489 extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
490 extern const unsigned long __start___kcrctab_unused_gpl[];
491 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
492 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
493 extern const struct kernel_symbol __start___ksymtab_gpl_future[];
494 extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
495 extern const unsigned long __start___kcrctab_gpl_future[];
496 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
497 #endif /* LINUX_VERSION_CODE */
499 static struct update *init_ksplice_update(const char *kid);
500 static void cleanup_ksplice_update(struct update *update);
501 static void maybe_cleanup_ksplice_update(struct update *update);
502 static void add_to_update(struct ksplice_mod_change *change,
503 struct update *update);
504 static int ksplice_sysfs_init(struct update *update);
506 /* Preparing the relocations and patches for application */
507 static abort_t apply_update(struct update *update);
508 static abort_t reverse_update(struct update *update);
509 static abort_t prepare_change(struct ksplice_mod_change *change);
510 static abort_t finalize_change(struct ksplice_mod_change *change);
511 static abort_t finalize_patches(struct ksplice_mod_change *change);
512 static abort_t add_dependency_on_address(struct ksplice_mod_change *change,
513 unsigned long addr);
514 static abort_t map_trampoline_pages(struct update *update);
515 static void unmap_trampoline_pages(struct update *update);
516 static void *map_writable(void *addr, size_t len);
517 static abort_t apply_relocs(struct ksplice_mod_change *change,
518 const struct ksplice_reloc *relocs,
519 const struct ksplice_reloc *relocs_end);
520 static abort_t apply_reloc(struct ksplice_mod_change *change,
521 const struct ksplice_reloc *r);
522 static abort_t apply_howto_reloc(struct ksplice_mod_change *change,
523 const struct ksplice_reloc *r);
524 static abort_t apply_howto_date(struct ksplice_mod_change *change,
525 const struct ksplice_reloc *r);
526 static abort_t read_reloc_value(struct ksplice_mod_change *change,
527 const struct ksplice_reloc *r,
528 unsigned long addr, unsigned long *valp);
529 static abort_t write_reloc_value(struct ksplice_mod_change *change,
530 const struct ksplice_reloc *r,
531 unsigned long addr, unsigned long sym_addr);
532 static abort_t create_module_list_entry(struct ksplice_mod_change *change,
533 bool to_be_applied);
534 static void cleanup_module_list_entries(struct update *update);
535 static void __attribute__((noreturn)) ksplice_deleted(void);
537 /* run-pre matching */
538 static abort_t match_change_sections(struct ksplice_mod_change *change,
539 bool consider_data_sections);
540 static abort_t find_section(struct ksplice_mod_change *change,
541 struct ksplice_section *sect);
542 static abort_t try_addr(struct ksplice_mod_change *change,
543 struct ksplice_section *sect,
544 unsigned long run_addr,
545 struct list_head *safety_records,
546 enum run_pre_mode mode);
547 static abort_t run_pre_cmp(struct ksplice_mod_change *change,
548 const struct ksplice_section *sect,
549 unsigned long run_addr,
550 struct list_head *safety_records,
551 enum run_pre_mode mode);
552 #ifndef CONFIG_FUNCTION_DATA_SECTIONS
553 /* defined in arch/ARCH/kernel/ksplice-arch.c */
554 static abort_t arch_run_pre_cmp(struct ksplice_mod_change *change,
555 struct ksplice_section *sect,
556 unsigned long run_addr,
557 struct list_head *safety_records,
558 enum run_pre_mode mode);
559 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
560 static void print_bytes(struct ksplice_mod_change *change,
561 const unsigned char *run, int runc,
562 const unsigned char *pre, int prec);
563 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
564 static abort_t brute_search(struct ksplice_mod_change *change,
565 struct ksplice_section *sect,
566 const void *start, unsigned long len,
567 struct list_head *vals);
568 static abort_t brute_search_all(struct ksplice_mod_change *change,
569 struct ksplice_section *sect,
570 struct list_head *vals);
571 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
572 static const struct ksplice_reloc *
573 init_reloc_search(struct ksplice_mod_change *change,
574 const struct ksplice_section *sect);
575 static const struct ksplice_reloc *find_reloc(const struct ksplice_reloc *start,
576 const struct ksplice_reloc *end,
577 unsigned long address,
578 unsigned long size);
579 static abort_t lookup_reloc(struct ksplice_mod_change *change,
580 const struct ksplice_reloc **fingerp,
581 unsigned long addr,
582 const struct ksplice_reloc **relocp);
583 static abort_t handle_reloc(struct ksplice_mod_change *change,
584 const struct ksplice_section *sect,
585 const struct ksplice_reloc *r,
586 unsigned long run_addr, enum run_pre_mode mode);
587 static abort_t handle_howto_date(struct ksplice_mod_change *change,
588 const struct ksplice_section *sect,
589 const struct ksplice_reloc *r,
590 unsigned long run_addr,
591 enum run_pre_mode mode);
592 static abort_t handle_howto_reloc(struct ksplice_mod_change *change,
593 const struct ksplice_section *sect,
594 const struct ksplice_reloc *r,
595 unsigned long run_addr,
596 enum run_pre_mode mode);
597 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
598 #ifdef CONFIG_BUG
599 static abort_t handle_bug(struct ksplice_mod_change *change,
600 const struct ksplice_reloc *r,
601 unsigned long run_addr);
602 #endif /* CONFIG_BUG */
603 #else /* LINUX_VERSION_CODE < */
604 /* 7664c5a1da4711bb6383117f51b94c8dc8f3f1cd was after 2.6.19 */
605 #endif /* LINUX_VERSION_CODE */
606 static abort_t handle_extable(struct ksplice_mod_change *change,
607 const struct ksplice_reloc *r,
608 unsigned long run_addr);
609 static struct ksplice_section *symbol_section(struct ksplice_mod_change *change,
610 const struct ksplice_symbol *sym);
611 static int compare_section_labels(const void *va, const void *vb);
612 static int symbol_section_bsearch_compare(const void *a, const void *b);
613 static const struct ksplice_reloc *
614 patch_reloc(struct ksplice_mod_change *change,
615 const struct ksplice_patch *p);
617 /* Computing possible addresses for symbols */
618 static abort_t lookup_symbol(struct ksplice_mod_change *change,
619 const struct ksplice_symbol *ksym,
620 struct list_head *vals);
621 static void cleanup_symbol_arrays(struct ksplice_mod_change *change);
622 static abort_t init_symbol_arrays(struct ksplice_mod_change *change);
623 static abort_t init_symbol_array(struct ksplice_mod_change *change,
624 struct ksplice_symbol *start,
625 struct ksplice_symbol *end);
626 static abort_t uniquify_symbols(struct ksplice_mod_change *change);
627 static abort_t add_matching_values(struct ksplice_lookup *lookup,
628 const char *sym_name, unsigned long sym_val);
629 static bool add_export_values(const struct symsearch *syms,
630 struct module *owner,
631 unsigned int symnum, void *data);
632 static int symbolp_bsearch_compare(const void *key, const void *elt);
633 static int compare_symbolp_names(const void *a, const void *b);
634 static int compare_symbolp_labels(const void *a, const void *b);
635 #ifdef CONFIG_KALLSYMS
636 static int add_kallsyms_values(void *data, const char *name,
637 struct module *owner, unsigned long val);
638 #endif /* CONFIG_KALLSYMS */
639 #ifdef KSPLICE_STANDALONE
640 static abort_t
641 add_system_map_candidates(struct ksplice_mod_change *change,
642 const struct ksplice_system_map *start,
643 const struct ksplice_system_map *end,
644 const char *label, struct list_head *vals);
645 static int compare_system_map(const void *a, const void *b);
646 static int system_map_bsearch_compare(const void *key, const void *elt);
647 #endif /* KSPLICE_STANDALONE */
648 static abort_t new_export_lookup(struct ksplice_mod_change *ichange,
649 const char *name, struct list_head *vals);
651 /* Atomic update trampoline insertion and removal */
652 static abort_t patch_action(struct update *update, enum ksplice_action action);
653 static int __apply_patches(void *update);
654 static int __reverse_patches(void *update);
655 static abort_t check_each_task(struct update *update);
656 static abort_t check_task(struct update *update,
657 const struct task_struct *t, bool rerun);
658 static abort_t check_stack(struct update *update, struct conflict *conf,
659 const struct thread_info *tinfo,
660 const unsigned long *stack);
661 static abort_t check_address(struct update *update,
662 struct conflict *conf, unsigned long addr);
663 static abort_t check_record(struct conflict_addr *ca,
664 const struct safety_record *rec,
665 unsigned long addr);
666 static bool is_stop_machine(const struct task_struct *t);
667 static void cleanup_conflicts(struct update *update);
668 static void print_conflicts(struct update *update);
669 static void insert_trampoline(struct ksplice_patch *p);
670 static abort_t verify_trampoline(struct ksplice_mod_change *change,
671 const struct ksplice_patch *p);
672 static void remove_trampoline(const struct ksplice_patch *p);
674 static abort_t create_labelval(struct ksplice_mod_change *change,
675 struct ksplice_symbol *ksym,
676 unsigned long val, int status);
677 static abort_t create_safety_record(struct ksplice_mod_change *change,
678 const struct ksplice_section *sect,
679 struct list_head *record_list,
680 unsigned long run_addr,
681 unsigned long run_size);
682 static abort_t add_candidate_val(struct ksplice_mod_change *change,
683 struct list_head *vals, unsigned long val);
684 static void release_vals(struct list_head *vals);
685 static void set_temp_labelvals(struct ksplice_mod_change *change, int status);
687 static int contains_canary(struct ksplice_mod_change *change,
688 unsigned long blank_addr,
689 const struct ksplice_reloc_howto *howto);
690 static unsigned long follow_trampolines(struct ksplice_mod_change *change,
691 unsigned long addr);
692 static bool patches_module(const struct module *a, const struct module *b);
693 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
694 /* 66f92cf9d415e96a5bdd6c64de8dd8418595d2fc was after 2.6.29 */
695 static bool strstarts(const char *str, const char *prefix);
696 #endif /* LINUX_VERSION_CODE */
697 static bool singular(struct list_head *list);
698 static void *bsearch(const void *key, const void *base, size_t n,
699 size_t size, int (*cmp)(const void *key, const void *elt));
700 static int compare_relocs(const void *a, const void *b);
701 static int reloc_bsearch_compare(const void *key, const void *elt);
703 /* Debugging */
704 static abort_t init_debug_buf(struct update *update);
705 static void clear_debug_buf(struct update *update);
706 static int __attribute__((format(printf, 2, 3)))
707 _ksdebug(struct update *update, const char *fmt, ...);
708 #define ksdebug(change, fmt, ...) \
709 _ksdebug(change->update, fmt, ## __VA_ARGS__)
711 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) && defined(CONFIG_KALLSYMS)
712 /* 75a66614db21007bcc8c37f9c5d5b922981387b9 was after 2.6.29 */
713 static int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
714 struct module *, unsigned long),
715 void *data);
716 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
717 static unsigned int kallsyms_expand_symbol(unsigned int off, char *result);
718 #endif /* LINUX_VERSION_CODE */
719 static int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
720 struct module *,
721 unsigned long),
722 void *data);
723 #endif /* LINUX_VERSION_CODE && CONFIG_KALLSYMS */
725 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
726 /* c6b37801911d7f4663c99cad8aa230bc934cea82 was after 2.6.29 */
727 static struct module *find_module(const char *name);
728 static int use_module(struct module *a, struct module *b);
729 static const struct kernel_symbol *find_symbol(const char *name,
730 struct module **owner,
731 const unsigned long **crc,
732 bool gplok, bool warn);
733 static bool each_symbol(bool (*fn)(const struct symsearch *arr,
734 struct module *owner,
735 unsigned int symnum, void *data),
736 void *data);
737 static struct module *__module_address(unsigned long addr);
738 #endif /* LINUX_VERSION_CODE */
740 /* Architecture-specific functions defined in arch/ARCH/kernel/ksplice-arch.c */
742 /* Prepare a trampoline for the given patch */
743 static abort_t prepare_trampoline(struct ksplice_mod_change *change,
744 struct ksplice_patch *p);
745 /* What address does the trampoline at addr jump to? */
746 static abort_t trampoline_target(struct ksplice_mod_change *change,
747 unsigned long addr, unsigned long *new_addr);
748 /* Hook to handle pc-relative jumps inserted by parainstructions */
749 static abort_t handle_paravirt(struct ksplice_mod_change *change,
750 unsigned long pre, unsigned long run,
751 int *matched);
752 /* Is address p on the stack of the given thread? */
753 static bool valid_stack_ptr(const struct thread_info *tinfo, const void *p);
755 #ifndef KSPLICE_STANDALONE
756 #include "ksplice-arch.c"
757 #elif defined CONFIG_X86
758 #include "x86/ksplice-arch.c"
759 #elif defined CONFIG_ARM
760 #include "arm/ksplice-arch.c"
761 #endif /* KSPLICE_STANDALONE */
/* Delete and kfree every entry of a list.  @type/@member identify the
 * containing structure, as with list_entry().  Safe against removal
 * during iteration (uses list_for_each_safe). */
#define clear_list(head, type, member)				\
	do {							\
		struct list_head *_pos, *_n;			\
		list_for_each_safe(_pos, _n, head) {		\
			list_del(_pos);				\
			kfree(list_entry(_pos, type, member));	\
		}						\
	} while (0)
773 * init_ksplice_mod_change() - Initializes a ksplice change
774 * @change: The change to be initialized. All of the public fields of the
775 * change and its associated data structures should be populated
776 * before this function is called. The values of the private
777 * fields will be ignored.
779 int init_ksplice_mod_change(struct ksplice_mod_change *change)
781 struct update *update;
782 struct ksplice_patch *p;
783 struct ksplice_section *s;
784 int ret = 0;
786 #ifdef KSPLICE_STANDALONE
787 if (!bootstrapped)
788 return -1;
789 #endif /* KSPLICE_STANDALONE */
791 INIT_LIST_HEAD(&change->temp_labelvals);
792 INIT_LIST_HEAD(&change->safety_records);
794 sort(change->old_code.relocs,
795 change->old_code.relocs_end - change->old_code.relocs,
796 sizeof(*change->old_code.relocs), compare_relocs, NULL);
797 sort(change->new_code.relocs,
798 change->new_code.relocs_end - change->new_code.relocs,
799 sizeof(*change->new_code.relocs), compare_relocs, NULL);
800 sort(change->old_code.sections,
801 change->old_code.sections_end - change->old_code.sections,
802 sizeof(*change->old_code.sections), compare_section_labels, NULL);
803 #ifdef KSPLICE_STANDALONE
804 sort(change->new_code.system_map,
805 change->new_code.system_map_end - change->new_code.system_map,
806 sizeof(*change->new_code.system_map), compare_system_map, NULL);
807 sort(change->old_code.system_map,
808 change->old_code.system_map_end - change->old_code.system_map,
809 sizeof(*change->old_code.system_map), compare_system_map, NULL);
810 #endif /* KSPLICE_STANDALONE */
812 for (p = change->patches; p < change->patches_end; p++)
813 p->vaddr = NULL;
814 for (s = change->old_code.sections; s < change->old_code.sections_end;
815 s++)
816 s->match_map = NULL;
817 for (p = change->patches; p < change->patches_end; p++) {
818 const struct ksplice_reloc *r = patch_reloc(change, p);
819 if (r == NULL)
820 return -ENOENT;
821 if (p->type == KSPLICE_PATCH_DATA) {
822 s = symbol_section(change, r->symbol);
823 if (s == NULL)
824 return -ENOENT;
825 /* Ksplice creates KSPLICE_PATCH_DATA patches in order
826 * to modify rodata sections that have been explicitly
827 * marked for patching using the ksplice-patch.h macro
828 * ksplice_assume_rodata. Here we modify the section
829 * flags appropriately.
831 if (s->flags & KSPLICE_SECTION_DATA)
832 s->flags = (s->flags & ~KSPLICE_SECTION_DATA) |
833 KSPLICE_SECTION_RODATA;
837 mutex_lock(&module_mutex);
838 list_for_each_entry(update, &updates, list) {
839 if (strcmp(change->kid, update->kid) == 0) {
840 if (update->stage != STAGE_PREPARING) {
841 ret = -EPERM;
842 goto out;
844 add_to_update(change, update);
845 ret = 0;
846 goto out;
849 update = init_ksplice_update(change->kid);
850 if (update == NULL) {
851 ret = -ENOMEM;
852 goto out;
854 ret = ksplice_sysfs_init(update);
855 if (ret != 0) {
856 cleanup_ksplice_update(update);
857 goto out;
859 add_to_update(change, update);
860 out:
861 mutex_unlock(&module_mutex);
862 return ret;
864 EXPORT_SYMBOL_GPL(init_ksplice_mod_change);
/**
 * cleanup_ksplice_mod_change() - Cleans up a change if appropriate
 * @change: The change to be cleaned up
 *
 * cleanup_ksplice_mod_change is currently called twice for each
 * Ksplice update; once when the old_code module is unloaded, and once
 * when the new_code module is unloaded. The extra call is used to
 * avoid leaks if you unload the old_code without applying the update.
 */
void cleanup_ksplice_mod_change(struct ksplice_mod_change *change)
{
	/* Already detached (e.g. by the earlier of the two calls). */
	if (change->update == NULL)
		return;

	mutex_lock(&module_mutex);
	if (change->update->stage == STAGE_APPLIED) {
		/* If the change wasn't actually applied (because we
		 * only applied this update to loaded modules and this
		 * target was not loaded), then unregister the change
		 * from the list of unused changes.
		 */
		struct ksplice_mod_change *c;
		bool found = false;

		/* Linear scan: only unused changes may be removed here. */
		list_for_each_entry(c, &change->update->unused_changes, list) {
			if (c == change)
				found = true;
		}
		if (found)
			list_del(&change->list);
		mutex_unlock(&module_mutex);
		return;
	}
	list_del(&change->list);
	if (change->update->stage == STAGE_PREPARING)
		maybe_cleanup_ksplice_update(change->update);
	/* Mark the change detached so a second call is a no-op. */
	change->update = NULL;
	mutex_unlock(&module_mutex);
}
EXPORT_SYMBOL_GPL(cleanup_ksplice_mod_change);
907 static struct update *init_ksplice_update(const char *kid)
909 struct update *update;
910 update = kcalloc(1, sizeof(struct update), GFP_KERNEL);
911 if (update == NULL)
912 return NULL;
913 update->name = kasprintf(GFP_KERNEL, "ksplice_%s", kid);
914 if (update->name == NULL) {
915 kfree(update);
916 return NULL;
918 update->kid = kstrdup(kid, GFP_KERNEL);
919 if (update->kid == NULL) {
920 kfree(update->name);
921 kfree(update);
922 return NULL;
924 if (try_module_get(THIS_MODULE) != 1) {
925 kfree(update->kid);
926 kfree(update->name);
927 kfree(update);
928 return NULL;
930 INIT_LIST_HEAD(&update->changes);
931 INIT_LIST_HEAD(&update->unused_changes);
932 INIT_LIST_HEAD(&update->ksplice_module_list);
933 if (init_debug_buf(update) != OK) {
934 module_put(THIS_MODULE);
935 kfree(update->kid);
936 kfree(update->name);
937 kfree(update);
938 return NULL;
940 list_add(&update->list, &updates);
941 update->stage = STAGE_PREPARING;
942 update->abort_cause = OK;
943 update->partial = 0;
944 INIT_LIST_HEAD(&update->conflicts);
945 return update;
/* Unregister and free an update and all resources it owns. */
static void cleanup_ksplice_update(struct update *update)
{
	/* Remove from the global updates list first. */
	list_del(&update->list);
	cleanup_conflicts(update);
	clear_debug_buf(update);
	cleanup_module_list_entries(update);
	kfree(update->kid);
	kfree(update->name);
	kfree(update);
	/* Drop the self-reference taken in init_ksplice_update();
	 * done last, after all of the update's memory is released. */
	module_put(THIS_MODULE);
}
/* Clean up the update if it no longer has any changes */
static void maybe_cleanup_ksplice_update(struct update *update)
{
	/* Dropping the last kobject reference triggers the ktype's
	 * release callback, which performs the actual teardown. */
	if (list_empty(&update->changes) && list_empty(&update->unused_changes))
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
		kobject_put(&update->kobj);
#else /* LINUX_VERSION_CODE < */
/* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
		kobject_unregister(&update->kobj);
#endif /* LINUX_VERSION_CODE */
}
972 static void add_to_update(struct ksplice_mod_change *change,
973 struct update *update)
975 change->update = update;
976 list_add(&change->list, &update->unused_changes);
/*
 * Register the update's kobject in sysfs and emit a KOBJ_ADD uevent.
 * Returns 0 on success or a negative errno from the kobject layer.
 */
static int ksplice_sysfs_init(struct update *update)
{
	int ret = 0;
	/* The embedded kobject must be zeroed before (re)init. */
	memset(&update->kobj, 0, sizeof(update->kobj));
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
#ifndef KSPLICE_STANDALONE
	ret = kobject_init_and_add(&update->kobj, &update_ktype,
				   ksplice_kobj, "%s", update->kid);
#else /* KSPLICE_STANDALONE */
	/* Standalone builds parent the kobject under this module. */
	ret = kobject_init_and_add(&update->kobj, &update_ktype,
				   &THIS_MODULE->mkobj.kobj, "ksplice");
#endif /* KSPLICE_STANDALONE */
#else /* LINUX_VERSION_CODE < */
/* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
	ret = kobject_set_name(&update->kobj, "%s", "ksplice");
	if (ret != 0)
		return ret;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
	update->kobj.parent = &THIS_MODULE->mkobj.kobj;
#else /* LINUX_VERSION_CODE < */
/* b86ab02803095190d6b72bcc18dcf620bf378df9 was after 2.6.10 */
	update->kobj.parent = &THIS_MODULE->mkobj->kobj;
#endif /* LINUX_VERSION_CODE */
	update->kobj.ktype = &update_ktype;
	ret = kobject_register(&update->kobj);
#endif /* LINUX_VERSION_CODE */
	if (ret != 0)
		return ret;
	/* Announce the new sysfs object to userspace. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
	kobject_uevent(&update->kobj, KOBJ_ADD);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
/* 312c004d36ce6c739512bac83b452f4c20ab1f62 was after 2.6.14 */
/* 12025235884570ba7f02a6f427f973ac6be7ec54 was after 2.6.9 */
	kobject_uevent(&update->kobj, KOBJ_ADD, NULL);
#endif /* LINUX_VERSION_CODE */
	return 0;
}
1017 #ifdef KSPLICE_NEED_PARAINSTRUCTIONS
1018 EXTRACT_SYMBOL(apply_paravirt);
1019 #endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
/*
 * Drive the whole apply sequence for an update: resolve each change's
 * target module, record module-list entries, create safety records
 * for the replacement sections, run-pre match and prepare each
 * change, and finally perform the patch action.
 */
static abort_t apply_update(struct update *update)
{
	struct ksplice_mod_change *change, *n;
	abort_t ret;
	int retval;

	list_for_each_entry(change, &update->changes, list) {
		ret = create_module_list_entry(change, true);
		if (ret != OK)
			goto out;
	}

	/* Resolve targets; changes whose target is loaded move from
	 * unused_changes to changes. */
	list_for_each_entry_safe(change, n, &update->unused_changes, list) {
		if (strcmp(change->target_name, "vmlinux") == 0) {
			/* NULL target means the running kernel itself. */
			change->target = NULL;
		} else if (change->target == NULL) {
			change->target = find_module(change->target_name);
			if (change->target == NULL ||
			    !module_is_live(change->target)) {
				/* Target not loaded: fatal unless this
				 * is a --partial update. */
				if (!update->partial) {
					ret = TARGET_NOT_LOADED;
					goto out;
				}
				ret = create_module_list_entry(change, false);
				if (ret != OK)
					goto out;
				continue;
			}
			/* Make the new_code module depend on its target. */
			retval = use_module(change->new_code_mod,
					    change->target);
			if (retval != 1) {
				ret = UNEXPECTED;
				goto out;
			}
		}
		ret = create_module_list_entry(change, true);
		if (ret != OK)
			goto out;
		list_del(&change->list);
		list_add_tail(&change->list, &update->changes);

#ifdef KSPLICE_NEED_PARAINSTRUCTIONS
		if (change->target == NULL) {
			apply_paravirt(change->new_code.parainstructions,
				       change->new_code.parainstructions_end);
			apply_paravirt(change->old_code.parainstructions,
				       change->old_code.parainstructions_end);
		}
#endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
	}

	/* Each new_code section gets a safety record covering it. */
	list_for_each_entry(change, &update->changes, list) {
		const struct ksplice_section *sect;
		for (sect = change->new_code.sections;
		     sect < change->new_code.sections_end; sect++) {
			struct safety_record *rec = kmalloc(sizeof(*rec),
							    GFP_KERNEL);
			if (rec == NULL) {
				ret = OUT_OF_MEMORY;
				goto out;
			}
			rec->addr = sect->address;
			rec->size = sect->size;
			rec->label = sect->symbol->label;
			list_add(&rec->list, &change->safety_records);
		}
	}

	list_for_each_entry(change, &update->changes, list) {
		ret = init_symbol_arrays(change);
		if (ret != OK) {
			cleanup_symbol_arrays(change);
			goto out;
		}
		ret = prepare_change(change);
		/* Symbol arrays are scratch state; free them whether
		 * or not prepare_change succeeded. */
		cleanup_symbol_arrays(change);
		if (ret != OK)
			goto out;
	}
	ret = patch_action(update, KS_APPLY);
out:
	/* Shared cleanup for both success and failure paths. */
	list_for_each_entry(change, &update->changes, list) {
		struct ksplice_section *s;
		if (update->stage == STAGE_PREPARING)
			clear_list(&change->safety_records,
				   struct safety_record, list);
		for (s = change->old_code.sections;
		     s < change->old_code.sections_end; s++) {
			if (s->match_map != NULL) {
				vfree(s->match_map);
				s->match_map = NULL;
			}
		}
	}
	if (update->stage == STAGE_PREPARING)
		cleanup_module_list_entries(update);

	if (ret == OK)
		printk(KERN_INFO "ksplice: Update %s applied successfully\n",
		       update->kid);
	return ret;
}
1124 static abort_t reverse_update(struct update *update)
1126 abort_t ret;
1127 struct ksplice_mod_change *change;
1129 clear_debug_buf(update);
1130 ret = init_debug_buf(update);
1131 if (ret != OK)
1132 return ret;
1134 _ksdebug(update, "Preparing to reverse %s\n", update->kid);
1136 ret = patch_action(update, KS_REVERSE);
1137 if (ret != OK)
1138 return ret;
1140 list_for_each_entry(change, &update->changes, list)
1141 clear_list(&change->safety_records, struct safety_record, list);
1143 printk(KERN_INFO "ksplice: Update %s reversed successfully\n",
1144 update->kid);
1145 return OK;
1148 static int compare_symbolp_names(const void *a, const void *b)
1150 const struct ksplice_symbol *const *sympa = a, *const *sympb = b;
1151 if ((*sympa)->name == NULL && (*sympb)->name == NULL)
1152 return 0;
1153 if ((*sympa)->name == NULL)
1154 return -1;
1155 if ((*sympb)->name == NULL)
1156 return 1;
1157 return strcmp((*sympa)->name, (*sympb)->name);
1160 static int compare_symbolp_labels(const void *a, const void *b)
1162 const struct ksplice_symbol *const *sympa = a, *const *sympb = b;
1163 return strcmp((*sympa)->label, (*sympb)->label);
1166 static int symbolp_bsearch_compare(const void *key, const void *elt)
1168 const char *name = key;
1169 const struct ksplice_symbol *const *symp = elt;
1170 const struct ksplice_symbol *sym = *symp;
1171 if (sym->name == NULL)
1172 return 1;
1173 return strcmp(name, sym->name);
1176 static abort_t add_matching_values(struct ksplice_lookup *lookup,
1177 const char *sym_name, unsigned long sym_val)
1179 struct ksplice_symbol **symp;
1180 abort_t ret;
1182 symp = bsearch(sym_name, lookup->arr, lookup->size,
1183 sizeof(*lookup->arr), symbolp_bsearch_compare);
1184 if (symp == NULL)
1185 return OK;
1187 while (symp > lookup->arr &&
1188 symbolp_bsearch_compare(sym_name, symp - 1) == 0)
1189 symp--;
1191 for (; symp < lookup->arr + lookup->size; symp++) {
1192 struct ksplice_symbol *sym = *symp;
1193 if (sym->name == NULL || strcmp(sym_name, sym->name) != 0)
1194 break;
1195 ret = add_candidate_val(lookup->change,
1196 sym->candidate_vals, sym_val);
1197 if (ret != OK)
1198 return ret;
1200 return OK;
#ifdef CONFIG_KALLSYMS
/*
 * kallsyms_on_each_symbol() callback: feed each kallsyms entry into
 * add_matching_values().  The __force casts smuggle the abort_t
 * result through the int return type the iterator requires.
 */
static int add_kallsyms_values(void *data, const char *name,
			       struct module *owner, unsigned long val)
{
	struct ksplice_lookup *lookup = data;
	/* Skip symbols from our own new_code module, and from modules
	 * that do not patch this change's target. */
	if (owner == lookup->change->new_code_mod ||
	    !patches_module(owner, lookup->change->target))
		return (__force int)OK;
	return (__force int)add_matching_values(lookup, name, val);
}
#endif /* CONFIG_KALLSYMS */
1215 static bool add_export_values(const struct symsearch *syms,
1216 struct module *owner,
1217 unsigned int symnum, void *data)
1219 struct ksplice_lookup *lookup = data;
1220 abort_t ret;
1222 ret = add_matching_values(lookup, syms->start[symnum].name,
1223 syms->start[symnum].value);
1224 if (ret != OK) {
1225 lookup->ret = ret;
1226 return true;
1228 return false;
1231 static void cleanup_symbol_arrays(struct ksplice_mod_change *change)
1233 struct ksplice_symbol *sym;
1234 for (sym = change->new_code.symbols; sym < change->new_code.symbols_end;
1235 sym++) {
1236 if (sym->candidate_vals != NULL) {
1237 clear_list(sym->candidate_vals, struct candidate_val,
1238 list);
1239 kfree(sym->candidate_vals);
1240 sym->candidate_vals = NULL;
1243 for (sym = change->old_code.symbols; sym < change->old_code.symbols_end;
1244 sym++) {
1245 if (sym->candidate_vals != NULL) {
1246 clear_list(sym->candidate_vals, struct candidate_val,
1247 list);
1248 kfree(sym->candidate_vals);
1249 sym->candidate_vals = NULL;
/*
 * The new_code and old_code modules each have their own independent
 * ksplice_symbol structures. uniquify_symbols unifies these separate
 * pieces of kernel symbol information by replacing all references to
 * the old_code copy of symbols with references to the new_code copy.
 */
static abort_t uniquify_symbols(struct ksplice_mod_change *change)
{
	struct ksplice_reloc *r;
	struct ksplice_section *s;
	struct ksplice_symbol *sym, **sym_arr, **symp;
	size_t size = change->new_code.symbols_end - change->new_code.symbols;

	if (size == 0)
		return OK;

	sym_arr = vmalloc(sizeof(*sym_arr) * size);
	if (sym_arr == NULL)
		return OUT_OF_MEMORY;

	/* Build an array of pointers to the new_code symbols, sorted
	 * by label, so old_code symbols can be matched via bsearch. */
	for (symp = sym_arr, sym = change->new_code.symbols;
	     symp < sym_arr + size && sym < change->new_code.symbols_end;
	     sym++, symp++)
		*symp = sym;

	sort(sym_arr, size, sizeof(*sym_arr), compare_symbolp_labels, NULL);

	/* Redirect each old_code reloc's symbol to the new_code copy. */
	for (r = change->old_code.relocs; r < change->old_code.relocs_end;
	     r++) {
		symp = bsearch(&r->symbol, sym_arr, size, sizeof(*sym_arr),
			       compare_symbolp_labels);
		if (symp != NULL) {
			/* Keep a name known only to the old_code copy. */
			if ((*symp)->name == NULL)
				(*symp)->name = r->symbol->name;
			r->symbol = *symp;
		}
	}

	/* Likewise for each old_code section's symbol. */
	for (s = change->old_code.sections; s < change->old_code.sections_end;
	     s++) {
		symp = bsearch(&s->symbol, sym_arr, size, sizeof(*sym_arr),
			       compare_symbolp_labels);
		if (symp != NULL) {
			if ((*symp)->name == NULL)
				(*symp)->name = s->symbol->name;
			s->symbol = *symp;
		}
	}

	vfree(sym_arr);
	return OK;
}
1308 * Initialize the ksplice_symbol structures in the given array using
1309 * the kallsyms and exported symbol tables.
1311 static abort_t init_symbol_array(struct ksplice_mod_change *change,
1312 struct ksplice_symbol *start,
1313 struct ksplice_symbol *end)
1315 struct ksplice_symbol *sym, **sym_arr, **symp;
1316 struct ksplice_lookup lookup;
1317 size_t size = end - start;
1318 abort_t ret;
1320 if (size == 0)
1321 return OK;
1323 for (sym = start; sym < end; sym++) {
1324 if (strstarts(sym->label, "__ksymtab")) {
1325 const struct kernel_symbol *ksym;
1326 const char *colon = strchr(sym->label, ':');
1327 const char *name = colon + 1;
1328 if (colon == NULL)
1329 continue;
1330 ksym = find_symbol(name, NULL, NULL, true, false);
1331 if (ksym == NULL) {
1332 ksdebug(change, "Could not find kernel_symbol "
1333 "structure for %s\n", name);
1334 continue;
1336 sym->value = (unsigned long)ksym;
1337 sym->candidate_vals = NULL;
1338 continue;
1341 sym->candidate_vals = kmalloc(sizeof(*sym->candidate_vals),
1342 GFP_KERNEL);
1343 if (sym->candidate_vals == NULL)
1344 return OUT_OF_MEMORY;
1345 INIT_LIST_HEAD(sym->candidate_vals);
1346 sym->value = 0;
1349 sym_arr = vmalloc(sizeof(*sym_arr) * size);
1350 if (sym_arr == NULL)
1351 return OUT_OF_MEMORY;
1353 for (symp = sym_arr, sym = start; symp < sym_arr + size && sym < end;
1354 sym++, symp++)
1355 *symp = sym;
1357 sort(sym_arr, size, sizeof(*sym_arr), compare_symbolp_names, NULL);
1359 lookup.change = change;
1360 lookup.arr = sym_arr;
1361 lookup.size = size;
1362 lookup.ret = OK;
1364 each_symbol(add_export_values, &lookup);
1365 ret = lookup.ret;
1366 #ifdef CONFIG_KALLSYMS
1367 if (ret == OK)
1368 ret = (__force abort_t)
1369 kallsyms_on_each_symbol(add_kallsyms_values, &lookup);
1370 #endif /* CONFIG_KALLSYMS */
1371 vfree(sym_arr);
1372 return ret;
1376 * Prepare the change's ksplice_symbol structures for run-pre matching
1378 * noinline to prevent garbage on the stack from confusing check_stack
1380 static noinline abort_t init_symbol_arrays(struct ksplice_mod_change *change)
1382 abort_t ret;
1384 ret = uniquify_symbols(change);
1385 if (ret != OK)
1386 return ret;
1388 ret = init_symbol_array(change, change->old_code.symbols,
1389 change->old_code.symbols_end);
1390 if (ret != OK)
1391 return ret;
1393 ret = init_symbol_array(change, change->new_code.symbols,
1394 change->new_code.symbols_end);
1395 if (ret != OK)
1396 return ret;
1398 return OK;
/* noinline to prevent garbage on the stack from confusing check_stack */
static noinline abort_t prepare_change(struct ksplice_mod_change *change)
{
	abort_t ret;

	ksdebug(change, "Preparing and checking %s\n", change->name);
	/* First pass: match sections without trusting .data symbols. */
	ret = match_change_sections(change, false);
	if (ret == NO_MATCH) {
		/* It is possible that by using relocations from .data sections
		 * we can successfully run-pre match the rest of the sections.
		 * To avoid using any symbols obtained from .data sections
		 * (which may be unreliable) in the post code, we first prepare
		 * the post code and then try to run-pre match the remaining
		 * sections with the help of .data sections.
		 */
		ksdebug(change, "Continuing without some sections; we might "
			"find them later.\n");
		ret = finalize_change(change);
		if (ret != OK) {
			ksdebug(change, "Aborted. Unable to continue without "
				"the unmatched sections.\n");
			return ret;
		}

		/* Second pass, now also considering .data sections. */
		ksdebug(change, "run-pre: Considering .data sections to find "
			"the unmatched sections\n");
		ret = match_change_sections(change, true);
		if (ret != OK)
			return ret;

		ksdebug(change, "run-pre: Found all previously unmatched "
			"sections\n");
		return OK;
	} else if (ret != OK) {
		return ret;
	}

	return finalize_change(change);
}
1442 * Finish preparing the change for insertion into the kernel.
1443 * Afterwards, the replacement code should be ready to run and the
1444 * ksplice_patches should all be ready for trampoline insertion.
1446 static abort_t finalize_change(struct ksplice_mod_change *change)
1448 abort_t ret;
1449 ret = apply_relocs(change, change->new_code.relocs,
1450 change->new_code.relocs_end);
1451 if (ret != OK)
1452 return ret;
1454 ret = finalize_patches(change);
1455 if (ret != OK)
1456 return ret;
1458 return OK;
/*
 * Validate each patch against the safety records and prepare its
 * trampoline.  Every non-export patch must fall entirely inside a
 * safety record established by run-pre matching.
 */
static abort_t finalize_patches(struct ksplice_mod_change *change)
{
	struct ksplice_patch *p;
	struct safety_record *rec;
	abort_t ret;

	for (p = change->patches; p < change->patches_end; p++) {
		bool found = false;
		/* Find the safety record covering this patch's oldaddr. */
		list_for_each_entry(rec, &change->safety_records, list) {
			if (rec->addr <= p->oldaddr &&
			    p->oldaddr < rec->addr + rec->size) {
				found = true;
				break;
			}
		}
		if (!found && p->type != KSPLICE_PATCH_EXPORT) {
			const struct ksplice_reloc *r = patch_reloc(change, p);
			if (r == NULL) {
				ksdebug(change, "A patch with no reloc at its "
					"oldaddr has no safety record\n");
				return NO_MATCH;
			}
			ksdebug(change, "No safety record for patch with "
				"oldaddr %s+%lx\n", r->symbol->label,
				r->target_addend);
			return NO_MATCH;
		}

		if (p->type == KSPLICE_PATCH_TEXT) {
			ret = prepare_trampoline(change, p);
			if (ret != OK)
				return ret;
		}

		/* NOTE: rec is only valid here when found is true —
		 * the guard below relies on that. */
		if (found && rec->addr + rec->size < p->oldaddr + p->size) {
			ksdebug(change, "Safety record %s is too short for "
				"patch\n", rec->label);
			return UNEXPECTED;
		}

		if (p->type == KSPLICE_PATCH_TEXT) {
			/* A zero repladdr marks a deleted function;
			 * route callers to ksplice_deleted(). */
			if (p->repladdr == 0)
				p->repladdr = (unsigned long)ksplice_deleted;
		}
	}
	return OK;
}
1509 /* noinline to prevent garbage on the stack from confusing check_stack */
1510 static noinline abort_t map_trampoline_pages(struct update *update)
1512 struct ksplice_mod_change *change;
1513 list_for_each_entry(change, &update->changes, list) {
1514 struct ksplice_patch *p;
1515 for (p = change->patches; p < change->patches_end; p++) {
1516 p->vaddr = map_writable((void *)p->oldaddr, p->size);
1517 if (p->vaddr == NULL) {
1518 ksdebug(change,
1519 "Unable to map oldaddr read/write\n");
1520 unmap_trampoline_pages(update);
1521 return UNEXPECTED;
1525 return OK;
1528 static void unmap_trampoline_pages(struct update *update)
1530 struct ksplice_mod_change *change;
1531 list_for_each_entry(change, &update->changes, list) {
1532 struct ksplice_patch *p;
1533 for (p = change->patches; p < change->patches_end; p++) {
1534 vunmap((void *)((unsigned long)p->vaddr & PAGE_MASK));
1535 p->vaddr = NULL;
1541 * map_writable creates a shadow page mapping of the range
1542 * [addr, addr + len) so that we can write to code mapped read-only.
1544 * It is similar to a generalized version of x86's text_poke. But
1545 * because one cannot use vmalloc/vfree() inside stop_machine, we use
1546 * map_writable to map the pages before stop_machine, then use the
1547 * mapping inside stop_machine, and unmap the pages afterwards.
1549 static void *map_writable(void *addr, size_t len)
1551 void *vaddr;
1552 int nr_pages = DIV_ROUND_UP(offset_in_page(addr) + len, PAGE_SIZE);
1553 struct page **pages = kmalloc(nr_pages * sizeof(*pages), GFP_KERNEL);
1554 void *page_addr = (void *)((unsigned long)addr & PAGE_MASK);
1555 int i;
1557 if (pages == NULL)
1558 return NULL;
1560 for (i = 0; i < nr_pages; i++) {
1561 if (__module_address((unsigned long)page_addr) == NULL) {
1562 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) || !defined(CONFIG_X86_64)
1563 pages[i] = virt_to_page(page_addr);
1564 #else /* LINUX_VERSION_CODE < && CONFIG_X86_64 */
1565 /* e3ebadd95cb621e2c7436f3d3646447ac9d5c16d was after 2.6.21 */
1566 pages[i] =
1567 pfn_to_page(__pa_symbol(page_addr) >> PAGE_SHIFT);
1568 #endif /* LINUX_VERSION_CODE || !CONFIG_X86_64 */
1569 WARN_ON(!PageReserved(pages[i]));
1570 } else {
1571 pages[i] = vmalloc_to_page(addr);
1573 if (pages[i] == NULL) {
1574 kfree(pages);
1575 return NULL;
1577 page_addr += PAGE_SIZE;
1579 vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
1580 kfree(pages);
1581 if (vaddr == NULL)
1582 return NULL;
1583 return vaddr + offset_in_page(addr);
/*
 * Ksplice adds a dependency on any symbol address used to resolve
 * relocations in the new_code module.
 *
 * Be careful to follow_trampolines so that we always depend on the
 * latest version of the target function, since that's the code that
 * will run if we call addr.
 */
static abort_t add_dependency_on_address(struct ksplice_mod_change *change,
					 unsigned long addr)
{
	struct ksplice_mod_change *c;
	struct module *m =
	    __module_text_address(follow_trampolines(change, addr));
	/* Addresses in vmlinux itself need no module dependency. */
	if (m == NULL)
		return OK;
	/* No dependency needed on the update's own new_code modules. */
	list_for_each_entry(c, &change->update->changes, list) {
		if (m == c->new_code_mod)
			return OK;
	}
	if (use_module(change->new_code_mod, m) != 1)
		return MODULE_BUSY;
	return OK;
}
1611 static abort_t apply_relocs(struct ksplice_mod_change *change,
1612 const struct ksplice_reloc *relocs,
1613 const struct ksplice_reloc *relocs_end)
1615 const struct ksplice_reloc *r;
1616 for (r = relocs; r < relocs_end; r++) {
1617 abort_t ret = apply_reloc(change, r);
1618 if (ret != OK)
1619 return ret;
1621 return OK;
1624 static abort_t apply_reloc(struct ksplice_mod_change *change,
1625 const struct ksplice_reloc *r)
1627 switch (r->howto->type) {
1628 case KSPLICE_HOWTO_RELOC:
1629 case KSPLICE_HOWTO_RELOC_PATCH:
1630 return apply_howto_reloc(change, r);
1631 case KSPLICE_HOWTO_DATE:
1632 case KSPLICE_HOWTO_TIME:
1633 return apply_howto_date(change, r);
1634 default:
1635 ksdebug(change, "Unexpected howto type %d\n", r->howto->type);
1636 return UNEXPECTED;
/*
 * Applies a relocation. Aborts if the symbol referenced in it has
 * not been uniquely resolved.
 */
static abort_t apply_howto_reloc(struct ksplice_mod_change *change,
				 const struct ksplice_reloc *r)
{
	abort_t ret;
	int canary_ret;
	unsigned long sym_addr;
	LIST_HEAD(vals);

	canary_ret = contains_canary(change, r->blank_addr, r->howto);
	if (canary_ret < 0)
		return UNEXPECTED;
	if (canary_ret == 0) {
		/* No canary present: the site was already filled in
		 * (alternative-instruction patching), so skip it. */
		ksdebug(change, "reloc: skipped %lx to %s+%lx (altinstr)\n",
			r->blank_addr, r->symbol->label, r->target_addend);
		return OK;
	}

#ifdef KSPLICE_STANDALONE
	if (!bootstrapped) {
		ret = add_system_map_candidates(change,
						change->new_code.system_map,
						change->new_code.system_map_end,
						r->symbol->label, &vals);
		if (ret != OK) {
			release_vals(&vals);
			return ret;
		}
	}
#endif /* KSPLICE_STANDALONE */
	ret = lookup_symbol(change, r->symbol, &vals);
	if (ret != OK) {
		release_vals(&vals);
		return ret;
	}
	/*
	 * Relocations for the oldaddr fields of patches must have
	 * been resolved via run-pre matching.
	 */
	if (!singular(&vals) || (r->symbol->candidate_vals != NULL &&
				 r->howto->type == KSPLICE_HOWTO_RELOC_PATCH)) {
		release_vals(&vals);
		ksdebug(change, "Failed to find %s for reloc\n",
			r->symbol->label);
		return FAILED_TO_FIND;
	}
	/* Exactly one candidate: that is the symbol's address. */
	sym_addr = list_entry(vals.next, struct candidate_val, list)->val;
	release_vals(&vals);

	ret = write_reloc_value(change, r, r->blank_addr,
				r->howto->pcrel ? sym_addr - r->blank_addr :
				sym_addr);
	if (ret != OK)
		return ret;

	/* Debug dump of the bytes just written, sized per the howto. */
	ksdebug(change, "reloc: %lx to %s+%lx (S=%lx ", r->blank_addr,
		r->symbol->label, r->target_addend, sym_addr);
	switch (r->howto->size) {
	case 1:
		ksdebug(change, "aft=%02x)\n", *(uint8_t *)r->blank_addr);
		break;
	case 2:
		ksdebug(change, "aft=%04x)\n", *(uint16_t *)r->blank_addr);
		break;
	case 4:
		ksdebug(change, "aft=%08x)\n", *(uint32_t *)r->blank_addr);
		break;
#if BITS_PER_LONG >= 64
	case 8:
		ksdebug(change, "aft=%016llx)\n", *(uint64_t *)r->blank_addr);
		break;
#endif /* BITS_PER_LONG */
	default:
		ksdebug(change, "Aborted. Invalid relocation size.\n");
		return UNEXPECTED;
	}
#ifdef KSPLICE_STANDALONE
	if (!bootstrapped)
		return OK;
#endif /* KSPLICE_STANDALONE */

	/*
	 * Create labelvals so that we can verify our choices in the
	 * second round of run-pre matching that considers data sections.
	 */
	ret = create_labelval(change, r->symbol, sym_addr, VAL);
	if (ret != OK)
		return ret;

	return add_dependency_on_address(change, sym_addr);
}
/*
 * Date relocations are created wherever __DATE__ or __TIME__ is used
 * in the kernel; we resolve them by simply copying in the date/time
 * obtained from run-pre matching the relevant compilation unit.
 */
static abort_t apply_howto_date(struct ksplice_mod_change *change,
				const struct ksplice_reloc *r)
{
	/* A non-NULL candidate_vals list means run-pre matching never
	 * uniquely resolved this symbol's value. */
	if (r->symbol->candidate_vals != NULL) {
		ksdebug(change, "Failed to find %s for date\n",
			r->symbol->label);
		return FAILED_TO_FIND;
	}
	/* symbol->value holds the address of the matched date/time
	 * string in the running kernel. */
	memcpy((unsigned char *)r->blank_addr,
	       (const unsigned char *)r->symbol->value, r->howto->size);
	return OK;
}
/*
 * Given a relocation and its run address, compute the address of the
 * symbol the relocation referenced, and store it in *valp.
 */
static abort_t read_reloc_value(struct ksplice_mod_change *change,
				const struct ksplice_reloc *r,
				unsigned long addr, unsigned long *valp)
{
	unsigned char bytes[sizeof(long)];
	unsigned long val;
	const struct ksplice_reloc_howto *howto = r->howto;

	if (howto->size <= 0 || howto->size > sizeof(long)) {
		ksdebug(change, "Aborted. Invalid relocation size.\n");
		return UNEXPECTED;
	}

	/* addr may not be mapped; report NO_MATCH instead of oopsing. */
	if (probe_kernel_read(bytes, (void *)addr, howto->size) == -EFAULT)
		return NO_MATCH;

	switch (howto->size) {
	case 1:
		val = *(uint8_t *)bytes;
		break;
	case 2:
		val = *(uint16_t *)bytes;
		break;
	case 4:
		val = *(uint32_t *)bytes;
		break;
#if BITS_PER_LONG >= 64
	case 8:
		val = *(uint64_t *)bytes;
		break;
#endif /* BITS_PER_LONG */
	default:
		ksdebug(change, "Aborted. Invalid relocation size.\n");
		return UNEXPECTED;
	}

	/* Keep only the bits belonging to this relocation field. */
	val &= howto->dst_mask;
	if (howto->signed_addend)
		/* Sign-extend from the top bit of dst_mask. */
		val |= -(val & (howto->dst_mask & ~(howto->dst_mask >> 1)));
	val <<= howto->rightshift;
	/* Strip the addends to recover the raw symbol address. */
	val -= r->insn_addend + r->target_addend;
	*valp = val;
	return OK;
}
/*
 * Given a relocation, the address of its storage unit, and the
 * address of the symbol the relocation references, write the
 * relocation's final value into the storage unit.
 */
static abort_t write_reloc_value(struct ksplice_mod_change *change,
				 const struct ksplice_reloc *r,
				 unsigned long addr, unsigned long sym_addr)
{
	unsigned long val = sym_addr + r->target_addend + r->insn_addend;
	const struct ksplice_reloc_howto *howto = r->howto;
	val >>= howto->rightshift;
	/* Merge val into the storage unit, touching only dst_mask bits. */
	switch (howto->size) {
	case 1:
		*(uint8_t *)addr = (*(uint8_t *)addr & ~howto->dst_mask) |
		    (val & howto->dst_mask);
		break;
	case 2:
		*(uint16_t *)addr = (*(uint16_t *)addr & ~howto->dst_mask) |
		    (val & howto->dst_mask);
		break;
	case 4:
		*(uint32_t *)addr = (*(uint32_t *)addr & ~howto->dst_mask) |
		    (val & howto->dst_mask);
		break;
#if BITS_PER_LONG >= 64
	case 8:
		*(uint64_t *)addr = (*(uint64_t *)addr & ~howto->dst_mask) |
		    (val & howto->dst_mask);
		break;
#endif /* BITS_PER_LONG */
	default:
		ksdebug(change, "Aborted. Invalid relocation size.\n");
		return UNEXPECTED;
	}

	/* Round-trip check: re-reading must yield sym_addr, otherwise
	 * the value did not fit in the relocation field. */
	if (read_reloc_value(change, r, addr, &val) != OK || val != sym_addr) {
		ksdebug(change, "Aborted. Relocation overflow.\n");
		return UNEXPECTED;
	}

	return OK;
}
1846 static abort_t create_module_list_entry(struct ksplice_mod_change *change,
1847 bool to_be_applied)
1849 struct ksplice_module_list_entry *entry =
1850 kmalloc(sizeof(*entry), GFP_KERNEL);
1851 if (entry == NULL)
1852 return OUT_OF_MEMORY;
1853 entry->new_code_mod_name =
1854 kstrdup(change->new_code_mod->name, GFP_KERNEL);
1855 if (entry->new_code_mod_name == NULL) {
1856 kfree(entry);
1857 return OUT_OF_MEMORY;
1859 entry->target_mod_name = kstrdup(change->target_name, GFP_KERNEL);
1860 if (entry->target_mod_name == NULL) {
1861 kfree(entry->new_code_mod_name);
1862 kfree(entry);
1863 return OUT_OF_MEMORY;
1865 /* The update's kid is guaranteed to outlast the module_list_entry */
1866 entry->kid = change->update->kid;
1867 entry->applied = to_be_applied;
1868 list_add(&entry->update_list, &change->update->ksplice_module_list);
1869 return OK;
1872 static void cleanup_module_list_entries(struct update *update)
1874 struct ksplice_module_list_entry *entry;
1875 list_for_each_entry(entry, &update->ksplice_module_list, update_list) {
1876 kfree(entry->target_mod_name);
1877 kfree(entry->new_code_mod_name);
1879 clear_list(&update->ksplice_module_list,
1880 struct ksplice_module_list_entry, update_list);
/* Replacement address used for functions deleted by the patch */
static void __attribute__((noreturn)) ksplice_deleted(void)
{
	printk(KERN_CRIT "Called a kernel function deleted by Ksplice!\n");
	BUG();
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
/* 91768d6c2bad0d2766a166f13f2f57e197de3458 was after 2.6.19 */
	/* Loop forever so the noreturn attribute is honored on kernels
	 * where BUG() itself does not guarantee no return. */
	for (;;);
#endif
}
/* Floodfill to run-pre match the sections within a change. */
static abort_t match_change_sections(struct ksplice_mod_change *change,
				     bool consider_data_sections)
{
	struct ksplice_section *sect;
	abort_t ret;
	int remaining = 0;
	bool progress;

	/* Count unmatched non-data, non-string sections; only these
	 * drive the termination condition below. */
	for (sect = change->old_code.sections;
	     sect < change->old_code.sections_end; sect++) {
		if ((sect->flags & KSPLICE_SECTION_DATA) == 0 &&
		    (sect->flags & KSPLICE_SECTION_STRING) == 0 &&
		    (sect->flags & KSPLICE_SECTION_MATCHED) == 0)
			remaining++;
	}

	while (remaining > 0) {
		progress = false;
		for (sect = change->old_code.sections;
		     sect < change->old_code.sections_end; sect++) {
			if ((sect->flags & KSPLICE_SECTION_MATCHED) != 0)
				continue;
			/* Skip data sections unless the caller asked
			 * for them; always skip string sections. */
			if ((!consider_data_sections &&
			     (sect->flags & KSPLICE_SECTION_DATA) != 0) ||
			    (sect->flags & KSPLICE_SECTION_STRING) != 0)
				continue;
			ret = find_section(change, sect);
			if (ret == OK) {
				sect->flags |= KSPLICE_SECTION_MATCHED;
				/* Data sections don't count toward
				 * remaining (see the count above). */
				if ((sect->flags & KSPLICE_SECTION_DATA) == 0)
					remaining--;
				progress = true;
			} else if (ret != NO_MATCH) {
				return ret;
			}
		}

		if (progress)
			continue;

		/* No section matched in a full pass: report what is
		 * left unmatched and give up. */
		for (sect = change->old_code.sections;
		     sect < change->old_code.sections_end; sect++) {
			if ((sect->flags & KSPLICE_SECTION_MATCHED) != 0 ||
			    (sect->flags & KSPLICE_SECTION_STRING) != 0)
				continue;
			ksdebug(change, "run-pre: could not match %s "
				"section %s\n",
				(sect->flags & KSPLICE_SECTION_DATA) != 0 ?
				"data" :
				(sect->flags & KSPLICE_SECTION_RODATA) != 0 ?
				"rodata" : "text", sect->symbol->label);
		}
		ksdebug(change, "Aborted. run-pre: could not match some "
			"sections.\n");
		return NO_MATCH;
	}
	return OK;
}
/*
 * Search for the section in the running kernel.  Returns OK if and
 * only if it finds precisely one address in the kernel matching the
 * section.
 *
 * Candidate addresses are gathered from the System.map data (in
 * standalone builds), from lookup_symbol(), and — as a last resort on
 * standalone kernels without kallsyms — from a brute-force scan.
 * Each candidate is vetted with try_addr(); only a single surviving
 * candidate constitutes a match.
 */
static abort_t find_section(struct ksplice_mod_change *change,
			    struct ksplice_section *sect)
{
	int i;
	abort_t ret;
	unsigned long run_addr;
	LIST_HEAD(vals);		/* candidate addresses; freed before every return */
	struct candidate_val *v, *n;

#ifdef KSPLICE_STANDALONE
	ret = add_system_map_candidates(change, change->old_code.system_map,
					change->old_code.system_map_end,
					sect->symbol->label, &vals);
	if (ret != OK) {
		release_vals(&vals);
		return ret;
	}
#endif /* KSPLICE_STANDALONE */
	ret = lookup_symbol(change, sect->symbol, &vals);
	if (ret != OK) {
		release_vals(&vals);
		return ret;
	}

	ksdebug(change, "run-pre: starting sect search for %s\n",
		sect->symbol->label);

	/* Winnow the list: drop every candidate that fails a trial match. */
	list_for_each_entry_safe(v, n, &vals, list) {
		run_addr = v->val;

		yield();	/* this scan can be long; don't hog the CPU */
		ret = try_addr(change, sect, run_addr, NULL, RUN_PRE_INITIAL);
		if (ret == NO_MATCH) {
			list_del(&v->list);
			kfree(v);
		} else if (ret != OK) {
			release_vals(&vals);
			return ret;
		}
	}

#if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
	/* No symbol sources panned out; scan memory for the section bytes. */
	if (list_empty(&vals) && (sect->flags & KSPLICE_SECTION_DATA) == 0) {
		ret = brute_search_all(change, sect, &vals);
		if (ret != OK) {
			release_vals(&vals);
			return ret;
		}
		/*
		 * Make sure run-pre matching output is displayed if
		 * brute_search succeeds.
		 */
		if (singular(&vals)) {
			run_addr = list_entry(vals.next, struct candidate_val,
					      list)->val;
			ret = try_addr(change, sect, run_addr, NULL,
				       RUN_PRE_INITIAL);
			if (ret != OK) {
				ksdebug(change, "run-pre: Debug run failed for "
					"sect %s:\n", sect->symbol->label);
				release_vals(&vals);
				return ret;
			}
		}
	}
#endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */

	if (singular(&vals)) {
		/* Exactly one candidate: do the final, record-producing run. */
		LIST_HEAD(safety_records);
		run_addr = list_entry(vals.next, struct candidate_val,
				      list)->val;
		ret = try_addr(change, sect, run_addr, &safety_records,
			       RUN_PRE_FINAL);
		release_vals(&vals);
		if (ret != OK) {
			clear_list(&safety_records, struct safety_record, list);
			ksdebug(change, "run-pre: Final run failed for sect "
				"%s:\n", sect->symbol->label);
		} else {
			/* Success: hand the safety records to the change. */
			list_splice(&safety_records, &change->safety_records);
		}
		return ret;
	} else if (!list_empty(&vals)) {
		/* Ambiguous: log up to 5 candidates, then give up. */
		struct candidate_val *val;
		ksdebug(change, "run-pre: multiple candidates for sect %s:\n",
			sect->symbol->label);
		i = 0;
		list_for_each_entry(val, &vals, list) {
			i++;
			ksdebug(change, "%lx\n", val->val);
			if (i > 5) {
				ksdebug(change, "...\n");
				break;
			}
		}
		release_vals(&vals);
		return NO_MATCH;
	}
	release_vals(&vals);
	return NO_MATCH;
}
/*
 * try_addr is the interface to run-pre matching.  Its primary
 * purpose is to manage debugging information for run-pre matching;
 * all the hard work is in run_pre_cmp.
 *
 * Returns OK when run_addr matches sect, NO_MATCH when it doesn't,
 * or another abort_t on error.  In RUN_PRE_FINAL mode a successful
 * match commits the temporary label values (VAL); in all other modes
 * they are rolled back (NOVAL) before returning.
 */
static abort_t try_addr(struct ksplice_mod_change *change,
			struct ksplice_section *sect,
			unsigned long run_addr,
			struct list_head *safety_records,
			enum run_pre_mode mode)
{
	abort_t ret;
	const struct module *run_module = __module_address(run_addr);

	/* A candidate inside our own new-code module is nonsensical. */
	if (run_module == change->new_code_mod) {
		ksdebug(change, "run-pre: unexpected address %lx in new_code "
			"module %s for sect %s\n", run_addr, run_module->name,
			sect->symbol->label);
		return UNEXPECTED;
	}
	if (!patches_module(run_module, change->target)) {
		ksdebug(change, "run-pre: ignoring address %lx in other module "
			"%s for sect %s\n", run_addr, run_module == NULL ?
			"vmlinux" : run_module->name, sect->symbol->label);
		return NO_MATCH;
	}

	/* Tentatively bind the section symbol to this address. */
	ret = create_labelval(change, sect->symbol, run_addr, TEMP);
	if (ret != OK)
		return ret;

#ifdef CONFIG_FUNCTION_DATA_SECTIONS
	ret = run_pre_cmp(change, sect, run_addr, safety_records, mode);
#else /* !CONFIG_FUNCTION_DATA_SECTIONS */
	/* Text sections need the arch-aware comparator when functions
	 * are not in individual sections. */
	if ((sect->flags & KSPLICE_SECTION_TEXT) != 0)
		ret = arch_run_pre_cmp(change, sect, run_addr, safety_records,
				       mode);
	else
		ret = run_pre_cmp(change, sect, run_addr, safety_records, mode);
#endif /* CONFIG_FUNCTION_DATA_SECTIONS */
	if (ret == NO_MATCH && mode != RUN_PRE_FINAL) {
		set_temp_labelvals(change, NOVAL);
		ksdebug(change, "run-pre: %s sect %s does not match (r_a=%lx "
			"p_a=%lx s=%lx)\n",
			(sect->flags & KSPLICE_SECTION_RODATA) != 0 ? "rodata" :
			(sect->flags & KSPLICE_SECTION_DATA) != 0 ? "data" :
			"text", sect->symbol->label, run_addr, sect->address,
			sect->size);
		ksdebug(change, "run-pre: ");
		/* At debug level >= 1, rerun the comparison purely to
		 * produce a byte-by-byte trace of the mismatch. */
		if (change->update->debug >= 1) {
#ifdef CONFIG_FUNCTION_DATA_SECTIONS
			ret = run_pre_cmp(change, sect, run_addr,
					  safety_records, RUN_PRE_DEBUG);
#else /* !CONFIG_FUNCTION_DATA_SECTIONS */
			if ((sect->flags & KSPLICE_SECTION_TEXT) != 0)
				ret = arch_run_pre_cmp(change, sect, run_addr,
						       safety_records,
						       RUN_PRE_DEBUG);
			else
				ret = run_pre_cmp(change, sect, run_addr,
						  safety_records,
						  RUN_PRE_DEBUG);
#endif /* CONFIG_FUNCTION_DATA_SECTIONS */
			set_temp_labelvals(change, NOVAL);
		}
		ksdebug(change, "\n");
		return ret;
	} else if (ret != OK) {
		set_temp_labelvals(change, NOVAL);
		return ret;
	}

	if (mode != RUN_PRE_FINAL) {
		/* Trial run only: record nothing permanent yet. */
		set_temp_labelvals(change, NOVAL);
		ksdebug(change, "run-pre: candidate for sect %s=%lx\n",
			sect->symbol->label, run_addr);
		return OK;
	}

	/* Final run succeeded: commit the temporary label values. */
	set_temp_labelvals(change, VAL);
	ksdebug(change, "run-pre: found sect %s=%lx\n", sect->symbol->label,
		run_addr);
	return OK;
}
/*
 * run_pre_cmp is the primary run-pre matching function; it determines
 * whether the given ksplice_section matches the code or data in the
 * running kernel starting at run_addr.
 *
 * If run_pre_mode is RUN_PRE_FINAL, a safety record for the matched
 * section is created.
 *
 * The run_pre_mode is also used to determine what debugging
 * information to display.
 *
 * The comparison walks the pre bytes and the run bytes in lockstep.
 * Relocated spans are delegated to handle_reloc(); paravirt patch
 * sites (text only) to handle_paravirt(); everything else must match
 * byte-for-byte — except in data sections, where content may differ
 * at runtime and only the relocations are checked.
 */
static abort_t run_pre_cmp(struct ksplice_mod_change *change,
			   const struct ksplice_section *sect,
			   unsigned long run_addr,
			   struct list_head *safety_records,
			   enum run_pre_mode mode)
{
	int matched = 0;
	abort_t ret;
	const struct ksplice_reloc *r, *finger;
	const unsigned char *pre, *run, *pre_start, *run_start;
	unsigned char runval;

	pre_start = (const unsigned char *)sect->address;
	run_start = (const unsigned char *)run_addr;

	/* Amortized-O(1) cursor over this section's relocations. */
	finger = init_reloc_search(change, sect);

	pre = pre_start;
	run = run_start;
	while (pre < pre_start + sect->size) {
		unsigned long offset = pre - pre_start;
		ret = lookup_reloc(change, &finger, (unsigned long)pre, &r);
		if (ret == OK) {
			/* A relocation covers this span; compare via howto. */
			ret = handle_reloc(change, sect, r, (unsigned long)run,
					   mode);
			if (ret != OK) {
				if (mode == RUN_PRE_INITIAL)
					ksdebug(change, "reloc in sect does "
						"not match after %lx/%lx "
						"bytes\n", offset, sect->size);
				return ret;
			}
			if (mode == RUN_PRE_DEBUG)
				print_bytes(change, run, r->howto->size, pre,
					    r->howto->size);
			/* Skip over the relocated field in both streams. */
			pre += r->howto->size;
			run += r->howto->size;
			finger++;
			continue;
		} else if (ret != NO_MATCH) {
			return ret;
		}

		if ((sect->flags & KSPLICE_SECTION_TEXT) != 0) {
			/* Paravirt patching may have rewritten these bytes;
			 * matched is set to the length it accounted for. */
			ret = handle_paravirt(change, (unsigned long)pre,
					      (unsigned long)run, &matched);
			if (ret != OK)
				return ret;
			if (matched != 0) {
				if (mode == RUN_PRE_DEBUG)
					print_bytes(change, run, matched, pre,
						    matched);
				pre += matched;
				run += matched;
				continue;
			}
		}

		/* Safe read: the candidate address may be unmapped. */
		if (probe_kernel_read(&runval, (void *)run, 1) == -EFAULT) {
			if (mode == RUN_PRE_INITIAL)
				ksdebug(change, "sect unmapped after %lx/%lx "
					"bytes\n", offset, sect->size);
			return NO_MATCH;
		}

		/* Data sections are allowed to differ byte-wise. */
		if (runval != *pre &&
		    (sect->flags & KSPLICE_SECTION_DATA) == 0) {
			if (mode == RUN_PRE_INITIAL)
				ksdebug(change, "sect does not match after "
					"%lx/%lx bytes\n", offset, sect->size);
			if (mode == RUN_PRE_DEBUG) {
				print_bytes(change, run, 1, pre, 1);
				ksdebug(change, "[p_o=%lx] ! ", offset);
				print_bytes(change, run + 1, 2, pre + 1, 2);
			}
			return NO_MATCH;
		}
		if (mode == RUN_PRE_DEBUG)
			print_bytes(change, run, 1, pre, 1);
		pre++;
		run++;
	}
	/* Whole section matched; record it (no-op unless RUN_PRE_FINAL —
	 * safety_records is NULL in the other modes' callers). */
	return create_safety_record(change, sect, safety_records, run_addr,
				    run - run_start);
}
/*
 * Dump a hex comparison of runc run-time bytes against prec pre
 * bytes via ksdebug.  Bytes present in both buffers print as "xx "
 * when equal and "xx/yy " when they differ; surplus bytes on either
 * side print with the missing half left empty ("xx/ " or "/yy ").
 */
static void print_bytes(struct ksplice_mod_change *change,
			const unsigned char *run, int runc,
			const unsigned char *pre, int prec)
{
	int common = min(runc, prec);
	int i;

	/* Overlapping prefix: collapse equal bytes to a single value. */
	for (i = 0; i < common; i++) {
		if (run[i] != pre[i])
			ksdebug(change, "%02x/%02x ", run[i], pre[i]);
		else
			ksdebug(change, "%02x ", run[i]);
	}
	/* Run-side tail with no pre counterpart. */
	for (i = common; i < runc; i++)
		ksdebug(change, "%02x/ ", run[i]);
	/* Pre-side tail with no run counterpart. */
	for (i = common; i < prec; i++)
		ksdebug(change, "/%02x ", pre[i]);
}
2261 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
2262 static abort_t brute_search(struct ksplice_mod_change *change,
2263 struct ksplice_section *sect,
2264 const void *start, unsigned long len,
2265 struct list_head *vals)
2267 unsigned long addr;
2268 char run, pre;
2269 abort_t ret;
2271 for (addr = (unsigned long)start; addr < (unsigned long)start + len;
2272 addr++) {
2273 if (addr % 100000 == 0)
2274 yield();
2276 if (probe_kernel_read(&run, (void *)addr, 1) == -EFAULT)
2277 return OK;
2279 pre = *(const unsigned char *)(sect->address);
2281 if (run != pre)
2282 continue;
2284 ret = try_addr(change, sect, addr, NULL, RUN_PRE_INITIAL);
2285 if (ret == OK) {
2286 ret = add_candidate_val(change, vals, addr);
2287 if (ret != OK)
2288 return ret;
2289 } else if (ret != NO_MATCH) {
2290 return ret;
2294 return OK;
/* The kernel's private module list; EXTRACT_SYMBOL resolves it for us. */
extern struct list_head modules;

/*
 * Brute-force search every plausible region of kernel memory for
 * sect: the core and init areas of each loaded module that the
 * change could be patching, then the core kernel text.  Candidates
 * found are accumulated in vals.
 */
static abort_t brute_search_all(struct ksplice_mod_change *change,
				struct ksplice_section *sect,
				struct list_head *vals)
{
	struct module *m;
	abort_t ret = OK;
	int saved_debug;

	ksdebug(change, "brute_search: searching for %s\n",
		sect->symbol->label);
	/* Silence per-candidate debug chatter for the duration of the
	 * scan; restored on every exit path via the out label. */
	saved_debug = change->update->debug;
	change->update->debug = 0;

	list_for_each_entry(m, &modules, list) {
		/* Skip modules this change doesn't target, and our own
		 * new-code module. */
		if (!patches_module(m, change->target) ||
		    m == change->new_code_mod)
			continue;
		ret = brute_search(change, sect, m->module_core, m->core_size,
				   vals);
		if (ret != OK)
			goto out;
		ret = brute_search(change, sect, m->module_init, m->init_size,
				   vals);
		if (ret != OK)
			goto out;
	}

	/* Finally, the core kernel image itself. */
	ret = brute_search(change, sect, (const void *)init_mm.start_code,
			   init_mm.end_code - init_mm.start_code, vals);

out:
	change->update->debug = saved_debug;
	return ret;
}
2333 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
/* Half-open address interval [address, address + size), used as the
 * bsearch key when looking up relocations by blank_addr. */
struct range {
	unsigned long address;
	unsigned long size;
};
2340 static int reloc_bsearch_compare(const void *key, const void *elt)
2342 const struct range *range = key;
2343 const struct ksplice_reloc *r = elt;
2344 if (range->address + range->size <= r->blank_addr)
2345 return -1;
2346 if (range->address > r->blank_addr)
2347 return 1;
2348 return 0;
/*
 * Find the FIRST relocation (in the sorted array [start, end)) whose
 * blank_addr lies within [address, address + size), or NULL if there
 * is none.  bsearch may land on any reloc inside the range, so we
 * rewind to the leftmost one afterwards.
 */
static const struct ksplice_reloc *find_reloc(const struct ksplice_reloc *start,
					      const struct ksplice_reloc *end,
					      unsigned long address,
					      unsigned long size)
{
	const struct ksplice_reloc *r;
	struct range range = { address, size };
	r = bsearch((void *)&range, start, end - start, sizeof(*r),
		    reloc_bsearch_compare);
	if (r == NULL)
		return NULL;
	/* Rewind to the first reloc still inside the range. */
	while (r > start && (r - 1)->blank_addr >= address)
		r--;
	return r;
}
2367 static const struct ksplice_reloc *
2368 init_reloc_search(struct ksplice_mod_change *change,
2369 const struct ksplice_section *sect)
2371 const struct ksplice_reloc *r;
2372 r = find_reloc(change->old_code.relocs, change->old_code.relocs_end,
2373 sect->address, sect->size);
2374 if (r == NULL)
2375 return change->old_code.relocs_end;
2376 return r;
/*
 * lookup_reloc implements an amortized O(1) lookup for the next
 * old_code relocation.  It must be called with a strictly increasing
 * sequence of addresses.
 *
 * The fingerp is private data for lookup_reloc, and needs to have
 * been initialized as a pointer to the result of find_reloc (or
 * init_reloc_search).
 *
 * Returns OK with *relocp set when a relocation covers addr,
 * NO_MATCH when none does, or UNEXPECTED on malformed input.
 */
static abort_t lookup_reloc(struct ksplice_mod_change *change,
			    const struct ksplice_reloc **fingerp,
			    unsigned long addr,
			    const struct ksplice_reloc **relocp)
{
	const struct ksplice_reloc *r = *fingerp;
	int canary_ret;

	/* Advance past relocations that end at or before addr; a
	 * zero-size reloc exactly at addr still counts as covering it. */
	while (r < change->old_code.relocs_end &&
	       addr >= r->blank_addr + r->howto->size &&
	       !(addr == r->blank_addr && r->howto->size == 0))
		r++;
	*fingerp = r;
	if (r == change->old_code.relocs_end)
		return NO_MATCH;
	if (addr < r->blank_addr)
		return NO_MATCH;
	*relocp = r;
	/* Non-RELOC howtos (date/time/bug/extable) need no canary check. */
	if (r->howto->type != KSPLICE_HOWTO_RELOC)
		return OK;

	canary_ret = contains_canary(change, r->blank_addr, r->howto);
	if (canary_ret < 0)
		return UNEXPECTED;
	if (canary_ret == 0) {
		/* No canary: the field was already filled in (e.g. by
		 * alternative-instruction patching); skip this reloc. */
		ksdebug(change, "run-pre: reloc skipped at p_a=%lx to %s+%lx "
			"(altinstr)\n", r->blank_addr, r->symbol->label,
			r->target_addend);
		return NO_MATCH;
	}
	/* A live reloc must be hit exactly at its start address. */
	if (addr != r->blank_addr) {
		ksdebug(change, "Invalid nonzero relocation offset\n");
		return UNEXPECTED;
	}
	return OK;
}
/*
 * Dispatch a relocation comparison to the handler matching its
 * howto type.  Returns UNEXPECTED for howto types this kernel
 * configuration cannot handle.
 */
static abort_t handle_reloc(struct ksplice_mod_change *change,
			    const struct ksplice_section *sect,
			    const struct ksplice_reloc *r,
			    unsigned long run_addr, enum run_pre_mode mode)
{
	switch (r->howto->type) {
	case KSPLICE_HOWTO_RELOC:
		return handle_howto_reloc(change, sect, r, run_addr, mode);
	case KSPLICE_HOWTO_DATE:
	case KSPLICE_HOWTO_TIME:
		return handle_howto_date(change, sect, r, run_addr, mode);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
#ifdef CONFIG_BUG
	case KSPLICE_HOWTO_BUG:
		return handle_bug(change, r, run_addr);
#endif /* CONFIG_BUG */
#else /* LINUX_VERSION_CODE < */
/* 7664c5a1da4711bb6383117f51b94c8dc8f3f1cd was after 2.6.19 */
#endif /* LINUX_VERSION_CODE */
	case KSPLICE_HOWTO_EXTABLE:
		return handle_extable(change, r, run_addr);
	default:
		ksdebug(change, "Unexpected howto type %d\n", r->howto->type);
		return UNEXPECTED;
	}
}
/*
 * For date/time relocations, we check that the sequence of bytes
 * matches the format of a date or time.
 *
 * __DATE__/__TIME__ strings differ between the pre and run builds,
 * so instead of comparing contents we only verify the run bytes are
 * shaped like "hh:mm:ss" (TIME) or "Mmm dd yyyy" (DATE), then bind
 * the reloc's symbol to run_addr.
 */
static abort_t handle_howto_date(struct ksplice_mod_change *change,
				 const struct ksplice_section *sect,
				 const struct ksplice_reloc *r,
				 unsigned long run_addr, enum run_pre_mode mode)
{
	abort_t ret;
	char *buf = kmalloc(r->howto->size, GFP_KERNEL);

	if (buf == NULL)
		return OUT_OF_MEMORY;
	/* Copy the run bytes safely; the address may be unmapped. */
	if (probe_kernel_read(buf, (void *)run_addr, r->howto->size) == -EFAULT) {
		ret = NO_MATCH;
		goto out;
	}

	switch (r->howto->type) {
	case KSPLICE_HOWTO_TIME:
		/* "hh:mm:ss" */
		if (isdigit(buf[0]) && isdigit(buf[1]) && buf[2] == ':' &&
		    isdigit(buf[3]) && isdigit(buf[4]) && buf[5] == ':' &&
		    isdigit(buf[6]) && isdigit(buf[7]))
			ret = OK;
		else
			ret = NO_MATCH;
		break;
	case KSPLICE_HOWTO_DATE:
		/* "Mmm dd yyyy" — day may be space-padded */
		if (isalpha(buf[0]) && isalpha(buf[1]) && isalpha(buf[2]) &&
		    buf[3] == ' ' && (buf[4] == ' ' || isdigit(buf[4])) &&
		    isdigit(buf[5]) && buf[6] == ' ' && isdigit(buf[7]) &&
		    isdigit(buf[8]) && isdigit(buf[9]) && isdigit(buf[10]))
			ret = OK;
		else
			ret = NO_MATCH;
		break;
	default:
		ret = UNEXPECTED;
	}
	if (ret == NO_MATCH && mode == RUN_PRE_INITIAL)
		ksdebug(change, "%s string: \"%.*s\" does not match format\n",
			r->howto->type == KSPLICE_HOWTO_DATE ? "date" : "time",
			r->howto->size, buf);

	if (ret != OK)
		goto out;
	ret = create_labelval(change, r->symbol, run_addr, TEMP);
out:
	kfree(buf);
	return ret;
}
2506 * Extract the value of a symbol used in a relocation in the pre code
2507 * during run-pre matching, giving an error if it conflicts with a
2508 * previously found value of that symbol
2510 static abort_t handle_howto_reloc(struct ksplice_mod_change *change,
2511 const struct ksplice_section *sect,
2512 const struct ksplice_reloc *r,
2513 unsigned long run_addr,
2514 enum run_pre_mode mode)
2516 struct ksplice_section *sym_sect = symbol_section(change, r->symbol);
2517 unsigned long offset = r->target_addend;
2518 unsigned long val;
2519 abort_t ret;
2521 ret = read_reloc_value(change, r, run_addr, &val);
2522 if (ret != OK)
2523 return ret;
2524 if (r->howto->pcrel)
2525 val += run_addr;
2527 #ifndef CONFIG_FUNCTION_DATA_SECTIONS
2528 if (sym_sect == NULL || sym_sect->match_map == NULL || offset == 0) {
2530 } else if (offset < 0 || offset >= sym_sect->size) {
2531 ksdebug(change, "Out of range relocation: %s+%lx -> %s+%lx",
2532 sect->symbol->label, r->blank_addr - sect->address,
2533 r->symbol->label, offset);
2534 return NO_MATCH;
2535 } else if (sect == sym_sect && sect->match_map[offset] == NULL) {
2536 sym_sect->match_map[offset] =
2537 (const unsigned char *)r->symbol->value + offset;
2538 } else if (sect == sym_sect && (unsigned long)sect->match_map[offset] ==
2539 r->symbol->value + offset) {
2541 } else if (sect == sym_sect) {
2542 ksdebug(change, "Relocations to nonmatching locations within "
2543 "section %s: %lx does not match %lx\n",
2544 sect->symbol->label, offset,
2545 (unsigned long)sect->match_map[offset] -
2546 r->symbol->value);
2547 return NO_MATCH;
2548 } else if ((sym_sect->flags & KSPLICE_SECTION_MATCHED) == 0) {
2549 if (mode == RUN_PRE_INITIAL)
2550 ksdebug(change, "Delaying matching of %s due to reloc "
2551 "from to unmatching section: %s+%lx\n",
2552 sect->symbol->label, r->symbol->label, offset);
2553 return NO_MATCH;
2554 } else if (sym_sect->match_map[offset] == NULL) {
2555 if (mode == RUN_PRE_INITIAL)
2556 ksdebug(change, "Relocation not to instruction "
2557 "boundary: %s+%lx -> %s+%lx",
2558 sect->symbol->label, r->blank_addr -
2559 sect->address, r->symbol->label, offset);
2560 return NO_MATCH;
2561 } else if ((unsigned long)sym_sect->match_map[offset] !=
2562 r->symbol->value + offset) {
2563 if (mode == RUN_PRE_INITIAL)
2564 ksdebug(change, "Match map shift %s+%lx: %lx != %lx\n",
2565 r->symbol->label, offset,
2566 r->symbol->value + offset,
2567 (unsigned long)sym_sect->match_map[offset]);
2568 val += r->symbol->value + offset -
2569 (unsigned long)sym_sect->match_map[offset];
2571 #endif /* !CONFIG_FUNCTION_DATA_SECTIONS */
2573 if (mode == RUN_PRE_INITIAL)
2574 ksdebug(change, "run-pre: reloc at r_a=%lx p_a=%lx to %s+%lx: "
2575 "found %s = %lx\n", run_addr, r->blank_addr,
2576 r->symbol->label, offset, r->symbol->label, val);
2578 if (contains_canary(change, run_addr, r->howto) != 0) {
2579 ksdebug(change, "Aborted. Unexpected canary in run code at %lx"
2580 "\n", run_addr);
2581 return UNEXPECTED;
2584 if ((sect->flags & KSPLICE_SECTION_DATA) != 0 &&
2585 sect->symbol == r->symbol)
2586 return OK;
2587 ret = create_labelval(change, r->symbol, val, TEMP);
2588 if (ret == NO_MATCH && mode == RUN_PRE_INITIAL)
2589 ksdebug(change, "run-pre: reloc at r_a=%lx p_a=%lx: labelval "
2590 "%s = %lx does not match expected %lx\n", run_addr,
2591 r->blank_addr, r->symbol->label, r->symbol->value, val);
2593 if (ret != OK)
2594 return ret;
2595 if (sym_sect != NULL && (sym_sect->flags & KSPLICE_SECTION_MATCHED) == 0
2596 && (sym_sect->flags & KSPLICE_SECTION_STRING) != 0) {
2597 if (mode == RUN_PRE_INITIAL)
2598 ksdebug(change, "Recursively comparing string section "
2599 "%s\n", sym_sect->symbol->label);
2600 else if (mode == RUN_PRE_DEBUG)
2601 ksdebug(change, "[str start] ");
2602 ret = run_pre_cmp(change, sym_sect, val, NULL, mode);
2603 if (mode == RUN_PRE_DEBUG)
2604 ksdebug(change, "[str end] ");
2605 if (ret == OK && mode == RUN_PRE_INITIAL)
2606 ksdebug(change, "Successfully matched string section %s"
2607 "\n", sym_sect->symbol->label);
2608 else if (mode == RUN_PRE_INITIAL)
2609 ksdebug(change, "Failed to match string section %s\n",
2610 sym_sect->symbol->label);
2612 return ret;
2615 #ifdef CONFIG_GENERIC_BUG
2616 #ifdef KSPLICE_NO_KERNEL_SUPPORT
2617 EXTRACT_SYMBOL(find_bug);
2618 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
2619 static abort_t handle_bug(struct ksplice_mod_change *change,
2620 const struct ksplice_reloc *r, unsigned long run_addr)
2622 const struct bug_entry *run_bug = find_bug(run_addr);
2623 struct ksplice_section *bug_sect = symbol_section(change, r->symbol);
2624 if (run_bug == NULL)
2625 return NO_MATCH;
2626 if (bug_sect == NULL)
2627 return UNEXPECTED;
2628 return create_labelval(change, bug_sect->symbol, (unsigned long)run_bug,
2629 TEMP);
2631 #endif /* CONFIG_GENERIC_BUG */
2633 #ifdef KSPLICE_NO_KERNEL_SUPPORT
2634 EXTRACT_SYMBOL(search_exception_tables);
2635 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
2637 static abort_t handle_extable(struct ksplice_mod_change *change,
2638 const struct ksplice_reloc *r,
2639 unsigned long run_addr)
2641 const struct exception_table_entry *run_ent =
2642 search_exception_tables(run_addr);
2643 struct ksplice_section *ex_sect = symbol_section(change, r->symbol);
2644 if (run_ent == NULL)
2645 return NO_MATCH;
2646 if (ex_sect == NULL)
2647 return UNEXPECTED;
2648 return create_labelval(change, ex_sect->symbol, (unsigned long)run_ent,
2649 TEMP);
2652 static int symbol_section_bsearch_compare(const void *a, const void *b)
2654 const struct ksplice_symbol *sym = a;
2655 const struct ksplice_section *sect = b;
2656 return strcmp(sym->label, sect->symbol->label);
2659 static int compare_section_labels(const void *va, const void *vb)
2661 const struct ksplice_section *a = va, *b = vb;
2662 return strcmp(a->symbol->label, b->symbol->label);
2665 static struct ksplice_section *symbol_section(struct ksplice_mod_change *change,
2666 const struct ksplice_symbol *sym)
2668 return bsearch(sym, change->old_code.sections,
2669 change->old_code.sections_end -
2670 change->old_code.sections,
2671 sizeof(struct ksplice_section),
2672 symbol_section_bsearch_compare);
2675 /* Find the relocation for the oldaddr of a ksplice_patch */
2676 static const struct ksplice_reloc *
2677 patch_reloc(struct ksplice_mod_change *change,
2678 const struct ksplice_patch *p)
2680 unsigned long addr = (unsigned long)&p->oldaddr;
2681 const struct ksplice_reloc *r =
2682 find_reloc(change->new_code.relocs, change->new_code.relocs_end,
2683 addr, sizeof(addr));
2684 if (r == NULL || r->blank_addr < addr ||
2685 r->blank_addr >= addr + sizeof(addr))
2686 return NULL;
2687 return r;
/*
 * Populates vals with the possible values for ksym from the various
 * sources Ksplice uses to resolve symbols: a value already detected
 * for the symbol, the module's exit handler (for "cleanup_module"),
 * the symbol's precomputed candidate list, and exports created by
 * other changes in the same update.
 */
static abort_t lookup_symbol(struct ksplice_mod_change *change,
			     const struct ksplice_symbol *ksym,
			     struct list_head *vals)
{
	abort_t ret;

#ifdef KSPLICE_STANDALONE
	/* Before bootstrap, symbol resolution is not yet possible. */
	if (!bootstrapped)
		return OK;
#endif /* KSPLICE_STANDALONE */

	if (ksym->candidate_vals == NULL) {
		/* The symbol's value is already known definitively;
		 * it supersedes any candidates gathered so far. */
		release_vals(vals);
		ksdebug(change, "using detected sym %s=%lx\n", ksym->label,
			ksym->value);
		return add_candidate_val(change, vals, ksym->value);
	}

#ifdef CONFIG_MODULE_UNLOAD
	/* cleanup_module is special: the target module's exit pointer
	 * is a candidate address for it. */
	if (strcmp(ksym->label, "cleanup_module") == 0 && change->target != NULL
	    && change->target->exit != NULL) {
		ret = add_candidate_val(change, vals,
					(unsigned long)change->target->exit);
		if (ret != OK)
			return ret;
	}
#endif

	if (ksym->name != NULL) {
		struct candidate_val *val;
		list_for_each_entry(val, ksym->candidate_vals, list) {
			ret = add_candidate_val(change, vals, val->val);
			if (ret != OK)
				return ret;
		}

		/* Exports newly created by sibling changes in this update. */
		ret = new_export_lookup(change, ksym->name, vals);
		if (ret != OK)
			return ret;
	}

	return OK;
}
2738 #ifdef KSPLICE_STANDALONE
/*
 * Add to vals every System.map candidate address recorded for label
 * in the sorted map [start, end), after validating (and compensating
 * for) any constant relocation offset between the System.map and the
 * running kernel.
 */
static abort_t
add_system_map_candidates(struct ksplice_mod_change *change,
			  const struct ksplice_system_map *start,
			  const struct ksplice_system_map *end,
			  const char *label, struct list_head *vals)
{
	abort_t ret;
	long off;
	int i;
	const struct ksplice_system_map *smap;

	/* Some Fedora kernel releases have System.map files whose symbol
	 * addresses disagree with the running kernel by a constant address
	 * offset because of the CONFIG_PHYSICAL_START and CONFIG_PHYSICAL_ALIGN
	 * values used to compile these kernels.  This constant address offset
	 * is always a multiple of 0x100000.
	 *
	 * If we observe an offset that is NOT a multiple of 0x100000, then the
	 * user provided us with an incorrect System.map file, and we should
	 * abort.
	 * If we observe an offset that is a multiple of 0x100000, then we can
	 * adjust the System.map address values accordingly and proceed.
	 */
	off = (unsigned long)printk - change->map_printk;
	if (off & 0xfffff) {
		ksdebug(change,
			"Aborted. System.map does not match kernel.\n");
		return BAD_SYSTEM_MAP;
	}

	smap = bsearch(label, start, end - start, sizeof(*smap),
		       system_map_bsearch_compare);
	if (smap == NULL)
		return OK;	/* label simply isn't in the map */

	/* Apply the (possibly zero) offset to every recorded candidate. */
	for (i = 0; i < smap->nr_candidates; i++) {
		ret = add_candidate_val(change, vals,
					smap->candidates[i] + off);
		if (ret != OK)
			return ret;
	}
	return OK;
}
2783 static int system_map_bsearch_compare(const void *key, const void *elt)
2785 const struct ksplice_system_map *map = elt;
2786 const char *label = key;
2787 return strcmp(label, map->label);
#endif /* KSPLICE_STANDALONE */
/*
 * An update could cause one module to export a symbol and at the same
 * time change another module to use that symbol.  This violates the
 * normal situation where the changes can be handled independently.
 *
 * new_export_lookup obtains symbol values from the changes to the
 * exported symbol table made by other changes.
 */
static abort_t new_export_lookup(struct ksplice_mod_change *ichange,
				 const char *name, struct list_head *vals)
{
	struct ksplice_mod_change *change;
	struct ksplice_patch *p;
	/* Scan every change in the update for an export patch of name. */
	list_for_each_entry(change, &ichange->update->changes, list) {
		for (p = change->patches; p < change->patches_end; p++) {
			const struct kernel_symbol *sym;
			const struct ksplice_reloc *r;
			/* p->contents points at the exported name string. */
			if (p->type != KSPLICE_PATCH_EXPORT ||
			    strcmp(name, *(const char **)p->contents) != 0)
				continue;

			/* Check that the p->oldaddr reloc has been resolved. */
			r = patch_reloc(change, p);
			if (r == NULL ||
			    contains_canary(change, r->blank_addr,
					    r->howto) != 0)
				continue;
			sym = (const struct kernel_symbol *)r->symbol->value;

			/*
			 * Check that the sym->value reloc has been resolved,
			 * if there is a Ksplice relocation there.
			 */
			r = find_reloc(change->new_code.relocs,
				       change->new_code.relocs_end,
				       (unsigned long)&sym->value,
				       sizeof(&sym->value));
			if (r != NULL &&
			    r->blank_addr == (unsigned long)&sym->value &&
			    contains_canary(change, r->blank_addr,
					    r->howto) != 0)
				continue;
			return add_candidate_val(ichange, vals, sym->value);
		}
	}
	return OK;
}
/*
 * When patch_action is called, the update should be fully prepared.
 * patch_action will try to actually insert or remove trampolines for
 * the update.
 *
 * Sequence: run every change's pre hooks, then attempt the atomic
 * stage under stop_machine (retrying up to 5 times while the
 * to-be-patched code is busy on some stack), and finally run either
 * the fail hooks or the post hooks depending on the outcome.
 */
static abort_t patch_action(struct update *update, enum ksplice_action action)
{
	/* stop_machine callback table, indexed by the requested action. */
	static int (*const __patch_actions[KS_ACTIONS])(void *) = {
		[KS_APPLY] = __apply_patches,
		[KS_REVERSE] = __reverse_patches,
	};
	int i;
	abort_t ret;
	struct ksplice_mod_change *change;

	ret = map_trampoline_pages(update);
	if (ret != OK)
		return ret;

	/* Any pre hook returning nonzero vetoes the whole action. */
	list_for_each_entry(change, &update->changes, list) {
		const typeof(int (*)(void)) *f;
		for (f = change->hooks[action].pre;
		     f < change->hooks[action].pre_end; f++) {
			if ((*f)() != 0) {
				ret = CALL_FAILED;
				goto out;
			}
		}
	}

	/* Up to 5 attempts, sleeping 1s between CODE_BUSY retries. */
	for (i = 0; i < 5; i++) {
		cleanup_conflicts(update);
#ifdef KSPLICE_STANDALONE
		bust_spinlocks(1);
#endif /* KSPLICE_STANDALONE */
		ret = (__force abort_t)stop_machine(__patch_actions[action],
						    update, NULL);
#ifdef KSPLICE_STANDALONE
		bust_spinlocks(0);
#endif /* KSPLICE_STANDALONE */
		if (ret != CODE_BUSY)
			break;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(1000));
	}
out:
	unmap_trampoline_pages(update);

	if (ret == CODE_BUSY) {
		print_conflicts(update);
		_ksdebug(update, "Aborted %s. stack check: to-be-%s "
			 "code is busy.\n", update->kid,
			 action == KS_APPLY ? "replaced" : "reversed");
	} else if (ret == ALREADY_REVERSED) {
		_ksdebug(update, "Aborted %s. Ksplice update %s is already "
			 "reversed.\n", update->kid, update->kid);
	} else if (ret == MODULE_BUSY) {
		_ksdebug(update, "Update %s is in use by another module\n",
			 update->kid);
	}

	if (ret != OK) {
		/* Failure path: give each change a chance to clean up. */
		list_for_each_entry(change, &update->changes, list) {
			const typeof(void (*)(void)) *f;
			for (f = change->hooks[action].fail;
			     f < change->hooks[action].fail_end; f++)
				(*f)();
		}

		return ret;
	}

	/* Success path: run the post hooks. */
	list_for_each_entry(change, &update->changes, list) {
		const typeof(void (*)(void)) *f;
		for (f = change->hooks[action].post;
		     f < change->hooks[action].post_end; f++)
			(*f)();
	}

	_ksdebug(update, "Atomic patch %s for %s complete\n",
		 action == KS_APPLY ? "insertion" : "removal", update->kid);
	return OK;
}
/* Atomically insert the update; run from within stop_machine.
 *
 * Returns an abort_t forced into an int (stop_machine's return
 * type).  Before the commit point, every exit path must leave the
 * system exactly as it found it. */
static int __apply_patches(void *updateptr)
{
	struct update *update = updateptr;
	struct ksplice_mod_change *change;
	struct ksplice_module_list_entry *entry;
	struct ksplice_patch *p;
	abort_t ret;

	if (update->stage == STAGE_APPLIED)
		return (__force int)OK;	/* idempotent: already applied */

	if (update->stage != STAGE_PREPARING)
		return (__force int)UNEXPECTED;

	/* Abort if any thread is executing in a to-be-patched region. */
	ret = check_each_task(update);
	if (ret != OK)
		return (__force int)ret;

	/* Pin every new-code module; on failure, unwind the refs taken
	 * so far (change1 walks the list up to the failing change). */
	list_for_each_entry(change, &update->changes, list) {
		if (try_module_get(change->new_code_mod) != 1) {
			struct ksplice_mod_change *change1;
			list_for_each_entry(change1, &update->changes, list) {
				if (change1 == change)
					break;
				module_put(change1->new_code_mod);
			}
			module_put(THIS_MODULE);
			return (__force int)UNEXPECTED;
		}
	}

	/* Any check hook returning nonzero vetoes the application. */
	list_for_each_entry(change, &update->changes, list) {
		const typeof(int (*)(void)) *f;
		for (f = change->hooks[KS_APPLY].check;
		     f < change->hooks[KS_APPLY].check_end; f++) {
			if ((*f)() != 0)
				return (__force int)CALL_FAILED;
		}
	}

	/* Commit point: the update application will succeed. */

	update->stage = STAGE_APPLIED;
#ifdef TAINT_KSPLICE
	add_taint(TAINT_KSPLICE);
#endif

	list_for_each_entry(entry, &update->ksplice_module_list, update_list)
		list_add(&entry->list, &ksplice_modules);

	list_for_each_entry(change, &update->changes, list) {
		for (p = change->patches; p < change->patches_end; p++)
			insert_trampoline(p);
	}

	/* intra hooks run inside stop_machine, after patching. */
	list_for_each_entry(change, &update->changes, list) {
		const typeof(void (*)(void)) *f;
		for (f = change->hooks[KS_APPLY].intra;
		     f < change->hooks[KS_APPLY].intra_end; f++)
			(*f)();
	}

	return (__force int)OK;
}
2989 /* Atomically remove the update; run from within stop_machine */
2990 static int __reverse_patches(void *updateptr)
2992 struct update *update = updateptr;
2993 struct ksplice_mod_change *change;
2994 struct ksplice_module_list_entry *entry;
2995 const struct ksplice_patch *p;
2996 abort_t ret;
2998 if (update->stage != STAGE_APPLIED)
2999 return (__force int)OK;
3001 #ifdef CONFIG_MODULE_UNLOAD
3002 list_for_each_entry(change, &update->changes, list) {
3003 if (module_refcount(change->new_code_mod) != 1)
3004 return (__force int)MODULE_BUSY;
3006 #endif /* CONFIG_MODULE_UNLOAD */
3008 list_for_each_entry(entry, &update->ksplice_module_list, update_list) {
3009 if (!entry->applied &&
3010 find_module(entry->target_mod_name) != NULL)
3011 return COLD_UPDATE_LOADED;
3014 ret = check_each_task(update);
3015 if (ret != OK)
3016 return (__force int)ret;
3018 list_for_each_entry(change, &update->changes, list) {
3019 for (p = change->patches; p < change->patches_end; p++) {
3020 ret = verify_trampoline(change, p);
3021 if (ret != OK)
3022 return (__force int)ret;
3026 list_for_each_entry(change, &update->changes, list) {
3027 const typeof(int (*)(void)) *f;
3028 for (f = change->hooks[KS_REVERSE].check;
3029 f < change->hooks[KS_REVERSE].check_end; f++) {
3030 if ((*f)() != 0)
3031 return (__force int)CALL_FAILED;
3035 /* Commit point: the update reversal will succeed. */
3037 update->stage = STAGE_REVERSED;
3039 list_for_each_entry(change, &update->changes, list)
3040 module_put(change->new_code_mod);
3042 list_for_each_entry(entry, &update->ksplice_module_list, update_list)
3043 list_del(&entry->list);
3045 list_for_each_entry(change, &update->changes, list) {
3046 const typeof(void (*)(void)) *f;
3047 for (f = change->hooks[KS_REVERSE].intra;
3048 f < change->hooks[KS_REVERSE].intra_end; f++)
3049 (*f)();
3052 list_for_each_entry(change, &update->changes, list) {
3053 for (p = change->patches; p < change->patches_end; p++)
3054 remove_trampoline(p);
3057 return (__force int)OK;
3061 * Check whether any thread's instruction pointer or any address of
3062 * its stack is contained in one of the safety_records associated with
3063 * the update.
3065 * check_each_task must be called from inside stop_machine, because it
3066 * does not take tasklist_lock (which cannot be held by anyone else
3067 * during stop_machine).
3069 static abort_t check_each_task(struct update *update)
3071 const struct task_struct *g, *p;
3072 abort_t status = OK, ret;
3073 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
3074 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
3075 read_lock(&tasklist_lock);
3076 #endif /* LINUX_VERSION_CODE */
3077 do_each_thread(g, p) {
3078 /* do_each_thread is a double loop! */
3079 ret = check_task(update, p, false);
3080 if (ret != OK) {
3081 check_task(update, p, true);
3082 status = ret;
3084 if (ret != OK && ret != CODE_BUSY)
3085 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
3086 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
3087 goto out;
3088 #else /* LINUX_VERSION_CODE < */
3089 return ret;
3090 #endif /* LINUX_VERSION_CODE */
3091 } while_each_thread(g, p);
3092 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
3093 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
3094 out:
3095 read_unlock(&tasklist_lock);
3096 #endif /* LINUX_VERSION_CODE */
3097 return status;
3100 static abort_t check_task(struct update *update,
3101 const struct task_struct *t, bool rerun)
3103 abort_t status, ret;
3104 struct conflict *conf = NULL;
3106 if (rerun) {
3107 conf = kmalloc(sizeof(*conf), GFP_ATOMIC);
3108 if (conf == NULL)
3109 return OUT_OF_MEMORY;
3110 conf->process_name = kstrdup(t->comm, GFP_ATOMIC);
3111 if (conf->process_name == NULL) {
3112 kfree(conf);
3113 return OUT_OF_MEMORY;
3115 conf->pid = t->pid;
3116 INIT_LIST_HEAD(&conf->stack);
3117 list_add(&conf->list, &update->conflicts);
3120 status = check_address(update, conf, KSPLICE_IP(t));
3121 if (t == current) {
3122 ret = check_stack(update, conf, task_thread_info(t),
3123 (unsigned long *)__builtin_frame_address(0));
3124 if (status == OK)
3125 status = ret;
3126 } else if (!task_curr(t)) {
3127 ret = check_stack(update, conf, task_thread_info(t),
3128 (unsigned long *)KSPLICE_SP(t));
3129 if (status == OK)
3130 status = ret;
3131 } else if (!is_stop_machine(t)) {
3132 status = UNEXPECTED_RUNNING_TASK;
3134 return status;
3137 static abort_t check_stack(struct update *update, struct conflict *conf,
3138 const struct thread_info *tinfo,
3139 const unsigned long *stack)
3141 abort_t status = OK, ret;
3142 unsigned long addr;
3144 while (valid_stack_ptr(tinfo, stack)) {
3145 addr = *stack++;
3146 ret = check_address(update, conf, addr);
3147 if (ret != OK)
3148 status = ret;
3150 return status;
3153 static abort_t check_address(struct update *update,
3154 struct conflict *conf, unsigned long addr)
3156 abort_t status = OK, ret;
3157 const struct safety_record *rec;
3158 struct ksplice_mod_change *change;
3159 struct conflict_addr *ca = NULL;
3161 if (conf != NULL) {
3162 ca = kmalloc(sizeof(*ca), GFP_ATOMIC);
3163 if (ca == NULL)
3164 return OUT_OF_MEMORY;
3165 ca->addr = addr;
3166 ca->has_conflict = false;
3167 ca->label = NULL;
3168 list_add(&ca->list, &conf->stack);
3171 list_for_each_entry(change, &update->changes, list) {
3172 unsigned long tramp_addr = follow_trampolines(change, addr);
3173 list_for_each_entry(rec, &change->safety_records, list) {
3174 ret = check_record(ca, rec, tramp_addr);
3175 if (ret != OK)
3176 status = ret;
3179 return status;
3182 static abort_t check_record(struct conflict_addr *ca,
3183 const struct safety_record *rec, unsigned long addr)
3185 if (addr >= rec->addr && addr < rec->addr + rec->size) {
3186 if (ca != NULL) {
3187 ca->label = rec->label;
3188 ca->has_conflict = true;
3190 return CODE_BUSY;
3192 return OK;
3195 /* Is the task one of the stop_machine tasks? */
3196 static bool is_stop_machine(const struct task_struct *t)
3198 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
3199 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
3200 const char *kstop_prefix = "kstop/";
3201 #else /* LINUX_VERSION_CODE < */
3202 /* c9583e55fa2b08a230c549bd1e3c0bde6c50d9cc was after 2.6.27 */
3203 const char *kstop_prefix = "kstop";
3204 #endif /* LINUX_VERSION_CODE */
3205 const char *num;
3206 if (!strstarts(t->comm, kstop_prefix))
3207 return false;
3208 num = t->comm + strlen(kstop_prefix);
3209 return num[strspn(num, "0123456789")] == '\0';
3210 #else /* LINUX_VERSION_CODE < */
3211 /* ffdb5976c47609c862917d4c186ecbb5706d2dda was after 2.6.26 */
3212 return strcmp(t->comm, "kstopmachine") == 0;
3213 #endif /* LINUX_VERSION_CODE */
3216 static void cleanup_conflicts(struct update *update)
3218 struct conflict *conf;
3219 list_for_each_entry(conf, &update->conflicts, list) {
3220 clear_list(&conf->stack, struct conflict_addr, list);
3221 kfree(conf->process_name);
3223 clear_list(&update->conflicts, struct conflict, list);
3226 static void print_conflicts(struct update *update)
3228 const struct conflict *conf;
3229 const struct conflict_addr *ca;
3230 list_for_each_entry(conf, &update->conflicts, list) {
3231 _ksdebug(update, "stack check: pid %d (%s):", conf->pid,
3232 conf->process_name);
3233 list_for_each_entry(ca, &conf->stack, list) {
3234 _ksdebug(update, " %lx", ca->addr);
3235 if (ca->has_conflict)
3236 _ksdebug(update, " [<-CONFLICT]");
3238 _ksdebug(update, "\n");
3242 static void insert_trampoline(struct ksplice_patch *p)
3244 mm_segment_t old_fs = get_fs();
3245 set_fs(KERNEL_DS);
3246 memcpy(p->saved, p->vaddr, p->size);
3247 memcpy(p->vaddr, p->contents, p->size);
3248 flush_icache_range(p->oldaddr, p->oldaddr + p->size);
3249 set_fs(old_fs);
3252 static abort_t verify_trampoline(struct ksplice_mod_change *change,
3253 const struct ksplice_patch *p)
3255 if (memcmp(p->vaddr, p->contents, p->size) != 0) {
3256 ksdebug(change, "Aborted. Trampoline at %lx has been "
3257 "overwritten.\n", p->oldaddr);
3258 return CODE_BUSY;
3260 return OK;
3263 static void remove_trampoline(const struct ksplice_patch *p)
3265 mm_segment_t old_fs = get_fs();
3266 set_fs(KERNEL_DS);
3267 memcpy(p->vaddr, p->saved, p->size);
3268 flush_icache_range(p->oldaddr, p->oldaddr + p->size);
3269 set_fs(old_fs);
3272 /* Returns NO_MATCH if there's already a labelval with a different value */
3273 static abort_t create_labelval(struct ksplice_mod_change *change,
3274 struct ksplice_symbol *ksym,
3275 unsigned long val, int status)
3277 val = follow_trampolines(change, val);
3278 if (ksym->candidate_vals == NULL)
3279 return ksym->value == val ? OK : NO_MATCH;
3281 ksym->value = val;
3282 if (status == TEMP) {
3283 struct labelval *lv = kmalloc(sizeof(*lv), GFP_KERNEL);
3284 if (lv == NULL)
3285 return OUT_OF_MEMORY;
3286 lv->symbol = ksym;
3287 lv->saved_vals = ksym->candidate_vals;
3288 list_add(&lv->list, &change->temp_labelvals);
3290 ksym->candidate_vals = NULL;
3291 return OK;
3295 * Creates a new safety_record for a old_code section based on its
3296 * ksplice_section and run-pre matching information.
3298 static abort_t create_safety_record(struct ksplice_mod_change *change,
3299 const struct ksplice_section *sect,
3300 struct list_head *record_list,
3301 unsigned long run_addr,
3302 unsigned long run_size)
3304 struct safety_record *rec;
3305 struct ksplice_patch *p;
3307 if (record_list == NULL)
3308 return OK;
3310 for (p = change->patches; p < change->patches_end; p++) {
3311 const struct ksplice_reloc *r = patch_reloc(change, p);
3312 if (strcmp(sect->symbol->label, r->symbol->label) == 0)
3313 break;
3315 if (p >= change->patches_end)
3316 return OK;
3318 rec = kmalloc(sizeof(*rec), GFP_KERNEL);
3319 if (rec == NULL)
3320 return OUT_OF_MEMORY;
3322 * The old_code might be unloaded when checking reversing
3323 * patches, so we need to kstrdup the label here.
3325 rec->label = kstrdup(sect->symbol->label, GFP_KERNEL);
3326 if (rec->label == NULL) {
3327 kfree(rec);
3328 return OUT_OF_MEMORY;
3330 rec->addr = run_addr;
3331 rec->size = run_size;
3333 list_add(&rec->list, record_list);
3334 return OK;
3337 static abort_t add_candidate_val(struct ksplice_mod_change *change,
3338 struct list_head *vals, unsigned long val)
3340 struct candidate_val *tmp, *new;
3343 * Careful: follow trampolines before comparing values so that we do
3344 * not mistake the obsolete function for another copy of the function.
3346 val = follow_trampolines(change, val);
3348 list_for_each_entry(tmp, vals, list) {
3349 if (tmp->val == val)
3350 return OK;
3352 new = kmalloc(sizeof(*new), GFP_KERNEL);
3353 if (new == NULL)
3354 return OUT_OF_MEMORY;
3355 new->val = val;
3356 list_add(&new->list, vals);
3357 return OK;
3360 static void release_vals(struct list_head *vals)
3362 clear_list(vals, struct candidate_val, list);
3366 * The temp_labelvals list is used to cache those temporary labelvals
3367 * that have been created to cross-check the symbol values obtained
3368 * from different relocations within a single section being matched.
3370 * If status is VAL, commit the temp_labelvals as final values.
3372 * If status is NOVAL, restore the list of possible values to the
3373 * ksplice_symbol, so that it no longer has a known value.
3375 static void set_temp_labelvals(struct ksplice_mod_change *change, int status)
3377 struct labelval *lv, *n;
3378 list_for_each_entry_safe(lv, n, &change->temp_labelvals, list) {
3379 if (status == NOVAL) {
3380 lv->symbol->candidate_vals = lv->saved_vals;
3381 } else {
3382 release_vals(lv->saved_vals);
3383 kfree(lv->saved_vals);
3385 list_del(&lv->list);
3386 kfree(lv);
3390 /* Is there a Ksplice canary with given howto at blank_addr? */
3391 static int contains_canary(struct ksplice_mod_change *change,
3392 unsigned long blank_addr,
3393 const struct ksplice_reloc_howto *howto)
3395 switch (howto->size) {
3396 case 1:
3397 return (*(uint8_t *)blank_addr & howto->dst_mask) ==
3398 (KSPLICE_CANARY & howto->dst_mask);
3399 case 2:
3400 return (*(uint16_t *)blank_addr & howto->dst_mask) ==
3401 (KSPLICE_CANARY & howto->dst_mask);
3402 case 4:
3403 return (*(uint32_t *)blank_addr & howto->dst_mask) ==
3404 (KSPLICE_CANARY & howto->dst_mask);
3405 #if BITS_PER_LONG >= 64
3406 case 8:
3407 return (*(uint64_t *)blank_addr & howto->dst_mask) ==
3408 (KSPLICE_CANARY & howto->dst_mask);
3409 #endif /* BITS_PER_LONG */
3410 default:
3411 ksdebug(change, "Aborted. Invalid relocation size.\n");
3412 return -1;
3417 * Compute the address of the code you would actually run if you were
3418 * to call the function at addr (i.e., follow the sequence of jumps
3419 * starting at addr)
3421 static unsigned long follow_trampolines(struct ksplice_mod_change *change,
3422 unsigned long addr)
3424 unsigned long new_addr;
3425 struct module *m;
3427 while (1) {
3428 #ifdef KSPLICE_STANDALONE
3429 if (!bootstrapped)
3430 return addr;
3431 #endif /* KSPLICE_STANDALONE */
3432 if (!__kernel_text_address(addr) ||
3433 trampoline_target(change, addr, &new_addr) != OK)
3434 return addr;
3435 m = __module_text_address(new_addr);
3436 if (m == NULL || m == change->target ||
3437 !strstarts(m->name, "ksplice"))
3438 return addr;
3439 addr = new_addr;
3443 /* Does module a patch module b? */
3444 static bool patches_module(const struct module *a, const struct module *b)
3446 #ifdef KSPLICE_NO_KERNEL_SUPPORT
3447 const char *name;
3448 const char *modname = b == NULL ? "vmlinux" : b->name;
3449 if (a == b)
3450 return true;
3451 if (a == NULL || !strstarts(a->name, "ksplice_"))
3452 return false;
3453 name = a->name + strlen("ksplice_");
3454 name += strcspn(name, "_");
3455 if (name[0] != '_')
3456 return false;
3457 name++;
3458 return strstarts(name, modname) &&
3459 strcmp(name + strlen(modname), "_new") == 0;
3460 #else /* !KSPLICE_NO_KERNEL_SUPPORT */
3461 struct ksplice_module_list_entry *entry;
3462 if (a == b)
3463 return true;
3464 list_for_each_entry(entry, &ksplice_modules, list) {
3465 if (strcmp(entry->target_mod_name, b->name) == 0 &&
3466 strcmp(entry->new_code_mod_name, a->name) == 0)
3467 return true;
3469 return false;
3470 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
3473 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
3474 /* 66f92cf9d415e96a5bdd6c64de8dd8418595d2fc was after 2.6.29 */
/* Does str begin with prefix? (compat for kernels before 2.6.30) */
static bool strstarts(const char *str, const char *prefix)
{
	/* Walk both strings until the prefix runs out. */
	while (*prefix != '\0') {
		if (*str++ != *prefix++)
			return false;
	}
	return true;
}
3479 #endif /* LINUX_VERSION_CODE */
3481 static bool singular(struct list_head *list)
3483 return !list_empty(list) && list->next->next == list;
/*
 * Minimal binary search over a sorted array (older kernels lack a
 * library bsearch).  Returns a pointer to an element comparing equal
 * to key, or NULL when there is none.
 */
static void *bsearch(const void *key, const void *base, size_t n,
		     size_t size, int (*cmp)(const void *key, const void *elt))
{
	int lo, hi;

	if (n == 0)
		return NULL;
	lo = 0;
	hi = (int)n - 1;
	while (lo <= hi) {
		int mid = (lo + hi) / 2;
		const char *probe = (const char *)base + mid * size;
		int rel = cmp(key, probe);

		if (rel == 0)
			return (void *)probe;
		if (rel < 0)
			hi = mid - 1;
		else
			lo = mid + 1;
	}
	return NULL;
}
3505 static int compare_relocs(const void *a, const void *b)
3507 const struct ksplice_reloc *ra = a, *rb = b;
3508 if (ra->blank_addr > rb->blank_addr)
3509 return 1;
3510 else if (ra->blank_addr < rb->blank_addr)
3511 return -1;
3512 else
3513 return ra->howto->size - rb->howto->size;
3516 #ifdef KSPLICE_STANDALONE
3517 static int compare_system_map(const void *a, const void *b)
3519 const struct ksplice_system_map *sa = a, *sb = b;
3520 return strcmp(sa->label, sb->label);
3522 #endif /* KSPLICE_STANDALONE */
3524 #ifdef CONFIG_DEBUG_FS
3525 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
3526 /* Old kernels don't have debugfs_create_blob */
3527 static ssize_t read_file_blob(struct file *file, char __user *user_buf,
3528 size_t count, loff_t *ppos)
3530 struct debugfs_blob_wrapper *blob = file->private_data;
3531 return simple_read_from_buffer(user_buf, count, ppos, blob->data,
3532 blob->size);
3535 static int blob_open(struct inode *inode, struct file *file)
3537 if (inode->i_private)
3538 file->private_data = inode->i_private;
3539 return 0;
3542 static struct file_operations fops_blob = {
3543 .read = read_file_blob,
3544 .open = blob_open,
3547 static struct dentry *debugfs_create_blob(const char *name, mode_t mode,
3548 struct dentry *parent,
3549 struct debugfs_blob_wrapper *blob)
3551 return debugfs_create_file(name, mode, parent, blob, &fops_blob);
3553 #endif /* LINUX_VERSION_CODE */
3555 static abort_t init_debug_buf(struct update *update)
3557 update->debug_blob.size = 0;
3558 update->debug_blob.data = NULL;
3559 update->debugfs_dentry =
3560 debugfs_create_blob(update->name, S_IFREG | S_IRUSR, NULL,
3561 &update->debug_blob);
3562 if (update->debugfs_dentry == NULL)
3563 return OUT_OF_MEMORY;
3564 return OK;
3567 static void clear_debug_buf(struct update *update)
3569 if (update->debugfs_dentry == NULL)
3570 return;
3571 debugfs_remove(update->debugfs_dentry);
3572 update->debugfs_dentry = NULL;
3573 update->debug_blob.size = 0;
3574 vfree(update->debug_blob.data);
3575 update->debug_blob.data = NULL;
3578 static int _ksdebug(struct update *update, const char *fmt, ...)
3580 va_list args;
3581 unsigned long size, old_size, new_size;
3583 if (update->debug == 0)
3584 return 0;
3586 /* size includes the trailing '\0' */
3587 va_start(args, fmt);
3588 size = 1 + vsnprintf(update->debug_blob.data, 0, fmt, args);
3589 va_end(args);
3590 old_size = update->debug_blob.size == 0 ? 0 :
3591 max(PAGE_SIZE, roundup_pow_of_two(update->debug_blob.size));
3592 new_size = update->debug_blob.size + size == 0 ? 0 :
3593 max(PAGE_SIZE, roundup_pow_of_two(update->debug_blob.size + size));
3594 if (new_size > old_size) {
3595 char *buf = vmalloc(new_size);
3596 if (buf == NULL)
3597 return -ENOMEM;
3598 memcpy(buf, update->debug_blob.data, update->debug_blob.size);
3599 vfree(update->debug_blob.data);
3600 update->debug_blob.data = buf;
3602 va_start(args, fmt);
3603 update->debug_blob.size += vsnprintf(update->debug_blob.data +
3604 update->debug_blob.size,
3605 size, fmt, args);
3606 va_end(args);
3607 return 0;
3609 #else /* CONFIG_DEBUG_FS */
3610 static abort_t init_debug_buf(struct update *update)
3612 return OK;
3615 static void clear_debug_buf(struct update *update)
3617 return;
3620 static int _ksdebug(struct update *update, const char *fmt, ...)
3622 va_list args;
3624 if (update->debug == 0)
3625 return 0;
3627 if (!update->debug_continue_line)
3628 printk(KERN_DEBUG "ksplice: ");
3630 va_start(args, fmt);
3631 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
3632 vprintk(fmt, args);
3633 #else /* LINUX_VERSION_CODE < */
3634 /* 683b229286b429244f35726b3c18caec429233bd was after 2.6.8 */
3636 char *buf = kvasprintf(GFP_KERNEL, fmt, args);
3637 printk("%s", buf);
3638 kfree(buf);
3640 #endif /* LINUX_VERSION_CODE */
3641 va_end(args);
3643 update->debug_continue_line =
3644 fmt[0] == '\0' || fmt[strlen(fmt) - 1] != '\n';
3645 return 0;
3647 #endif /* CONFIG_DEBUG_FS */
3649 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) && defined(CONFIG_KALLSYMS)
3650 /* 75a66614db21007bcc8c37f9c5d5b922981387b9 was after 2.6.29 */
3651 extern unsigned long kallsyms_addresses[];
3652 extern unsigned long kallsyms_num_syms;
3653 extern u8 kallsyms_names[];
3655 static int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
3656 struct module *, unsigned long),
3657 void *data)
3659 char namebuf[KSYM_NAME_LEN];
3660 unsigned long i;
3661 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3662 unsigned int off;
3663 #endif /* LINUX_VERSION_CODE */
3664 int ret;
3666 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3667 for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
3668 off = kallsyms_expand_symbol(off, namebuf);
3669 ret = fn(data, namebuf, NULL, kallsyms_addresses[i]);
3670 if (ret != 0)
3671 return ret;
3673 #else /* LINUX_VERSION_CODE < */
3674 /* 5648d78927ca65e74aadc88a2b1d6431e55e78ec was after 2.6.9 */
3675 char *knames;
3677 for (i = 0, knames = kallsyms_names; i < kallsyms_num_syms; i++) {
3678 unsigned prefix = *knames++;
3680 strlcpy(namebuf + prefix, knames, KSYM_NAME_LEN - prefix);
3682 ret = fn(data, namebuf, NULL, kallsyms_addresses[i]);
3683 if (ret != OK)
3684 return ret;
3686 knames += strlen(knames) + 1;
3688 #endif /* LINUX_VERSION_CODE */
3689 return module_kallsyms_on_each_symbol(fn, data);
3692 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3693 extern u8 kallsyms_token_table[];
3694 extern u16 kallsyms_token_index[];
3696 static unsigned int kallsyms_expand_symbol(unsigned int off, char *result)
3698 long len, skipped_first = 0;
3699 const u8 *tptr, *data;
3701 data = &kallsyms_names[off];
3702 len = *data;
3703 data++;
3705 off += len + 1;
3707 while (len) {
3708 tptr = &kallsyms_token_table[kallsyms_token_index[*data]];
3709 data++;
3710 len--;
3712 while (*tptr) {
3713 if (skipped_first) {
3714 *result = *tptr;
3715 result++;
3716 } else
3717 skipped_first = 1;
3718 tptr++;
3722 *result = '\0';
3724 return off;
3726 #else /* LINUX_VERSION_CODE < */
3727 /* 5648d78927ca65e74aadc88a2b1d6431e55e78ec was after 2.6.9 */
3728 #endif /* LINUX_VERSION_CODE */
3730 static int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
3731 struct module *,
3732 unsigned long),
3733 void *data)
3735 struct module *mod;
3736 unsigned int i;
3737 int ret;
3739 list_for_each_entry(mod, &modules, list) {
3740 for (i = 0; i < mod->num_symtab; i++) {
3741 ret = fn(data, mod->strtab + mod->symtab[i].st_name,
3742 mod, mod->symtab[i].st_value);
3743 if (ret != 0)
3744 return ret;
3747 return 0;
3749 #endif /* LINUX_VERSION_CODE && CONFIG_KALLSYMS */
3751 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
3752 /* c6b37801911d7f4663c99cad8aa230bc934cea82 was after 2.6.29 */
3753 static struct module *find_module(const char *name)
3755 struct module *mod;
3757 list_for_each_entry(mod, &modules, list) {
3758 if (strcmp(mod->name, name) == 0)
3759 return mod;
3761 return NULL;
#ifdef CONFIG_MODULE_UNLOAD
/* One "module a uses module b" dependency edge. */
struct module_use {
	struct list_head list;
	struct module *module_which_uses;
};

/* I'm not yet certain whether we need the strong form of this. */
static inline int strong_try_module_get(struct module *mod)
{
	if (mod && mod->state != MODULE_STATE_LIVE)
		return -EBUSY;
	if (try_module_get(mod))
		return 0;
	return -ENOENT;
}

/* Does a already use b? */
static int already_uses(struct module *a, struct module *b)
{
	struct module_use *use;

	list_for_each_entry(use, &b->modules_which_use_me, list) {
		if (use->module_which_uses == a)
			return 1;
	}
	return 0;
}

/* Make it so module a uses b. Must be holding module_mutex */
static int use_module(struct module *a, struct module *b)
{
	struct module_use *use;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
/* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
	int no_warn;
#endif /* LINUX_VERSION_CODE */
	if (b == NULL || already_uses(a, b))
		return 1;

	if (strong_try_module_get(b) < 0)
		return 0;

	use = kmalloc(sizeof(*use), GFP_ATOMIC);
	if (!use) {
		module_put(b);
		return 0;
	}
	use->module_which_uses = a;
	list_add(&use->list, &b->modules_which_use_me);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
/* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
	no_warn = sysfs_create_link(b->holders_dir, &a->mkobj.kobj, a->name);
#endif /* LINUX_VERSION_CODE */
	return 1;
}
#else /* CONFIG_MODULE_UNLOAD */
/* Without unload support there is no refcounting to maintain. */
static int use_module(struct module *a, struct module *b)
{
	return 1;
}
#endif /* CONFIG_MODULE_UNLOAD */
3825 #ifndef CONFIG_MODVERSIONS
3826 #define symversion(base, idx) NULL
3827 #else
3828 #define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
3829 #endif
3831 static bool each_symbol_in_section(const struct symsearch *arr,
3832 unsigned int arrsize,
3833 struct module *owner,
3834 bool (*fn)(const struct symsearch *syms,
3835 struct module *owner,
3836 unsigned int symnum, void *data),
3837 void *data)
3839 unsigned int i, j;
3841 for (j = 0; j < arrsize; j++) {
3842 for (i = 0; i < arr[j].stop - arr[j].start; i++)
3843 if (fn(&arr[j], owner, i, data))
3844 return true;
3847 return false;
3850 /* Returns true as soon as fn returns true, otherwise false. */
3851 static bool each_symbol(bool (*fn)(const struct symsearch *arr,
3852 struct module *owner,
3853 unsigned int symnum, void *data),
3854 void *data)
3856 struct module *mod;
3857 const struct symsearch arr[] = {
3858 { __start___ksymtab, __stop___ksymtab, __start___kcrctab,
3859 NOT_GPL_ONLY, false },
3860 { __start___ksymtab_gpl, __stop___ksymtab_gpl,
3861 __start___kcrctab_gpl,
3862 GPL_ONLY, false },
3863 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
3864 { __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
3865 __start___kcrctab_gpl_future,
3866 WILL_BE_GPL_ONLY, false },
3867 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
3868 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
3869 { __start___ksymtab_unused, __stop___ksymtab_unused,
3870 __start___kcrctab_unused,
3871 NOT_GPL_ONLY, true },
3872 { __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
3873 __start___kcrctab_unused_gpl,
3874 GPL_ONLY, true },
3875 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
3878 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
3879 return 1;
3881 list_for_each_entry(mod, &modules, list) {
3882 struct symsearch module_arr[] = {
3883 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
3884 NOT_GPL_ONLY, false },
3885 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
3886 mod->gpl_crcs,
3887 GPL_ONLY, false },
3888 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
3889 { mod->gpl_future_syms,
3890 mod->gpl_future_syms + mod->num_gpl_future_syms,
3891 mod->gpl_future_crcs,
3892 WILL_BE_GPL_ONLY, false },
3893 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
3894 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
3895 { mod->unused_syms,
3896 mod->unused_syms + mod->num_unused_syms,
3897 mod->unused_crcs,
3898 NOT_GPL_ONLY, true },
3899 { mod->unused_gpl_syms,
3900 mod->unused_gpl_syms + mod->num_unused_gpl_syms,
3901 mod->unused_gpl_crcs,
3902 GPL_ONLY, true },
3903 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
3906 if (each_symbol_in_section(module_arr, ARRAY_SIZE(module_arr),
3907 mod, fn, data))
3908 return true;
3910 return false;
3913 struct find_symbol_arg {
3914 /* Input */
3915 const char *name;
3916 bool gplok;
3917 bool warn;
3919 /* Output */
3920 struct module *owner;
3921 const unsigned long *crc;
3922 const struct kernel_symbol *sym;
3925 static bool find_symbol_in_section(const struct symsearch *syms,
3926 struct module *owner,
3927 unsigned int symnum, void *data)
3929 struct find_symbol_arg *fsa = data;
3931 if (strcmp(syms->start[symnum].name, fsa->name) != 0)
3932 return false;
3934 if (!fsa->gplok) {
3935 if (syms->licence == GPL_ONLY)
3936 return false;
3937 if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
3938 printk(KERN_WARNING "Symbol %s is being used "
3939 "by a non-GPL module, which will not "
3940 "be allowed in the future\n", fsa->name);
3941 printk(KERN_WARNING "Please see the file "
3942 "Documentation/feature-removal-schedule.txt "
3943 "in the kernel source tree for more details.\n");
3947 #ifdef CONFIG_UNUSED_SYMBOLS
3948 if (syms->unused && fsa->warn) {
3949 printk(KERN_WARNING "Symbol %s is marked as UNUSED, "
3950 "however this module is using it.\n", fsa->name);
3951 printk(KERN_WARNING
3952 "This symbol will go away in the future.\n");
3953 printk(KERN_WARNING
3954 "Please evalute if this is the right api to use and if "
3955 "it really is, submit a report the linux kernel "
3956 "mailinglist together with submitting your code for "
3957 "inclusion.\n");
3959 #endif
3961 fsa->owner = owner;
3962 fsa->crc = symversion(syms->crcs, symnum);
3963 fsa->sym = &syms->start[symnum];
3964 return true;
3967 /* Find a symbol and return it, along with, (optional) crc and
3968 * (optional) module which owns it */
3969 static const struct kernel_symbol *find_symbol(const char *name,
3970 struct module **owner,
3971 const unsigned long **crc,
3972 bool gplok, bool warn)
3974 struct find_symbol_arg fsa;
3976 fsa.name = name;
3977 fsa.gplok = gplok;
3978 fsa.warn = warn;
3980 if (each_symbol(find_symbol_in_section, &fsa)) {
3981 if (owner)
3982 *owner = fsa.owner;
3983 if (crc)
3984 *crc = fsa.crc;
3985 return fsa.sym;
3988 return NULL;
3991 static inline int within_module_core(unsigned long addr, struct module *mod)
3993 return (unsigned long)mod->module_core <= addr &&
3994 addr < (unsigned long)mod->module_core + mod->core_size;
3997 static inline int within_module_init(unsigned long addr, struct module *mod)
3999 return (unsigned long)mod->module_init <= addr &&
4000 addr < (unsigned long)mod->module_init + mod->init_size;
4003 static struct module *__module_address(unsigned long addr)
4005 struct module *mod;
4007 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
4008 list_for_each_entry_rcu(mod, &modules, list)
4009 #else
4010 /* d72b37513cdfbd3f53f3d485a8c403cc96d2c95f was after 2.6.27 */
4011 list_for_each_entry(mod, &modules, list)
4012 #endif
4013 if (within_module_core(addr, mod) ||
4014 within_module_init(addr, mod))
4015 return mod;
4016 return NULL;
4018 #endif /* LINUX_VERSION_CODE */
4020 struct update_attribute {
4021 struct attribute attr;
4022 ssize_t (*show)(struct update *update, char *buf);
4023 ssize_t (*store)(struct update *update, const char *buf, size_t len);
4026 static ssize_t update_attr_show(struct kobject *kobj, struct attribute *attr,
4027 char *buf)
4029 struct update_attribute *attribute =
4030 container_of(attr, struct update_attribute, attr);
4031 struct update *update = container_of(kobj, struct update, kobj);
4032 if (attribute->show == NULL)
4033 return -EIO;
4034 return attribute->show(update, buf);
4037 static ssize_t update_attr_store(struct kobject *kobj, struct attribute *attr,
4038 const char *buf, size_t len)
4040 struct update_attribute *attribute =
4041 container_of(attr, struct update_attribute, attr);
4042 struct update *update = container_of(kobj, struct update, kobj);
4043 if (attribute->store == NULL)
4044 return -EIO;
4045 return attribute->store(update, buf, len);
4048 static struct sysfs_ops update_sysfs_ops = {
4049 .show = update_attr_show,
4050 .store = update_attr_store,
4053 static void update_release(struct kobject *kobj)
4055 struct update *update;
4056 update = container_of(kobj, struct update, kobj);
4057 cleanup_ksplice_update(update);
4060 static ssize_t stage_show(struct update *update, char *buf)
4062 switch (update->stage) {
4063 case STAGE_PREPARING:
4064 return snprintf(buf, PAGE_SIZE, "preparing\n");
4065 case STAGE_APPLIED:
4066 return snprintf(buf, PAGE_SIZE, "applied\n");
4067 case STAGE_REVERSED:
4068 return snprintf(buf, PAGE_SIZE, "reversed\n");
4070 return 0;
4073 static ssize_t abort_cause_show(struct update *update, char *buf)
4075 switch (update->abort_cause) {
4076 case OK:
4077 return snprintf(buf, PAGE_SIZE, "ok\n");
4078 case NO_MATCH:
4079 return snprintf(buf, PAGE_SIZE, "no_match\n");
4080 #ifdef KSPLICE_STANDALONE
4081 case BAD_SYSTEM_MAP:
4082 return snprintf(buf, PAGE_SIZE, "bad_system_map\n");
4083 #endif /* KSPLICE_STANDALONE */
4084 case CODE_BUSY:
4085 return snprintf(buf, PAGE_SIZE, "code_busy\n");
4086 case MODULE_BUSY:
4087 return snprintf(buf, PAGE_SIZE, "module_busy\n");
4088 case OUT_OF_MEMORY:
4089 return snprintf(buf, PAGE_SIZE, "out_of_memory\n");
4090 case FAILED_TO_FIND:
4091 return snprintf(buf, PAGE_SIZE, "failed_to_find\n");
4092 case ALREADY_REVERSED:
4093 return snprintf(buf, PAGE_SIZE, "already_reversed\n");
4094 case MISSING_EXPORT:
4095 return snprintf(buf, PAGE_SIZE, "missing_export\n");
4096 case UNEXPECTED_RUNNING_TASK:
4097 return snprintf(buf, PAGE_SIZE, "unexpected_running_task\n");
4098 case TARGET_NOT_LOADED:
4099 return snprintf(buf, PAGE_SIZE, "target_not_loaded\n");
4100 case CALL_FAILED:
4101 return snprintf(buf, PAGE_SIZE, "call_failed\n");
4102 case COLD_UPDATE_LOADED:
4103 return snprintf(buf, PAGE_SIZE, "cold_update_loaded\n");
4104 case UNEXPECTED:
4105 return snprintf(buf, PAGE_SIZE, "unexpected\n");
4106 default:
4107 return snprintf(buf, PAGE_SIZE, "unknown\n");
4109 return 0;
4112 static ssize_t conflict_show(struct update *update, char *buf)
4114 const struct conflict *conf;
4115 const struct conflict_addr *ca;
4116 int used = 0;
4117 mutex_lock(&module_mutex);
4118 list_for_each_entry(conf, &update->conflicts, list) {
4119 used += snprintf(buf + used, PAGE_SIZE - used, "%s %d",
4120 conf->process_name, conf->pid);
4121 list_for_each_entry(ca, &conf->stack, list) {
4122 if (!ca->has_conflict)
4123 continue;
4124 used += snprintf(buf + used, PAGE_SIZE - used, " %s",
4125 ca->label);
4127 used += snprintf(buf + used, PAGE_SIZE - used, "\n");
4129 mutex_unlock(&module_mutex);
4130 return used;
4133 /* Used to pass maybe_cleanup_ksplice_update to kthread_run */
4134 static int maybe_cleanup_ksplice_update_wrapper(void *updateptr)
4136 struct update *update = updateptr;
4137 mutex_lock(&module_mutex);
4138 maybe_cleanup_ksplice_update(update);
4139 mutex_unlock(&module_mutex);
4140 return 0;
4143 static ssize_t stage_store(struct update *update, const char *buf, size_t len)
4145 enum stage old_stage;
4146 mutex_lock(&module_mutex);
4147 old_stage = update->stage;
4148 if ((strncmp(buf, "applied", len) == 0 ||
4149 strncmp(buf, "applied\n", len) == 0) &&
4150 update->stage == STAGE_PREPARING)
4151 update->abort_cause = apply_update(update);
4152 else if ((strncmp(buf, "reversed", len) == 0 ||
4153 strncmp(buf, "reversed\n", len) == 0) &&
4154 update->stage == STAGE_APPLIED)
4155 update->abort_cause = reverse_update(update);
4156 else if ((strncmp(buf, "cleanup", len) == 0 ||
4157 strncmp(buf, "cleanup\n", len) == 0) &&
4158 update->stage == STAGE_REVERSED)
4159 kthread_run(maybe_cleanup_ksplice_update_wrapper, update,
4160 "ksplice_cleanup_%s", update->kid);
4162 mutex_unlock(&module_mutex);
4163 return len;
4166 static ssize_t debug_show(struct update *update, char *buf)
4168 return snprintf(buf, PAGE_SIZE, "%d\n", update->debug);
4171 static ssize_t debug_store(struct update *update, const char *buf, size_t len)
4173 unsigned long l;
4174 int ret = strict_strtoul(buf, 10, &l);
4175 if (ret != 0)
4176 return ret;
4177 update->debug = l;
4178 return len;
4181 static ssize_t partial_show(struct update *update, char *buf)
4183 return snprintf(buf, PAGE_SIZE, "%d\n", update->partial);
4186 static ssize_t partial_store(struct update *update, const char *buf, size_t len)
4188 unsigned long l;
4189 int ret = strict_strtoul(buf, 10, &l);
4190 if (ret != 0)
4191 return ret;
4192 update->partial = l;
4193 return len;
/*
 * sysfs attributes exposed for each update.  Writable attributes (0600)
 * pair a show with a store handler; read-only ones (0400) pass NULL for
 * store.
 */
static struct update_attribute stage_attribute =
	__ATTR(stage, 0600, stage_show, stage_store);
static struct update_attribute abort_cause_attribute =
	__ATTR(abort_cause, 0400, abort_cause_show, NULL);
static struct update_attribute debug_attribute =
	__ATTR(debug, 0600, debug_show, debug_store);
static struct update_attribute partial_attribute =
	__ATTR(partial, 0600, partial_show, partial_store);
static struct update_attribute conflict_attribute =
	__ATTR(conflicts, 0400, conflict_show, NULL);
/* Default attribute table for the update kobject; NULL-terminated as
 * sysfs requires. */
static struct attribute *update_attrs[] = {
	&stage_attribute.attr,
	&abort_cause_attribute.attr,
	&debug_attribute.attr,
	&partial_attribute.attr,
	&conflict_attribute.attr,
	NULL
};
/* kobject type for updates: sysfs show/store dispatch, release hook, and
 * the default attribute files created under each update's directory. */
static struct kobj_type update_ktype = {
	.sysfs_ops = &update_sysfs_ops,
	.release = update_release,
	.default_attrs = update_attrs,
};
#ifdef KSPLICE_STANDALONE
/* Debug level for the bootstrap update, settable as a module parameter. */
static int debug;
module_param(debug, int, 0600);
MODULE_PARM_DESC(debug, "Debug level");

/* System-map table embedded in this module; sorted in init_ksplice().
 * NOTE(review): presumably generated by the Ksplice build tooling --
 * the generator is outside this file. */
extern struct ksplice_system_map ksplice_system_map[], ksplice_system_map_end[];

/*
 * Synthetic mod_change used to bootstrap a standalone build: it carries
 * this module's own system map and printk mapping so init_ksplice() can
 * apply the module's init relocations to itself.  target_name/target are
 * NULL here, unlike changes loaded for a real target module.
 */
static struct ksplice_mod_change bootstrap_mod_change = {
	.name = "ksplice_" __stringify(KSPLICE_KID),
	.kid = "init_" __stringify(KSPLICE_KID),
	.target_name = NULL,
	.target = NULL,
	.map_printk = MAP_PRINTK,
	.new_code_mod = THIS_MODULE,
	.new_code.system_map = ksplice_system_map,
	.new_code.system_map_end = ksplice_system_map_end,
};
#endif /* KSPLICE_STANDALONE */
/*
 * init_ksplice - module initialization.
 *
 * Standalone build: bootstrap this module as its own first update.  The
 * embedded system map is sorted (needed for later lookups), an update
 * structure is created, and the module's init relocations are applied;
 * "bootstrapped" is set only if that succeeds.  Note that a failed
 * apply_relocs() still returns 0 -- the abort cause is recorded in the
 * update rather than failing module load.
 * Integrated build: just create the /sys/kernel/ksplice directory.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
static int init_ksplice(void)
{
#ifdef KSPLICE_STANDALONE
	struct ksplice_mod_change *change = &bootstrap_mod_change;
	change->update = init_ksplice_update(change->kid);
	/* sort() touches only the system map, so running it before the
	 * NULL check on change->update is harmless. */
	sort(change->new_code.system_map,
	     change->new_code.system_map_end - change->new_code.system_map,
	     sizeof(struct ksplice_system_map), compare_system_map, NULL);
	if (change->update == NULL)
		return -ENOMEM;
	add_to_update(change, change->update);
	change->update->debug = debug;
	change->update->abort_cause =
	    apply_relocs(change, ksplice_init_relocs, ksplice_init_relocs_end);
	if (change->update->abort_cause == OK)
		bootstrapped = true;
	/* The bootstrap update is only needed during init; release it. */
	cleanup_ksplice_update(bootstrap_mod_change.update);
#else /* !KSPLICE_STANDALONE */
	ksplice_kobj = kobject_create_and_add("ksplice", kernel_kobj);
	if (ksplice_kobj == NULL)
		return -ENOMEM;
#endif /* KSPLICE_STANDALONE */
	return 0;
}
4266 static void cleanup_ksplice(void)
4268 #ifndef KSPLICE_STANDALONE
4269 kobject_put(ksplice_kobj);
4270 #endif /* KSPLICE_STANDALONE */
/* Module entry/exit points and metadata. */
module_init(init_ksplice);
module_exit(cleanup_ksplice);

MODULE_AUTHOR("Ksplice, Inc.");
MODULE_DESCRIPTION("Ksplice rebootless update system");
#ifdef KSPLICE_VERSION
MODULE_VERSION(KSPLICE_VERSION);
#endif
MODULE_LICENSE("GPL v2");