Don't build the ksplice core twice.
[ksplice.git] / kmodsrc / ksplice.c
blobdd2b6258ca05491b28ea59a37f06173f7852561a
1 /* Copyright (C) 2007-2009 Ksplice, Inc.
2 * Authors: Jeff Arnold, Anders Kaseorg, Tim Abbott
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License, version 2.
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
15 * 02110-1301, USA.
18 #include <linux/module.h>
19 #include <linux/version.h>
20 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
21 #include <linux/bug.h>
22 #else /* LINUX_VERSION_CODE */
23 /* 7664c5a1da4711bb6383117f51b94c8dc8f3f1cd was after 2.6.19 */
24 #endif /* LINUX_VERSION_CODE */
25 #include <linux/ctype.h>
26 #if defined CONFIG_DEBUG_FS || LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
27 #include <linux/debugfs.h>
28 #else /* CONFIG_DEBUG_FS */
29 /* a7a76cefc4b12bb6508afa4c77f11c2752cc365d was after 2.6.11 */
30 #endif /* CONFIG_DEBUG_FS */
31 #include <linux/errno.h>
32 #include <linux/kallsyms.h>
33 #include <linux/kobject.h>
34 #include <linux/kthread.h>
35 #include <linux/pagemap.h>
36 #include <linux/sched.h>
37 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
38 #include <linux/sort.h>
39 #else /* LINUX_VERSION_CODE < */
40 /* 8c63b6d337534a6b5fb111dc27d0850f535118c0 was after 2.6.11 */
41 #endif /* LINUX_VERSION_CODE */
42 #include <linux/stop_machine.h>
43 #include <linux/sysfs.h>
44 #include <linux/time.h>
45 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
46 #include <linux/uaccess.h>
47 #else /* LINUX_VERSION_CODE < */
48 /* linux/uaccess.h doesn't exist in kernels before 2.6.18 */
49 #include <asm/uaccess.h>
50 #endif /* LINUX_VERSION_CODE */
51 #include <linux/vmalloc.h>
52 #ifdef KSPLICE_STANDALONE
53 #include "ksplice.h"
54 #else /* !KSPLICE_STANDALONE */
55 #include <linux/ksplice.h>
56 #endif /* KSPLICE_STANDALONE */
57 #ifdef KSPLICE_NEED_PARAINSTRUCTIONS
58 #include <asm/alternative.h>
59 #endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
61 #if defined(KSPLICE_STANDALONE) && \
62 !defined(CONFIG_KSPLICE) && !defined(CONFIG_KSPLICE_MODULE)
63 #define KSPLICE_NO_KERNEL_SUPPORT 1
64 #endif /* KSPLICE_STANDALONE && !CONFIG_KSPLICE && !CONFIG_KSPLICE_MODULE */
/* Lifecycle state of an update, as exposed through sysfs */
enum stage {
	STAGE_PREPARING,	/* the update is not yet applied */
	STAGE_APPLIED,		/* the update is applied */
	STAGE_REVERSED,		/* the update has been applied and reversed */
};

/* parameter to modify run-pre matching */
enum run_pre_mode {
	RUN_PRE_INITIAL,	/* dry run (only change temp_labelvals) */
	RUN_PRE_DEBUG,		/* dry run with byte-by-byte debugging */
	RUN_PRE_FINAL,		/* finalizes the matching */
#ifdef KSPLICE_STANDALONE
	RUN_PRE_SILENT,
#endif /* KSPLICE_STANDALONE */
};

/* states for a labelval: no value yet, tentative, or committed */
enum { NOVAL, TEMP, VAL };
84 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
85 /* 5d7b32de9935c65ca8285ac6ec2382afdbb5d479 was after 2.6.8 */
86 #define __bitwise__
87 #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
88 /* af4ca457eaf2d6682059c18463eb106e2ce58198 was after 2.6.14 */
89 #define __bitwise__ __bitwise
90 #endif
92 typedef int __bitwise__ abort_t;
94 #define OK ((__force abort_t) 0)
95 #define NO_MATCH ((__force abort_t) 1)
96 #define CODE_BUSY ((__force abort_t) 2)
97 #define MODULE_BUSY ((__force abort_t) 3)
98 #define OUT_OF_MEMORY ((__force abort_t) 4)
99 #define FAILED_TO_FIND ((__force abort_t) 5)
100 #define ALREADY_REVERSED ((__force abort_t) 6)
101 #define MISSING_EXPORT ((__force abort_t) 7)
102 #define UNEXPECTED_RUNNING_TASK ((__force abort_t) 8)
103 #define UNEXPECTED ((__force abort_t) 9)
104 #define TARGET_NOT_LOADED ((__force abort_t) 10)
105 #define CALL_FAILED ((__force abort_t) 11)
106 #define COLD_UPDATE_LOADED ((__force abort_t) 12)
107 #ifdef KSPLICE_STANDALONE
108 #define BAD_SYSTEM_MAP ((__force abort_t) 13)
109 #endif /* KSPLICE_STANDALONE */
111 struct update {
112 const char *kid;
113 const char *name;
114 struct kobject kobj;
115 enum stage stage;
116 abort_t abort_cause;
117 int debug;
118 #ifdef CONFIG_DEBUG_FS
119 struct debugfs_blob_wrapper debug_blob;
120 struct dentry *debugfs_dentry;
121 #else /* !CONFIG_DEBUG_FS */
122 bool debug_continue_line;
123 #endif /* CONFIG_DEBUG_FS */
124 bool partial; /* is it OK if some target mods aren't loaded */
125 struct list_head packs; /* packs for loaded target mods */
126 struct list_head unused_packs; /* packs for non-loaded target mods */
127 struct list_head conflicts;
128 struct list_head list;
129 struct list_head ksplice_module_list;
132 /* a process conflicting with an update */
133 struct conflict {
134 const char *process_name;
135 pid_t pid;
136 struct list_head stack;
137 struct list_head list;
140 /* an address on the stack of a conflict */
141 struct conflict_addr {
142 unsigned long addr; /* the address on the stack */
143 bool has_conflict; /* does this address in particular conflict? */
144 const char *label; /* the label of the conflicting safety_record */
145 struct list_head list;
148 #if defined(CONFIG_DEBUG_FS) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
149 /* Old kernels don't have debugfs_create_blob */
150 struct debugfs_blob_wrapper {
151 void *data;
152 unsigned long size;
154 #endif /* CONFIG_DEBUG_FS && LINUX_VERSION_CODE */
156 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
157 /* 930631edd4b1fe2781d9fe90edbe35d89dfc94cc was after 2.6.18 */
158 #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
159 #endif
161 struct labelval {
162 struct list_head list;
163 struct ksplice_symbol *symbol;
164 struct list_head *saved_vals;
167 /* region to be checked for conflicts in the stack check */
168 struct safety_record {
169 struct list_head list;
170 const char *label;
171 unsigned long addr; /* the address to be checked for conflicts
172 * (e.g. an obsolete function's starting addr)
174 unsigned long size; /* the size of the region to be checked */
177 /* possible value for a symbol */
178 struct candidate_val {
179 struct list_head list;
180 unsigned long val;
183 /* private struct used by init_symbol_array */
184 struct ksplice_lookup {
185 /* input */
186 struct ksplice_pack *pack;
187 struct ksplice_symbol **arr;
188 size_t size;
189 /* output */
190 abort_t ret;
#ifdef KSPLICE_NO_KERNEL_SUPPORT
/* Local copy of the kernel's symsearch, for kernels without Ksplice hooks */
struct symsearch {
	const struct kernel_symbol *start, *stop;
	const unsigned long *crcs;
	enum {
		NOT_GPL_ONLY,
		GPL_ONLY,
		WILL_BE_GPL_ONLY,
	} licence;
	bool unused;
};
#endif /* KSPLICE_NO_KERNEL_SUPPORT */
206 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
207 /* c33fa9f5609e918824446ef9a75319d4a802f1f4 was after 2.6.25 */
209 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
210 /* 2fff0a48416af891dce38fd425246e337831e0bb was after 2.6.19 */
211 static bool virtual_address_mapped(unsigned long addr)
213 char retval;
214 return probe_kernel_address(addr, retval) != -EFAULT;
216 #else /* LINUX_VERSION_CODE < */
217 static bool virtual_address_mapped(unsigned long addr);
218 #endif /* LINUX_VERSION_CODE */
220 static long probe_kernel_read(void *dst, void *src, size_t size)
222 if (size == 0)
223 return 0;
224 if (!virtual_address_mapped((unsigned long)src) ||
225 !virtual_address_mapped((unsigned long)src + size - 1))
226 return -EFAULT;
228 memcpy(dst, src, size);
229 return 0;
231 #endif /* LINUX_VERSION_CODE */
233 static LIST_HEAD(updates);
234 #ifdef KSPLICE_STANDALONE
235 #if defined(CONFIG_KSPLICE) || defined(CONFIG_KSPLICE_MODULE)
236 extern struct list_head ksplice_module_list;
237 #else /* !CONFIG_KSPLICE */
238 LIST_HEAD(ksplice_module_list);
239 #endif /* CONFIG_KSPLICE */
240 #else /* !KSPLICE_STANDALONE */
241 LIST_HEAD(ksplice_module_list);
242 EXPORT_SYMBOL_GPL(ksplice_module_list);
243 static struct kobject *ksplice_kobj;
244 #endif /* KSPLICE_STANDALONE */
246 static struct kobj_type update_ktype;
248 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
249 /* Old kernels do not have kcalloc
250 * e629946abd0bb8266e9c3d0fd1bff2ef8dec5443 was after 2.6.8
252 static void *kcalloc(size_t n, size_t size, typeof(GFP_KERNEL) flags)
254 char *mem;
255 if (n != 0 && size > ULONG_MAX / n)
256 return NULL;
257 mem = kmalloc(n * size, flags);
258 if (mem)
259 memset(mem, 0, n * size);
260 return mem;
262 #endif /* LINUX_VERSION_CODE */
264 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
265 /* 8c63b6d337534a6b5fb111dc27d0850f535118c0 was after 2.6.11 */
266 static void u32_swap(void *a, void *b, int size)
268 u32 t = *(u32 *)a;
269 *(u32 *)a = *(u32 *)b;
270 *(u32 *)b = t;
/* Byte-by-byte swap for arbitrary element sizes */
static void generic_swap(void *a, void *b, int size)
{
	char *x = a, *y = b;

	do {
		char tmp = *x;
		*x++ = *y;
		*y++ = tmp;
	} while (--size > 0);
}
285 * sort - sort an array of elements
286 * @base: pointer to data to sort
287 * @num: number of elements
288 * @size: size of each element
289 * @cmp: pointer to comparison function
290 * @swap: pointer to swap function or NULL
292 * This function does a heapsort on the given array. You may provide a
293 * swap function optimized to your element type.
295 * Sorting time is O(n log n) both on average and worst-case. While
296 * qsort is about 20% faster on average, it suffers from exploitable
297 * O(n*n) worst-case behavior and extra memory requirements that make
298 * it less suitable for kernel use.
301 void sort(void *base, size_t num, size_t size,
302 int (*cmp)(const void *, const void *),
303 void (*swap)(void *, void *, int size))
305 /* pre-scale counters for performance */
306 int i = (num / 2 - 1) * size, n = num * size, c, r;
308 if (!swap)
309 swap = (size == 4 ? u32_swap : generic_swap);
311 /* heapify */
312 for (; i >= 0; i -= size) {
313 for (r = i; r * 2 + size < n; r = c) {
314 c = r * 2 + size;
315 if (c < n - size && cmp(base + c, base + c + size) < 0)
316 c += size;
317 if (cmp(base + r, base + c) >= 0)
318 break;
319 swap(base + r, base + c, size);
323 /* sort */
324 for (i = n - size; i > 0; i -= size) {
325 swap(base, base + i, size);
326 for (r = 0; r * 2 + size < i; r = c) {
327 c = r * 2 + size;
328 if (c < i - size && cmp(base + c, base + c + size) < 0)
329 c += size;
330 if (cmp(base + r, base + c) >= 0)
331 break;
332 swap(base + r, base + c, size);
336 #endif /* LINUX_VERSION_CODE < */
338 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13)
339 /* Old kernels do not have kstrdup
340 * 543537bd922692bc978e2e356fcd8bfc9c2ee7d5 was 2.6.13-rc4
342 static char *kstrdup(const char *s, typeof(GFP_KERNEL) gfp)
344 size_t len;
345 char *buf;
347 if (!s)
348 return NULL;
350 len = strlen(s) + 1;
351 buf = kmalloc(len, gfp);
352 if (buf)
353 memcpy(buf, s, len);
354 return buf;
356 #endif /* LINUX_VERSION_CODE */
358 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
359 /* Old kernels use semaphore instead of mutex
360 * 97d1f15b7ef52c1e9c28dc48b454024bb53a5fd2 was after 2.6.16
362 #define mutex semaphore
363 #define mutex_lock down
364 #define mutex_unlock up
365 #endif /* LINUX_VERSION_CODE */
367 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
368 /* 11443ec7d9286dd25663516436a14edfb5f43857 was after 2.6.21 */
369 static char * __attribute_used__
370 kvasprintf(typeof(GFP_KERNEL) gfp, const char *fmt, va_list ap)
372 unsigned int len;
373 char *p, dummy[1];
374 va_list aq;
376 va_copy(aq, ap);
377 len = vsnprintf(dummy, 0, fmt, aq);
378 va_end(aq);
380 p = kmalloc(len + 1, gfp);
381 if (!p)
382 return NULL;
384 vsnprintf(p, len + 1, fmt, ap);
386 return p;
388 #endif
390 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
391 /* e905914f96e11862b130dd229f73045dad9a34e8 was after 2.6.17 */
392 static char * __attribute__((format (printf, 2, 3)))
393 kasprintf(typeof(GFP_KERNEL) gfp, const char *fmt, ...)
395 va_list ap;
396 char *p;
398 va_start(ap, fmt);
399 p = kvasprintf(gfp, fmt, ap);
400 va_end(ap);
402 return p;
404 #endif
406 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
407 /* 06b2a76d25d3cfbd14680021c1d356c91be6904e was after 2.6.24 */
408 static int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
410 char *tail;
411 unsigned long val;
412 size_t len;
414 *res = 0;
415 len = strlen(cp);
416 if (len == 0)
417 return -EINVAL;
419 val = simple_strtoul(cp, &tail, base);
420 if ((*tail == '\0') ||
421 ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
422 *res = val;
423 return 0;
426 return -EINVAL;
428 #endif
430 #ifndef task_thread_info
431 #define task_thread_info(task) (task)->thread_info
432 #endif /* !task_thread_info */
434 #ifdef KSPLICE_STANDALONE
436 static bool bootstrapped = false;
438 #ifdef CONFIG_KALLSYMS
439 extern unsigned long kallsyms_addresses[], kallsyms_num_syms;
440 extern u8 kallsyms_names[];
441 #endif /* CONFIG_KALLSYMS */
443 /* defined by ksplice-create */
444 extern const struct ksplice_reloc ksplice_init_relocs[],
445 ksplice_init_relocs_end[];
447 /* Obtained via System.map */
448 extern struct list_head modules;
449 extern struct mutex module_mutex;
450 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) && defined(CONFIG_UNUSED_SYMBOLS)
451 /* f71d20e961474dde77e6558396efb93d6ac80a4b was after 2.6.17 */
452 #define KSPLICE_KSYMTAB_UNUSED_SUPPORT 1
453 #endif /* LINUX_VERSION_CODE */
454 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
455 /* 9f28bb7e1d0188a993403ab39b774785892805e1 was after 2.6.16 */
456 #define KSPLICE_KSYMTAB_FUTURE_SUPPORT 1
457 #endif /* LINUX_VERSION_CODE */
458 extern const struct kernel_symbol __start___ksymtab[];
459 extern const struct kernel_symbol __stop___ksymtab[];
460 extern const unsigned long __start___kcrctab[];
461 extern const struct kernel_symbol __start___ksymtab_gpl[];
462 extern const struct kernel_symbol __stop___ksymtab_gpl[];
463 extern const unsigned long __start___kcrctab_gpl[];
464 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
465 extern const struct kernel_symbol __start___ksymtab_unused[];
466 extern const struct kernel_symbol __stop___ksymtab_unused[];
467 extern const unsigned long __start___kcrctab_unused[];
468 extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
469 extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
470 extern const unsigned long __start___kcrctab_unused_gpl[];
471 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
472 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
473 extern const struct kernel_symbol __start___ksymtab_gpl_future[];
474 extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
475 extern const unsigned long __start___kcrctab_gpl_future[];
476 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
478 #endif /* KSPLICE_STANDALONE */
480 static struct update *init_ksplice_update(const char *kid);
481 static void cleanup_ksplice_update(struct update *update);
482 static void maybe_cleanup_ksplice_update(struct update *update);
483 static void add_to_update(struct ksplice_pack *pack, struct update *update);
484 static int ksplice_sysfs_init(struct update *update);
486 /* Preparing the relocations and patches for application */
487 static abort_t apply_update(struct update *update);
488 static abort_t prepare_pack(struct ksplice_pack *pack);
489 static abort_t finalize_pack(struct ksplice_pack *pack);
490 static abort_t finalize_patches(struct ksplice_pack *pack);
491 static abort_t add_dependency_on_address(struct ksplice_pack *pack,
492 unsigned long addr);
493 static abort_t map_trampoline_pages(struct update *update);
494 static void unmap_trampoline_pages(struct update *update);
495 static void *map_writable(void *addr, size_t len);
496 static abort_t apply_relocs(struct ksplice_pack *pack,
497 const struct ksplice_reloc *relocs,
498 const struct ksplice_reloc *relocs_end);
499 static abort_t apply_reloc(struct ksplice_pack *pack,
500 const struct ksplice_reloc *r);
501 static abort_t apply_howto_reloc(struct ksplice_pack *pack,
502 const struct ksplice_reloc *r);
503 static abort_t apply_howto_date(struct ksplice_pack *pack,
504 const struct ksplice_reloc *r);
505 static abort_t read_reloc_value(struct ksplice_pack *pack,
506 const struct ksplice_reloc *r,
507 unsigned long addr, unsigned long *valp);
508 static abort_t write_reloc_value(struct ksplice_pack *pack,
509 const struct ksplice_reloc *r,
510 unsigned long addr, unsigned long sym_addr);
511 static abort_t create_module_list_entry(struct ksplice_pack *pack,
512 bool to_be_applied);
513 static void cleanup_module_list_entries(struct update *update);
514 static void __attribute__((noreturn)) ksplice_deleted(void);
516 /* run-pre matching */
517 static abort_t match_pack_sections(struct ksplice_pack *pack,
518 bool consider_data_sections);
519 static abort_t find_section(struct ksplice_pack *pack,
520 struct ksplice_section *sect);
521 static abort_t try_addr(struct ksplice_pack *pack,
522 struct ksplice_section *sect,
523 unsigned long run_addr,
524 struct list_head *safety_records,
525 enum run_pre_mode mode);
526 static abort_t run_pre_cmp(struct ksplice_pack *pack,
527 const struct ksplice_section *sect,
528 unsigned long run_addr,
529 struct list_head *safety_records,
530 enum run_pre_mode mode);
531 #ifndef CONFIG_FUNCTION_DATA_SECTIONS
532 /* defined in arch/ARCH/kernel/ksplice-arch.c */
533 static abort_t arch_run_pre_cmp(struct ksplice_pack *pack,
534 struct ksplice_section *sect,
535 unsigned long run_addr,
536 struct list_head *safety_records,
537 enum run_pre_mode mode);
538 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
539 static void print_bytes(struct ksplice_pack *pack,
540 const unsigned char *run, int runc,
541 const unsigned char *pre, int prec);
542 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
543 static abort_t brute_search(struct ksplice_pack *pack,
544 struct ksplice_section *sect,
545 const void *start, unsigned long len,
546 struct list_head *vals);
547 static abort_t brute_search_all(struct ksplice_pack *pack,
548 struct ksplice_section *sect,
549 struct list_head *vals);
550 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
551 static const struct ksplice_reloc *
552 init_reloc_search(struct ksplice_pack *pack,
553 const struct ksplice_section *sect);
554 static const struct ksplice_reloc *find_reloc(const struct ksplice_reloc *start,
555 const struct ksplice_reloc *end,
556 unsigned long address,
557 unsigned long size);
558 static abort_t lookup_reloc(struct ksplice_pack *pack,
559 const struct ksplice_reloc **fingerp,
560 unsigned long addr,
561 const struct ksplice_reloc **relocp);
562 static abort_t handle_reloc(struct ksplice_pack *pack,
563 const struct ksplice_section *sect,
564 const struct ksplice_reloc *r,
565 unsigned long run_addr, enum run_pre_mode mode);
566 static abort_t handle_howto_date(struct ksplice_pack *pack,
567 const struct ksplice_section *sect,
568 const struct ksplice_reloc *r,
569 unsigned long run_addr,
570 enum run_pre_mode mode);
571 static abort_t handle_howto_reloc(struct ksplice_pack *pack,
572 const struct ksplice_section *sect,
573 const struct ksplice_reloc *r,
574 unsigned long run_addr,
575 enum run_pre_mode mode);
576 static struct ksplice_section *symbol_section(struct ksplice_pack *pack,
577 const struct ksplice_symbol *sym);
578 static int compare_section_labels(const void *va, const void *vb);
579 static int symbol_section_bsearch_compare(const void *a, const void *b);
580 static const struct ksplice_reloc *patch_reloc(struct ksplice_pack *pack,
581 const struct ksplice_patch *p);
583 /* Computing possible addresses for symbols */
584 static abort_t lookup_symbol(struct ksplice_pack *pack,
585 const struct ksplice_symbol *ksym,
586 struct list_head *vals);
587 static void cleanup_symbol_arrays(struct ksplice_pack *pack);
588 static abort_t init_symbol_arrays(struct ksplice_pack *pack);
589 static abort_t init_symbol_array(struct ksplice_pack *pack,
590 struct ksplice_symbol *start,
591 struct ksplice_symbol *end);
592 static abort_t uniquify_symbols(struct ksplice_pack *pack);
593 static abort_t add_matching_values(struct ksplice_lookup *lookup,
594 const char *sym_name, unsigned long sym_val);
595 static bool add_export_values(const struct symsearch *syms,
596 struct module *owner,
597 unsigned int symnum, void *data);
598 static int symbolp_bsearch_compare(const void *key, const void *elt);
599 static int compare_symbolp_names(const void *a, const void *b);
600 static int compare_symbolp_labels(const void *a, const void *b);
601 #ifdef CONFIG_KALLSYMS
602 static int add_kallsyms_values(void *data, const char *name,
603 struct module *owner, unsigned long val);
604 #endif /* CONFIG_KALLSYMS */
605 #ifdef KSPLICE_STANDALONE
606 static abort_t
607 add_system_map_candidates(struct ksplice_pack *pack,
608 const struct ksplice_system_map *start,
609 const struct ksplice_system_map *end,
610 const char *label, struct list_head *vals);
611 static int compare_system_map(const void *a, const void *b);
612 static int system_map_bsearch_compare(const void *key, const void *elt);
613 #endif /* KSPLICE_STANDALONE */
614 static abort_t new_export_lookup(struct ksplice_pack *ipack, const char *name,
615 struct list_head *vals);
617 /* Atomic update trampoline insertion and removal */
618 static abort_t apply_patches(struct update *update);
619 static abort_t reverse_patches(struct update *update);
620 static int __apply_patches(void *update);
621 static int __reverse_patches(void *update);
622 static abort_t check_each_task(struct update *update);
623 static abort_t check_task(struct update *update,
624 const struct task_struct *t, bool rerun);
625 static abort_t check_stack(struct update *update, struct conflict *conf,
626 const struct thread_info *tinfo,
627 const unsigned long *stack);
628 static abort_t check_address(struct update *update,
629 struct conflict *conf, unsigned long addr);
630 static abort_t check_record(struct conflict_addr *ca,
631 const struct safety_record *rec,
632 unsigned long addr);
633 static bool is_stop_machine(const struct task_struct *t);
634 static void cleanup_conflicts(struct update *update);
635 static void print_conflicts(struct update *update);
636 static void insert_trampoline(struct ksplice_patch *p);
637 static abort_t verify_trampoline(struct ksplice_pack *pack,
638 const struct ksplice_patch *p);
639 static void remove_trampoline(const struct ksplice_patch *p);
641 static abort_t create_labelval(struct ksplice_pack *pack,
642 struct ksplice_symbol *ksym,
643 unsigned long val, int status);
644 static abort_t create_safety_record(struct ksplice_pack *pack,
645 const struct ksplice_section *sect,
646 struct list_head *record_list,
647 unsigned long run_addr,
648 unsigned long run_size);
649 static abort_t add_candidate_val(struct ksplice_pack *pack,
650 struct list_head *vals, unsigned long val);
651 static void release_vals(struct list_head *vals);
652 static void set_temp_labelvals(struct ksplice_pack *pack, int status_val);
654 static int contains_canary(struct ksplice_pack *pack, unsigned long blank_addr,
655 const struct ksplice_reloc_howto *howto);
656 static unsigned long follow_trampolines(struct ksplice_pack *pack,
657 unsigned long addr);
658 static bool patches_module(const struct module *a, const struct module *b);
659 static bool starts_with(const char *str, const char *prefix);
660 static bool singular(struct list_head *list);
661 static void *bsearch(const void *key, const void *base, size_t n,
662 size_t size, int (*cmp)(const void *key, const void *elt));
663 static int compare_relocs(const void *a, const void *b);
664 static int reloc_bsearch_compare(const void *key, const void *elt);
666 /* Debugging */
667 static abort_t init_debug_buf(struct update *update);
668 static void clear_debug_buf(struct update *update);
669 static int __attribute__((format(printf, 2, 3)))
670 _ksdebug(struct update *update, const char *fmt, ...);
671 #define ksdebug(pack, fmt, ...) \
672 _ksdebug(pack->update, fmt, ## __VA_ARGS__)
674 #ifdef KSPLICE_NO_KERNEL_SUPPORT
675 /* Functions defined here that will be exported in later kernels */
676 #ifdef CONFIG_KALLSYMS
677 static int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
678 struct module *, unsigned long),
679 void *data);
680 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
681 static unsigned int kallsyms_expand_symbol(unsigned int off, char *result);
682 #endif /* LINUX_VERSION_CODE */
683 static int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
684 struct module *,
685 unsigned long),
686 void *data);
687 #endif /* CONFIG_KALLSYMS */
688 static struct module *find_module(const char *name);
689 static int use_module(struct module *a, struct module *b);
690 static const struct kernel_symbol *find_symbol(const char *name,
691 struct module **owner,
692 const unsigned long **crc,
693 bool gplok, bool warn);
694 static bool each_symbol(bool (*fn)(const struct symsearch *arr,
695 struct module *owner,
696 unsigned int symnum, void *data),
697 void *data);
698 static struct module *__module_data_address(unsigned long addr);
699 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
701 /* Architecture-specific functions defined in arch/ARCH/kernel/ksplice-arch.c */
703 /* Prepare a trampoline for the given patch */
704 static abort_t prepare_trampoline(struct ksplice_pack *pack,
705 struct ksplice_patch *p);
706 /* What address does the trampoline at addr jump to? */
707 static abort_t trampoline_target(struct ksplice_pack *pack, unsigned long addr,
708 unsigned long *new_addr);
709 /* Hook to handle pc-relative jumps inserted by parainstructions */
710 static abort_t handle_paravirt(struct ksplice_pack *pack, unsigned long pre,
711 unsigned long run, int *matched);
712 /* Called for relocations of type KSPLICE_HOWTO_BUG */
713 static abort_t handle_bug(struct ksplice_pack *pack,
714 const struct ksplice_reloc *r,
715 unsigned long run_addr);
716 /* Called for relocations of type KSPLICE_HOWTO_EXTABLE */
717 static abort_t handle_extable(struct ksplice_pack *pack,
718 const struct ksplice_reloc *r,
719 unsigned long run_addr);
720 /* Is address p on the stack of the given thread? */
721 static bool valid_stack_ptr(const struct thread_info *tinfo, const void *p);
723 #ifndef KSPLICE_STANDALONE
724 #include "ksplice-arch.c"
725 #elif defined CONFIG_X86
726 #include "x86/ksplice-arch.c"
727 #elif defined CONFIG_ARM
728 #include "arm/ksplice-arch.c"
729 #endif /* KSPLICE_STANDALONE */
/* Free every element of a list and empty it; type/member locate the
 * containing struct from its list_head, as with list_entry().
 */
#define clear_list(head, type, member)				\
	do {							\
		struct list_head *_pos, *_n;			\
		list_for_each_safe(_pos, _n, head) {		\
			list_del(_pos);				\
			kfree(list_entry(_pos, type, member));	\
		}						\
	} while (0)
741 * init_ksplice_pack() - Initializes a ksplice pack
742 * @pack: The pack to be initialized. All of the public fields of the
743 * pack and its associated data structures should be populated
744 * before this function is called. The values of the private
745 * fields will be ignored.
747 int init_ksplice_pack(struct ksplice_pack *pack)
749 struct update *update;
750 struct ksplice_patch *p;
751 struct ksplice_section *s;
752 int ret = 0;
754 #ifdef KSPLICE_STANDALONE
755 if (!bootstrapped)
756 return -1;
757 #endif /* KSPLICE_STANDALONE */
759 INIT_LIST_HEAD(&pack->temp_labelvals);
760 INIT_LIST_HEAD(&pack->safety_records);
762 sort(pack->helper_relocs,
763 pack->helper_relocs_end - pack->helper_relocs,
764 sizeof(*pack->helper_relocs), compare_relocs, NULL);
765 sort(pack->primary_relocs,
766 pack->primary_relocs_end - pack->primary_relocs,
767 sizeof(*pack->primary_relocs), compare_relocs, NULL);
768 sort(pack->helper_sections,
769 pack->helper_sections_end - pack->helper_sections,
770 sizeof(*pack->helper_sections), compare_section_labels, NULL);
771 #ifdef KSPLICE_STANDALONE
772 sort(pack->primary_system_map,
773 pack->primary_system_map_end - pack->primary_system_map,
774 sizeof(*pack->primary_system_map), compare_system_map, NULL);
775 sort(pack->helper_system_map,
776 pack->helper_system_map_end - pack->helper_system_map,
777 sizeof(*pack->helper_system_map), compare_system_map, NULL);
778 #endif /* KSPLICE_STANDALONE */
780 for (p = pack->patches; p < pack->patches_end; p++)
781 p->vaddr = NULL;
782 for (s = pack->helper_sections; s < pack->helper_sections_end; s++)
783 s->match_map = NULL;
784 for (p = pack->patches; p < pack->patches_end; p++) {
785 const struct ksplice_reloc *r = patch_reloc(pack, p);
786 if (r == NULL)
787 return -ENOENT;
788 if (p->type == KSPLICE_PATCH_DATA) {
789 s = symbol_section(pack, r->symbol);
790 if (s == NULL)
791 return -ENOENT;
792 /* Ksplice creates KSPLICE_PATCH_DATA patches in order
793 * to modify rodata sections that have been explicitly
794 * marked for patching using the ksplice-patch.h macro
795 * ksplice_assume_rodata. Here we modify the section
796 * flags appropriately.
798 if (s->flags & KSPLICE_SECTION_DATA)
799 s->flags = (s->flags & ~KSPLICE_SECTION_DATA) |
800 KSPLICE_SECTION_RODATA;
804 mutex_lock(&module_mutex);
805 list_for_each_entry(update, &updates, list) {
806 if (strcmp(pack->kid, update->kid) == 0) {
807 if (update->stage != STAGE_PREPARING) {
808 ret = -EPERM;
809 goto out;
811 add_to_update(pack, update);
812 ret = 0;
813 goto out;
816 update = init_ksplice_update(pack->kid);
817 if (update == NULL) {
818 ret = -ENOMEM;
819 goto out;
821 ret = ksplice_sysfs_init(update);
822 if (ret != 0) {
823 cleanup_ksplice_update(update);
824 goto out;
826 add_to_update(pack, update);
827 out:
828 mutex_unlock(&module_mutex);
829 return ret;
831 EXPORT_SYMBOL_GPL(init_ksplice_pack);
834 * cleanup_ksplice_pack() - Cleans up a pack
835 * @pack: The pack to be cleaned up
837 void cleanup_ksplice_pack(struct ksplice_pack *pack)
839 if (pack->update == NULL)
840 return;
842 mutex_lock(&module_mutex);
843 if (pack->update->stage == STAGE_APPLIED) {
844 /* If the pack wasn't actually applied (because we
845 * only applied this update to loaded modules and this
846 * target was not loaded), then unregister the pack
847 * from the list of unused packs.
849 struct ksplice_pack *p;
850 bool found = false;
852 list_for_each_entry(p, &pack->update->unused_packs, list) {
853 if (p == pack)
854 found = true;
856 if (found)
857 list_del(&pack->list);
858 mutex_unlock(&module_mutex);
859 return;
861 list_del(&pack->list);
862 if (pack->update->stage == STAGE_PREPARING)
863 maybe_cleanup_ksplice_update(pack->update);
864 pack->update = NULL;
865 mutex_unlock(&module_mutex);
867 EXPORT_SYMBOL_GPL(cleanup_ksplice_pack);
869 static struct update *init_ksplice_update(const char *kid)
871 struct update *update;
872 update = kcalloc(1, sizeof(struct update), GFP_KERNEL);
873 if (update == NULL)
874 return NULL;
875 update->name = kasprintf(GFP_KERNEL, "ksplice_%s", kid);
876 if (update->name == NULL) {
877 kfree(update);
878 return NULL;
880 update->kid = kstrdup(kid, GFP_KERNEL);
881 if (update->kid == NULL) {
882 kfree(update->name);
883 kfree(update);
884 return NULL;
886 if (try_module_get(THIS_MODULE) != 1) {
887 kfree(update->kid);
888 kfree(update->name);
889 kfree(update);
890 return NULL;
892 INIT_LIST_HEAD(&update->packs);
893 INIT_LIST_HEAD(&update->unused_packs);
894 INIT_LIST_HEAD(&update->ksplice_module_list);
895 if (init_debug_buf(update) != OK) {
896 module_put(THIS_MODULE);
897 kfree(update->kid);
898 kfree(update->name);
899 kfree(update);
900 return NULL;
902 list_add(&update->list, &updates);
903 update->stage = STAGE_PREPARING;
904 update->abort_cause = OK;
905 update->partial = 0;
906 INIT_LIST_HEAD(&update->conflicts);
907 return update;
910 static void cleanup_ksplice_update(struct update *update)
912 list_del(&update->list);
913 cleanup_conflicts(update);
914 clear_debug_buf(update);
915 cleanup_module_list_entries(update);
916 kfree(update->kid);
917 kfree(update->name);
918 kfree(update);
919 module_put(THIS_MODULE);
/* Clean up the update if it no longer has any packs.  Dropping the
 * kobject reference triggers the actual teardown through the ktype's
 * release callback (presumably cleanup_ksplice_update -- registered
 * elsewhere in this file). */
static void maybe_cleanup_ksplice_update(struct update *update)
{
	if (list_empty(&update->packs) && list_empty(&update->unused_packs))
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
		kobject_put(&update->kobj);
#else /* LINUX_VERSION_CODE < */
/* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
		kobject_unregister(&update->kobj);
#endif /* LINUX_VERSION_CODE */
}
/* Attach @pack to @update.  New packs start on the unused_packs list;
 * apply_update() moves them to update->packs once their target module
 * is resolved. */
static void add_to_update(struct ksplice_pack *pack, struct update *update)
{
	pack->update = update;
	list_add(&pack->list, &update->unused_packs);
}
/*
 * Register the update's kobject in sysfs and emit a KOBJ_ADD uevent.
 * Returns 0 on success or a negative errno from the kobject layer.
 * The four code paths handle the kobject API changes across kernel
 * versions (pre-2.6.11, 2.6.11-24, and 2.6.25+), and the standalone
 * build which parents the kobject on this module instead of the
 * shared ksplice_kobj.
 */
static int ksplice_sysfs_init(struct update *update)
{
	int ret = 0;
	memset(&update->kobj, 0, sizeof(update->kobj));
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
#ifndef KSPLICE_STANDALONE
	ret = kobject_init_and_add(&update->kobj, &update_ktype,
				   ksplice_kobj, "%s", update->kid);
#else /* KSPLICE_STANDALONE */
/* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
	ret = kobject_init_and_add(&update->kobj, &update_ktype,
				   &THIS_MODULE->mkobj.kobj, "ksplice");
#endif /* KSPLICE_STANDALONE */
#else /* LINUX_VERSION_CODE < */
	/* Pre-2.6.25: set name/parent/ktype by hand, then register. */
	ret = kobject_set_name(&update->kobj, "%s", "ksplice");
	if (ret != 0)
		return ret;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
	update->kobj.parent = &THIS_MODULE->mkobj.kobj;
#else /* LINUX_VERSION_CODE < */
/* b86ab02803095190d6b72bcc18dcf620bf378df9 was after 2.6.10 */
	update->kobj.parent = &THIS_MODULE->mkobj->kobj;
#endif /* LINUX_VERSION_CODE */
	update->kobj.ktype = &update_ktype;
	ret = kobject_register(&update->kobj);
#endif /* LINUX_VERSION_CODE */
	if (ret != 0)
		return ret;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
	kobject_uevent(&update->kobj, KOBJ_ADD);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
/* 312c004d36ce6c739512bac83b452f4c20ab1f62 was after 2.6.14 */
/* 12025235884570ba7f02a6f427f973ac6be7ec54 was after 2.6.9 */
	kobject_uevent(&update->kobj, KOBJ_ADD, NULL);
#endif /* LINUX_VERSION_CODE */
	return 0;
}
/*
 * Drive the whole update application: resolve each pack's target
 * module, move ready packs from unused_packs to packs, record safety
 * records for the replacement sections, run symbol resolution and
 * run-pre matching (prepare_pack), and finally apply the patches.
 *
 * On any failure control jumps to "out", which (while still in
 * STAGE_PREPARING) discards safety records and module-list entries so
 * the update can be retried or abandoned cleanly.
 */
static abort_t apply_update(struct update *update)
{
	struct ksplice_pack *pack, *n;
	abort_t ret;
	int retval;

	list_for_each_entry(pack, &update->packs, list) {
		ret = create_module_list_entry(pack, true);
		if (ret != OK)
			goto out;
	}

	/* _safe iteration: packs are moved off unused_packs inside the
	 * loop. */
	list_for_each_entry_safe(pack, n, &update->unused_packs, list) {
		if (strcmp(pack->target_name, "vmlinux") == 0) {
			pack->target = NULL;
		} else if (pack->target == NULL) {
			pack->target = find_module(pack->target_name);
			if (pack->target == NULL ||
			    !module_is_live(pack->target)) {
				/* In partial mode a missing target is
				 * tolerated; otherwise it aborts. */
				if (!update->partial) {
					ret = TARGET_NOT_LOADED;
					goto out;
				}
				ret = create_module_list_entry(pack, false);
				if (ret != OK)
					goto out;
				continue;
			}
			/* Make the primary module depend on its target so
			 * the target cannot be unloaded underneath us. */
			retval = use_module(pack->primary, pack->target);
			if (retval != 1) {
				ret = UNEXPECTED;
				goto out;
			}
		}
		ret = create_module_list_entry(pack, true);
		if (ret != OK)
			goto out;
		list_del(&pack->list);
		list_add_tail(&pack->list, &update->packs);

#ifdef KSPLICE_NEED_PARAINSTRUCTIONS
		/* vmlinux packs need their paravirt ops patched in. */
		if (pack->target == NULL) {
			apply_paravirt(pack->primary_parainstructions,
				       pack->primary_parainstructions_end);
			apply_paravirt(pack->helper_parainstructions,
				       pack->helper_parainstructions_end);
		}
#endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
	}

	/* Every primary (replacement) section gets a safety record so the
	 * stack-check logic can see the new code as well as the old. */
	list_for_each_entry(pack, &update->packs, list) {
		const struct ksplice_section *sect;
		for (sect = pack->primary_sections;
		     sect < pack->primary_sections_end; sect++) {
			struct safety_record *rec = kmalloc(sizeof(*rec),
							    GFP_KERNEL);
			if (rec == NULL) {
				ret = OUT_OF_MEMORY;
				goto out;
			}
			rec->addr = sect->address;
			rec->size = sect->size;
			rec->label = sect->symbol->label;
			list_add(&rec->list, &pack->safety_records);
		}
	}

	list_for_each_entry(pack, &update->packs, list) {
		ret = init_symbol_arrays(pack);
		if (ret != OK) {
			cleanup_symbol_arrays(pack);
			goto out;
		}
		ret = prepare_pack(pack);
		cleanup_symbol_arrays(pack);
		if (ret != OK)
			goto out;
	}
	ret = apply_patches(update);
out:
	list_for_each_entry(pack, &update->packs, list) {
		struct ksplice_section *s;
		/* Still preparing here means we failed (or apply_patches
		 * left the stage unchanged): drop the safety records. */
		if (update->stage == STAGE_PREPARING)
			clear_list(&pack->safety_records, struct safety_record,
				   list);
		/* match_map scratch space is only needed during run-pre
		 * matching; free it on both success and failure. */
		for (s = pack->helper_sections; s < pack->helper_sections_end;
		     s++) {
			if (s->match_map != NULL) {
				vfree(s->match_map);
				s->match_map = NULL;
			}
		}
	}
	if (update->stage == STAGE_PREPARING)
		cleanup_module_list_entries(update);
	return ret;
}
1076 static int compare_symbolp_names(const void *a, const void *b)
1078 const struct ksplice_symbol *const *sympa = a, *const *sympb = b;
1079 if ((*sympa)->name == NULL && (*sympb)->name == NULL)
1080 return 0;
1081 if ((*sympa)->name == NULL)
1082 return -1;
1083 if ((*sympb)->name == NULL)
1084 return 1;
1085 return strcmp((*sympa)->name, (*sympb)->name);
1088 static int compare_symbolp_labels(const void *a, const void *b)
1090 const struct ksplice_symbol *const *sympa = a, *const *sympb = b;
1091 return strcmp((*sympa)->label, (*sympb)->label);
1094 static int symbolp_bsearch_compare(const void *key, const void *elt)
1096 const char *name = key;
1097 const struct ksplice_symbol *const *symp = elt;
1098 const struct ksplice_symbol *sym = *symp;
1099 if (sym->name == NULL)
1100 return 1;
1101 return strcmp(name, sym->name);
/*
 * Record @sym_val as a candidate address for every symbol in
 * lookup->arr whose name equals @sym_name.  lookup->arr is sorted by
 * name (compare_symbolp_names), so equal names form a contiguous run.
 */
static abort_t add_matching_values(struct ksplice_lookup *lookup,
				   const char *sym_name, unsigned long sym_val)
{
	struct ksplice_symbol **symp;
	abort_t ret;

	symp = bsearch(sym_name, lookup->arr, lookup->size,
		       sizeof(*lookup->arr), symbolp_bsearch_compare);
	if (symp == NULL)
		return OK;

	/* bsearch may land anywhere within a run of equal names;
	 * rewind to the first match before walking forward. */
	while (symp > lookup->arr &&
	       symbolp_bsearch_compare(sym_name, symp - 1) == 0)
		symp--;

	for (; symp < lookup->arr + lookup->size; symp++) {
		struct ksplice_symbol *sym = *symp;
		if (sym->name == NULL || strcmp(sym_name, sym->name) != 0)
			break;
		ret = add_candidate_val(lookup->pack, sym->vals, sym_val);
		if (ret != OK)
			return ret;
	}
	return OK;
}
#ifdef CONFIG_KALLSYMS
/* kallsyms_on_each_symbol callback: feed each kallsyms entry into
 * add_matching_values, skipping symbols owned by the pack's own
 * primary module and symbols from modules the pack does not patch.
 * abort_t is force-cast through int to satisfy the callback type. */
static int add_kallsyms_values(void *data, const char *name,
			       struct module *owner, unsigned long val)
{
	struct ksplice_lookup *lookup = data;
	if (owner == lookup->pack->primary ||
	    !patches_module(owner, lookup->pack->target))
		return (__force int)OK;
	return (__force int)add_matching_values(lookup, name, val);
}
#endif /* CONFIG_KALLSYMS */
/* each_symbol callback: feed one exported symbol into
 * add_matching_values.  Returns true to stop the iteration early on
 * error, stashing the abort cause in lookup->ret (each_symbol itself
 * can only report a bool). */
static bool add_export_values(const struct symsearch *syms,
			      struct module *owner,
			      unsigned int symnum, void *data)
{
	struct ksplice_lookup *lookup = data;
	abort_t ret;

	ret = add_matching_values(lookup, syms->start[symnum].name,
				  syms->start[symnum].value);
	if (ret != OK) {
		lookup->ret = ret;
		return true;
	}
	return false;
}
/* Free the candidate-value lists attached to every primary and helper
 * symbol of @pack (allocated by init_symbol_array).  Safe to call on a
 * partially-initialized pack: symbols with vals == NULL are skipped. */
static void cleanup_symbol_arrays(struct ksplice_pack *pack)
{
	struct ksplice_symbol *sym;
	for (sym = pack->primary_symbols; sym < pack->primary_symbols_end;
	     sym++) {
		if (sym->vals != NULL) {
			clear_list(sym->vals, struct candidate_val, list);
			kfree(sym->vals);
			sym->vals = NULL;
		}
	}
	for (sym = pack->helper_symbols; sym < pack->helper_symbols_end;
	     sym++) {
		if (sym->vals != NULL) {
			clear_list(sym->vals, struct candidate_val, list);
			kfree(sym->vals);
			sym->vals = NULL;
		}
	}
}
/*
 * The primary and helper modules each have their own independent
 * ksplice_symbol structures.  uniquify_symbols unifies these separate
 * pieces of kernel symbol information by replacing all references to
 * the helper copy of symbols with references to the primary copy.
 *
 * Matching is done by label (via a sorted pointer array over the
 * primary symbols); where a primary symbol lacks a name, the name is
 * copied over from the matching helper symbol.
 */
static abort_t uniquify_symbols(struct ksplice_pack *pack)
{
	struct ksplice_reloc *r;
	struct ksplice_section *s;
	struct ksplice_symbol *sym, **sym_arr, **symp;
	size_t size = pack->primary_symbols_end - pack->primary_symbols;

	if (size == 0)
		return OK;

	/* Build a sorted array of pointers into primary_symbols so the
	 * helper-side lookups below can bsearch by label. */
	sym_arr = vmalloc(sizeof(*sym_arr) * size);
	if (sym_arr == NULL)
		return OUT_OF_MEMORY;

	for (symp = sym_arr, sym = pack->primary_symbols;
	     symp < sym_arr + size && sym < pack->primary_symbols_end;
	     sym++, symp++)
		*symp = sym;

	sort(sym_arr, size, sizeof(*sym_arr), compare_symbolp_labels, NULL);

	/* Redirect helper relocations to the primary symbol copies. */
	for (r = pack->helper_relocs; r < pack->helper_relocs_end; r++) {
		symp = bsearch(&r->symbol, sym_arr, size, sizeof(*sym_arr),
			       compare_symbolp_labels);
		if (symp != NULL) {
			if ((*symp)->name == NULL)
				(*symp)->name = r->symbol->name;
			r->symbol = *symp;
		}
	}

	/* Likewise for the helper sections' symbols. */
	for (s = pack->helper_sections; s < pack->helper_sections_end; s++) {
		symp = bsearch(&s->symbol, sym_arr, size, sizeof(*sym_arr),
			       compare_symbolp_labels);
		if (symp != NULL) {
			if ((*symp)->name == NULL)
				(*symp)->name = s->symbol->name;
			s->symbol = *symp;
		}
	}

	vfree(sym_arr);
	return OK;
}
1230 * Initialize the ksplice_symbol structures in the given array using
1231 * the kallsyms and exported symbol tables.
1233 static abort_t init_symbol_array(struct ksplice_pack *pack,
1234 struct ksplice_symbol *start,
1235 struct ksplice_symbol *end)
1237 struct ksplice_symbol *sym, **sym_arr, **symp;
1238 struct ksplice_lookup lookup;
1239 size_t size = end - start;
1240 abort_t ret;
1242 if (size == 0)
1243 return OK;
1245 for (sym = start; sym < end; sym++) {
1246 if (starts_with(sym->label, "__ksymtab")) {
1247 const struct kernel_symbol *ksym;
1248 const char *colon = strchr(sym->label, ':');
1249 const char *name = colon + 1;
1250 if (colon == NULL)
1251 continue;
1252 ksym = find_symbol(name, NULL, NULL, true, false);
1253 if (ksym == NULL) {
1254 ksdebug(pack, "Could not find kernel_symbol "
1255 "structure for %s\n", name);
1256 continue;
1258 sym->value = (unsigned long)ksym;
1259 sym->vals = NULL;
1260 continue;
1263 sym->vals = kmalloc(sizeof(*sym->vals), GFP_KERNEL);
1264 if (sym->vals == NULL)
1265 return OUT_OF_MEMORY;
1266 INIT_LIST_HEAD(sym->vals);
1267 sym->value = 0;
1270 sym_arr = vmalloc(sizeof(*sym_arr) * size);
1271 if (sym_arr == NULL)
1272 return OUT_OF_MEMORY;
1274 for (symp = sym_arr, sym = start; symp < sym_arr + size && sym < end;
1275 sym++, symp++)
1276 *symp = sym;
1278 sort(sym_arr, size, sizeof(*sym_arr), compare_symbolp_names, NULL);
1280 lookup.pack = pack;
1281 lookup.arr = sym_arr;
1282 lookup.size = size;
1283 lookup.ret = OK;
1285 each_symbol(add_export_values, &lookup);
1286 ret = lookup.ret;
1287 #ifdef CONFIG_KALLSYMS
1288 if (ret == OK)
1289 ret = (__force abort_t)
1290 kallsyms_on_each_symbol(add_kallsyms_values, &lookup);
1291 #endif /* CONFIG_KALLSYMS */
1292 vfree(sym_arr);
1293 return ret;
/* Prepare the pack's ksplice_symbol structures for run-pre matching:
 * first unify helper/primary symbol copies, then seed candidate values
 * for the helper symbols and the primary symbols in turn. */
static abort_t init_symbol_arrays(struct ksplice_pack *pack)
{
	abort_t ret;

	ret = uniquify_symbols(pack);
	if (ret != OK)
		return ret;

	ret = init_symbol_array(pack, pack->helper_symbols,
				pack->helper_symbols_end);
	if (ret != OK)
		return ret;

	ret = init_symbol_array(pack, pack->primary_symbols,
				pack->primary_symbols_end);
	if (ret != OK)
		return ret;

	return OK;
}
/*
 * Run-pre match the pack's sections and finalize its patches.
 *
 * First pass excludes .data sections.  If that pass cannot match
 * everything (NO_MATCH), the pack is finalized early and a second
 * matching pass is attempted with .data sections included -- see the
 * inline comment for why finalize must happen before the second pass.
 */
static abort_t prepare_pack(struct ksplice_pack *pack)
{
	abort_t ret;

	ksdebug(pack, "Preparing and checking %s\n", pack->name);
	ret = match_pack_sections(pack, false);
	if (ret == NO_MATCH) {
		/* It is possible that by using relocations from .data
		 * sections we can successfully run-pre match the rest
		 * of the sections.  To avoid using any symbols obtained
		 * from .data sections (which may be unreliable) in the
		 * post code, we first prepare the post code and then
		 * try to run-pre match the remaining sections with the
		 * help of .data sections.
		 */
		ksdebug(pack, "Continuing without some sections; we might "
			"find them later.\n");
		ret = finalize_pack(pack);
		if (ret != OK) {
			ksdebug(pack, "Aborted.  Unable to continue without "
				"the unmatched sections.\n");
			return ret;
		}

		ksdebug(pack, "run-pre: Considering .data sections to find "
			"the unmatched sections\n");
		ret = match_pack_sections(pack, true);
		if (ret != OK)
			return ret;

		ksdebug(pack, "run-pre: Found all previously unmatched "
			"sections\n");
		return OK;
	} else if (ret != OK) {
		return ret;
	}

	return finalize_pack(pack);
}
/*
 * Finish preparing the pack for insertion into the kernel.
 * Afterwards, the replacement code should be ready to run and the
 * ksplice_patches should all be ready for trampoline insertion.
 */
static abort_t finalize_pack(struct ksplice_pack *pack)
{
	abort_t ret;
	ret = apply_relocs(pack, pack->primary_relocs,
			   pack->primary_relocs_end);
	if (ret != OK)
		return ret;

	ret = finalize_patches(pack);
	if (ret != OK)
		return ret;

	return OK;
}
/*
 * Validate each patch against the pack's safety records and prepare
 * its trampoline.  Every non-EXPORT patch must fall inside a matched
 * safety record, and the record must cover the whole patch; text
 * patches with no replacement address are redirected to
 * ksplice_deleted().
 */
static abort_t finalize_patches(struct ksplice_pack *pack)
{
	struct ksplice_patch *p;
	struct safety_record *rec;
	abort_t ret;

	for (p = pack->patches; p < pack->patches_end; p++) {
		bool found = false;
		list_for_each_entry(rec, &pack->safety_records, list) {
			if (rec->addr <= p->oldaddr &&
			    p->oldaddr < rec->addr + rec->size) {
				found = true;
				break;
			}
		}
		if (!found && p->type != KSPLICE_PATCH_EXPORT) {
			const struct ksplice_reloc *r = patch_reloc(pack, p);
			if (r == NULL) {
				ksdebug(pack, "A patch with no ksplice_reloc at"
					" its oldaddr has no safety record\n");
				return NO_MATCH;
			}
			ksdebug(pack, "No safety record for patch with oldaddr "
				"%s+%lx\n", r->symbol->label, r->target_addend);
			return NO_MATCH;
		}
		if (p->type == KSPLICE_PATCH_TEXT) {
			ret = prepare_trampoline(pack, p);
			if (ret != OK)
				return ret;
		}
		/* NOTE: when found is true, rec still points at the record
		 * that satisfied the break above -- the size check relies
		 * on that. */
		if (found && rec->addr + rec->size < p->oldaddr + p->size) {
			ksdebug(pack, "Safety record %s is too short for "
				"patch\n", rec->label);
			return UNEXPECTED;
		}
		if (p->type == KSPLICE_PATCH_TEXT) {
			/* No replacement: route callers to the loud
			 * "deleted function" trap. */
			if (p->repladdr == 0)
				p->repladdr = (unsigned long)ksplice_deleted;
		}
	}
	return OK;
}
/* Create a writable alias (p->vaddr) for every patch site in the
 * update, via map_writable().  Must run before stop_machine since
 * vmap cannot be called from inside it.  On failure, already-created
 * mappings are torn down. */
static abort_t map_trampoline_pages(struct update *update)
{
	struct ksplice_pack *pack;
	list_for_each_entry(pack, &update->packs, list) {
		struct ksplice_patch *p;
		for (p = pack->patches; p < pack->patches_end; p++) {
			p->vaddr = map_writable((void *)p->oldaddr, p->size);
			if (p->vaddr == NULL) {
				ksdebug(pack, "Unable to map oldaddr "
					"read/write\n");
				unmap_trampoline_pages(update);
				return UNEXPECTED;
			}
		}
	}
	return OK;
}
/* Release the writable aliases created by map_trampoline_pages().
 * vaddr may point into the middle of a page (map_writable returns
 * base + offset), so mask back down to the page boundary for vunmap. */
static void unmap_trampoline_pages(struct update *update)
{
	struct ksplice_pack *pack;
	list_for_each_entry(pack, &update->packs, list) {
		struct ksplice_patch *p;
		for (p = pack->patches; p < pack->patches_end; p++) {
			vunmap((void *)((unsigned long)p->vaddr & PAGE_MASK));
			p->vaddr = NULL;
		}
	}
}
1455 * map_writable creates a shadow page mapping of the range
1456 * [addr, addr + len) so that we can write to code mapped read-only.
1458 * It is similar to a generalized version of x86's text_poke. But
1459 * because one cannot use vmalloc/vfree() inside stop_machine, we use
1460 * map_writable to map the pages before stop_machine, then use the
1461 * mapping inside stop_machine, and unmap the pages afterwards.
1463 static void *map_writable(void *addr, size_t len)
1465 void *vaddr;
1466 int nr_pages = DIV_ROUND_UP(offset_in_page(addr) + len, PAGE_SIZE);
1467 struct page **pages = kmalloc(nr_pages * sizeof(*pages), GFP_KERNEL);
1468 void *page_addr = (void *)((unsigned long)addr & PAGE_MASK);
1469 int i;
1471 if (pages == NULL)
1472 return NULL;
1474 for (i = 0; i < nr_pages; i++) {
1475 if (__module_text_address((unsigned long)page_addr) == NULL &&
1476 __module_data_address((unsigned long)page_addr) == NULL) {
1477 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) || !defined(CONFIG_X86_64)
1478 pages[i] = virt_to_page(page_addr);
1479 #else /* LINUX_VERSION_CODE < && CONFIG_X86_64 */
1480 /* e3ebadd95cb621e2c7436f3d3646447ac9d5c16d was after 2.6.21 */
1481 pages[i] =
1482 pfn_to_page(__pa_symbol(page_addr) >> PAGE_SHIFT);
1483 #endif /* LINUX_VERSION_CODE || !CONFIG_X86_64 */
1484 WARN_ON(!PageReserved(pages[i]));
1485 } else {
1486 pages[i] = vmalloc_to_page(addr);
1488 if (pages[i] == NULL) {
1489 kfree(pages);
1490 return NULL;
1492 page_addr += PAGE_SIZE;
1494 vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
1495 kfree(pages);
1496 if (vaddr == NULL)
1497 return NULL;
1498 return vaddr + offset_in_page(addr);
/*
 * Ksplice adds a dependency on any symbol address used to resolve
 * relocations in the primary module.
 *
 * Be careful to follow_trampolines so that we always depend on the
 * latest version of the target function, since that's the code that
 * will run if we call addr.
 */
static abort_t add_dependency_on_address(struct ksplice_pack *pack,
					 unsigned long addr)
{
	struct ksplice_pack *p;
	struct module *m =
	    __module_text_address(follow_trampolines(pack, addr));
	/* Address in vmlinux: the core kernel can't be unloaded, so no
	 * dependency is needed. */
	if (m == NULL)
		return OK;
	/* No dependency on our own update's primary modules either. */
	list_for_each_entry(p, &pack->update->packs, list) {
		if (m == p->primary)
			return OK;
	}
	if (use_module(pack->primary, m) != 1)
		return MODULE_BUSY;
	return OK;
}
1526 static abort_t apply_relocs(struct ksplice_pack *pack,
1527 const struct ksplice_reloc *relocs,
1528 const struct ksplice_reloc *relocs_end)
1530 const struct ksplice_reloc *r;
1531 for (r = relocs; r < relocs_end; r++) {
1532 abort_t ret = apply_reloc(pack, r);
1533 if (ret != OK)
1534 return ret;
1536 return OK;
/* Dispatch a single relocation to the handler for its howto type:
 * ordinary/patch relocations vs. __DATE__/__TIME__ fixups.  Unknown
 * types abort the update. */
static abort_t apply_reloc(struct ksplice_pack *pack,
			   const struct ksplice_reloc *r)
{
	switch (r->howto->type) {
	case KSPLICE_HOWTO_RELOC:
	case KSPLICE_HOWTO_RELOC_PATCH:
		return apply_howto_reloc(pack, r);
	case KSPLICE_HOWTO_DATE:
	case KSPLICE_HOWTO_TIME:
		return apply_howto_date(pack, r);
	default:
		ksdebug(pack, "Unexpected howto type %d\n", r->howto->type);
		return UNEXPECTED;
	}
}
/*
 * Applies a relocation.  Aborts if the symbol referenced in it has
 * not been uniquely resolved.
 *
 * Flow: verify the canary placeholder is still present at blank_addr
 * (skipping sites already rewritten, e.g. by altinstructions), resolve
 * the symbol to exactly one candidate address, write the value
 * (PC-relative if howto->pcrel), then record the resolution as a
 * labelval and add a module dependency on the resolved address.
 */
static abort_t apply_howto_reloc(struct ksplice_pack *pack,
				 const struct ksplice_reloc *r)
{
	abort_t ret;
	int canary_ret;
	unsigned long sym_addr;
	LIST_HEAD(vals);

	canary_ret = contains_canary(pack, r->blank_addr, r->howto);
	if (canary_ret < 0)
		return UNEXPECTED;
	if (canary_ret == 0) {
		/* Canary gone: this site was already patched over
		 * (altinstr); nothing to do. */
		ksdebug(pack, "reloc: skipped %lx to %s+%lx (altinstr)\n",
			r->blank_addr, r->symbol->label, r->target_addend);
		return OK;
	}

#ifdef KSPLICE_STANDALONE
	if (!bootstrapped) {
		ret = add_system_map_candidates(pack,
						pack->primary_system_map,
						pack->primary_system_map_end,
						r->symbol->label, &vals);
		if (ret != OK) {
			release_vals(&vals);
			return ret;
		}
	}
#endif /* KSPLICE_STANDALONE */
	ret = lookup_symbol(pack, r->symbol, &vals);
	if (ret != OK) {
		release_vals(&vals);
		return ret;
	}
	/*
	 * Relocations for the oldaddr fields of patches must have
	 * been resolved via run-pre matching.
	 */
	if (!singular(&vals) || (r->symbol->vals != NULL &&
				 r->howto->type == KSPLICE_HOWTO_RELOC_PATCH)) {
		release_vals(&vals);
		ksdebug(pack, "Failed to find %s for reloc\n",
			r->symbol->label);
		return FAILED_TO_FIND;
	}
	sym_addr = list_entry(vals.next, struct candidate_val, list)->val;
	release_vals(&vals);

	ret = write_reloc_value(pack, r, r->blank_addr,
				r->howto->pcrel ? sym_addr - r->blank_addr :
				sym_addr);
	if (ret != OK)
		return ret;

	/* Debug dump of the bytes just written, width per howto->size. */
	ksdebug(pack, "reloc: %lx to %s+%lx (S=%lx ", r->blank_addr,
		r->symbol->label, r->target_addend, sym_addr);
	switch (r->howto->size) {
	case 1:
		ksdebug(pack, "aft=%02x)\n", *(uint8_t *)r->blank_addr);
		break;
	case 2:
		ksdebug(pack, "aft=%04x)\n", *(uint16_t *)r->blank_addr);
		break;
	case 4:
		ksdebug(pack, "aft=%08x)\n", *(uint32_t *)r->blank_addr);
		break;
#if BITS_PER_LONG >= 64
	case 8:
		ksdebug(pack, "aft=%016llx)\n", *(uint64_t *)r->blank_addr);
		break;
#endif /* BITS_PER_LONG */
	default:
		ksdebug(pack, "Aborted.  Invalid relocation size.\n");
		return UNEXPECTED;
	}
#ifdef KSPLICE_STANDALONE
	if (!bootstrapped)
		return OK;
#endif /* KSPLICE_STANDALONE */

	/*
	 * Create labelvals so that we can verify our choices in the
	 * second round of run-pre matching that considers data sections.
	 */
	ret = create_labelval(pack, r->symbol, sym_addr, VAL);
	if (ret != OK)
		return ret;

	return add_dependency_on_address(pack, sym_addr);
}
/*
 * Date relocations are created wherever __DATE__ or __TIME__ is used
 * in the kernel; we resolve them by simply copying in the date/time
 * obtained from run-pre matching the relevant compilation unit.
 * A non-NULL vals list means run-pre matching never pinned the symbol
 * down, so there is nothing trustworthy to copy.
 */
static abort_t apply_howto_date(struct ksplice_pack *pack,
				const struct ksplice_reloc *r)
{
	if (r->symbol->vals != NULL) {
		ksdebug(pack, "Failed to find %s for date\n", r->symbol->label);
		return FAILED_TO_FIND;
	}
	memcpy((unsigned char *)r->blank_addr,
	       (const unsigned char *)r->symbol->value, r->howto->size);
	return OK;
}
/*
 * Given a relocation and its run address, compute the address of the
 * symbol the relocation referenced, and store it in *valp.
 *
 * This inverts write_reloc_value: read the stored field (through
 * probe_kernel_read so a bad address yields NO_MATCH instead of an
 * oops), mask out the relocation field, sign-extend if the howto says
 * the addend is signed, undo the right shift, and subtract the insn
 * and target addends.
 */
static abort_t read_reloc_value(struct ksplice_pack *pack,
				const struct ksplice_reloc *r,
				unsigned long addr, unsigned long *valp)
{
	unsigned char bytes[sizeof(long)];
	unsigned long val;
	const struct ksplice_reloc_howto *howto = r->howto;

	if (howto->size <= 0 || howto->size > sizeof(long)) {
		ksdebug(pack, "Aborted.  Invalid relocation size.\n");
		return UNEXPECTED;
	}

	if (probe_kernel_read(bytes, (void *)addr, howto->size) == -EFAULT)
		return NO_MATCH;

	switch (howto->size) {
	case 1:
		val = *(uint8_t *)bytes;
		break;
	case 2:
		val = *(uint16_t *)bytes;
		break;
	case 4:
		val = *(uint32_t *)bytes;
		break;
#if BITS_PER_LONG >= 64
	case 8:
		val = *(uint64_t *)bytes;
		break;
#endif /* BITS_PER_LONG */
	default:
		ksdebug(pack, "Aborted.  Invalid relocation size.\n");
		return UNEXPECTED;
	}

	val &= howto->dst_mask;
	if (howto->signed_addend)
		/* Sign-extend: replicate the top bit of the masked field. */
		val |= -(val & (howto->dst_mask & ~(howto->dst_mask >> 1)));
	val <<= howto->rightshift;
	val -= r->insn_addend + r->target_addend;
	*valp = val;
	return OK;
}
/*
 * Given a relocation, the address of its storage unit, and the
 * address of the symbol the relocation references, write the
 * relocation's final value into the storage unit.
 *
 * Only the bits selected by howto->dst_mask are modified; the rest of
 * the storage unit is preserved.  A read-back through read_reloc_value
 * verifies the value survived the mask/shift (i.e. no overflow).
 */
static abort_t write_reloc_value(struct ksplice_pack *pack,
				 const struct ksplice_reloc *r,
				 unsigned long addr, unsigned long sym_addr)
{
	unsigned long val = sym_addr + r->target_addend + r->insn_addend;
	const struct ksplice_reloc_howto *howto = r->howto;
	val >>= howto->rightshift;
	switch (howto->size) {
	case 1:
		*(uint8_t *)addr = (*(uint8_t *)addr & ~howto->dst_mask) |
		    (val & howto->dst_mask);
		break;
	case 2:
		*(uint16_t *)addr = (*(uint16_t *)addr & ~howto->dst_mask) |
		    (val & howto->dst_mask);
		break;
	case 4:
		*(uint32_t *)addr = (*(uint32_t *)addr & ~howto->dst_mask) |
		    (val & howto->dst_mask);
		break;
#if BITS_PER_LONG >= 64
	case 8:
		*(uint64_t *)addr = (*(uint64_t *)addr & ~howto->dst_mask) |
		    (val & howto->dst_mask);
		break;
#endif /* BITS_PER_LONG */
	default:
		ksdebug(pack, "Aborted.  Invalid relocation size.\n");
		return UNEXPECTED;
	}

	/* Round-trip check: if decoding doesn't give sym_addr back, the
	 * value didn't fit in the relocation field. */
	if (read_reloc_value(pack, r, addr, &val) != OK || val != sym_addr) {
		ksdebug(pack, "Aborted.  Relocation overflow.\n");
		return UNEXPECTED;
	}

	return OK;
}
/*
 * Allocate a ksplice_module_list_entry describing @pack and add it to
 * the update's list.  @to_be_applied distinguishes packs that will be
 * applied from packs whose target is absent (partial updates).
 * Name strings are duplicated so the entry can outlive the pack.
 */
static abort_t create_module_list_entry(struct ksplice_pack *pack,
					bool to_be_applied)
{
	struct ksplice_module_list_entry *entry =
	    kmalloc(sizeof(*entry), GFP_KERNEL);
	if (entry == NULL)
		return OUT_OF_MEMORY;
	entry->primary_name = kstrdup(pack->primary->name, GFP_KERNEL);
	if (entry->primary_name == NULL) {
		kfree(entry);
		return OUT_OF_MEMORY;
	}
	entry->target_name = kstrdup(pack->target_name, GFP_KERNEL);
	if (entry->target_name == NULL) {
		kfree(entry->primary_name);
		kfree(entry);
		return OUT_OF_MEMORY;
	}
	/* The update's kid is guaranteed to outlast the module_list_entry */
	entry->kid = pack->update->kid;
	entry->applied = to_be_applied;
	list_add(&entry->update_list, &pack->update->ksplice_module_list);
	return OK;
}
/* Free all module-list entries of @update, including the name strings
 * duplicated by create_module_list_entry (entry->kid is borrowed from
 * the update and is not freed here). */
static void cleanup_module_list_entries(struct update *update)
{
	struct ksplice_module_list_entry *entry;
	list_for_each_entry(entry, &update->ksplice_module_list, update_list) {
		kfree(entry->target_name);
		kfree(entry->primary_name);
	}
	clear_list(&update->ksplice_module_list,
		   struct ksplice_module_list_entry, update_list);
}
/* Replacement address used for functions deleted by the patch.
 * Any call into deleted code lands here and panics loudly instead of
 * executing stale bytes. */
static void __attribute__((noreturn)) ksplice_deleted(void)
{
	printk(KERN_CRIT "Called a kernel function deleted by Ksplice!\n");
	BUG();
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
/* 91768d6c2bad0d2766a166f13f2f57e197de3458 was after 2.6.19 */
	/* Old BUG() wasn't marked noreturn; loop forever to satisfy the
	 * noreturn attribute. */
	for (;;);
#endif
}
/* Floodfill to run-pre match the sections within a pack.
 *
 * Repeatedly sweeps the helper sections, matching whatever can be
 * matched given the symbols resolved so far; each match can unlock
 * further matches (hence the fixpoint loop).  Only non-data,
 * non-string sections count toward @remaining; data sections are
 * attempted only when @consider_data_sections is set and don't affect
 * the completion count.  If a full sweep makes no progress, the
 * still-unmatched sections are logged and NO_MATCH is returned.
 */
static abort_t match_pack_sections(struct ksplice_pack *pack,
				   bool consider_data_sections)
{
	struct ksplice_section *sect;
	abort_t ret;
	int remaining = 0;
	bool progress;

	for (sect = pack->helper_sections; sect < pack->helper_sections_end;
	     sect++) {
		if ((sect->flags & KSPLICE_SECTION_DATA) == 0 &&
		    (sect->flags & KSPLICE_SECTION_STRING) == 0 &&
		    (sect->flags & KSPLICE_SECTION_MATCHED) == 0)
			remaining++;
	}

	while (remaining > 0) {
		progress = false;
		for (sect = pack->helper_sections;
		     sect < pack->helper_sections_end; sect++) {
			if ((sect->flags & KSPLICE_SECTION_MATCHED) != 0)
				continue;
			if ((!consider_data_sections &&
			     (sect->flags & KSPLICE_SECTION_DATA) != 0) ||
			    (sect->flags & KSPLICE_SECTION_STRING) != 0)
				continue;
			ret = find_section(pack, sect);
			if (ret == OK) {
				sect->flags |= KSPLICE_SECTION_MATCHED;
				if ((sect->flags & KSPLICE_SECTION_DATA) == 0)
					remaining--;
				progress = true;
			} else if (ret != NO_MATCH) {
				return ret;
			}
		}

		if (progress)
			continue;

		/* Fixpoint reached with sections left over: report them. */
		for (sect = pack->helper_sections;
		     sect < pack->helper_sections_end; sect++) {
			if ((sect->flags & KSPLICE_SECTION_MATCHED) != 0 ||
			    (sect->flags & KSPLICE_SECTION_STRING) != 0)
				continue;
			ksdebug(pack, "run-pre: could not match %s "
				"section %s\n",
				(sect->flags & KSPLICE_SECTION_DATA) != 0 ?
				"data" :
				(sect->flags & KSPLICE_SECTION_RODATA) != 0 ?
				"rodata" : "text", sect->symbol->label);
		}
		ksdebug(pack, "Aborted.  run-pre: could not match some "
			"sections.\n");
		return NO_MATCH;
	}
	return OK;
}
/*
 * Search for the section in the running kernel.  Returns OK if and
 * only if it finds precisely one address in the kernel matching the
 * section.
 *
 * Candidate addresses come from the symbol lookup (and, in standalone
 * builds, the System.map); each is tried with a cheap RUN_PRE_INITIAL
 * pass that prunes non-matches.  Only if exactly one candidate
 * survives is the full RUN_PRE_FINAL pass run, which also produces the
 * section's safety records.
 */
static abort_t find_section(struct ksplice_pack *pack,
			    struct ksplice_section *sect)
{
	int i;
	abort_t ret;
	unsigned long run_addr;
	LIST_HEAD(vals);
	struct candidate_val *v, *n;

#ifdef KSPLICE_STANDALONE
	ret = add_system_map_candidates(pack, pack->helper_system_map,
					pack->helper_system_map_end,
					sect->symbol->label, &vals);
	if (ret != OK) {
		release_vals(&vals);
		return ret;
	}
#endif /* KSPLICE_STANDALONE */
	ret = lookup_symbol(pack, sect->symbol, &vals);
	if (ret != OK) {
		release_vals(&vals);
		return ret;
	}

	ksdebug(pack, "run-pre: starting sect search for %s\n",
		sect->symbol->label);

	/* Initial pass: drop candidates that clearly don't match.  _safe
	 * iteration because non-matching entries are deleted in place. */
	list_for_each_entry_safe(v, n, &vals, list) {
		run_addr = v->val;

		yield();
		ret = try_addr(pack, sect, run_addr, NULL, RUN_PRE_INITIAL);
		if (ret == NO_MATCH) {
			list_del(&v->list);
			kfree(v);
		} else if (ret != OK) {
			release_vals(&vals);
			return ret;
		}
	}

#if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
	/* No kallsyms to consult: fall back to scanning kernel text. */
	if (list_empty(&vals) && (sect->flags & KSPLICE_SECTION_DATA) == 0) {
		ret = brute_search_all(pack, sect, &vals);
		if (ret != OK) {
			release_vals(&vals);
			return ret;
		}
		/*
		 * Make sure run-pre matching output is displayed if
		 * brute_search succeeds.
		 */
		if (singular(&vals)) {
			run_addr = list_entry(vals.next, struct candidate_val,
					      list)->val;
			ret = try_addr(pack, sect, run_addr, NULL,
				       RUN_PRE_INITIAL);
			if (ret != OK) {
				ksdebug(pack, "run-pre: Debug run failed for "
					"sect %s:\n", sect->symbol->label);
				release_vals(&vals);
				return ret;
			}
		}
	}
#endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */

	if (singular(&vals)) {
		/* Exactly one candidate: do the final, record-producing
		 * pass and splice its safety records into the pack. */
		LIST_HEAD(safety_records);
		run_addr = list_entry(vals.next, struct candidate_val,
				      list)->val;
		ret = try_addr(pack, sect, run_addr, &safety_records,
			       RUN_PRE_FINAL);
		release_vals(&vals);
		if (ret != OK) {
			clear_list(&safety_records, struct safety_record, list);
			ksdebug(pack, "run-pre: Final run failed for sect "
				"%s:\n", sect->symbol->label);
		} else {
			list_splice(&safety_records, &pack->safety_records);
		}
		return ret;
	} else if (!list_empty(&vals)) {
		/* Ambiguous: log up to five candidates and give up. */
		struct candidate_val *val;
		ksdebug(pack, "run-pre: multiple candidates for sect %s:\n",
			sect->symbol->label);
		i = 0;
		list_for_each_entry(val, &vals, list) {
			i++;
			ksdebug(pack, "%lx\n", val->val);
			if (i > 5) {
				ksdebug(pack, "...\n");
				break;
			}
		}
		release_vals(&vals);
		return NO_MATCH;
	}
	release_vals(&vals);
	return NO_MATCH;
}
/*
 * try_addr is the interface to run-pre matching.  Its primary
 * purpose is to manage debugging information for run-pre matching;
 * all the hard work is in run_pre_cmp.
 *
 * Rejects addresses inside the pack's own primary module or inside
 * modules the pack does not patch, records a tentative labelval for
 * the symbol, runs the comparison, and on a non-final non-match
 * optionally reruns it in RUN_PRE_DEBUG mode for verbose output.
 * Temporary labelvals are committed (VAL) only on a successful
 * RUN_PRE_FINAL pass, and discarded (NOVAL) otherwise.
 */
static abort_t try_addr(struct ksplice_pack *pack,
			struct ksplice_section *sect,
			unsigned long run_addr,
			struct list_head *safety_records,
			enum run_pre_mode mode)
{
	abort_t ret;
	const struct module *run_module;

	if ((sect->flags & KSPLICE_SECTION_RODATA) != 0 ||
	    (sect->flags & KSPLICE_SECTION_DATA) != 0)
		run_module = __module_data_address(run_addr);
	else
		run_module = __module_text_address(run_addr);
	if (run_module == pack->primary) {
		ksdebug(pack, "run-pre: unexpected address %lx in primary "
			"module %s for sect %s\n", run_addr, run_module->name,
			sect->symbol->label);
		return UNEXPECTED;
	}
	if (!patches_module(run_module, pack->target)) {
		ksdebug(pack, "run-pre: ignoring address %lx in other module "
			"%s for sect %s\n", run_addr, run_module == NULL ?
			"vmlinux" : run_module->name, sect->symbol->label);
		return NO_MATCH;
	}

	ret = create_labelval(pack, sect->symbol, run_addr, TEMP);
	if (ret != OK)
		return ret;

#ifdef CONFIG_FUNCTION_DATA_SECTIONS
	ret = run_pre_cmp(pack, sect, run_addr, safety_records, mode);
#else /* !CONFIG_FUNCTION_DATA_SECTIONS */
	/* Without -ffunction-sections, text sections need the
	 * architecture-aware comparator. */
	if ((sect->flags & KSPLICE_SECTION_TEXT) != 0)
		ret = arch_run_pre_cmp(pack, sect, run_addr, safety_records,
				       mode);
	else
		ret = run_pre_cmp(pack, sect, run_addr, safety_records, mode);
#endif /* CONFIG_FUNCTION_DATA_SECTIONS */
	if (ret == NO_MATCH && mode != RUN_PRE_FINAL) {
		set_temp_labelvals(pack, NOVAL);
		ksdebug(pack, "run-pre: %s sect %s does not match (r_a=%lx "
			"p_a=%lx s=%lx)\n",
			(sect->flags & KSPLICE_SECTION_RODATA) != 0 ? "rodata" :
			(sect->flags & KSPLICE_SECTION_DATA) != 0 ? "data" :
			"text", sect->symbol->label, run_addr, sect->address,
			sect->size);
		ksdebug(pack, "run-pre: ");
		if (pack->update->debug >= 1) {
			/* Rerun in debug mode purely for the verbose dump. */
#ifdef CONFIG_FUNCTION_DATA_SECTIONS
			ret = run_pre_cmp(pack, sect, run_addr, safety_records,
					  RUN_PRE_DEBUG);
#else /* !CONFIG_FUNCTION_DATA_SECTIONS */
			if ((sect->flags & KSPLICE_SECTION_TEXT) != 0)
				ret = arch_run_pre_cmp(pack, sect, run_addr,
						       safety_records,
						       RUN_PRE_DEBUG);
			else
				ret = run_pre_cmp(pack, sect, run_addr,
						  safety_records,
						  RUN_PRE_DEBUG);
#endif /* CONFIG_FUNCTION_DATA_SECTIONS */
			set_temp_labelvals(pack, NOVAL);
		}
		ksdebug(pack, "\n");
		return ret;
	} else if (ret != OK) {
		set_temp_labelvals(pack, NOVAL);
		return ret;
	}

	if (mode != RUN_PRE_FINAL) {
		set_temp_labelvals(pack, NOVAL);
		ksdebug(pack, "run-pre: candidate for sect %s=%lx\n",
			sect->symbol->label, run_addr);
		return OK;
	}

	set_temp_labelvals(pack, VAL);
	ksdebug(pack, "run-pre: found sect %s=%lx\n", sect->symbol->label,
		run_addr);
	return OK;
}
/*
 * run_pre_cmp is the primary run-pre matching function; it determines
 * whether the given ksplice_section matches the code or data in the
 * running kernel starting at run_addr.
 *
 * If run_pre_mode is RUN_PRE_FINAL, a safety record for the matched
 * section is created.
 *
 * The run_pre_mode is also used to determine what debugging
 * information to display.
 */
static abort_t run_pre_cmp(struct ksplice_pack *pack,
			   const struct ksplice_section *sect,
			   unsigned long run_addr,
			   struct list_head *safety_records,
			   enum run_pre_mode mode)
{
	int matched = 0;
	abort_t ret;
	const struct ksplice_reloc *r, *finger;
	const unsigned char *pre, *run, *pre_start, *run_start;
	unsigned char runval;

	/* "pre" walks the helper (pre) copy of the section; "run" walks the
	 * corresponding bytes of the running kernel in lockstep. */
	pre_start = (const unsigned char *)sect->address;
	run_start = (const unsigned char *)run_addr;

	finger = init_reloc_search(pack, sect);

	pre = pre_start;
	run = run_start;
	while (pre < pre_start + sect->size) {
		unsigned long offset = pre - pre_start;
		/* If a helper relocation covers this pre address, compare
		 * via the relocation handler and skip its whole span. */
		ret = lookup_reloc(pack, &finger, (unsigned long)pre, &r);
		if (ret == OK) {
			ret = handle_reloc(pack, sect, r, (unsigned long)run,
					   mode);
			if (ret != OK) {
				if (mode == RUN_PRE_INITIAL)
					ksdebug(pack, "reloc in sect does not "
						"match after %lx/%lx bytes\n",
						offset, sect->size);
				return ret;
			}
			if (mode == RUN_PRE_DEBUG)
				print_bytes(pack, run, r->howto->size, pre,
					    r->howto->size);
			pre += r->howto->size;
			run += r->howto->size;
			finger++;
			continue;
		} else if (ret != NO_MATCH) {
			return ret;
		}

		/* Text may differ due to paravirt patching; let the paravirt
		 * handler declare a span of bytes equivalent. */
		if ((sect->flags & KSPLICE_SECTION_TEXT) != 0) {
			ret = handle_paravirt(pack, (unsigned long)pre,
					      (unsigned long)run, &matched);
			if (ret != OK)
				return ret;
			if (matched != 0) {
				if (mode == RUN_PRE_DEBUG)
					print_bytes(pack, run, matched, pre,
						    matched);
				pre += matched;
				run += matched;
				continue;
			}
		}

		/* Read the run byte safely; an unmapped page means this
		 * candidate address cannot be the section. */
		if (probe_kernel_read(&runval, (void *)run, 1) == -EFAULT) {
			if (mode == RUN_PRE_INITIAL)
				ksdebug(pack, "sect unmapped after %lx/%lx "
					"bytes\n", offset, sect->size);
			return NO_MATCH;
		}

		/* Byte mismatches are fatal only for non-data sections; data
		 * may legitimately have changed at runtime. */
		if (runval != *pre &&
		    (sect->flags & KSPLICE_SECTION_DATA) == 0) {
			if (mode == RUN_PRE_INITIAL)
				ksdebug(pack, "sect does not match after "
					"%lx/%lx bytes\n", offset, sect->size);
			if (mode == RUN_PRE_DEBUG) {
				print_bytes(pack, run, 1, pre, 1);
				ksdebug(pack, "[p_o=%lx] ! ", offset);
				print_bytes(pack, run + 1, 2, pre + 1, 2);
			}
			return NO_MATCH;
		}
		if (mode == RUN_PRE_DEBUG)
			print_bytes(pack, run, 1, pre, 1);
		pre++;
		run++;
	}
	return create_safety_record(pack, sect, safety_records, run_addr,
				    run - run_start);
}
/*
 * Debug helper: dump runc bytes of run code against prec bytes of pre
 * code.  Paired bytes are printed once when equal and as "run/pre" when
 * they differ; unpaired leftovers keep the slash on the missing side.
 */
static void print_bytes(struct ksplice_pack *pack,
			const unsigned char *run, int runc,
			const unsigned char *pre, int prec)
{
	int i;
	int common = min(runc, prec);

	for (i = 0; i < common; i++) {
		if (run[i] != pre[i])
			ksdebug(pack, "%02x/%02x ", run[i], pre[i]);
		else
			ksdebug(pack, "%02x ", run[i]);
	}

	for (i = common; i < runc; i++)
		ksdebug(pack, "%02x/ ", run[i]);
	for (i = common; i < prec; i++)
		ksdebug(pack, "/%02x ", pre[i]);
}
2179 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
/*
 * Fallback matcher used when kallsyms is unavailable: scan [start,
 * start+len) byte by byte and run full run-pre matching at every
 * address whose first byte matches the section's first pre byte.
 * Matching addresses are appended to vals as candidates.
 */
static abort_t brute_search(struct ksplice_pack *pack,
			    struct ksplice_section *sect,
			    const void *start, unsigned long len,
			    struct list_head *vals)
{
	unsigned long addr;
	char run, pre;
	abort_t ret;

	for (addr = (unsigned long)start; addr < (unsigned long)start + len;
	     addr++) {
		/* This loop can cover megabytes; yield periodically so we
		 * don't hog the CPU. */
		if (addr % 100000 == 0)
			yield();

		/* An unreadable byte ends the usable part of this region;
		 * treat it as "no more candidates here", not an error. */
		if (probe_kernel_read(&run, (void *)addr, 1) == -EFAULT)
			return OK;

		pre = *(const unsigned char *)(sect->address);

		/* Cheap first-byte filter before the expensive try_addr. */
		if (run != pre)
			continue;

		ret = try_addr(pack, sect, addr, NULL, RUN_PRE_INITIAL);
		if (ret == OK) {
			ret = add_candidate_val(pack, vals, addr);
			if (ret != OK)
				return ret;
		} else if (ret != NO_MATCH) {
			return ret;
		}
	}

	return OK;
}
/*
 * Brute-search every plausible code region for sect: the core and init
 * areas of each loaded module that this pack could be patching, and
 * the kernel's own text.  Debug output is suppressed for the duration
 * since brute_search calls try_addr at huge numbers of addresses.
 */
static abort_t brute_search_all(struct ksplice_pack *pack,
				struct ksplice_section *sect,
				struct list_head *vals)
{
	struct module *m;
	abort_t ret = OK;
	int saved_debug;

	ksdebug(pack, "brute_search: searching for %s\n", sect->symbol->label);
	/* Temporarily silence debugging; restored at "out" on all paths. */
	saved_debug = pack->update->debug;
	pack->update->debug = 0;

	list_for_each_entry(m, &modules, list) {
		/* Skip modules this pack doesn't patch, and never search our
		 * own primary (new code) module. */
		if (!patches_module(m, pack->target) || m == pack->primary)
			continue;
		ret = brute_search(pack, sect, m->module_core, m->core_size,
				   vals);
		if (ret != OK)
			goto out;
		ret = brute_search(pack, sect, m->module_init, m->init_size,
				   vals);
		if (ret != OK)
			goto out;
	}

	/* Finally search the core kernel text. */
	ret = brute_search(pack, sect, (const void *)init_mm.start_code,
			   init_mm.end_code - init_mm.start_code, vals);
out:
	pack->update->debug = saved_debug;
	return ret;
}
2247 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
/* Address range used as the bsearch key when locating relocations. */
struct range {
	unsigned long address;	/* start address of the range */
	unsigned long size;	/* length of the range in bytes */
};
2254 static int reloc_bsearch_compare(const void *key, const void *elt)
2256 const struct range *range = key;
2257 const struct ksplice_reloc *r = elt;
2258 if (range->address + range->size <= r->blank_addr)
2259 return -1;
2260 if (range->address > r->blank_addr)
2261 return 1;
2262 return 0;
2265 static const struct ksplice_reloc *find_reloc(const struct ksplice_reloc *start,
2266 const struct ksplice_reloc *end,
2267 unsigned long address,
2268 unsigned long size)
2270 const struct ksplice_reloc *r;
2271 struct range range = { address, size };
2272 r = bsearch((void *)&range, start, end - start, sizeof(*r),
2273 reloc_bsearch_compare);
2274 if (r == NULL)
2275 return NULL;
2276 while (r > start && (r - 1)->blank_addr >= address)
2277 r--;
2278 return r;
2281 static const struct ksplice_reloc *
2282 init_reloc_search(struct ksplice_pack *pack, const struct ksplice_section *sect)
2284 const struct ksplice_reloc *r;
2285 r = find_reloc(pack->helper_relocs, pack->helper_relocs_end,
2286 sect->address, sect->size);
2287 if (r == NULL)
2288 return pack->helper_relocs_end;
2289 return r;
/*
 * lookup_reloc implements an amortized O(1) lookup for the next
 * helper relocation.  It must be called with a strictly increasing
 * sequence of addresses.
 *
 * The fingerp is private data for lookup_reloc, and needs to have
 * been initialized as a pointer to the result of find_reloc (or
 * init_reloc_search).
 *
 * Returns OK with *relocp set when addr is covered by a relocation,
 * NO_MATCH when it is not (or the relocation should be skipped), and
 * UNEXPECTED on internal inconsistencies.
 */
static abort_t lookup_reloc(struct ksplice_pack *pack,
			    const struct ksplice_reloc **fingerp,
			    unsigned long addr,
			    const struct ksplice_reloc **relocp)
{
	const struct ksplice_reloc *r = *fingerp;
	int canary_ret;

	/* Advance the finger past relocations wholly before addr; the
	 * special case keeps a zero-size relocation exactly at addr. */
	while (r < pack->helper_relocs_end &&
	       addr >= r->blank_addr + r->howto->size &&
	       !(addr == r->blank_addr && r->howto->size == 0))
		r++;
	*fingerp = r;
	if (r == pack->helper_relocs_end)
		return NO_MATCH;
	if (addr < r->blank_addr)
		return NO_MATCH;
	*relocp = r;
	/* Non-RELOC howtos (bug/extable/date...) need no canary check. */
	if (r->howto->type != KSPLICE_HOWTO_RELOC)
		return OK;

	canary_ret = contains_canary(pack, r->blank_addr, r->howto);
	if (canary_ret < 0)
		return UNEXPECTED;
	if (canary_ret == 0) {
		/* No canary: the relocation was already overwritten (e.g.
		 * altinstructions); skip it. */
		ksdebug(pack, "run-pre: reloc skipped at p_a=%lx to %s+%lx "
			"(altinstr)\n", r->blank_addr, r->symbol->label,
			r->target_addend);
		return NO_MATCH;
	}
	/* A canary-bearing relocation must be hit exactly at its start. */
	if (addr != r->blank_addr) {
		ksdebug(pack, "Invalid nonzero relocation offset\n");
		return UNEXPECTED;
	}
	return OK;
}
2338 static abort_t handle_reloc(struct ksplice_pack *pack,
2339 const struct ksplice_section *sect,
2340 const struct ksplice_reloc *r,
2341 unsigned long run_addr, enum run_pre_mode mode)
2343 switch (r->howto->type) {
2344 case KSPLICE_HOWTO_RELOC:
2345 return handle_howto_reloc(pack, sect, r, run_addr, mode);
2346 case KSPLICE_HOWTO_DATE:
2347 case KSPLICE_HOWTO_TIME:
2348 return handle_howto_date(pack, sect, r, run_addr, mode);
2349 case KSPLICE_HOWTO_BUG:
2350 return handle_bug(pack, r, run_addr);
2351 case KSPLICE_HOWTO_EXTABLE:
2352 return handle_extable(pack, r, run_addr);
2353 default:
2354 ksdebug(pack, "Unexpected howto type %d\n", r->howto->type);
2355 return UNEXPECTED;
/*
 * For date/time relocations, we check that the sequence of bytes
 * matches the format of a date or time.
 *
 * On a format match the run-time string's address is recorded as a
 * TEMP labelval for the relocation's symbol.
 */
static abort_t handle_howto_date(struct ksplice_pack *pack,
				 const struct ksplice_section *sect,
				 const struct ksplice_reloc *r,
				 unsigned long run_addr, enum run_pre_mode mode)
{
	abort_t ret;
	char *buf = kmalloc(r->howto->size, GFP_KERNEL);

	if (buf == NULL)
		return OUT_OF_MEMORY;
	if (probe_kernel_read(buf, (void *)run_addr, r->howto->size) == -EFAULT) {
		ret = NO_MATCH;
		goto out;
	}

	/* NOTE(review): the format checks below index up to buf[7] (time)
	 * and buf[10] (date); assumes howto->size is at least 8 resp. 11
	 * for these howto types — presumably guaranteed by the generator. */
	switch (r->howto->type) {
	case KSPLICE_HOWTO_TIME:
		/* __TIME__ format: "HH:MM:SS" */
		if (isdigit(buf[0]) && isdigit(buf[1]) && buf[2] == ':' &&
		    isdigit(buf[3]) && isdigit(buf[4]) && buf[5] == ':' &&
		    isdigit(buf[6]) && isdigit(buf[7]))
			ret = OK;
		else
			ret = NO_MATCH;
		break;
	case KSPLICE_HOWTO_DATE:
		/* __DATE__ format: "Mmm dd yyyy" (day may be space-padded) */
		if (isalpha(buf[0]) && isalpha(buf[1]) && isalpha(buf[2]) &&
		    buf[3] == ' ' && (buf[4] == ' ' || isdigit(buf[4])) &&
		    isdigit(buf[5]) && buf[6] == ' ' && isdigit(buf[7]) &&
		    isdigit(buf[8]) && isdigit(buf[9]) && isdigit(buf[10]))
			ret = OK;
		else
			ret = NO_MATCH;
		break;
	default:
		ret = UNEXPECTED;
	}
	if (ret == NO_MATCH && mode == RUN_PRE_INITIAL)
		ksdebug(pack, "%s string: \"%.*s\" does not match format\n",
			r->howto->type == KSPLICE_HOWTO_DATE ? "date" : "time",
			r->howto->size, buf);

	if (ret != OK)
		goto out;
	ret = create_labelval(pack, r->symbol, run_addr, TEMP);
out:
	kfree(buf);
	return ret;
}
/*
 * Extract the value of a symbol used in a relocation in the pre code
 * during run-pre matching, giving an error if it conflicts with a
 * previously found value of that symbol
 */
static abort_t handle_howto_reloc(struct ksplice_pack *pack,
				  const struct ksplice_section *sect,
				  const struct ksplice_reloc *r,
				  unsigned long run_addr,
				  enum run_pre_mode mode)
{
	struct ksplice_section *sym_sect = symbol_section(pack, r->symbol);
	unsigned long offset = r->target_addend;
	unsigned long val;
	abort_t ret;

	/* Decode the value stored in the run code at this relocation. */
	ret = read_reloc_value(pack, r, run_addr, &val);
	if (ret != OK)
		return ret;
	/* PC-relative values are converted to absolute addresses. */
	if (r->howto->pcrel)
		val += run_addr;

#ifdef KSPLICE_STANDALONE
	/* The match_map is only used in KSPLICE_STANDALONE.
	 * match_map[offset] records which run address each byte offset of
	 * the target section has been matched to, so that multiple
	 * relocations into the same section agree with each other. */
	if (sym_sect == NULL || sym_sect->match_map == NULL || offset == 0) {
		;
	} else if (offset < 0 || offset >= sym_sect->size) {
		/* NOTE(review): offset is unsigned long, so "offset < 0" is
		 * always false; only the upper bound is effective here. */
		ksdebug(pack, "Out of range relocation: %s+%lx -> %s+%lx",
			sect->symbol->label, r->blank_addr - sect->address,
			r->symbol->label, offset);
		return NO_MATCH;
	} else if (sect == sym_sect && sect->match_map[offset] == NULL) {
		/* First reference into our own section: record it. */
		sym_sect->match_map[offset] =
		    (const unsigned char *)r->symbol->value + offset;
	} else if (sect == sym_sect && (unsigned long)sect->match_map[offset] ==
		   r->symbol->value + offset) {
		/* Consistent repeat reference: nothing to do. */
		;
	} else if (sect == sym_sect) {
		ksdebug(pack, "Relocations to nonmatching locations within "
			"section %s: %lx does not match %lx\n",
			sect->symbol->label, offset,
			(unsigned long)sect->match_map[offset] -
			r->symbol->value);
		return NO_MATCH;
	} else if ((sym_sect->flags & KSPLICE_SECTION_MATCHED) == 0) {
		/* Cross-section reference into a section that hasn't been
		 * matched yet; retry after that section is matched. */
		if (mode == RUN_PRE_INITIAL)
			ksdebug(pack, "Delaying matching of %s due to reloc "
				"from to unmatching section: %s+%lx\n",
				sect->symbol->label, r->symbol->label, offset);
		return NO_MATCH;
	} else if (sym_sect->match_map[offset] == NULL) {
		if (mode == RUN_PRE_INITIAL)
			ksdebug(pack, "Relocation not to instruction boundary: "
				"%s+%lx -> %s+%lx", sect->symbol->label,
				r->blank_addr - sect->address, r->symbol->label,
				offset);
		return NO_MATCH;
	} else if ((unsigned long)sym_sect->match_map[offset] !=
		   r->symbol->value + offset) {
		/* The target section matched at a shifted location; adjust
		 * the expected value by the shift. */
		if (mode == RUN_PRE_INITIAL)
			ksdebug(pack, "Match map shift %s+%lx: %lx != %lx\n",
				r->symbol->label, offset,
				r->symbol->value + offset,
				(unsigned long)sym_sect->match_map[offset]);
		val += r->symbol->value + offset -
		    (unsigned long)sym_sect->match_map[offset];
	}
#endif /* KSPLICE_STANDALONE */

	if (mode == RUN_PRE_INITIAL)
		ksdebug(pack, "run-pre: reloc at r_a=%lx p_a=%lx to %s+%lx: "
			"found %s = %lx\n", run_addr, r->blank_addr,
			r->symbol->label, offset, r->symbol->label, val);

	/* The run code must not itself contain a Ksplice canary. */
	if (contains_canary(pack, run_addr, r->howto) != 0) {
		ksdebug(pack, "Aborted. Unexpected canary in run code at %lx"
			"\n", run_addr);
		return UNEXPECTED;
	}

	/* A data section referring to its own symbol carries no new
	 * information; skip labelval bookkeeping. */
	if ((sect->flags & KSPLICE_SECTION_DATA) != 0 &&
	    sect->symbol == r->symbol)
		return OK;
	ret = create_labelval(pack, r->symbol, val, TEMP);
	if (ret == NO_MATCH && mode == RUN_PRE_INITIAL)
		ksdebug(pack, "run-pre: reloc at r_a=%lx p_a=%lx: labelval %s "
			"= %lx does not match expected %lx\n", run_addr,
			r->blank_addr, r->symbol->label, r->symbol->value, val);

	if (ret != OK)
		return ret;
	/* Recursively match an as-yet-unmatched string section the reloc
	 * points into, using the discovered value as its run address. */
	if (sym_sect != NULL && (sym_sect->flags & KSPLICE_SECTION_MATCHED) == 0
	    && (sym_sect->flags & KSPLICE_SECTION_STRING) != 0) {
		if (mode == RUN_PRE_INITIAL)
			ksdebug(pack, "Recursively comparing string section "
				"%s\n", sym_sect->symbol->label);
		else if (mode == RUN_PRE_DEBUG)
			ksdebug(pack, "[str start] ");
		ret = run_pre_cmp(pack, sym_sect, val, NULL, mode);
		if (mode == RUN_PRE_DEBUG)
			ksdebug(pack, "[str end] ");
		if (ret == OK && mode == RUN_PRE_INITIAL)
			ksdebug(pack, "Successfully matched string section %s"
				"\n", sym_sect->symbol->label);
		else if (mode == RUN_PRE_INITIAL)
			ksdebug(pack, "Failed to match string section %s\n",
				sym_sect->symbol->label);
	}
	return ret;
}
2523 static int symbol_section_bsearch_compare(const void *a, const void *b)
2525 const struct ksplice_symbol *sym = a;
2526 const struct ksplice_section *sect = b;
2527 return strcmp(sym->label, sect->symbol->label);
2530 static int compare_section_labels(const void *va, const void *vb)
2532 const struct ksplice_section *a = va, *b = vb;
2533 return strcmp(a->symbol->label, b->symbol->label);
2536 static struct ksplice_section *symbol_section(struct ksplice_pack *pack,
2537 const struct ksplice_symbol *sym)
2539 return bsearch(sym, pack->helper_sections, pack->helper_sections_end -
2540 pack->helper_sections, sizeof(struct ksplice_section),
2541 symbol_section_bsearch_compare);
2544 /* Find the relocation for the oldaddr of a ksplice_patch */
2545 static const struct ksplice_reloc *patch_reloc(struct ksplice_pack *pack,
2546 const struct ksplice_patch *p)
2548 unsigned long addr = (unsigned long)&p->oldaddr;
2549 const struct ksplice_reloc *r =
2550 find_reloc(pack->primary_relocs, pack->primary_relocs_end, addr,
2551 sizeof(addr));
2552 if (r == NULL || r->blank_addr < addr ||
2553 r->blank_addr >= addr + sizeof(addr))
2554 return NULL;
2555 return r;
/*
 * Populates vals with the possible values for ksym from the various
 * sources Ksplice uses to resolve symbols
 */
static abort_t lookup_symbol(struct ksplice_pack *pack,
			     const struct ksplice_symbol *ksym,
			     struct list_head *vals)
{
	abort_t ret;

#ifdef KSPLICE_STANDALONE
	/* Before bootstrap, symbol resolution machinery isn't ready. */
	if (!bootstrapped)
		return OK;
#endif /* KSPLICE_STANDALONE */

	/* vals == NULL marks a symbol whose value was already pinned down;
	 * it overrides any candidates gathered so far. */
	if (ksym->vals == NULL) {
		release_vals(vals);
		ksdebug(pack, "using detected sym %s=%lx\n", ksym->label,
			ksym->value);
		return add_candidate_val(pack, vals, ksym->value);
	}

#ifdef CONFIG_MODULE_UNLOAD
	/* cleanup_module isn't in kallsyms; derive it from the target
	 * module's exit pointer. */
	if (strcmp(ksym->label, "cleanup_module") == 0 && pack->target != NULL
	    && pack->target->exit != NULL) {
		ret = add_candidate_val(pack, vals,
					(unsigned long)pack->target->exit);
		if (ret != OK)
			return ret;
	}
#endif

	if (ksym->name != NULL) {
		struct candidate_val *val;
		/* Pre-gathered candidates for this symbol. */
		list_for_each_entry(val, ksym->vals, list) {
			ret = add_candidate_val(pack, vals, val->val);
			if (ret != OK)
				return ret;
		}

		/* Plus exports newly created by other packs in this update. */
		ret = new_export_lookup(pack, ksym->name, vals);
		if (ret != OK)
			return ret;
	}

	return OK;
}
2606 #ifdef KSPLICE_STANDALONE
/*
 * Add the System.map-derived candidate addresses for label to vals,
 * compensating for any constant kernel relocation offset.
 */
static abort_t
add_system_map_candidates(struct ksplice_pack *pack,
			  const struct ksplice_system_map *start,
			  const struct ksplice_system_map *end,
			  const char *label, struct list_head *vals)
{
	abort_t ret;
	long off;
	int i;
	const struct ksplice_system_map *smap;

	/* Some Fedora kernel releases have System.map files whose symbol
	 * addresses disagree with the running kernel by a constant address
	 * offset because of the CONFIG_PHYSICAL_START and CONFIG_PHYSICAL_ALIGN
	 * values used to compile these kernels. This constant address offset
	 * is always a multiple of 0x100000.
	 *
	 * If we observe an offset that is NOT a multiple of 0x100000, then the
	 * user provided us with an incorrect System.map file, and we should
	 * abort.
	 * If we observe an offset that is a multiple of 0x100000, then we can
	 * adjust the System.map address values accordingly and proceed.
	 */
	off = (unsigned long)printk - pack->map_printk;
	if (off & 0xfffff) {
		ksdebug(pack, "Aborted. System.map does not match kernel.\n");
		return BAD_SYSTEM_MAP;
	}

	smap = bsearch(label, start, end - start, sizeof(*smap),
		       system_map_bsearch_compare);
	if (smap == NULL)
		return OK;

	/* Apply the detected offset to every candidate for this label. */
	for (i = 0; i < smap->nr_candidates; i++) {
		ret = add_candidate_val(pack, vals, smap->candidates[i] + off);
		if (ret != OK)
			return ret;
	}
	return OK;
}
/* bsearch comparator: a label string key against a system_map entry. */
static int system_map_bsearch_compare(const void *key, const void *elt)
{
	const struct ksplice_system_map *map = elt;
	const char *label = key;
	return strcmp(label, map->label);
}
#endif /* KSPLICE_STANDALONE */
/*
 * An update could cause one module to export a symbol and at the same
 * time change another module to use that symbol.  This violates the
 * normal situation where the packs can be handled independently.
 *
 * new_export_lookup obtains symbol values from the changes to the
 * exported symbol table made by other packs.
 */
static abort_t new_export_lookup(struct ksplice_pack *ipack, const char *name,
				 struct list_head *vals)
{
	struct ksplice_pack *pack;
	struct ksplice_patch *p;
	/* Scan every pack in this update for an EXPORT patch adding a
	 * symbol with the requested name. */
	list_for_each_entry(pack, &ipack->update->packs, list) {
		for (p = pack->patches; p < pack->patches_end; p++) {
			const struct kernel_symbol *sym;
			const struct ksplice_reloc *r;
			if (p->type != KSPLICE_PATCH_EXPORT ||
			    strcmp(name, *(const char **)p->contents) != 0)
				continue;

			/* Check that the p->oldaddr reloc has been resolved. */
			r = patch_reloc(pack, p);
			if (r == NULL ||
			    contains_canary(pack, r->blank_addr, r->howto) != 0)
				continue;
			sym = (const struct kernel_symbol *)r->symbol->value;

			/*
			 * Check that the sym->value reloc has been resolved,
			 * if there is a Ksplice relocation there.
			 */
			r = find_reloc(pack->primary_relocs,
				       pack->primary_relocs_end,
				       (unsigned long)&sym->value,
				       sizeof(&sym->value));
			if (r != NULL &&
			    r->blank_addr == (unsigned long)&sym->value &&
			    contains_canary(pack, r->blank_addr, r->howto) != 0)
				continue;
			return add_candidate_val(ipack, vals, sym->value);
		}
	}
	return OK;
}
/*
 * When apply_patches is called, the update should be fully prepared.
 * apply_patches will try to actually insert trampolines for the
 * update.
 */
static abort_t apply_patches(struct update *update)
{
	int i;
	abort_t ret;
	struct ksplice_pack *pack;

	ret = map_trampoline_pages(update);
	if (ret != OK)
		return ret;

	/* Run each pack's pre_apply hooks; any nonzero return aborts. */
	list_for_each_entry(pack, &update->packs, list) {
		const typeof(int (*)(void)) *f;
		for (f = pack->pre_apply; f < pack->pre_apply_end; f++) {
			if ((*f)() != 0) {
				ret = CALL_FAILED;
				goto out;
			}
		}
	}

	/* The atomic switch can fail with CODE_BUSY if a thread is inside
	 * to-be-replaced code; retry up to 5 times, sleeping 1s between. */
	for (i = 0; i < 5; i++) {
		cleanup_conflicts(update);
#ifdef KSPLICE_STANDALONE
		bust_spinlocks(1);
#endif /* KSPLICE_STANDALONE */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
		ret = (__force abort_t)stop_machine(__apply_patches, update,
						    NULL);
#else /* LINUX_VERSION_CODE < */
/* 9b1a4d38373a5581a4e01032a3ccdd94cd93477b was after 2.6.26 */
		ret = (__force abort_t)stop_machine_run(__apply_patches, update,
							NR_CPUS);
#endif /* LINUX_VERSION_CODE */
#ifdef KSPLICE_STANDALONE
		bust_spinlocks(0);
#endif /* KSPLICE_STANDALONE */
		if (ret != CODE_BUSY)
			break;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(1000));
	}
out:
	unmap_trampoline_pages(update);

	if (ret == CODE_BUSY) {
		print_conflicts(update);
		_ksdebug(update, "Aborted %s. stack check: to-be-replaced "
			 "code is busy.\n", update->kid);
	} else if (ret == ALREADY_REVERSED) {
		_ksdebug(update, "Aborted %s. Ksplice update %s is already "
			 "reversed.\n", update->kid, update->kid);
	}

	if (ret != OK) {
		/* Failure: give each pack a chance to undo pre_apply work. */
		list_for_each_entry(pack, &update->packs, list) {
			const typeof(void (*)(void)) *f;
			for (f = pack->fail_apply; f < pack->fail_apply_end;
			     f++)
				(*f)();
		}

		return ret;
	}

	/* Success: run post_apply hooks. */
	list_for_each_entry(pack, &update->packs, list) {
		const typeof(void (*)(void)) *f;
		for (f = pack->post_apply; f < pack->post_apply_end; f++)
			(*f)();
	}

	_ksdebug(update, "Atomic patch insertion for %s complete\n",
		 update->kid);
	return OK;
}
/*
 * Attempt to atomically remove a previously applied update, retrying
 * when the to-be-reversed code is busy.  Mirrors apply_patches.
 */
static abort_t reverse_patches(struct update *update)
{
	int i;
	abort_t ret;
	struct ksplice_pack *pack;

	clear_debug_buf(update);
	ret = init_debug_buf(update);
	if (ret != OK)
		return ret;

	_ksdebug(update, "Preparing to reverse %s\n", update->kid);

	ret = map_trampoline_pages(update);
	if (ret != OK)
		return ret;

	/* Run each pack's pre_reverse hooks; any nonzero return aborts. */
	list_for_each_entry(pack, &update->packs, list) {
		const typeof(int (*)(void)) *f;
		for (f = pack->pre_reverse; f < pack->pre_reverse_end; f++) {
			if ((*f)() != 0) {
				ret = CALL_FAILED;
				goto out;
			}
		}
	}

	/* Retry the atomic reversal up to 5 times if code is busy. */
	for (i = 0; i < 5; i++) {
		cleanup_conflicts(update);
		clear_list(&update->conflicts, struct conflict, list);
#ifdef KSPLICE_STANDALONE
		bust_spinlocks(1);
#endif /* KSPLICE_STANDALONE */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
		ret = (__force abort_t)stop_machine(__reverse_patches, update,
						    NULL);
#else /* LINUX_VERSION_CODE < */
/* 9b1a4d38373a5581a4e01032a3ccdd94cd93477b was after 2.6.26 */
		ret = (__force abort_t)stop_machine_run(__reverse_patches,
							update, NR_CPUS);
#endif /* LINUX_VERSION_CODE */
#ifdef KSPLICE_STANDALONE
		bust_spinlocks(0);
#endif /* KSPLICE_STANDALONE */
		if (ret != CODE_BUSY)
			break;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(1000));
	}
out:
	unmap_trampoline_pages(update);

	if (ret == CODE_BUSY) {
		print_conflicts(update);
		_ksdebug(update, "Aborted %s. stack check: to-be-reversed "
			 "code is busy.\n", update->kid);
	} else if (ret == MODULE_BUSY) {
		_ksdebug(update, "Update %s is in use by another module\n",
			 update->kid);
	}

	if (ret != OK) {
		/* Failure: give each pack a chance to undo pre_reverse work. */
		list_for_each_entry(pack, &update->packs, list) {
			const typeof(void (*)(void)) *f;
			for (f = pack->fail_reverse; f < pack->fail_reverse_end;
			     f++)
				(*f)();
		}

		return ret;
	}

	/* Success: run post_reverse hooks and drop stale safety records. */
	list_for_each_entry(pack, &update->packs, list) {
		const typeof(void (*)(void)) *f;
		for (f = pack->post_reverse; f < pack->post_reverse_end; f++)
			(*f)();
	}

	list_for_each_entry(pack, &update->packs, list)
		clear_list(&pack->safety_records, struct safety_record, list);

	_ksdebug(update, "Atomic patch removal for %s complete\n", update->kid);
	return OK;
}
/* Atomically insert the update; run from within stop_machine */
static int __apply_patches(void *updateptr)
{
	struct update *update = updateptr;
	struct ksplice_pack *pack;
	struct ksplice_module_list_entry *entry;
	struct ksplice_patch *p;
	abort_t ret;

	/* Idempotent: a second invocation after success is a no-op. */
	if (update->stage == STAGE_APPLIED)
		return (__force int)OK;

	if (update->stage != STAGE_PREPARING)
		return (__force int)UNEXPECTED;

	/* No thread may be executing inside to-be-replaced code. */
	ret = check_each_task(update);
	if (ret != OK)
		return (__force int)ret;

	/* Pin every primary module; on failure, unpin the ones already
	 * taken (pack1 walks up to but not including the failing pack). */
	list_for_each_entry(pack, &update->packs, list) {
		if (try_module_get(pack->primary) != 1) {
			struct ksplice_pack *pack1;
			list_for_each_entry(pack1, &update->packs, list) {
				if (pack1 == pack)
					break;
				module_put(pack1->primary);
			}
			module_put(THIS_MODULE);
			return (__force int)UNEXPECTED;
		}
	}

	/* Last chance for pack-provided veto hooks. */
	list_for_each_entry(pack, &update->packs, list) {
		const typeof(int (*)(void)) *f;
		for (f = pack->check_apply; f < pack->check_apply_end; f++)
			if ((*f)() != 0)
				return (__force int)CALL_FAILED;
	}

	/* Commit point: the update application will succeed. */

	update->stage = STAGE_APPLIED;
#ifdef TAINT_KSPLICE
	add_taint(TAINT_KSPLICE);
#endif

	list_for_each_entry(entry, &update->ksplice_module_list, update_list)
		list_add(&entry->list, &ksplice_module_list);

	/* Overwrite each old function entry point with its trampoline. */
	list_for_each_entry(pack, &update->packs, list) {
		for (p = pack->patches; p < pack->patches_end; p++)
			insert_trampoline(p);
	}

	/* Run pack apply hooks after the trampolines are in place. */
	list_for_each_entry(pack, &update->packs, list) {
		const typeof(void (*)(void)) *f;
		for (f = pack->apply; f < pack->apply_end; f++)
			(*f)();
	}

	return (__force int)OK;
}
2931 /* Atomically remove the update; run from within stop_machine */
2932 static int __reverse_patches(void *updateptr)
2934 struct update *update = updateptr;
2935 struct ksplice_pack *pack;
2936 struct ksplice_module_list_entry *entry;
2937 const struct ksplice_patch *p;
2938 abort_t ret;
2940 if (update->stage != STAGE_APPLIED)
2941 return (__force int)OK;
2943 #ifdef CONFIG_MODULE_UNLOAD
2944 list_for_each_entry(pack, &update->packs, list) {
2945 if (module_refcount(pack->primary) != 1)
2946 return (__force int)MODULE_BUSY;
2948 #endif /* CONFIG_MODULE_UNLOAD */
2950 list_for_each_entry(entry, &update->ksplice_module_list, update_list) {
2951 if (!entry->applied && find_module(entry->target_name) != NULL)
2952 return COLD_UPDATE_LOADED;
2955 ret = check_each_task(update);
2956 if (ret != OK)
2957 return (__force int)ret;
2959 list_for_each_entry(pack, &update->packs, list) {
2960 for (p = pack->patches; p < pack->patches_end; p++) {
2961 ret = verify_trampoline(pack, p);
2962 if (ret != OK)
2963 return (__force int)ret;
2967 list_for_each_entry(pack, &update->packs, list) {
2968 const typeof(int (*)(void)) *f;
2969 for (f = pack->check_reverse; f < pack->check_reverse_end; f++)
2970 if ((*f)() != 0)
2971 return (__force int)CALL_FAILED;
2974 /* Commit point: the update reversal will succeed. */
2976 update->stage = STAGE_REVERSED;
2978 list_for_each_entry(pack, &update->packs, list)
2979 module_put(pack->primary);
2981 list_for_each_entry(entry, &update->ksplice_module_list, update_list)
2982 list_del(&entry->list);
2984 list_for_each_entry(pack, &update->packs, list) {
2985 const typeof(void (*)(void)) *f;
2986 for (f = pack->reverse; f < pack->reverse_end; f++)
2987 (*f)();
2990 list_for_each_entry(pack, &update->packs, list) {
2991 for (p = pack->patches; p < pack->patches_end; p++)
2992 remove_trampoline(p);
2995 return (__force int)OK;
/*
 * Check whether any thread's instruction pointer or any address of
 * its stack is contained in one of the safety_records associated with
 * the update.
 *
 * check_each_task must be called from inside stop_machine, because it
 * does not take tasklist_lock (which cannot be held by anyone else
 * during stop_machine).
 */
static abort_t check_each_task(struct update *update)
{
	const struct task_struct *g, *p;
	abort_t status = OK, ret;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
/* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
	read_lock(&tasklist_lock);
#endif /* LINUX_VERSION_CODE */
	do_each_thread(g, p) {
		/* do_each_thread is a double loop! */
		/* A first, silent pass detects a conflict; the rerun pass
		 * rebuilds the same result while recording a conflict report
		 * for later printing. */
		ret = check_task(update, p, false);
		if (ret != OK) {
			check_task(update, p, true);
			status = ret;
		}
		/* CODE_BUSY keeps scanning so every conflict is collected;
		 * any other failure aborts immediately. */
		if (ret != OK && ret != CODE_BUSY)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
/* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
			goto out;
#else /* LINUX_VERSION_CODE < */
			return ret;
#endif /* LINUX_VERSION_CODE */
	} while_each_thread(g, p);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
/* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
out:
	read_unlock(&tasklist_lock);
#endif /* LINUX_VERSION_CODE */
	return status;
}
/*
 * Check one task's instruction pointer and stack against the update's
 * safety records.  When rerun is true, a conflict record is allocated
 * (GFP_ATOMIC — we are inside stop_machine) so the failure can be
 * reported to the user.
 */
static abort_t check_task(struct update *update,
			  const struct task_struct *t, bool rerun)
{
	abort_t status, ret;
	struct conflict *conf = NULL;

	if (rerun) {
		conf = kmalloc(sizeof(*conf), GFP_ATOMIC);
		if (conf == NULL)
			return OUT_OF_MEMORY;
		conf->process_name = kstrdup(t->comm, GFP_ATOMIC);
		if (conf->process_name == NULL) {
			kfree(conf);
			return OUT_OF_MEMORY;
		}
		conf->pid = t->pid;
		INIT_LIST_HEAD(&conf->stack);
		list_add(&conf->list, &update->conflicts);
	}

	/* First the saved instruction pointer... */
	status = check_address(update, conf, KSPLICE_IP(t));
	/* ...then the stack: our own stack is walked from the current
	 * frame; a non-running task's from its saved stack pointer.  Any
	 * other running task (besides stop_machine's own threads) should
	 * not exist inside stop_machine. */
	if (t == current) {
		ret = check_stack(update, conf, task_thread_info(t),
				  (unsigned long *)__builtin_frame_address(0));
		if (status == OK)
			status = ret;
	} else if (!task_curr(t)) {
		ret = check_stack(update, conf, task_thread_info(t),
				  (unsigned long *)KSPLICE_SP(t));
		if (status == OK)
			status = ret;
	} else if (!is_stop_machine(t)) {
		status = UNEXPECTED_RUNNING_TASK;
	}
	return status;
}
3075 static abort_t check_stack(struct update *update, struct conflict *conf,
3076 const struct thread_info *tinfo,
3077 const unsigned long *stack)
3079 abort_t status = OK, ret;
3080 unsigned long addr;
3082 while (valid_stack_ptr(tinfo, stack)) {
3083 addr = *stack++;
3084 ret = check_address(update, conf, addr);
3085 if (ret != OK)
3086 status = ret;
3088 return status;
/*
 * Check a single address against every safety record of every pack in
 * the update.  When conf is non-NULL, the address is also recorded in
 * the conflict report (GFP_ATOMIC — called inside stop_machine).
 */
static abort_t check_address(struct update *update,
			     struct conflict *conf, unsigned long addr)
{
	abort_t status = OK, ret;
	const struct safety_record *rec;
	struct ksplice_pack *pack;
	struct conflict_addr *ca = NULL;

	if (conf != NULL) {
		ca = kmalloc(sizeof(*ca), GFP_ATOMIC);
		if (ca == NULL)
			return OUT_OF_MEMORY;
		ca->addr = addr;
		ca->has_conflict = false;
		ca->label = NULL;
		list_add(&ca->list, &conf->stack);
	}

	list_for_each_entry(pack, &update->packs, list) {
		/* Normalize through any trampoline so an address inside a
		 * patched entry point is attributed to the real code. */
		unsigned long tramp_addr = follow_trampolines(pack, addr);
		list_for_each_entry(rec, &pack->safety_records, list) {
			ret = check_record(ca, rec, tramp_addr);
			if (ret != OK)
				status = ret;
		}
	}
	return status;
}
/*
 * Does addr fall inside the code range covered by one safety record?
 * Returns CODE_BUSY on a hit (marking the conflict_addr, if any, with
 * the record's label for the sysfs conflicts report), OK otherwise.
 */
3120 static abort_t check_record(struct conflict_addr *ca,
3121 const struct safety_record *rec, unsigned long addr)
3123 if (addr >= rec->addr && addr < rec->addr + rec->size) {
3124 if (ca != NULL) {
3125 ca->label = rec->label;
3126 ca->has_conflict = true;
3128 return CODE_BUSY;
3130 return OK;
3133 /* Is the task one of the stop_machine tasks? */
/*
 * Identified purely by thread name, which changed across kernel
 * releases: "kstop/<cpu>" (>= 2.6.28), "kstop<cpu>" (2.6.27), and
 * "kstopmachine" before that.  For the prefixed forms we also require
 * that everything after the prefix is digits, to avoid matching an
 * unrelated thread that merely starts with "kstop".
 */
3134 static bool is_stop_machine(const struct task_struct *t)
3136 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
3137 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
3138 const char *kstop_prefix = "kstop/";
3139 #else /* LINUX_VERSION_CODE < */
3140 /* c9583e55fa2b08a230c549bd1e3c0bde6c50d9cc was after 2.6.27 */
3141 const char *kstop_prefix = "kstop";
3142 #endif /* LINUX_VERSION_CODE */
3143 const char *num;
3144 if (!starts_with(t->comm, kstop_prefix))
3145 return false;
3146 num = t->comm + strlen(kstop_prefix);
3147 return num[strspn(num, "0123456789")] == '\0';
3148 #else /* LINUX_VERSION_CODE < */
3149 /* ffdb5976c47609c862917d4c186ecbb5706d2dda was after 2.6.26 */
3150 return strcmp(t->comm, "kstopmachine") == 0;
3151 #endif /* LINUX_VERSION_CODE */
/*
 * Free the conflict report from a previous failed attempt: each
 * conflict's stack-address list and kstrdup'd process name, then the
 * conflict entries themselves.
 */
3154 static void cleanup_conflicts(struct update *update)
3156 struct conflict *conf;
3157 list_for_each_entry(conf, &update->conflicts, list) {
3158 clear_list(&conf->stack, struct conflict_addr, list);
3159 kfree(conf->process_name);
3161 clear_list(&update->conflicts, struct conflict, list);
/*
 * Dump the recorded stack-check conflicts to the debug log: one line
 * per task, listing every scanned stack word and tagging the ones that
 * actually hit patched code with [<-CONFLICT].
 */
3164 static void print_conflicts(struct update *update)
3166 const struct conflict *conf;
3167 const struct conflict_addr *ca;
3168 list_for_each_entry(conf, &update->conflicts, list) {
3169 _ksdebug(update, "stack check: pid %d (%s):", conf->pid,
3170 conf->process_name);
3171 list_for_each_entry(ca, &conf->stack, list) {
3172 _ksdebug(update, " %lx", ca->addr);
3173 if (ca->has_conflict)
3174 _ksdebug(update, " [<-CONFLICT]");
3176 _ksdebug(update, "\n");
/*
 * Patch the trampoline jump into the old code: save the original bytes
 * into p->saved (so the patch can be reversed), write p->contents over
 * them, and flush the icache for the modified range.  set_fs(KERNEL_DS)
 * widens the address-limit check so the kernel-text write is permitted.
 */
3180 static void insert_trampoline(struct ksplice_patch *p)
3182 mm_segment_t old_fs = get_fs();
3183 set_fs(KERNEL_DS);
3184 memcpy(p->saved, p->vaddr, p->size);
3185 memcpy(p->vaddr, p->contents, p->size);
3186 flush_icache_range(p->oldaddr, p->oldaddr + p->size);
3187 set_fs(old_fs);
/*
 * Confirm an inserted trampoline is still intact (nothing else has
 * rewritten those bytes since we patched them).  CODE_BUSY aborts the
 * operation that depended on the trampoline being in place.
 */
3190 static abort_t verify_trampoline(struct ksplice_pack *pack,
3191 const struct ksplice_patch *p)
3193 if (memcmp(p->vaddr, p->contents, p->size) != 0) {
3194 ksdebug(pack, "Aborted. Trampoline at %lx has been "
3195 "overwritten.\n", p->oldaddr);
3196 return CODE_BUSY;
3198 return OK;
/*
 * Reverse insert_trampoline(): restore the saved original bytes and
 * flush the icache for the range.
 */
3201 static void remove_trampoline(const struct ksplice_patch *p)
3203 mm_segment_t old_fs = get_fs();
3204 set_fs(KERNEL_DS);
3205 memcpy(p->vaddr, p->saved, p->size);
3206 flush_icache_range(p->oldaddr, p->oldaddr + p->size);
3207 set_fs(old_fs);
3210 /* Returns NO_MATCH if there's already a labelval with a different value */
/*
 * Resolve a symbol to a concrete value.  If the symbol already has a
 * final value (vals == NULL), just check consistency.  Otherwise commit
 * val; with status == TEMP the previous candidate list is stashed on
 * pack->temp_labelvals so set_temp_labelvals() can later either commit
 * or roll back this decision.
 */
3211 static abort_t create_labelval(struct ksplice_pack *pack,
3212 struct ksplice_symbol *ksym,
3213 unsigned long val, int status)
3215 val = follow_trampolines(pack, val);
3216 if (ksym->vals == NULL)
3217 return ksym->value == val ? OK : NO_MATCH;
3219 ksym->value = val;
3220 if (status == TEMP) {
3221 struct labelval *lv = kmalloc(sizeof(*lv), GFP_KERNEL);
3222 if (lv == NULL)
3223 return OUT_OF_MEMORY;
3224 lv->symbol = ksym;
3225 lv->saved_vals = ksym->vals;
3226 list_add(&lv->list, &pack->temp_labelvals);
/* vals == NULL marks the symbol as having a single known value. */
3228 ksym->vals = NULL;
3229 return OK;
/*
3233 * Creates a new safety_record for a helper section based on its
3234 * ksplice_section and run-pre matching information.
 *
 * Only sections that are actually the target of a patch get a record
 * (others cannot conflict); record_list == NULL means the caller does
 * not want safety records at all (e.g. a probe-only pass).
 */
3236 static abort_t create_safety_record(struct ksplice_pack *pack,
3237 const struct ksplice_section *sect,
3238 struct list_head *record_list,
3239 unsigned long run_addr,
3240 unsigned long run_size)
3242 struct safety_record *rec;
3243 struct ksplice_patch *p;
3245 if (record_list == NULL)
3246 return OK;
/* Is any patch aimed at this section's symbol?  If not, no record. */
3248 for (p = pack->patches; p < pack->patches_end; p++) {
3249 const struct ksplice_reloc *r = patch_reloc(pack, p);
3250 if (strcmp(sect->symbol->label, r->symbol->label) == 0)
3251 break;
3253 if (p >= pack->patches_end)
3254 return OK;
3256 rec = kmalloc(sizeof(*rec), GFP_KERNEL);
3257 if (rec == NULL)
3258 return OUT_OF_MEMORY;
/*
3260 * The helper might be unloaded when checking reversing
3261 * patches, so we need to kstrdup the label here.
 */
3263 rec->label = kstrdup(sect->symbol->label, GFP_KERNEL);
3264 if (rec->label == NULL) {
3265 kfree(rec);
3266 return OUT_OF_MEMORY;
3268 rec->addr = run_addr;
3269 rec->size = run_size;
3271 list_add(&rec->list, record_list);
3272 return OK;
/*
 * Add val to a list of candidate symbol values, de-duplicating after
 * canonicalizing through follow_trampolines().
 */
3275 static abort_t add_candidate_val(struct ksplice_pack *pack,
3276 struct list_head *vals, unsigned long val)
3278 struct candidate_val *tmp, *new;
/*
3281 * Careful: follow trampolines before comparing values so that we do
3282 * not mistake the obsolete function for another copy of the function.
 */
3284 val = follow_trampolines(pack, val);
3286 list_for_each_entry(tmp, vals, list) {
3287 if (tmp->val == val)
3288 return OK;
3290 new = kmalloc(sizeof(*new), GFP_KERNEL);
3291 if (new == NULL)
3292 return OUT_OF_MEMORY;
3293 new->val = val;
3294 list_add(&new->list, vals);
3295 return OK;
/* Free an entire candidate-value list (see add_candidate_val). */
3298 static void release_vals(struct list_head *vals)
3300 clear_list(vals, struct candidate_val, list);
/*
3304 * The temp_labelvals list is used to cache those temporary labelvals
3305 * that have been created to cross-check the symbol values obtained
3306 * from different relocations within a single section being matched.
 *
3308 * If status is VAL, commit the temp_labelvals as final values.
 *
3310 * If status is NOVAL, restore the list of possible values to the
3311 * ksplice_symbol, so that it no longer has a known value.
 */
3313 static void set_temp_labelvals(struct ksplice_pack *pack, int status)
3315 struct labelval *lv, *n;
3316 list_for_each_entry_safe(lv, n, &pack->temp_labelvals, list) {
3317 if (status == NOVAL) {
/* Roll back: reattach the saved candidate list. */
3318 lv->symbol->vals = lv->saved_vals;
3319 } else {
/* Commit: the saved candidates are no longer needed. */
3320 release_vals(lv->saved_vals);
3321 kfree(lv->saved_vals);
3323 list_del(&lv->list);
3324 kfree(lv);
3328 /* Is there a Ksplice canary with given howto at blank_addr? */
/*
 * Returns 1 if the canary is present, 0 if not, and -1 on an invalid
 * relocation size.  Only the bits under howto->dst_mask are compared,
 * since the relocation may occupy only part of the field.
 */
3329 static int contains_canary(struct ksplice_pack *pack, unsigned long blank_addr,
3330 const struct ksplice_reloc_howto *howto)
3332 switch (howto->size) {
3333 case 1:
3334 return (*(uint8_t *)blank_addr & howto->dst_mask) ==
3335 (KSPLICE_CANARY & howto->dst_mask);
3336 case 2:
3337 return (*(uint16_t *)blank_addr & howto->dst_mask) ==
3338 (KSPLICE_CANARY & howto->dst_mask);
3339 case 4:
3340 return (*(uint32_t *)blank_addr & howto->dst_mask) ==
3341 (KSPLICE_CANARY & howto->dst_mask);
3342 #if BITS_PER_LONG >= 64
3343 case 8:
3344 return (*(uint64_t *)blank_addr & howto->dst_mask) ==
3345 (KSPLICE_CANARY & howto->dst_mask);
3346 #endif /* BITS_PER_LONG */
3347 default:
3348 ksdebug(pack, "Aborted. Invalid relocation size.\n");
3349 return -1;
/*
3354 * Compute the address of the code you would actually run if you were
3355 * to call the function at addr (i.e., follow the sequence of jumps
3356 * starting at addr)
 *
 * Only jumps that land inside another ksplice_* module (and not the
 * pack's own target) are followed, so ordinary kernel jump tables are
 * left alone.
 */
3358 static unsigned long follow_trampolines(struct ksplice_pack *pack,
3359 unsigned long addr)
3361 unsigned long new_addr;
3362 struct module *m;
3364 while (1) {
3365 #ifdef KSPLICE_STANDALONE
/* Before bootstrap we cannot decode trampolines yet. */
3366 if (!bootstrapped)
3367 return addr;
3368 #endif /* KSPLICE_STANDALONE */
3369 if (!__kernel_text_address(addr) ||
3370 trampoline_target(pack, addr, &new_addr) != OK)
3371 return addr;
3372 m = __module_text_address(new_addr);
3373 if (m == NULL || m == pack->target ||
3374 !starts_with(m->name, "ksplice"))
3375 return addr;
3376 addr = new_addr;
3380 /* Does module a patch module b? */
/*
 * Without kernel support the relationship is inferred from the primary
 * module's name, "ksplice_<kid>_<target>"; with kernel support it is
 * looked up in ksplice_module_list.
 * NOTE(review): the kernel-support branch dereferences b->name without
 * a NULL check, while the standalone branch maps b == NULL to
 * "vmlinux" -- presumably callers never pass NULL here; confirm.
 */
3381 static bool patches_module(const struct module *a, const struct module *b)
3383 #ifdef KSPLICE_NO_KERNEL_SUPPORT
3384 const char *name;
3385 if (a == b)
3386 return true;
3387 if (a == NULL || !starts_with(a->name, "ksplice_"))
3388 return false;
/* Skip "ksplice_" and then the kid, leaving "_<target>". */
3389 name = a->name + strlen("ksplice_");
3390 name += strcspn(name, "_");
3391 if (name[0] != '_')
3392 return false;
3393 name++;
3394 return strcmp(name, b == NULL ? "vmlinux" : b->name) == 0;
3395 #else /* !KSPLICE_NO_KERNEL_SUPPORT */
3396 struct ksplice_module_list_entry *entry;
3397 if (a == b)
3398 return true;
3399 list_for_each_entry(entry, &ksplice_module_list, list) {
3400 if (strcmp(entry->target_name, b->name) == 0 &&
3401 strcmp(entry->primary_name, a->name) == 0)
3402 return true;
3404 return false;
3405 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
/* Does str begin with prefix?  (An empty prefix always matches.) */
static bool starts_with(const char *str, const char *prefix)
{
	while (*prefix != '\0') {
		if (*str++ != *prefix++)
			return false;
	}
	return true;
}
/* Does the list contain exactly one entry? */
3413 static bool singular(struct list_head *list)
3415 return !list_empty(list) && list->next->next == list;
/*
 * Binary search over a sorted array of n elements of the given size;
 * returns a pointer to a matching element, or NULL if none matches.
 * (Carried locally because old kernels lack a generic bsearch.)
 *
 * Bounds are kept as size_t in half-open [lo, hi) form: the original
 * used int indices, which truncate a size_t element count and can
 * overflow in (start + end) / 2 for very large arrays.
 */
static void *bsearch(const void *key, const void *base, size_t n,
		     size_t size, int (*cmp)(const void *key, const void *elt))
{
	size_t lo = 0, hi = n;
	while (lo < hi) {
		/* Overflow-safe midpoint. */
		size_t mid = lo + (hi - lo) / 2;
		int result = cmp(key, base + mid * size);
		if (result < 0)
			hi = mid;
		else if (result > 0)
			lo = mid + 1;
		else
			return (void *)base + mid * size;
	}
	return NULL;
}
/*
 * sort()/bsearch comparator for ksplice_reloc entries: primary key is
 * blank_addr, with the relocation size as a tie-breaker so lookups at
 * the same address are deterministic.
 */
3437 static int compare_relocs(const void *a, const void *b)
3439 const struct ksplice_reloc *ra = a, *rb = b;
3440 if (ra->blank_addr > rb->blank_addr)
3441 return 1;
3442 else if (ra->blank_addr < rb->blank_addr)
3443 return -1;
3444 else
3445 return ra->howto->size - rb->howto->size;
3448 #ifdef KSPLICE_STANDALONE
/* Comparator for sorting/searching System.map entries by symbol label. */
3449 static int compare_system_map(const void *a, const void *b)
3451 const struct ksplice_system_map *sa = a, *sb = b;
3452 return strcmp(sa->label, sb->label);
3454 #endif /* KSPLICE_STANDALONE */
3456 #ifdef CONFIG_DEBUG_FS
3457 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
3458 /* Old kernels don't have debugfs_create_blob */
/*
 * Minimal backport: a read-only debugfs file whose contents come from a
 * debugfs_blob_wrapper, mirroring the upstream debugfs_create_blob API.
 */
3459 static ssize_t read_file_blob(struct file *file, char __user *user_buf,
3460 size_t count, loff_t *ppos)
3462 struct debugfs_blob_wrapper *blob = file->private_data;
3463 return simple_read_from_buffer(user_buf, count, ppos, blob->data,
3464 blob->size);
/* Stash the blob pointer (inode->i_private) for read_file_blob(). */
3467 static int blob_open(struct inode *inode, struct file *file)
3469 if (inode->i_private)
3470 file->private_data = inode->i_private;
3471 return 0;
3474 static struct file_operations fops_blob = {
3475 .read = read_file_blob,
3476 .open = blob_open,
3479 static struct dentry *debugfs_create_blob(const char *name, mode_t mode,
3480 struct dentry *parent,
3481 struct debugfs_blob_wrapper *blob)
3483 return debugfs_create_file(name, mode, parent, blob, &fops_blob);
3485 #endif /* LINUX_VERSION_CODE */
/*
 * Create the per-update debugfs blob file (named after the update) that
 * exposes the in-memory debug log; the buffer itself is allocated
 * lazily by _ksdebug().
 */
3487 static abort_t init_debug_buf(struct update *update)
3489 update->debug_blob.size = 0;
3490 update->debug_blob.data = NULL;
3491 update->debugfs_dentry =
3492 debugfs_create_blob(update->name, S_IFREG | S_IRUSR, NULL,
3493 &update->debug_blob);
3494 if (update->debugfs_dentry == NULL)
3495 return OUT_OF_MEMORY;
3496 return OK;
/* Tear down the debugfs file and free the vmalloc'd debug buffer. */
3499 static void clear_debug_buf(struct update *update)
3501 if (update->debugfs_dentry == NULL)
3502 return;
3503 debugfs_remove(update->debugfs_dentry);
3504 update->debugfs_dentry = NULL;
3505 update->debug_blob.size = 0;
3506 vfree(update->debug_blob.data);
3507 update->debug_blob.data = NULL;
/*
 * printf-style append to the update's debugfs debug blob.  The buffer
 * grows geometrically (power-of-two, at least PAGE_SIZE) via vmalloc +
 * copy, since there is no vrealloc.  No-op when debugging is disabled.
 */
3510 static int _ksdebug(struct update *update, const char *fmt, ...)
3512 va_list args;
3513 unsigned long size, old_size, new_size;
3515 if (update->debug == 0)
3516 return 0;
3518 /* size includes the trailing '\0' */
/* First pass: vsnprintf with size 0 only measures the output. */
3519 va_start(args, fmt);
3520 size = 1 + vsnprintf(update->debug_blob.data, 0, fmt, args);
3521 va_end(args);
3522 old_size = update->debug_blob.size == 0 ? 0 :
3523 max(PAGE_SIZE, roundup_pow_of_two(update->debug_blob.size));
3524 new_size = update->debug_blob.size + size == 0 ? 0 :
3525 max(PAGE_SIZE, roundup_pow_of_two(update->debug_blob.size + size));
3526 if (new_size > old_size) {
3527 char *buf = vmalloc(new_size);
3528 if (buf == NULL)
3529 return -ENOMEM;
3530 memcpy(buf, update->debug_blob.data, update->debug_blob.size);
3531 vfree(update->debug_blob.data);
3532 update->debug_blob.data = buf;
/* Second pass actually formats into the (possibly new) buffer. */
3534 va_start(args, fmt);
3535 update->debug_blob.size += vsnprintf(update->debug_blob.data +
3536 update->debug_blob.size,
3537 size, fmt, args);
3538 va_end(args);
3539 return 0;
3541 #else /* CONFIG_DEBUG_FS */
/* No debugfs: debug output goes straight to printk, so there is no
 * buffer to set up or tear down. */
3542 static abort_t init_debug_buf(struct update *update)
3544 return OK;
3547 static void clear_debug_buf(struct update *update)
3549 return;
/*
 * printk-based debug output.  debug_continue_line tracks whether the
 * previous fragment ended without '\n', so the "ksplice: " prefix is
 * only emitted at the start of a line.
 */
3552 static int _ksdebug(struct update *update, const char *fmt, ...)
3554 va_list args;
3556 if (update->debug == 0)
3557 return 0;
3559 if (!update->debug_continue_line)
3560 printk(KERN_DEBUG "ksplice: ");
3562 va_start(args, fmt);
3563 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
3564 vprintk(fmt, args);
3565 #else /* LINUX_VERSION_CODE < */
3566 /* 683b229286b429244f35726b3c18caec429233bd was after 2.6.8 */
3568 char *buf = kvasprintf(GFP_KERNEL, fmt, args);
3569 printk("%s", buf);
3570 kfree(buf);
3572 #endif /* LINUX_VERSION_CODE */
3573 va_end(args);
3575 update->debug_continue_line =
3576 fmt[0] == '\0' || fmt[strlen(fmt) - 1] != '\n';
3577 return 0;
3579 #endif /* CONFIG_DEBUG_FS */
3581 #ifdef KSPLICE_NO_KERNEL_SUPPORT
3582 #ifdef CONFIG_KALLSYMS
/*
 * Backport of kallsyms_on_each_symbol for kernels without it: walk the
 * built-in kallsyms table (compressed or plain, depending on kernel
 * version) and then every module's symbol table, calling fn for each
 * symbol until it returns nonzero.
 */
3583 static int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
3584 struct module *, unsigned long),
3585 void *data)
3587 char namebuf[KSYM_NAME_LEN];
3588 unsigned long i;
3589 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3590 unsigned int off;
3591 #endif /* LINUX_VERSION_CODE */
3592 int ret;
3594 /* kallsyms compression was added by 5648d78927ca65e74aadc88a2b1d6431e55e78ec
3595 * 2.6.10 was the first release after this commit
 */
3597 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3598 for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
3599 off = kallsyms_expand_symbol(off, namebuf);
3600 ret = fn(data, namebuf, NULL, kallsyms_addresses[i]);
3601 if (ret != 0)
3602 return ret;
3604 #else /* LINUX_VERSION_CODE < */
3605 char *knames;
3607 for (i = 0, knames = kallsyms_names; i < kallsyms_num_syms; i++) {
/* Uncompressed format: <shared-prefix-len> <suffix string>. */
3608 unsigned prefix = *knames++;
3610 strlcpy(namebuf + prefix, knames, KSYM_NAME_LEN - prefix);
3612 ret = fn(data, namebuf, NULL, kallsyms_addresses[i]);
3613 if (ret != OK)
3614 return ret;
3616 knames += strlen(knames) + 1;
3618 #endif /* LINUX_VERSION_CODE */
3619 return module_kallsyms_on_each_symbol(fn, data);
/*
3622 * kallsyms compression was added by 5648d78927ca65e74aadc88a2b1d6431e55e78ec
3623 * 2.6.10 was the first release after this commit
 */
3625 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3626 extern u8 kallsyms_token_table[];
3627 extern u16 kallsyms_token_index[];
/*
 * Decompress one kallsyms table entry at offset off into result,
 * returning the offset of the next entry.  Mirrors the kernel's own
 * kallsyms_expand_symbol: each data byte indexes a token string, and
 * the first decoded character (the symbol type tag) is skipped.
 */
3629 static unsigned int kallsyms_expand_symbol(unsigned int off, char *result)
3631 long len, skipped_first = 0;
3632 const u8 *tptr, *data;
3634 data = &kallsyms_names[off];
3635 len = *data;
3636 data++;
3638 off += len + 1;
3640 while (len) {
3641 tptr = &kallsyms_token_table[kallsyms_token_index[*data]];
3642 data++;
3643 len--;
3645 while (*tptr) {
3646 if (skipped_first) {
3647 *result = *tptr;
3648 result++;
3649 } else
3650 skipped_first = 1;
3651 tptr++;
3655 *result = '\0';
3657 return off;
3659 #endif /* LINUX_VERSION_CODE */
/*
 * Walk every loaded module's full symtab, calling fn for each symbol
 * until it returns nonzero.  Backport of the in-kernel helper; relies
 * on the locally-resolved `modules` list head.
 */
3661 static int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
3662 struct module *,
3663 unsigned long),
3664 void *data)
3666 struct module *mod;
3667 unsigned int i;
3668 int ret;
3670 list_for_each_entry(mod, &modules, list) {
3671 for (i = 0; i < mod->num_symtab; i++) {
3672 ret = fn(data, mod->strtab + mod->symtab[i].st_name,
3673 mod, mod->symtab[i].st_value);
3674 if (ret != 0)
3675 return ret;
3678 return 0;
3680 #endif /* CONFIG_KALLSYMS */
/*
 * Backport of find_module(): linear search of the module list by name.
 * Caller is expected to hold module_mutex.
 */
3682 static struct module *find_module(const char *name)
3684 struct module *mod;
3686 list_for_each_entry(mod, &modules, list) {
3687 if (strcmp(mod->name, name) == 0)
3688 return mod;
3690 return NULL;
3693 #ifdef CONFIG_MODULE_UNLOAD
/* Mirror of the kernel's private module_use bookkeeping record. */
3694 struct module_use {
3695 struct list_head list;
3696 struct module *module_which_uses;
3699 /* I'm not yet certain whether we need the strong form of this. */
/* Refuse modules that are not fully live (loading or going away). */
3700 static inline int strong_try_module_get(struct module *mod)
3702 if (mod && mod->state != MODULE_STATE_LIVE)
3703 return -EBUSY;
3704 if (try_module_get(mod))
3705 return 0;
3706 return -ENOENT;
3709 /* Does a already use b? */
3710 static int already_uses(struct module *a, struct module *b)
3712 struct module_use *use;
3713 list_for_each_entry(use, &b->modules_which_use_me, list) {
3714 if (use->module_which_uses == a)
3715 return 1;
3717 return 0;
3720 /* Make it so module a uses b. Must be holding module_mutex */
/*
 * Backport of the kernel's use_module(): takes a reference on b and
 * records the dependency so b cannot be unloaded while a needs it.
 * Returns 1 on success (or if the dependency already exists / b is
 * NULL), 0 on failure.
 */
3721 static int use_module(struct module *a, struct module *b)
3723 struct module_use *use;
3724 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
3725 /* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
3726 int no_warn;
3727 #endif /* LINUX_VERSION_CODE */
3728 if (b == NULL || already_uses(a, b))
3729 return 1;
3731 if (strong_try_module_get(b) < 0)
3732 return 0;
3734 use = kmalloc(sizeof(*use), GFP_ATOMIC);
3735 if (!use) {
3736 module_put(b);
3737 return 0;
3739 use->module_which_uses = a;
3740 list_add(&use->list, &b->modules_which_use_me);
3741 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
3742 /* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
/* Best-effort sysfs "holders" link; failure is deliberately ignored. */
3743 no_warn = sysfs_create_link(b->holders_dir, &a->mkobj.kobj, a->name);
3744 #endif /* LINUX_VERSION_CODE */
3745 return 1;
3747 #else /* CONFIG_MODULE_UNLOAD */
/* Without module unloading, dependencies need no tracking. */
3748 static int use_module(struct module *a, struct module *b)
3750 return 1;
3752 #endif /* CONFIG_MODULE_UNLOAD */
3754 #ifndef CONFIG_MODVERSIONS
3755 #define symversion(base, idx) NULL
3756 #else
3757 #define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
3758 #endif
/*
 * Apply fn to every exported symbol in an array of symsearch sections,
 * stopping early (and returning true) as soon as fn returns true.
 */
3760 static bool each_symbol_in_section(const struct symsearch *arr,
3761 unsigned int arrsize,
3762 struct module *owner,
3763 bool (*fn)(const struct symsearch *syms,
3764 struct module *owner,
3765 unsigned int symnum, void *data),
3766 void *data)
3768 unsigned int i, j;
3770 for (j = 0; j < arrsize; j++) {
3771 for (i = 0; i < arr[j].stop - arr[j].start; i++)
3772 if (fn(&arr[j], owner, i, data))
3773 return true;
3776 return false;
3779 /* Returns true as soon as fn returns true, otherwise false. */
/*
 * Backport of the kernel's each_symbol(): visit the vmlinux export
 * tables (ksymtab / _gpl, plus _gpl_future and _unused variants where
 * the kernel has them), then each loaded module's export tables.
 * NOTE(review): the first success path returns 1 rather than true --
 * harmless for a bool, but inconsistent with the rest of the function.
 */
3780 static bool each_symbol(bool (*fn)(const struct symsearch *arr,
3781 struct module *owner,
3782 unsigned int symnum, void *data),
3783 void *data)
3785 struct module *mod;
3786 const struct symsearch arr[] = {
3787 { __start___ksymtab, __stop___ksymtab, __start___kcrctab,
3788 NOT_GPL_ONLY, false },
3789 { __start___ksymtab_gpl, __stop___ksymtab_gpl,
3790 __start___kcrctab_gpl,
3791 GPL_ONLY, false },
3792 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
3793 { __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
3794 __start___kcrctab_gpl_future,
3795 WILL_BE_GPL_ONLY, false },
3796 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
3797 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
3798 { __start___ksymtab_unused, __stop___ksymtab_unused,
3799 __start___kcrctab_unused,
3800 NOT_GPL_ONLY, true },
3801 { __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
3802 __start___kcrctab_unused_gpl,
3803 GPL_ONLY, true },
3804 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
3807 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
3808 return 1;
3810 list_for_each_entry(mod, &modules, list) {
3811 struct symsearch module_arr[] = {
3812 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
3813 NOT_GPL_ONLY, false },
3814 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
3815 mod->gpl_crcs,
3816 GPL_ONLY, false },
3817 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
3818 { mod->gpl_future_syms,
3819 mod->gpl_future_syms + mod->num_gpl_future_syms,
3820 mod->gpl_future_crcs,
3821 WILL_BE_GPL_ONLY, false },
3822 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
3823 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
3824 { mod->unused_syms,
3825 mod->unused_syms + mod->num_unused_syms,
3826 mod->unused_crcs,
3827 NOT_GPL_ONLY, true },
3828 { mod->unused_gpl_syms,
3829 mod->unused_gpl_syms + mod->num_unused_gpl_syms,
3830 mod->unused_gpl_crcs,
3831 GPL_ONLY, true },
3832 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
3835 if (each_symbol_in_section(module_arr, ARRAY_SIZE(module_arr),
3836 mod, fn, data))
3837 return true;
3839 return false;
/* Argument/result bundle threaded through each_symbol() by find_symbol(). */
3842 struct find_symbol_arg {
3843 /* Input */
3844 const char *name;
3845 bool gplok;
3846 bool warn;
3848 /* Output */
3849 struct module *owner;
3850 const unsigned long *crc;
3851 const struct kernel_symbol *sym;
/*
 * each_symbol() callback: match one exported symbol by name, enforcing
 * GPL-only licensing (with the upstream deprecation warnings for
 * future-GPL and UNUSED symbols) and filling in the find_symbol_arg
 * outputs on success.
 */
3854 static bool find_symbol_in_section(const struct symsearch *syms,
3855 struct module *owner,
3856 unsigned int symnum, void *data)
3858 struct find_symbol_arg *fsa = data;
3860 if (strcmp(syms->start[symnum].name, fsa->name) != 0)
3861 return false;
3863 if (!fsa->gplok) {
3864 if (syms->licence == GPL_ONLY)
3865 return false;
3866 if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
3867 printk(KERN_WARNING "Symbol %s is being used "
3868 "by a non-GPL module, which will not "
3869 "be allowed in the future\n", fsa->name);
3870 printk(KERN_WARNING "Please see the file "
3871 "Documentation/feature-removal-schedule.txt "
3872 "in the kernel source tree for more details.\n");
3876 #ifdef CONFIG_UNUSED_SYMBOLS
3877 if (syms->unused && fsa->warn) {
3878 printk(KERN_WARNING "Symbol %s is marked as UNUSED, "
3879 "however this module is using it.\n", fsa->name);
3880 printk(KERN_WARNING
3881 "This symbol will go away in the future.\n");
3882 printk(KERN_WARNING
3883 "Please evalute if this is the right api to use and if "
3884 "it really is, submit a report the linux kernel "
3885 "mailinglist together with submitting your code for "
3886 "inclusion.\n");
3888 #endif
3890 fsa->owner = owner;
3891 fsa->crc = symversion(syms->crcs, symnum);
3892 fsa->sym = &syms->start[symnum];
3893 return true;
3896 /* Find a symbol and return it, along with, (optional) crc and
3897 * (optional) module which owns it */
/*
 * Backport of the kernel's find_symbol(): search all export tables via
 * each_symbol()/find_symbol_in_section().  Returns NULL when the symbol
 * is not exported (or is GPL-only and gplok is false).
 */
3898 static const struct kernel_symbol *find_symbol(const char *name,
3899 struct module **owner,
3900 const unsigned long **crc,
3901 bool gplok, bool warn)
3903 struct find_symbol_arg fsa;
3905 fsa.name = name;
3906 fsa.gplok = gplok;
3907 fsa.warn = warn;
3909 if (each_symbol(find_symbol_in_section, &fsa)) {
3910 if (owner)
3911 *owner = fsa.owner;
3912 if (crc)
3913 *crc = fsa.crc;
3914 return fsa.sym;
3917 return NULL;
/*
 * Which module's core *data* contains addr?  The data region is the
 * core allocation past core_text_size (i.e. [core + core_text_size,
 * core + core_size)).  Returns NULL if no module matches.
 */
3920 static struct module *__module_data_address(unsigned long addr)
3922 struct module *mod;
3924 list_for_each_entry(mod, &modules, list) {
3925 if (addr >= (unsigned long)mod->module_core +
3926 mod->core_text_size &&
3927 addr < (unsigned long)mod->module_core + mod->core_size)
3928 return mod;
3930 return NULL;
3932 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
/*
 * Standard kobject attribute boilerplate for the per-update sysfs
 * directory: an update_attribute carries typed show/store callbacks,
 * and the generic sysfs ops recover the update and attribute from the
 * kobject/attribute pointers via container_of.
 */
3934 struct update_attribute {
3935 struct attribute attr;
3936 ssize_t (*show)(struct update *update, char *buf);
3937 ssize_t (*store)(struct update *update, const char *buf, size_t len);
3940 static ssize_t update_attr_show(struct kobject *kobj, struct attribute *attr,
3941 char *buf)
3943 struct update_attribute *attribute =
3944 container_of(attr, struct update_attribute, attr);
3945 struct update *update = container_of(kobj, struct update, kobj);
3946 if (attribute->show == NULL)
3947 return -EIO;
3948 return attribute->show(update, buf);
3951 static ssize_t update_attr_store(struct kobject *kobj, struct attribute *attr,
3952 const char *buf, size_t len)
3954 struct update_attribute *attribute =
3955 container_of(attr, struct update_attribute, attr);
3956 struct update *update = container_of(kobj, struct update, kobj);
3957 if (attribute->store == NULL)
3958 return -EIO;
3959 return attribute->store(update, buf, len);
3962 static struct sysfs_ops update_sysfs_ops = {
3963 .show = update_attr_show,
3964 .store = update_attr_store,
/* kobject release: last reference gone, free the whole update. */
3967 static void update_release(struct kobject *kobj)
3969 struct update *update;
3970 update = container_of(kobj, struct update, kobj);
3971 cleanup_ksplice_update(update);
/* sysfs `stage` read: report the update's lifecycle state. */
3974 static ssize_t stage_show(struct update *update, char *buf)
3976 switch (update->stage) {
3977 case STAGE_PREPARING:
3978 return snprintf(buf, PAGE_SIZE, "preparing\n");
3979 case STAGE_APPLIED:
3980 return snprintf(buf, PAGE_SIZE, "applied\n");
3981 case STAGE_REVERSED:
3982 return snprintf(buf, PAGE_SIZE, "reversed\n");
3984 return 0;
/* sysfs `abort_cause` read: map the last abort_t to a stable string. */
3987 static ssize_t abort_cause_show(struct update *update, char *buf)
3989 switch (update->abort_cause) {
3990 case OK:
3991 return snprintf(buf, PAGE_SIZE, "ok\n");
3992 case NO_MATCH:
3993 return snprintf(buf, PAGE_SIZE, "no_match\n");
3994 #ifdef KSPLICE_STANDALONE
3995 case BAD_SYSTEM_MAP:
3996 return snprintf(buf, PAGE_SIZE, "bad_system_map\n");
3997 #endif /* KSPLICE_STANDALONE */
3998 case CODE_BUSY:
3999 return snprintf(buf, PAGE_SIZE, "code_busy\n");
4000 case MODULE_BUSY:
4001 return snprintf(buf, PAGE_SIZE, "module_busy\n");
4002 case OUT_OF_MEMORY:
4003 return snprintf(buf, PAGE_SIZE, "out_of_memory\n");
4004 case FAILED_TO_FIND:
4005 return snprintf(buf, PAGE_SIZE, "failed_to_find\n");
4006 case ALREADY_REVERSED:
4007 return snprintf(buf, PAGE_SIZE, "already_reversed\n");
4008 case MISSING_EXPORT:
4009 return snprintf(buf, PAGE_SIZE, "missing_export\n");
4010 case UNEXPECTED_RUNNING_TASK:
4011 return snprintf(buf, PAGE_SIZE, "unexpected_running_task\n");
4012 case TARGET_NOT_LOADED:
4013 return snprintf(buf, PAGE_SIZE, "target_not_loaded\n");
4014 case CALL_FAILED:
4015 return snprintf(buf, PAGE_SIZE, "call_failed\n");
4016 case COLD_UPDATE_LOADED:
4017 return snprintf(buf, PAGE_SIZE, "cold_update_loaded\n");
4018 case UNEXPECTED:
4019 return snprintf(buf, PAGE_SIZE, "unexpected\n");
4020 default:
4021 return snprintf(buf, PAGE_SIZE, "unknown\n");
4023 return 0;
/*
 * sysfs `conflicts` read: one line per conflicting task, "name pid"
 * followed by the labels of the patched regions found on its stack.
 * module_mutex serializes against an apply/reverse rebuilding the list.
 */
4026 static ssize_t conflict_show(struct update *update, char *buf)
4028 const struct conflict *conf;
4029 const struct conflict_addr *ca;
4030 int used = 0;
4031 mutex_lock(&module_mutex);
4032 list_for_each_entry(conf, &update->conflicts, list) {
4033 used += snprintf(buf + used, PAGE_SIZE - used, "%s %d",
4034 conf->process_name, conf->pid);
4035 list_for_each_entry(ca, &conf->stack, list) {
4036 if (!ca->has_conflict)
4037 continue;
4038 used += snprintf(buf + used, PAGE_SIZE - used, " %s",
4039 ca->label);
4041 used += snprintf(buf + used, PAGE_SIZE - used, "\n");
4043 mutex_unlock(&module_mutex);
4044 return used;
4047 /* Used to pass maybe_cleanup_ksplice_update to kthread_run */
/* Runs in its own kthread so cleanup does not block the sysfs writer. */
4048 static int maybe_cleanup_ksplice_update_wrapper(void *updateptr)
4050 struct update *update = updateptr;
4051 mutex_lock(&module_mutex);
4052 maybe_cleanup_ksplice_update(update);
4053 mutex_unlock(&module_mutex);
4054 return 0;
/*
 * sysfs `stage` write: drives the update state machine.  Accepted
 * transitions (with or without a trailing newline in buf):
 *   preparing -> "applied"   : apply_update()
 *   applied   -> "reversed"  : reverse_patches()
 *   reversed  -> "cleanup"   : asynchronous teardown via kthread
 * Anything else is silently ignored; len is always consumed.
 */
4057 static ssize_t stage_store(struct update *update, const char *buf, size_t len)
4059 enum stage old_stage;
4060 mutex_lock(&module_mutex);
4061 old_stage = update->stage;
4062 if ((strncmp(buf, "applied", len) == 0 ||
4063 strncmp(buf, "applied\n", len) == 0) &&
4064 update->stage == STAGE_PREPARING)
4065 update->abort_cause = apply_update(update);
4066 else if ((strncmp(buf, "reversed", len) == 0 ||
4067 strncmp(buf, "reversed\n", len) == 0) &&
4068 update->stage == STAGE_APPLIED)
4069 update->abort_cause = reverse_patches(update);
4070 else if ((strncmp(buf, "cleanup", len) == 0 ||
4071 strncmp(buf, "cleanup\n", len) == 0) &&
4072 update->stage == STAGE_REVERSED)
4073 kthread_run(maybe_cleanup_ksplice_update_wrapper, update,
4074 "ksplice_cleanup_%s", update->kid)
4076 if (old_stage != STAGE_REVERSED && update->abort_cause == OK)
4077 printk(KERN_INFO "ksplice: Update %s %s successfully\n",
4078 update->kid,
4079 update->stage == STAGE_APPLIED ? "applied" : "reversed");
4080 mutex_unlock(&module_mutex);
4081 return len;
/* sysfs `debug` and `partial` knobs: plain integer show/store pairs
 * parsed with strict_strtoul (rejecting trailing junk). */
4084 static ssize_t debug_show(struct update *update, char *buf)
4086 return snprintf(buf, PAGE_SIZE, "%d\n", update->debug);
4089 static ssize_t debug_store(struct update *update, const char *buf, size_t len)
4091 unsigned long l;
4092 int ret = strict_strtoul(buf, 10, &l);
4093 if (ret != 0)
4094 return ret;
4095 update->debug = l;
4096 return len;
4099 static ssize_t partial_show(struct update *update, char *buf)
4101 return snprintf(buf, PAGE_SIZE, "%d\n", update->partial);
4104 static ssize_t partial_store(struct update *update, const char *buf, size_t len)
4106 unsigned long l;
4107 int ret = strict_strtoul(buf, 10, &l);
4108 if (ret != 0)
4109 return ret;
4110 update->partial = l;
4111 return len;
/*
 * sysfs attribute table for each update's kobject directory
 * (stage and debug/partial are writable; abort_cause and conflicts
 * are read-only diagnostics).
 */
4114 static struct update_attribute stage_attribute =
4115 __ATTR(stage, 0600, stage_show, stage_store);
4116 static struct update_attribute abort_cause_attribute =
4117 __ATTR(abort_cause, 0400, abort_cause_show, NULL);
4118 static struct update_attribute debug_attribute =
4119 __ATTR(debug, 0600, debug_show, debug_store);
4120 static struct update_attribute partial_attribute =
4121 __ATTR(partial, 0600, partial_show, partial_store);
4122 static struct update_attribute conflict_attribute =
4123 __ATTR(conflicts, 0400, conflict_show, NULL);
4125 static struct attribute *update_attrs[] = {
4126 &stage_attribute.attr,
4127 &abort_cause_attribute.attr,
4128 &debug_attribute.attr,
4129 &partial_attribute.attr,
4130 &conflict_attribute.attr,
4131 NULL
4134 static struct kobj_type update_ktype = {
4135 .sysfs_ops = &update_sysfs_ops,
4136 .release = update_release,
4137 .default_attrs = update_attrs,
4140 #ifdef KSPLICE_STANDALONE
/* Standalone mode: module parameter controlling the debug verbosity of
 * the bootstrap update. */
4141 static int debug;
4142 module_param(debug, int, 0600);
4143 MODULE_PARM_DESC(debug, "Debug level");
4145 extern struct ksplice_system_map ksplice_system_map[], ksplice_system_map_end[];
/*
 * The pack used to bootstrap the standalone ksplice core itself:
 * target NULL means the running kernel, and the System.map data is
 * linked into this very module.
 */
4147 static struct ksplice_pack bootstrap_pack = {
4148 .name = "ksplice_" __stringify(KSPLICE_KID),
4149 .kid = "init_" __stringify(KSPLICE_KID),
4150 .target_name = NULL,
4151 .target = NULL,
4152 .map_printk = MAP_PRINTK,
4153 .primary = THIS_MODULE,
4154 .primary_system_map = ksplice_system_map,
4155 .primary_system_map_end = ksplice_system_map_end,
4157 #endif /* KSPLICE_STANDALONE */
/*
 * Module init.  Standalone: sort the linked-in System.map, create the
 * bootstrap update, and resolve the core's own relocations; only if
 * that succeeds is `bootstrapped` set, enabling real operation.  With
 * kernel support: just create the /sys/kernel/ksplice directory.
 */
4159 static int init_ksplice(void)
4161 #ifdef KSPLICE_STANDALONE
4162 struct ksplice_pack *pack = &bootstrap_pack;
4163 pack->update = init_ksplice_update(pack->kid);
4164 sort(pack->primary_system_map,
4165 pack->primary_system_map_end - pack->primary_system_map,
4166 sizeof(struct ksplice_system_map), compare_system_map, NULL);
4167 if (pack->update == NULL)
4168 return -ENOMEM;
4169 add_to_update(pack, pack->update);
4170 pack->update->debug = debug;
4171 pack->update->abort_cause =
4172 apply_relocs(pack, ksplice_init_relocs, ksplice_init_relocs_end);
4173 if (pack->update->abort_cause == OK)
4174 bootstrapped = true;
/* The bootstrap update is single-use; drop it either way. */
4175 cleanup_ksplice_update(bootstrap_pack.update);
4176 #else /* !KSPLICE_STANDALONE */
4177 ksplice_kobj = kobject_create_and_add("ksplice", kernel_kobj);
4178 if (ksplice_kobj == NULL)
4179 return -ENOMEM;
4180 #endif /* KSPLICE_STANDALONE */
4181 return 0;
/* Module exit: drop the /sys/kernel/ksplice kobject (non-standalone
 * builds only; standalone has nothing to tear down here). */
4184 static void cleanup_ksplice(void)
4186 #ifndef KSPLICE_STANDALONE
4187 kobject_put(ksplice_kobj);
4188 #endif /* KSPLICE_STANDALONE */
4191 module_init(init_ksplice);
4192 module_exit(cleanup_ksplice);
4194 MODULE_AUTHOR("Ksplice, Inc.");
4195 MODULE_DESCRIPTION("Ksplice rebootless update system");
4196 #ifdef KSPLICE_VERSION
4197 MODULE_VERSION(KSPLICE_VERSION);
4198 #endif
4199 MODULE_LICENSE("GPL v2");