/*
 * kmodsrc/ksplice.c (from ksplice.git)
 * Extraction residue from a gitweb blob view; commit subject was
 * "Read the modules list with RCU if appropriate."
 * Blob: 87857a200417cf77515c4559bc12c04b5e74f5b8
 */
1 /* Copyright (C) 2007-2009 Ksplice, Inc.
2 * Authors: Jeff Arnold, Anders Kaseorg, Tim Abbott
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License, version 2.
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
15 * 02110-1301, USA.
18 #include <linux/module.h>
19 #include <linux/version.h>
20 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
21 #include <linux/bug.h>
22 #else /* LINUX_VERSION_CODE */
23 /* 7664c5a1da4711bb6383117f51b94c8dc8f3f1cd was after 2.6.19 */
24 #endif /* LINUX_VERSION_CODE */
25 #include <linux/ctype.h>
26 #if defined CONFIG_DEBUG_FS || LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
27 #include <linux/debugfs.h>
28 #else /* CONFIG_DEBUG_FS */
29 /* a7a76cefc4b12bb6508afa4c77f11c2752cc365d was after 2.6.11 */
30 #endif /* CONFIG_DEBUG_FS */
31 #include <linux/errno.h>
32 #include <linux/kallsyms.h>
33 #include <linux/kobject.h>
34 #include <linux/kthread.h>
35 #include <linux/pagemap.h>
36 #include <linux/sched.h>
37 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
38 #include <linux/sort.h>
39 #else /* LINUX_VERSION_CODE < */
40 /* 8c63b6d337534a6b5fb111dc27d0850f535118c0 was after 2.6.11 */
41 #endif /* LINUX_VERSION_CODE */
42 #include <linux/stop_machine.h>
43 #include <linux/sysfs.h>
44 #include <linux/time.h>
45 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
46 #include <linux/uaccess.h>
47 #else /* LINUX_VERSION_CODE < */
48 /* linux/uaccess.h doesn't exist in kernels before 2.6.18 */
49 #include <asm/uaccess.h>
50 #endif /* LINUX_VERSION_CODE */
51 #include <linux/vmalloc.h>
52 #ifdef KSPLICE_STANDALONE
53 #include "ksplice.h"
54 #else /* !KSPLICE_STANDALONE */
55 #include <linux/ksplice.h>
56 #endif /* KSPLICE_STANDALONE */
57 #ifdef KSPLICE_NEED_PARAINSTRUCTIONS
58 #include <asm/alternative.h>
59 #endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
61 #if defined(KSPLICE_STANDALONE) && \
62 !defined(CONFIG_KSPLICE) && !defined(CONFIG_KSPLICE_MODULE)
63 #define KSPLICE_NO_KERNEL_SUPPORT 1
64 #endif /* KSPLICE_STANDALONE && !CONFIG_KSPLICE && !CONFIG_KSPLICE_MODULE */
/* Lifecycle stage of an update, exposed through sysfs */
enum stage {
	STAGE_PREPARING,	/* the update is not yet applied */
	STAGE_APPLIED,		/* the update is applied */
	STAGE_REVERSED,		/* the update has been applied and reversed */
};
/* parameter to modify run-pre matching */
enum run_pre_mode {
	RUN_PRE_INITIAL,	/* dry run (only change temp_labelvals) */
	RUN_PRE_DEBUG,		/* dry run with byte-by-byte debugging */
	RUN_PRE_FINAL,		/* finalizes the matching */
#ifdef KSPLICE_STANDALONE
	RUN_PRE_SILENT,
#endif /* KSPLICE_STANDALONE */
};
82 enum { NOVAL, TEMP, VAL };
84 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
85 /* 5d7b32de9935c65ca8285ac6ec2382afdbb5d479 was after 2.6.8 */
86 #define __bitwise__
87 #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
88 /* af4ca457eaf2d6682059c18463eb106e2ce58198 was after 2.6.14 */
89 #define __bitwise__ __bitwise
90 #endif
92 typedef int __bitwise__ abort_t;
94 #define OK ((__force abort_t) 0)
95 #define NO_MATCH ((__force abort_t) 1)
96 #define CODE_BUSY ((__force abort_t) 2)
97 #define MODULE_BUSY ((__force abort_t) 3)
98 #define OUT_OF_MEMORY ((__force abort_t) 4)
99 #define FAILED_TO_FIND ((__force abort_t) 5)
100 #define ALREADY_REVERSED ((__force abort_t) 6)
101 #define MISSING_EXPORT ((__force abort_t) 7)
102 #define UNEXPECTED_RUNNING_TASK ((__force abort_t) 8)
103 #define UNEXPECTED ((__force abort_t) 9)
104 #define TARGET_NOT_LOADED ((__force abort_t) 10)
105 #define CALL_FAILED ((__force abort_t) 11)
106 #define COLD_UPDATE_LOADED ((__force abort_t) 12)
107 #ifdef KSPLICE_STANDALONE
108 #define BAD_SYSTEM_MAP ((__force abort_t) 13)
109 #endif /* KSPLICE_STANDALONE */
111 struct update {
112 const char *kid;
113 const char *name;
114 struct kobject kobj;
115 enum stage stage;
116 abort_t abort_cause;
117 int debug;
118 #ifdef CONFIG_DEBUG_FS
119 struct debugfs_blob_wrapper debug_blob;
120 struct dentry *debugfs_dentry;
121 #else /* !CONFIG_DEBUG_FS */
122 bool debug_continue_line;
123 #endif /* CONFIG_DEBUG_FS */
124 bool partial; /* is it OK if some target mods aren't loaded */
125 struct list_head packs; /* packs for loaded target mods */
126 struct list_head unused_packs; /* packs for non-loaded target mods */
127 struct list_head conflicts;
128 struct list_head list;
129 struct list_head ksplice_module_list;
132 /* a process conflicting with an update */
133 struct conflict {
134 const char *process_name;
135 pid_t pid;
136 struct list_head stack;
137 struct list_head list;
140 /* an address on the stack of a conflict */
141 struct conflict_addr {
142 unsigned long addr; /* the address on the stack */
143 bool has_conflict; /* does this address in particular conflict? */
144 const char *label; /* the label of the conflicting safety_record */
145 struct list_head list;
148 #if defined(CONFIG_DEBUG_FS) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
149 /* Old kernels don't have debugfs_create_blob */
150 struct debugfs_blob_wrapper {
151 void *data;
152 unsigned long size;
154 #endif /* CONFIG_DEBUG_FS && LINUX_VERSION_CODE */
156 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
157 /* 930631edd4b1fe2781d9fe90edbe35d89dfc94cc was after 2.6.18 */
158 #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
159 #endif
161 struct labelval {
162 struct list_head list;
163 struct ksplice_symbol *symbol;
164 struct list_head *saved_vals;
167 /* region to be checked for conflicts in the stack check */
168 struct safety_record {
169 struct list_head list;
170 const char *label;
171 unsigned long addr; /* the address to be checked for conflicts
172 * (e.g. an obsolete function's starting addr)
174 unsigned long size; /* the size of the region to be checked */
177 /* possible value for a symbol */
178 struct candidate_val {
179 struct list_head list;
180 unsigned long val;
183 /* private struct used by init_symbol_array */
184 struct ksplice_lookup {
185 /* input */
186 struct ksplice_pack *pack;
187 struct ksplice_symbol **arr;
188 size_t size;
189 /* output */
190 abort_t ret;
#ifdef KSPLICE_NO_KERNEL_SUPPORT
/* One exported-symbol table section, as in kernel/module.c's each_symbol() */
struct symsearch {
	const struct kernel_symbol *start, *stop;
	const unsigned long *crcs;
	enum {
		NOT_GPL_ONLY,
		GPL_ONLY,
		WILL_BE_GPL_ONLY,
	} licence;
	bool unused;
};
#endif /* KSPLICE_NO_KERNEL_SUPPORT */
206 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
207 /* c33fa9f5609e918824446ef9a75319d4a802f1f4 was after 2.6.25 */
209 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
210 /* 2fff0a48416af891dce38fd425246e337831e0bb was after 2.6.19 */
211 static bool virtual_address_mapped(unsigned long addr)
213 char retval;
214 return probe_kernel_address(addr, retval) != -EFAULT;
216 #else /* LINUX_VERSION_CODE < */
217 static bool virtual_address_mapped(unsigned long addr);
218 #endif /* LINUX_VERSION_CODE */
220 static long probe_kernel_read(void *dst, void *src, size_t size)
222 if (size == 0)
223 return 0;
224 if (!virtual_address_mapped((unsigned long)src) ||
225 !virtual_address_mapped((unsigned long)src + size - 1))
226 return -EFAULT;
228 memcpy(dst, src, size);
229 return 0;
231 #endif /* LINUX_VERSION_CODE */
233 static LIST_HEAD(updates);
234 #ifdef KSPLICE_STANDALONE
235 #if defined(CONFIG_KSPLICE) || defined(CONFIG_KSPLICE_MODULE)
236 extern struct list_head ksplice_module_list;
237 #else /* !CONFIG_KSPLICE */
238 LIST_HEAD(ksplice_module_list);
239 #endif /* CONFIG_KSPLICE */
240 #else /* !KSPLICE_STANDALONE */
241 LIST_HEAD(ksplice_module_list);
242 EXPORT_SYMBOL_GPL(ksplice_module_list);
243 static struct kobject *ksplice_kobj;
244 #endif /* KSPLICE_STANDALONE */
246 static struct kobj_type update_ktype;
248 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
249 /* Old kernels do not have kcalloc
250 * e629946abd0bb8266e9c3d0fd1bff2ef8dec5443 was after 2.6.8
252 static void *kcalloc(size_t n, size_t size, typeof(GFP_KERNEL) flags)
254 char *mem;
255 if (n != 0 && size > ULONG_MAX / n)
256 return NULL;
257 mem = kmalloc(n * size, flags);
258 if (mem)
259 memset(mem, 0, n * size);
260 return mem;
262 #endif /* LINUX_VERSION_CODE */
264 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
265 /* 8c63b6d337534a6b5fb111dc27d0850f535118c0 was after 2.6.11 */
266 static void u32_swap(void *a, void *b, int size)
268 u32 t = *(u32 *)a;
269 *(u32 *)a = *(u32 *)b;
270 *(u32 *)b = t;
/*
 * Byte-by-byte swap of two size-byte regions.  The original incremented
 * the void * parameters directly (a GNU extension, undefined in standard
 * C); use char * cursors instead, which is both standard and clearer.
 * size must be >= 1 (the do/while matches the upstream contract).
 */
static void generic_swap(void *a, void *b, int size)
{
	char *x = a, *y = b;

	do {
		char t = *x;
		*x++ = *y;
		*y++ = t;
	} while (--size > 0);
}
285 * sort - sort an array of elements
286 * @base: pointer to data to sort
287 * @num: number of elements
288 * @size: size of each element
289 * @cmp: pointer to comparison function
290 * @swap: pointer to swap function or NULL
292 * This function does a heapsort on the given array. You may provide a
293 * swap function optimized to your element type.
295 * Sorting time is O(n log n) both on average and worst-case. While
296 * qsort is about 20% faster on average, it suffers from exploitable
297 * O(n*n) worst-case behavior and extra memory requirements that make
298 * it less suitable for kernel use.
301 void sort(void *base, size_t num, size_t size,
302 int (*cmp)(const void *, const void *),
303 void (*swap)(void *, void *, int size))
305 /* pre-scale counters for performance */
306 int i = (num / 2 - 1) * size, n = num * size, c, r;
308 if (!swap)
309 swap = (size == 4 ? u32_swap : generic_swap);
311 /* heapify */
312 for (; i >= 0; i -= size) {
313 for (r = i; r * 2 + size < n; r = c) {
314 c = r * 2 + size;
315 if (c < n - size && cmp(base + c, base + c + size) < 0)
316 c += size;
317 if (cmp(base + r, base + c) >= 0)
318 break;
319 swap(base + r, base + c, size);
323 /* sort */
324 for (i = n - size; i > 0; i -= size) {
325 swap(base, base + i, size);
326 for (r = 0; r * 2 + size < i; r = c) {
327 c = r * 2 + size;
328 if (c < i - size && cmp(base + c, base + c + size) < 0)
329 c += size;
330 if (cmp(base + r, base + c) >= 0)
331 break;
332 swap(base + r, base + c, size);
336 #endif /* LINUX_VERSION_CODE < */
338 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13)
339 /* Old kernels do not have kstrdup
340 * 543537bd922692bc978e2e356fcd8bfc9c2ee7d5 was 2.6.13-rc4
342 static char *kstrdup(const char *s, typeof(GFP_KERNEL) gfp)
344 size_t len;
345 char *buf;
347 if (!s)
348 return NULL;
350 len = strlen(s) + 1;
351 buf = kmalloc(len, gfp);
352 if (buf)
353 memcpy(buf, s, len);
354 return buf;
356 #endif /* LINUX_VERSION_CODE */
358 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
359 /* Old kernels use semaphore instead of mutex
360 * 97d1f15b7ef52c1e9c28dc48b454024bb53a5fd2 was after 2.6.16
362 #define mutex semaphore
363 #define mutex_lock down
364 #define mutex_unlock up
365 #endif /* LINUX_VERSION_CODE */
367 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
368 /* 11443ec7d9286dd25663516436a14edfb5f43857 was after 2.6.21 */
369 static char * __attribute_used__
370 kvasprintf(typeof(GFP_KERNEL) gfp, const char *fmt, va_list ap)
372 unsigned int len;
373 char *p, dummy[1];
374 va_list aq;
376 va_copy(aq, ap);
377 len = vsnprintf(dummy, 0, fmt, aq);
378 va_end(aq);
380 p = kmalloc(len + 1, gfp);
381 if (!p)
382 return NULL;
384 vsnprintf(p, len + 1, fmt, ap);
386 return p;
388 #endif
390 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
391 /* e905914f96e11862b130dd229f73045dad9a34e8 was after 2.6.17 */
392 static char * __attribute__((format (printf, 2, 3)))
393 kasprintf(typeof(GFP_KERNEL) gfp, const char *fmt, ...)
395 va_list ap;
396 char *p;
398 va_start(ap, fmt);
399 p = kvasprintf(gfp, fmt, ap);
400 va_end(ap);
402 return p;
404 #endif
406 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
407 /* 06b2a76d25d3cfbd14680021c1d356c91be6904e was after 2.6.24 */
408 static int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
410 char *tail;
411 unsigned long val;
412 size_t len;
414 *res = 0;
415 len = strlen(cp);
416 if (len == 0)
417 return -EINVAL;
419 val = simple_strtoul(cp, &tail, base);
420 if ((*tail == '\0') ||
421 ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
422 *res = val;
423 return 0;
426 return -EINVAL;
428 #endif
430 #ifndef task_thread_info
431 #define task_thread_info(task) (task)->thread_info
432 #endif /* !task_thread_info */
434 #ifdef KSPLICE_STANDALONE
436 static bool bootstrapped = false;
438 #ifdef CONFIG_KALLSYMS
439 extern unsigned long kallsyms_addresses[], kallsyms_num_syms;
440 extern u8 kallsyms_names[];
441 #endif /* CONFIG_KALLSYMS */
443 /* defined by ksplice-create */
444 extern const struct ksplice_reloc ksplice_init_relocs[],
445 ksplice_init_relocs_end[];
447 /* Obtained via System.map */
448 extern struct list_head modules;
449 extern struct mutex module_mutex;
450 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) && defined(CONFIG_UNUSED_SYMBOLS)
451 /* f71d20e961474dde77e6558396efb93d6ac80a4b was after 2.6.17 */
452 #define KSPLICE_KSYMTAB_UNUSED_SUPPORT 1
453 #endif /* LINUX_VERSION_CODE */
454 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
455 /* 9f28bb7e1d0188a993403ab39b774785892805e1 was after 2.6.16 */
456 #define KSPLICE_KSYMTAB_FUTURE_SUPPORT 1
457 #endif /* LINUX_VERSION_CODE */
458 extern const struct kernel_symbol __start___ksymtab[];
459 extern const struct kernel_symbol __stop___ksymtab[];
460 extern const unsigned long __start___kcrctab[];
461 extern const struct kernel_symbol __start___ksymtab_gpl[];
462 extern const struct kernel_symbol __stop___ksymtab_gpl[];
463 extern const unsigned long __start___kcrctab_gpl[];
464 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
465 extern const struct kernel_symbol __start___ksymtab_unused[];
466 extern const struct kernel_symbol __stop___ksymtab_unused[];
467 extern const unsigned long __start___kcrctab_unused[];
468 extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
469 extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
470 extern const unsigned long __start___kcrctab_unused_gpl[];
471 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
472 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
473 extern const struct kernel_symbol __start___ksymtab_gpl_future[];
474 extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
475 extern const unsigned long __start___kcrctab_gpl_future[];
476 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
478 #endif /* KSPLICE_STANDALONE */
480 static struct update *init_ksplice_update(const char *kid);
481 static void cleanup_ksplice_update(struct update *update);
482 static void maybe_cleanup_ksplice_update(struct update *update);
483 static void add_to_update(struct ksplice_pack *pack, struct update *update);
484 static int ksplice_sysfs_init(struct update *update);
486 /* Preparing the relocations and patches for application */
487 static abort_t apply_update(struct update *update);
488 static abort_t prepare_pack(struct ksplice_pack *pack);
489 static abort_t finalize_pack(struct ksplice_pack *pack);
490 static abort_t finalize_patches(struct ksplice_pack *pack);
491 static abort_t add_dependency_on_address(struct ksplice_pack *pack,
492 unsigned long addr);
493 static abort_t map_trampoline_pages(struct update *update);
494 static void unmap_trampoline_pages(struct update *update);
495 static void *map_writable(void *addr, size_t len);
496 static abort_t apply_relocs(struct ksplice_pack *pack,
497 const struct ksplice_reloc *relocs,
498 const struct ksplice_reloc *relocs_end);
499 static abort_t apply_reloc(struct ksplice_pack *pack,
500 const struct ksplice_reloc *r);
501 static abort_t apply_howto_reloc(struct ksplice_pack *pack,
502 const struct ksplice_reloc *r);
503 static abort_t apply_howto_date(struct ksplice_pack *pack,
504 const struct ksplice_reloc *r);
505 static abort_t read_reloc_value(struct ksplice_pack *pack,
506 const struct ksplice_reloc *r,
507 unsigned long addr, unsigned long *valp);
508 static abort_t write_reloc_value(struct ksplice_pack *pack,
509 const struct ksplice_reloc *r,
510 unsigned long addr, unsigned long sym_addr);
511 static abort_t create_module_list_entry(struct ksplice_pack *pack,
512 bool to_be_applied);
513 static void cleanup_module_list_entries(struct update *update);
514 static void __attribute__((noreturn)) ksplice_deleted(void);
516 /* run-pre matching */
517 static abort_t match_pack_sections(struct ksplice_pack *pack,
518 bool consider_data_sections);
519 static abort_t find_section(struct ksplice_pack *pack,
520 struct ksplice_section *sect);
521 static abort_t try_addr(struct ksplice_pack *pack,
522 struct ksplice_section *sect,
523 unsigned long run_addr,
524 struct list_head *safety_records,
525 enum run_pre_mode mode);
526 static abort_t run_pre_cmp(struct ksplice_pack *pack,
527 const struct ksplice_section *sect,
528 unsigned long run_addr,
529 struct list_head *safety_records,
530 enum run_pre_mode mode);
531 #ifndef CONFIG_FUNCTION_DATA_SECTIONS
532 /* defined in arch/ARCH/kernel/ksplice-arch.c */
533 static abort_t arch_run_pre_cmp(struct ksplice_pack *pack,
534 struct ksplice_section *sect,
535 unsigned long run_addr,
536 struct list_head *safety_records,
537 enum run_pre_mode mode);
538 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
539 static void print_bytes(struct ksplice_pack *pack,
540 const unsigned char *run, int runc,
541 const unsigned char *pre, int prec);
542 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
543 static abort_t brute_search(struct ksplice_pack *pack,
544 struct ksplice_section *sect,
545 const void *start, unsigned long len,
546 struct list_head *vals);
547 static abort_t brute_search_all(struct ksplice_pack *pack,
548 struct ksplice_section *sect,
549 struct list_head *vals);
550 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
551 static const struct ksplice_reloc *
552 init_reloc_search(struct ksplice_pack *pack,
553 const struct ksplice_section *sect);
554 static const struct ksplice_reloc *find_reloc(const struct ksplice_reloc *start,
555 const struct ksplice_reloc *end,
556 unsigned long address,
557 unsigned long size);
558 static abort_t lookup_reloc(struct ksplice_pack *pack,
559 const struct ksplice_reloc **fingerp,
560 unsigned long addr,
561 const struct ksplice_reloc **relocp);
562 static abort_t handle_reloc(struct ksplice_pack *pack,
563 const struct ksplice_section *sect,
564 const struct ksplice_reloc *r,
565 unsigned long run_addr, enum run_pre_mode mode);
566 static abort_t handle_howto_date(struct ksplice_pack *pack,
567 const struct ksplice_section *sect,
568 const struct ksplice_reloc *r,
569 unsigned long run_addr,
570 enum run_pre_mode mode);
571 static abort_t handle_howto_reloc(struct ksplice_pack *pack,
572 const struct ksplice_section *sect,
573 const struct ksplice_reloc *r,
574 unsigned long run_addr,
575 enum run_pre_mode mode);
576 static struct ksplice_section *symbol_section(struct ksplice_pack *pack,
577 const struct ksplice_symbol *sym);
578 static int compare_section_labels(const void *va, const void *vb);
579 static int symbol_section_bsearch_compare(const void *a, const void *b);
580 static const struct ksplice_reloc *patch_reloc(struct ksplice_pack *pack,
581 const struct ksplice_patch *p);
583 /* Computing possible addresses for symbols */
584 static abort_t lookup_symbol(struct ksplice_pack *pack,
585 const struct ksplice_symbol *ksym,
586 struct list_head *vals);
587 static void cleanup_symbol_arrays(struct ksplice_pack *pack);
588 static abort_t init_symbol_arrays(struct ksplice_pack *pack);
589 static abort_t init_symbol_array(struct ksplice_pack *pack,
590 struct ksplice_symbol *start,
591 struct ksplice_symbol *end);
592 static abort_t uniquify_symbols(struct ksplice_pack *pack);
593 static abort_t add_matching_values(struct ksplice_lookup *lookup,
594 const char *sym_name, unsigned long sym_val);
595 static bool add_export_values(const struct symsearch *syms,
596 struct module *owner,
597 unsigned int symnum, void *data);
598 static int symbolp_bsearch_compare(const void *key, const void *elt);
599 static int compare_symbolp_names(const void *a, const void *b);
600 static int compare_symbolp_labels(const void *a, const void *b);
601 #ifdef CONFIG_KALLSYMS
602 static int add_kallsyms_values(void *data, const char *name,
603 struct module *owner, unsigned long val);
604 #endif /* CONFIG_KALLSYMS */
605 #ifdef KSPLICE_STANDALONE
606 static abort_t
607 add_system_map_candidates(struct ksplice_pack *pack,
608 const struct ksplice_system_map *start,
609 const struct ksplice_system_map *end,
610 const char *label, struct list_head *vals);
611 static int compare_system_map(const void *a, const void *b);
612 static int system_map_bsearch_compare(const void *key, const void *elt);
613 #endif /* KSPLICE_STANDALONE */
614 static abort_t new_export_lookup(struct ksplice_pack *ipack, const char *name,
615 struct list_head *vals);
617 /* Atomic update trampoline insertion and removal */
618 static abort_t apply_patches(struct update *update);
619 static abort_t reverse_patches(struct update *update);
620 static int __apply_patches(void *update);
621 static int __reverse_patches(void *update);
622 static abort_t check_each_task(struct update *update);
623 static abort_t check_task(struct update *update,
624 const struct task_struct *t, bool rerun);
625 static abort_t check_stack(struct update *update, struct conflict *conf,
626 const struct thread_info *tinfo,
627 const unsigned long *stack);
628 static abort_t check_address(struct update *update,
629 struct conflict *conf, unsigned long addr);
630 static abort_t check_record(struct conflict_addr *ca,
631 const struct safety_record *rec,
632 unsigned long addr);
633 static bool is_stop_machine(const struct task_struct *t);
634 static void cleanup_conflicts(struct update *update);
635 static void print_conflicts(struct update *update);
636 static void insert_trampoline(struct ksplice_patch *p);
637 static abort_t verify_trampoline(struct ksplice_pack *pack,
638 const struct ksplice_patch *p);
639 static void remove_trampoline(const struct ksplice_patch *p);
641 static abort_t create_labelval(struct ksplice_pack *pack,
642 struct ksplice_symbol *ksym,
643 unsigned long val, int status);
644 static abort_t create_safety_record(struct ksplice_pack *pack,
645 const struct ksplice_section *sect,
646 struct list_head *record_list,
647 unsigned long run_addr,
648 unsigned long run_size);
649 static abort_t add_candidate_val(struct ksplice_pack *pack,
650 struct list_head *vals, unsigned long val);
651 static void release_vals(struct list_head *vals);
652 static void set_temp_labelvals(struct ksplice_pack *pack, int status_val);
654 static int contains_canary(struct ksplice_pack *pack, unsigned long blank_addr,
655 const struct ksplice_reloc_howto *howto);
656 static unsigned long follow_trampolines(struct ksplice_pack *pack,
657 unsigned long addr);
658 static bool patches_module(const struct module *a, const struct module *b);
659 static bool starts_with(const char *str, const char *prefix);
660 static bool singular(struct list_head *list);
661 static void *bsearch(const void *key, const void *base, size_t n,
662 size_t size, int (*cmp)(const void *key, const void *elt));
663 static int compare_relocs(const void *a, const void *b);
664 static int reloc_bsearch_compare(const void *key, const void *elt);
666 /* Debugging */
667 static abort_t init_debug_buf(struct update *update);
668 static void clear_debug_buf(struct update *update);
669 static int __attribute__((format(printf, 2, 3)))
670 _ksdebug(struct update *update, const char *fmt, ...);
671 #define ksdebug(pack, fmt, ...) \
672 _ksdebug(pack->update, fmt, ## __VA_ARGS__)
674 #ifdef KSPLICE_NO_KERNEL_SUPPORT
675 /* Functions defined here that will be exported in later kernels */
676 #ifdef CONFIG_KALLSYMS
677 static int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
678 struct module *, unsigned long),
679 void *data);
680 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
681 static unsigned int kallsyms_expand_symbol(unsigned int off, char *result);
682 #endif /* LINUX_VERSION_CODE */
683 static int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
684 struct module *,
685 unsigned long),
686 void *data);
687 #endif /* CONFIG_KALLSYMS */
688 static struct module *find_module(const char *name);
689 static int use_module(struct module *a, struct module *b);
690 static const struct kernel_symbol *find_symbol(const char *name,
691 struct module **owner,
692 const unsigned long **crc,
693 bool gplok, bool warn);
694 static bool each_symbol(bool (*fn)(const struct symsearch *arr,
695 struct module *owner,
696 unsigned int symnum, void *data),
697 void *data);
698 static struct module *__module_address(unsigned long addr);
699 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
701 /* Architecture-specific functions defined in arch/ARCH/kernel/ksplice-arch.c */
703 /* Prepare a trampoline for the given patch */
704 static abort_t prepare_trampoline(struct ksplice_pack *pack,
705 struct ksplice_patch *p);
706 /* What address does the trampoline at addr jump to? */
707 static abort_t trampoline_target(struct ksplice_pack *pack, unsigned long addr,
708 unsigned long *new_addr);
709 /* Hook to handle pc-relative jumps inserted by parainstructions */
710 static abort_t handle_paravirt(struct ksplice_pack *pack, unsigned long pre,
711 unsigned long run, int *matched);
712 /* Called for relocations of type KSPLICE_HOWTO_BUG */
713 static abort_t handle_bug(struct ksplice_pack *pack,
714 const struct ksplice_reloc *r,
715 unsigned long run_addr);
716 /* Called for relocations of type KSPLICE_HOWTO_EXTABLE */
717 static abort_t handle_extable(struct ksplice_pack *pack,
718 const struct ksplice_reloc *r,
719 unsigned long run_addr);
720 /* Is address p on the stack of the given thread? */
721 static bool valid_stack_ptr(const struct thread_info *tinfo, const void *p);
723 #ifndef KSPLICE_STANDALONE
724 #include "ksplice-arch.c"
725 #elif defined CONFIG_X86
726 #include "x86/ksplice-arch.c"
727 #elif defined CONFIG_ARM
728 #include "arm/ksplice-arch.c"
729 #endif /* KSPLICE_STANDALONE */
/* Delete and kfree every entry of a list of `type`s linked via `member` */
#define clear_list(head, type, member)				\
	do {							\
		struct list_head *_pos, *_n;			\
		list_for_each_safe(_pos, _n, head) {		\
			list_del(_pos);				\
			kfree(list_entry(_pos, type, member));	\
		}						\
	} while (0)
741 * init_ksplice_pack() - Initializes a ksplice pack
742 * @pack: The pack to be initialized. All of the public fields of the
743 * pack and its associated data structures should be populated
744 * before this function is called. The values of the private
745 * fields will be ignored.
747 int init_ksplice_pack(struct ksplice_pack *pack)
749 struct update *update;
750 struct ksplice_patch *p;
751 struct ksplice_section *s;
752 int ret = 0;
754 #ifdef KSPLICE_STANDALONE
755 if (!bootstrapped)
756 return -1;
757 #endif /* KSPLICE_STANDALONE */
759 INIT_LIST_HEAD(&pack->temp_labelvals);
760 INIT_LIST_HEAD(&pack->safety_records);
762 sort(pack->helper_relocs,
763 pack->helper_relocs_end - pack->helper_relocs,
764 sizeof(*pack->helper_relocs), compare_relocs, NULL);
765 sort(pack->primary_relocs,
766 pack->primary_relocs_end - pack->primary_relocs,
767 sizeof(*pack->primary_relocs), compare_relocs, NULL);
768 sort(pack->helper_sections,
769 pack->helper_sections_end - pack->helper_sections,
770 sizeof(*pack->helper_sections), compare_section_labels, NULL);
771 #ifdef KSPLICE_STANDALONE
772 sort(pack->primary_system_map,
773 pack->primary_system_map_end - pack->primary_system_map,
774 sizeof(*pack->primary_system_map), compare_system_map, NULL);
775 sort(pack->helper_system_map,
776 pack->helper_system_map_end - pack->helper_system_map,
777 sizeof(*pack->helper_system_map), compare_system_map, NULL);
778 #endif /* KSPLICE_STANDALONE */
780 for (p = pack->patches; p < pack->patches_end; p++)
781 p->vaddr = NULL;
782 for (s = pack->helper_sections; s < pack->helper_sections_end; s++)
783 s->match_map = NULL;
784 for (p = pack->patches; p < pack->patches_end; p++) {
785 const struct ksplice_reloc *r = patch_reloc(pack, p);
786 if (r == NULL)
787 return -ENOENT;
788 if (p->type == KSPLICE_PATCH_DATA) {
789 s = symbol_section(pack, r->symbol);
790 if (s == NULL)
791 return -ENOENT;
792 /* Ksplice creates KSPLICE_PATCH_DATA patches in order
793 * to modify rodata sections that have been explicitly
794 * marked for patching using the ksplice-patch.h macro
795 * ksplice_assume_rodata. Here we modify the section
796 * flags appropriately.
798 if (s->flags & KSPLICE_SECTION_DATA)
799 s->flags = (s->flags & ~KSPLICE_SECTION_DATA) |
800 KSPLICE_SECTION_RODATA;
804 mutex_lock(&module_mutex);
805 list_for_each_entry(update, &updates, list) {
806 if (strcmp(pack->kid, update->kid) == 0) {
807 if (update->stage != STAGE_PREPARING) {
808 ret = -EPERM;
809 goto out;
811 add_to_update(pack, update);
812 ret = 0;
813 goto out;
816 update = init_ksplice_update(pack->kid);
817 if (update == NULL) {
818 ret = -ENOMEM;
819 goto out;
821 ret = ksplice_sysfs_init(update);
822 if (ret != 0) {
823 cleanup_ksplice_update(update);
824 goto out;
826 add_to_update(pack, update);
827 out:
828 mutex_unlock(&module_mutex);
829 return ret;
831 EXPORT_SYMBOL_GPL(init_ksplice_pack);
834 * cleanup_ksplice_pack() - Cleans up a pack
835 * @pack: The pack to be cleaned up
837 void cleanup_ksplice_pack(struct ksplice_pack *pack)
839 if (pack->update == NULL)
840 return;
842 mutex_lock(&module_mutex);
843 if (pack->update->stage == STAGE_APPLIED) {
844 /* If the pack wasn't actually applied (because we
845 * only applied this update to loaded modules and this
846 * target was not loaded), then unregister the pack
847 * from the list of unused packs.
849 struct ksplice_pack *p;
850 bool found = false;
852 list_for_each_entry(p, &pack->update->unused_packs, list) {
853 if (p == pack)
854 found = true;
856 if (found)
857 list_del(&pack->list);
858 mutex_unlock(&module_mutex);
859 return;
861 list_del(&pack->list);
862 if (pack->update->stage == STAGE_PREPARING)
863 maybe_cleanup_ksplice_update(pack->update);
864 pack->update = NULL;
865 mutex_unlock(&module_mutex);
867 EXPORT_SYMBOL_GPL(cleanup_ksplice_pack);
869 static struct update *init_ksplice_update(const char *kid)
871 struct update *update;
872 update = kcalloc(1, sizeof(struct update), GFP_KERNEL);
873 if (update == NULL)
874 return NULL;
875 update->name = kasprintf(GFP_KERNEL, "ksplice_%s", kid);
876 if (update->name == NULL) {
877 kfree(update);
878 return NULL;
880 update->kid = kstrdup(kid, GFP_KERNEL);
881 if (update->kid == NULL) {
882 kfree(update->name);
883 kfree(update);
884 return NULL;
886 if (try_module_get(THIS_MODULE) != 1) {
887 kfree(update->kid);
888 kfree(update->name);
889 kfree(update);
890 return NULL;
892 INIT_LIST_HEAD(&update->packs);
893 INIT_LIST_HEAD(&update->unused_packs);
894 INIT_LIST_HEAD(&update->ksplice_module_list);
895 if (init_debug_buf(update) != OK) {
896 module_put(THIS_MODULE);
897 kfree(update->kid);
898 kfree(update->name);
899 kfree(update);
900 return NULL;
902 list_add(&update->list, &updates);
903 update->stage = STAGE_PREPARING;
904 update->abort_cause = OK;
905 update->partial = 0;
906 INIT_LIST_HEAD(&update->conflicts);
907 return update;
910 static void cleanup_ksplice_update(struct update *update)
912 list_del(&update->list);
913 cleanup_conflicts(update);
914 clear_debug_buf(update);
915 cleanup_module_list_entries(update);
916 kfree(update->kid);
917 kfree(update->name);
918 kfree(update);
919 module_put(THIS_MODULE);
/* Clean up the update if it no longer has any packs */
static void maybe_cleanup_ksplice_update(struct update *update)
{
	/* Dropping the last kobject reference triggers the ktype's
	 * release path rather than freeing the update directly here. */
	if (list_empty(&update->packs) && list_empty(&update->unused_packs))
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
		kobject_put(&update->kobj);
#else /* LINUX_VERSION_CODE < */
/* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
		kobject_unregister(&update->kobj);
#endif /* LINUX_VERSION_CODE */
}
/* Attach a pack to an update.  The pack starts on the unused_packs
 * list; apply_update later moves it to the packs list if its target
 * is loaded. */
static void add_to_update(struct ksplice_pack *pack, struct update *update)
{
	pack->update = update;
	list_add(&pack->list, &update->unused_packs);
}
/*
 * Register the update's kobject with sysfs and announce it with a
 * KOBJ_ADD uevent.  Returns 0 on success or a kobject-layer error.
 * The heavy #ifdef'ing covers the kobject API changes across kernel
 * versions (see the referenced upstream commits).
 */
static int ksplice_sysfs_init(struct update *update)
{
	int ret = 0;
	memset(&update->kobj, 0, sizeof(update->kobj));
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
#ifndef KSPLICE_STANDALONE
	ret = kobject_init_and_add(&update->kobj, &update_ktype,
				   ksplice_kobj, "%s", update->kid);
#else /* KSPLICE_STANDALONE */
/* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
	/* Standalone builds parent the kobject on this module's kobject. */
	ret = kobject_init_and_add(&update->kobj, &update_ktype,
				   &THIS_MODULE->mkobj.kobj, "ksplice");
#endif /* KSPLICE_STANDALONE */
#else /* LINUX_VERSION_CODE < */
	/* Pre-2.6.25: set the fields by hand and use kobject_register. */
	ret = kobject_set_name(&update->kobj, "%s", "ksplice");
	if (ret != 0)
		return ret;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
	update->kobj.parent = &THIS_MODULE->mkobj.kobj;
#else /* LINUX_VERSION_CODE < */
/* b86ab02803095190d6b72bcc18dcf620bf378df9 was after 2.6.10 */
	update->kobj.parent = &THIS_MODULE->mkobj->kobj;
#endif /* LINUX_VERSION_CODE */
	update->kobj.ktype = &update_ktype;
	ret = kobject_register(&update->kobj);
#endif /* LINUX_VERSION_CODE */
	if (ret != 0)
		return ret;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
	kobject_uevent(&update->kobj, KOBJ_ADD);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
/* 312c004d36ce6c739512bac83b452f4c20ab1f62 was after 2.6.14 */
/* 12025235884570ba7f02a6f427f973ac6be7ec54 was after 2.6.9 */
	kobject_uevent(&update->kobj, KOBJ_ADD, NULL);
#endif /* LINUX_VERSION_CODE */
	return 0;
}
/*
 * Drive the application of an update:
 *  1. create module-list entries for packs already on the packs list;
 *  2. resolve each unused pack's target module, take a dependency on
 *     it, and move the pack onto the packs list (packs whose targets
 *     are not loaded stay unused when update->partial is set);
 *  3. create a safety record for every primary section;
 *  4. run-pre match and prepare each pack;
 *  5. apply the patches.
 * On any failure (stage still STAGE_PREPARING) the safety records and
 * module-list entries are torn down before returning.
 */
static abort_t apply_update(struct update *update)
{
	struct ksplice_pack *pack, *n;
	abort_t ret;
	int retval;

	list_for_each_entry(pack, &update->packs, list) {
		ret = create_module_list_entry(pack, true);
		if (ret != OK)
			goto out;
	}

	list_for_each_entry_safe(pack, n, &update->unused_packs, list) {
		if (strcmp(pack->target_name, "vmlinux") == 0) {
			/* NULL target means the running kernel itself. */
			pack->target = NULL;
		} else if (pack->target == NULL) {
			pack->target = find_module(pack->target_name);
			if (pack->target == NULL ||
			    !module_is_live(pack->target)) {
				/* Missing target aborts a full update but
				 * is tolerated in a partial one. */
				if (!update->partial) {
					ret = TARGET_NOT_LOADED;
					goto out;
				}
				ret = create_module_list_entry(pack, false);
				if (ret != OK)
					goto out;
				continue;
			}
			retval = use_module(pack->primary, pack->target);
			if (retval != 1) {
				ret = UNEXPECTED;
				goto out;
			}
		}
		ret = create_module_list_entry(pack, true);
		if (ret != OK)
			goto out;
		list_del(&pack->list);
		list_add_tail(&pack->list, &update->packs);

#ifdef KSPLICE_NEED_PARAINSTRUCTIONS
		/* Patch paravirt instruction sites in our own images so
		 * they match the running kernel's patched code. */
		if (pack->target == NULL) {
			apply_paravirt(pack->primary_parainstructions,
				       pack->primary_parainstructions_end);
			apply_paravirt(pack->helper_parainstructions,
				       pack->helper_parainstructions_end);
		}
#endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
	}

	list_for_each_entry(pack, &update->packs, list) {
		const struct ksplice_section *sect;
		for (sect = pack->primary_sections;
		     sect < pack->primary_sections_end; sect++) {
			struct safety_record *rec = kmalloc(sizeof(*rec),
							    GFP_KERNEL);
			if (rec == NULL) {
				ret = OUT_OF_MEMORY;
				goto out;
			}
			rec->addr = sect->address;
			rec->size = sect->size;
			rec->label = sect->symbol->label;
			list_add(&rec->list, &pack->safety_records);
		}
	}

	list_for_each_entry(pack, &update->packs, list) {
		ret = init_symbol_arrays(pack);
		if (ret != OK) {
			cleanup_symbol_arrays(pack);
			goto out;
		}
		ret = prepare_pack(pack);
		cleanup_symbol_arrays(pack);
		if (ret != OK)
			goto out;
	}
	ret = apply_patches(update);
out:
	list_for_each_entry(pack, &update->packs, list) {
		struct ksplice_section *s;
		if (update->stage == STAGE_PREPARING)
			clear_list(&pack->safety_records, struct safety_record,
				   list);
		/* match_map scratch space is only needed during matching. */
		for (s = pack->helper_sections; s < pack->helper_sections_end;
		     s++) {
			if (s->match_map != NULL) {
				vfree(s->match_map);
				s->match_map = NULL;
			}
		}
	}
	if (update->stage == STAGE_PREPARING)
		cleanup_module_list_entries(update);
	return ret;
}
1076 static int compare_symbolp_names(const void *a, const void *b)
1078 const struct ksplice_symbol *const *sympa = a, *const *sympb = b;
1079 if ((*sympa)->name == NULL && (*sympb)->name == NULL)
1080 return 0;
1081 if ((*sympa)->name == NULL)
1082 return -1;
1083 if ((*sympb)->name == NULL)
1084 return 1;
1085 return strcmp((*sympa)->name, (*sympb)->name);
/* sort()/bsearch() comparator ordering ksplice_symbol pointers by
 * label.  Unlike names, labels are dereferenced unconditionally here,
 * so they are presumably always non-NULL — verify against producers. */
static int compare_symbolp_labels(const void *a, const void *b)
{
	const struct ksplice_symbol *const *sympa = a, *const *sympb = b;
	return strcmp((*sympa)->label, (*sympb)->label);
}
1094 static int symbolp_bsearch_compare(const void *key, const void *elt)
1096 const char *name = key;
1097 const struct ksplice_symbol *const *symp = elt;
1098 const struct ksplice_symbol *sym = *symp;
1099 if (sym->name == NULL)
1100 return 1;
1101 return strcmp(name, sym->name);
/*
 * Record sym_val as a candidate value for every symbol in the sorted
 * lookup array whose name equals sym_name.
 */
static abort_t add_matching_values(struct ksplice_lookup *lookup,
				   const char *sym_name, unsigned long sym_val)
{
	struct ksplice_symbol **symp;
	abort_t ret;

	symp = bsearch(sym_name, lookup->arr, lookup->size,
		       sizeof(*lookup->arr), symbolp_bsearch_compare);
	if (symp == NULL)
		return OK;

	/* bsearch may land anywhere within a run of equal names;
	 * rewind to the first matching entry. */
	while (symp > lookup->arr &&
	       symbolp_bsearch_compare(sym_name, symp - 1) == 0)
		symp--;

	/* Walk forward over the whole run of matching entries. */
	for (; symp < lookup->arr + lookup->size; symp++) {
		struct ksplice_symbol *sym = *symp;
		if (sym->name == NULL || strcmp(sym_name, sym->name) != 0)
			break;
		ret = add_candidate_val(lookup->pack, sym->vals, sym_val);
		if (ret != OK)
			return ret;
	}
	return OK;
}
#ifdef CONFIG_KALLSYMS
/*
 * kallsyms_on_each_symbol callback: feed every kallsyms entry that
 * could belong to the pack's target into add_matching_values.
 * Symbols owned by our own primary module, or by modules that don't
 * patch the target, are skipped.  The __force casts shuttle abort_t
 * through the int-returning callback interface.
 */
static int add_kallsyms_values(void *data, const char *name,
			       struct module *owner, unsigned long val)
{
	struct ksplice_lookup *lookup = data;
	if (owner == lookup->pack->primary ||
	    !patches_module(owner, lookup->pack->target))
		return (__force int)OK;
	return (__force int)add_matching_values(lookup, name, val);
}
#endif /* CONFIG_KALLSYMS */
1142 static bool add_export_values(const struct symsearch *syms,
1143 struct module *owner,
1144 unsigned int symnum, void *data)
1146 struct ksplice_lookup *lookup = data;
1147 abort_t ret;
1149 ret = add_matching_values(lookup, syms->start[symnum].name,
1150 syms->start[symnum].value);
1151 if (ret != OK) {
1152 lookup->ret = ret;
1153 return true;
1155 return false;
1158 static void cleanup_symbol_arrays(struct ksplice_pack *pack)
1160 struct ksplice_symbol *sym;
1161 for (sym = pack->primary_symbols; sym < pack->primary_symbols_end;
1162 sym++) {
1163 if (sym->vals != NULL) {
1164 clear_list(sym->vals, struct candidate_val, list);
1165 kfree(sym->vals);
1166 sym->vals = NULL;
1169 for (sym = pack->helper_symbols; sym < pack->helper_symbols_end; sym++) {
1170 if (sym->vals != NULL) {
1171 clear_list(sym->vals, struct candidate_val, list);
1172 kfree(sym->vals);
1173 sym->vals = NULL;
/*
 * The primary and helper modules each have their own independent
 * ksplice_symbol structures.  uniquify_symbols unifies these separate
 * pieces of kernel symbol information by replacing all references to
 * the helper copy of symbols with references to the primary copy.
 */
static abort_t uniquify_symbols(struct ksplice_pack *pack)
{
	struct ksplice_reloc *r;
	struct ksplice_section *s;
	struct ksplice_symbol *sym, **sym_arr, **symp;
	size_t size = pack->primary_symbols_end - pack->primary_symbols;

	if (size == 0)
		return OK;

	/* Build an array of pointers to the primary symbols, sorted by
	 * label, so helper references can be matched with bsearch. */
	sym_arr = vmalloc(sizeof(*sym_arr) * size);
	if (sym_arr == NULL)
		return OUT_OF_MEMORY;

	for (symp = sym_arr, sym = pack->primary_symbols;
	     symp < sym_arr + size && sym < pack->primary_symbols_end;
	     sym++, symp++)
		*symp = sym;

	sort(sym_arr, size, sizeof(*sym_arr), compare_symbolp_labels, NULL);

	/* Redirect helper relocs to the primary copy of their symbol,
	 * propagating the helper's name if the primary lacks one. */
	for (r = pack->helper_relocs; r < pack->helper_relocs_end; r++) {
		symp = bsearch(&r->symbol, sym_arr, size, sizeof(*sym_arr),
			       compare_symbolp_labels);
		if (symp != NULL) {
			if ((*symp)->name == NULL)
				(*symp)->name = r->symbol->name;
			r->symbol = *symp;
		}
	}

	/* Same redirection for helper sections. */
	for (s = pack->helper_sections; s < pack->helper_sections_end; s++) {
		symp = bsearch(&s->symbol, sym_arr, size, sizeof(*sym_arr),
			       compare_symbolp_labels);
		if (symp != NULL) {
			if ((*symp)->name == NULL)
				(*symp)->name = s->symbol->name;
			s->symbol = *symp;
		}
	}

	vfree(sym_arr);
	return OK;
}
1230 * Initialize the ksplice_symbol structures in the given array using
1231 * the kallsyms and exported symbol tables.
1233 static abort_t init_symbol_array(struct ksplice_pack *pack,
1234 struct ksplice_symbol *start,
1235 struct ksplice_symbol *end)
1237 struct ksplice_symbol *sym, **sym_arr, **symp;
1238 struct ksplice_lookup lookup;
1239 size_t size = end - start;
1240 abort_t ret;
1242 if (size == 0)
1243 return OK;
1245 for (sym = start; sym < end; sym++) {
1246 if (starts_with(sym->label, "__ksymtab")) {
1247 const struct kernel_symbol *ksym;
1248 const char *colon = strchr(sym->label, ':');
1249 const char *name = colon + 1;
1250 if (colon == NULL)
1251 continue;
1252 ksym = find_symbol(name, NULL, NULL, true, false);
1253 if (ksym == NULL) {
1254 ksdebug(pack, "Could not find kernel_symbol "
1255 "structure for %s\n", name);
1256 continue;
1258 sym->value = (unsigned long)ksym;
1259 sym->vals = NULL;
1260 continue;
1263 sym->vals = kmalloc(sizeof(*sym->vals), GFP_KERNEL);
1264 if (sym->vals == NULL)
1265 return OUT_OF_MEMORY;
1266 INIT_LIST_HEAD(sym->vals);
1267 sym->value = 0;
1270 sym_arr = vmalloc(sizeof(*sym_arr) * size);
1271 if (sym_arr == NULL)
1272 return OUT_OF_MEMORY;
1274 for (symp = sym_arr, sym = start; symp < sym_arr + size && sym < end;
1275 sym++, symp++)
1276 *symp = sym;
1278 sort(sym_arr, size, sizeof(*sym_arr), compare_symbolp_names, NULL);
1280 lookup.pack = pack;
1281 lookup.arr = sym_arr;
1282 lookup.size = size;
1283 lookup.ret = OK;
1285 each_symbol(add_export_values, &lookup);
1286 ret = lookup.ret;
1287 #ifdef CONFIG_KALLSYMS
1288 if (ret == OK)
1289 ret = (__force abort_t)
1290 kallsyms_on_each_symbol(add_kallsyms_values, &lookup);
1291 #endif /* CONFIG_KALLSYMS */
1292 vfree(sym_arr);
1293 return ret;
1296 /* Prepare the pack's ksplice_symbol structures for run-pre matching */
1297 static abort_t init_symbol_arrays(struct ksplice_pack *pack)
1299 abort_t ret;
1301 ret = uniquify_symbols(pack);
1302 if (ret != OK)
1303 return ret;
1305 ret = init_symbol_array(pack, pack->helper_symbols,
1306 pack->helper_symbols_end);
1307 if (ret != OK)
1308 return ret;
1310 ret = init_symbol_array(pack, pack->primary_symbols,
1311 pack->primary_symbols_end);
1312 if (ret != OK)
1313 return ret;
1315 return OK;
/* Run-pre match the pack's sections and finalize its relocations and
 * patches, retrying with .data sections if the first pass fails. */
static abort_t prepare_pack(struct ksplice_pack *pack)
{
	abort_t ret;

	ksdebug(pack, "Preparing and checking %s\n", pack->name);
	ret = match_pack_sections(pack, false);
	if (ret == NO_MATCH) {
		/* It is possible that by using relocations from .data sections
		 * we can successfully run-pre match the rest of the sections.
		 * To avoid using any symbols obtained from .data sections
		 * (which may be unreliable) in the post code, we first prepare
		 * the post code and then try to run-pre match the remaining
		 * sections with the help of .data sections.
		 */
		ksdebug(pack, "Continuing without some sections; we might "
			"find them later.\n");
		ret = finalize_pack(pack);
		if (ret != OK) {
			ksdebug(pack, "Aborted. Unable to continue without "
				"the unmatched sections.\n");
			return ret;
		}

		ksdebug(pack, "run-pre: Considering .data sections to find the "
			"unmatched sections\n");
		ret = match_pack_sections(pack, true);
		if (ret != OK)
			return ret;

		ksdebug(pack, "run-pre: Found all previously unmatched "
			"sections\n");
		return OK;
	} else if (ret != OK) {
		return ret;
	}

	return finalize_pack(pack);
}
1358 * Finish preparing the pack for insertion into the kernel.
1359 * Afterwards, the replacement code should be ready to run and the
1360 * ksplice_patches should all be ready for trampoline insertion.
1362 static abort_t finalize_pack(struct ksplice_pack *pack)
1364 abort_t ret;
1365 ret = apply_relocs(pack, pack->primary_relocs,
1366 pack->primary_relocs_end);
1367 if (ret != OK)
1368 return ret;
1370 ret = finalize_patches(pack);
1371 if (ret != OK)
1372 return ret;
1374 return OK;
/*
 * Validate each patch against the safety records and prepare its
 * trampoline.  Every non-EXPORT patch must have its oldaddr covered
 * by a safety record (i.e. run-pre matched code), and the record must
 * span the whole patched range.
 */
static abort_t finalize_patches(struct ksplice_pack *pack)
{
	struct ksplice_patch *p;
	struct safety_record *rec;
	abort_t ret;

	for (p = pack->patches; p < pack->patches_end; p++) {
		bool found = false;
		/* NOTE: after this loop breaks, rec still points at the
		 * matching record and is reused below when found. */
		list_for_each_entry(rec, &pack->safety_records, list) {
			if (rec->addr <= p->oldaddr &&
			    p->oldaddr < rec->addr + rec->size) {
				found = true;
				break;
			}
		}
		if (!found && p->type != KSPLICE_PATCH_EXPORT) {
			const struct ksplice_reloc *r = patch_reloc(pack, p);
			if (r == NULL) {
				ksdebug(pack, "A patch with no ksplice_reloc at"
					" its oldaddr has no safety record\n");
				return NO_MATCH;
			}
			ksdebug(pack, "No safety record for patch with oldaddr "
				"%s+%lx\n", r->symbol->label, r->target_addend);
			return NO_MATCH;
		}
		if (p->type == KSPLICE_PATCH_TEXT) {
			ret = prepare_trampoline(pack, p);
			if (ret != OK)
				return ret;
		}
		/* The trampoline must fit inside the matched region. */
		if (found && rec->addr + rec->size < p->oldaddr + p->size) {
			ksdebug(pack, "Safety record %s is too short for "
				"patch\n", rec->label);
			return UNEXPECTED;
		}
		/* A zero repladdr marks a deleted function; route calls
		 * to the ksplice_deleted trap instead. */
		if (p->type == KSPLICE_PATCH_TEXT) {
			if (p->repladdr == 0)
				p->repladdr = (unsigned long)ksplice_deleted;
		}
	}
	return OK;
}
1424 static abort_t map_trampoline_pages(struct update *update)
1426 struct ksplice_pack *pack;
1427 list_for_each_entry(pack, &update->packs, list) {
1428 struct ksplice_patch *p;
1429 for (p = pack->patches; p < pack->patches_end; p++) {
1430 p->vaddr = map_writable((void *)p->oldaddr, p->size);
1431 if (p->vaddr == NULL) {
1432 ksdebug(pack, "Unable to map oldaddr read/write"
1433 "\n");
1434 unmap_trampoline_pages(update);
1435 return UNEXPECTED;
1439 return OK;
/* Undo map_trampoline_pages: release each patch's shadow mapping. */
static void unmap_trampoline_pages(struct update *update)
{
	struct ksplice_pack *pack;
	list_for_each_entry(pack, &update->packs, list) {
		struct ksplice_patch *p;
		for (p = pack->patches; p < pack->patches_end; p++) {
			/* vaddr points into the mapping at the original
			 * page offset; round down before vunmap. */
			vunmap((void *)((unsigned long)p->vaddr & PAGE_MASK));
			p->vaddr = NULL;
		}
	}
}
1455 * map_writable creates a shadow page mapping of the range
1456 * [addr, addr + len) so that we can write to code mapped read-only.
1458 * It is similar to a generalized version of x86's text_poke. But
1459 * because one cannot use vmalloc/vfree() inside stop_machine, we use
1460 * map_writable to map the pages before stop_machine, then use the
1461 * mapping inside stop_machine, and unmap the pages afterwards.
1463 static void *map_writable(void *addr, size_t len)
1465 void *vaddr;
1466 int nr_pages = DIV_ROUND_UP(offset_in_page(addr) + len, PAGE_SIZE);
1467 struct page **pages = kmalloc(nr_pages * sizeof(*pages), GFP_KERNEL);
1468 void *page_addr = (void *)((unsigned long)addr & PAGE_MASK);
1469 int i;
1471 if (pages == NULL)
1472 return NULL;
1474 for (i = 0; i < nr_pages; i++) {
1475 if (__module_address((unsigned long)page_addr) == NULL) {
1476 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) || !defined(CONFIG_X86_64)
1477 pages[i] = virt_to_page(page_addr);
1478 #else /* LINUX_VERSION_CODE < && CONFIG_X86_64 */
1479 /* e3ebadd95cb621e2c7436f3d3646447ac9d5c16d was after 2.6.21 */
1480 pages[i] =
1481 pfn_to_page(__pa_symbol(page_addr) >> PAGE_SHIFT);
1482 #endif /* LINUX_VERSION_CODE || !CONFIG_X86_64 */
1483 WARN_ON(!PageReserved(pages[i]));
1484 } else {
1485 pages[i] = vmalloc_to_page(addr);
1487 if (pages[i] == NULL) {
1488 kfree(pages);
1489 return NULL;
1491 page_addr += PAGE_SIZE;
1493 vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
1494 kfree(pages);
1495 if (vaddr == NULL)
1496 return NULL;
1497 return vaddr + offset_in_page(addr);
/*
 * Ksplice adds a dependency on any symbol address used to resolve
 * relocations in the primary module.
 *
 * Be careful to follow_trampolines so that we always depend on the
 * latest version of the target function, since that's the code that
 * will run if we call addr.
 */
static abort_t add_dependency_on_address(struct ksplice_pack *pack,
					 unsigned long addr)
{
	struct ksplice_pack *p;
	struct module *m =
	    __module_text_address(follow_trampolines(pack, addr));
	if (m == NULL)
		return OK;
	/* No dependency needed on this update's own primary modules. */
	list_for_each_entry(p, &pack->update->packs, list) {
		if (m == p->primary)
			return OK;
	}
	if (use_module(pack->primary, m) != 1)
		return MODULE_BUSY;
	return OK;
}
1525 static abort_t apply_relocs(struct ksplice_pack *pack,
1526 const struct ksplice_reloc *relocs,
1527 const struct ksplice_reloc *relocs_end)
1529 const struct ksplice_reloc *r;
1530 for (r = relocs; r < relocs_end; r++) {
1531 abort_t ret = apply_reloc(pack, r);
1532 if (ret != OK)
1533 return ret;
1535 return OK;
1538 static abort_t apply_reloc(struct ksplice_pack *pack,
1539 const struct ksplice_reloc *r)
1541 switch (r->howto->type) {
1542 case KSPLICE_HOWTO_RELOC:
1543 case KSPLICE_HOWTO_RELOC_PATCH:
1544 return apply_howto_reloc(pack, r);
1545 case KSPLICE_HOWTO_DATE:
1546 case KSPLICE_HOWTO_TIME:
1547 return apply_howto_date(pack, r);
1548 default:
1549 ksdebug(pack, "Unexpected howto type %d\n", r->howto->type);
1550 return UNEXPECTED;
/*
 * Applies a relocation. Aborts if the symbol referenced in it has
 * not been uniquely resolved.
 */
static abort_t apply_howto_reloc(struct ksplice_pack *pack,
				 const struct ksplice_reloc *r)
{
	abort_t ret;
	int canary_ret;
	unsigned long sym_addr;
	LIST_HEAD(vals);

	/* A missing canary means the site was already patched (e.g. by
	 * an alternative instruction) and must be left alone. */
	canary_ret = contains_canary(pack, r->blank_addr, r->howto);
	if (canary_ret < 0)
		return UNEXPECTED;
	if (canary_ret == 0) {
		ksdebug(pack, "reloc: skipped %lx to %s+%lx (altinstr)\n",
			r->blank_addr, r->symbol->label, r->target_addend);
		return OK;
	}

#ifdef KSPLICE_STANDALONE
	/* Before bootstrap, fall back on System.map candidates. */
	if (!bootstrapped) {
		ret = add_system_map_candidates(pack,
						pack->primary_system_map,
						pack->primary_system_map_end,
						r->symbol->label, &vals);
		if (ret != OK) {
			release_vals(&vals);
			return ret;
		}
	}
#endif /* KSPLICE_STANDALONE */
	ret = lookup_symbol(pack, r->symbol, &vals);
	if (ret != OK) {
		release_vals(&vals);
		return ret;
	}
	/*
	 * Relocations for the oldaddr fields of patches must have
	 * been resolved via run-pre matching.
	 */
	if (!singular(&vals) || (r->symbol->vals != NULL &&
				 r->howto->type == KSPLICE_HOWTO_RELOC_PATCH)) {
		release_vals(&vals);
		ksdebug(pack, "Failed to find %s for reloc\n",
			r->symbol->label);
		return FAILED_TO_FIND;
	}
	sym_addr = list_entry(vals.next, struct candidate_val, list)->val;
	release_vals(&vals);

	/* PC-relative howtos store the displacement from the site. */
	ret = write_reloc_value(pack, r, r->blank_addr,
				r->howto->pcrel ? sym_addr - r->blank_addr :
				sym_addr);
	if (ret != OK)
		return ret;

	ksdebug(pack, "reloc: %lx to %s+%lx (S=%lx ", r->blank_addr,
		r->symbol->label, r->target_addend, sym_addr);
	switch (r->howto->size) {
	case 1:
		ksdebug(pack, "aft=%02x)\n", *(uint8_t *)r->blank_addr);
		break;
	case 2:
		ksdebug(pack, "aft=%04x)\n", *(uint16_t *)r->blank_addr);
		break;
	case 4:
		ksdebug(pack, "aft=%08x)\n", *(uint32_t *)r->blank_addr);
		break;
#if BITS_PER_LONG >= 64
	case 8:
		ksdebug(pack, "aft=%016llx)\n", *(uint64_t *)r->blank_addr);
		break;
#endif /* BITS_PER_LONG */
	default:
		ksdebug(pack, "Aborted. Invalid relocation size.\n");
		return UNEXPECTED;
	}
#ifdef KSPLICE_STANDALONE
	if (!bootstrapped)
		return OK;
#endif /* KSPLICE_STANDALONE */

	/*
	 * Create labelvals so that we can verify our choices in the
	 * second round of run-pre matching that considers data sections.
	 */
	ret = create_labelval(pack, r->symbol, sym_addr, VAL);
	if (ret != OK)
		return ret;

	return add_dependency_on_address(pack, sym_addr);
}
/*
 * Date relocations are created wherever __DATE__ or __TIME__ is used
 * in the kernel; we resolve them by simply copying in the date/time
 * obtained from run-pre matching the relevant compilation unit.
 */
static abort_t apply_howto_date(struct ksplice_pack *pack,
				const struct ksplice_reloc *r)
{
	/* A non-NULL vals list means run-pre matching never pinned
	 * down this symbol's value. */
	if (r->symbol->vals != NULL) {
		ksdebug(pack, "Failed to find %s for date\n", r->symbol->label);
		return FAILED_TO_FIND;
	}
	memcpy((unsigned char *)r->blank_addr,
	       (const unsigned char *)r->symbol->value, r->howto->size);
	return OK;
}
/*
 * Given a relocation and its run address, compute the address of the
 * symbol the relocation referenced, and store it in *valp.
 */
static abort_t read_reloc_value(struct ksplice_pack *pack,
				const struct ksplice_reloc *r,
				unsigned long addr, unsigned long *valp)
{
	unsigned char bytes[sizeof(long)];
	unsigned long val;
	const struct ksplice_reloc_howto *howto = r->howto;

	if (howto->size <= 0 || howto->size > sizeof(long)) {
		ksdebug(pack, "Aborted. Invalid relocation size.\n");
		return UNEXPECTED;
	}

	/* The run address may be unmapped; fail softly with NO_MATCH. */
	if (probe_kernel_read(bytes, (void *)addr, howto->size) == -EFAULT)
		return NO_MATCH;

	switch (howto->size) {
	case 1:
		val = *(uint8_t *)bytes;
		break;
	case 2:
		val = *(uint16_t *)bytes;
		break;
	case 4:
		val = *(uint32_t *)bytes;
		break;
#if BITS_PER_LONG >= 64
	case 8:
		val = *(uint64_t *)bytes;
		break;
#endif /* BITS_PER_LONG */
	default:
		ksdebug(pack, "Aborted. Invalid relocation size.\n");
		return UNEXPECTED;
	}

	/* Isolate the relocation field, sign-extend it when the howto
	 * uses a signed addend, undo the howto's right shift, and strip
	 * the addends to recover the bare symbol address. */
	val &= howto->dst_mask;
	if (howto->signed_addend)
		val |= -(val & (howto->dst_mask & ~(howto->dst_mask >> 1)));
	val <<= howto->rightshift;
	val -= r->insn_addend + r->target_addend;
	*valp = val;
	return OK;
}
/*
 * Given a relocation, the address of its storage unit, and the
 * address of the symbol the relocation references, write the
 * relocation's final value into the storage unit.
 */
static abort_t write_reloc_value(struct ksplice_pack *pack,
				 const struct ksplice_reloc *r,
				 unsigned long addr, unsigned long sym_addr)
{
	unsigned long val = sym_addr + r->target_addend + r->insn_addend;
	const struct ksplice_reloc_howto *howto = r->howto;
	val >>= howto->rightshift;
	/* Merge the shifted value into the destination field, leaving
	 * bits outside dst_mask untouched. */
	switch (howto->size) {
	case 1:
		*(uint8_t *)addr = (*(uint8_t *)addr & ~howto->dst_mask) |
		    (val & howto->dst_mask);
		break;
	case 2:
		*(uint16_t *)addr = (*(uint16_t *)addr & ~howto->dst_mask) |
		    (val & howto->dst_mask);
		break;
	case 4:
		*(uint32_t *)addr = (*(uint32_t *)addr & ~howto->dst_mask) |
		    (val & howto->dst_mask);
		break;
#if BITS_PER_LONG >= 64
	case 8:
		*(uint64_t *)addr = (*(uint64_t *)addr & ~howto->dst_mask) |
		    (val & howto->dst_mask);
		break;
#endif /* BITS_PER_LONG */
	default:
		ksdebug(pack, "Aborted. Invalid relocation size.\n");
		return UNEXPECTED;
	}

	/* Round-trip check: re-reading must give back sym_addr, or the
	 * value did not fit in the relocation field. */
	if (read_reloc_value(pack, r, addr, &val) != OK || val != sym_addr) {
		ksdebug(pack, "Aborted. Relocation overflow.\n");
		return UNEXPECTED;
	}

	return OK;
}
1759 static abort_t create_module_list_entry(struct ksplice_pack *pack,
1760 bool to_be_applied)
1762 struct ksplice_module_list_entry *entry =
1763 kmalloc(sizeof(*entry), GFP_KERNEL);
1764 if (entry == NULL)
1765 return OUT_OF_MEMORY;
1766 entry->primary_name = kstrdup(pack->primary->name, GFP_KERNEL);
1767 if (entry->primary_name == NULL) {
1768 kfree(entry);
1769 return OUT_OF_MEMORY;
1771 entry->target_name = kstrdup(pack->target_name, GFP_KERNEL);
1772 if (entry->target_name == NULL) {
1773 kfree(entry->primary_name);
1774 kfree(entry);
1775 return OUT_OF_MEMORY;
1777 /* The update's kid is guaranteed to outlast the module_list_entry */
1778 entry->kid = pack->update->kid;
1779 entry->applied = to_be_applied;
1780 list_add(&entry->update_list, &pack->update->ksplice_module_list);
1781 return OK;
/* Free the strings owned by each module-list entry, then free the
 * entries themselves via clear_list. */
static void cleanup_module_list_entries(struct update *update)
{
	struct ksplice_module_list_entry *entry;
	list_for_each_entry(entry, &update->ksplice_module_list, update_list) {
		kfree(entry->target_name);
		kfree(entry->primary_name);
	}
	clear_list(&update->ksplice_module_list,
		   struct ksplice_module_list_entry, update_list);
}
/* Replacement address used for functions deleted by the patch */
static void __attribute__((noreturn)) ksplice_deleted(void)
{
	printk(KERN_CRIT "Called a kernel function deleted by Ksplice!\n");
	BUG();
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
/* 91768d6c2bad0d2766a166f13f2f57e197de3458 was after 2.6.19 */
	/* Older kernels' BUG() was not noreturn; loop forever so the
	 * noreturn attribute above is honest. */
	for (;;);
#endif
}
/* Floodfill to run-pre match the sections within a pack. */
static abort_t match_pack_sections(struct ksplice_pack *pack,
				   bool consider_data_sections)
{
	struct ksplice_section *sect;
	abort_t ret;
	int remaining = 0;
	bool progress;

	/* Count the unmatched sections that must be found; data and
	 * string sections do not count toward the total. */
	for (sect = pack->helper_sections; sect < pack->helper_sections_end;
	     sect++) {
		if ((sect->flags & KSPLICE_SECTION_DATA) == 0 &&
		    (sect->flags & KSPLICE_SECTION_STRING) == 0 &&
		    (sect->flags & KSPLICE_SECTION_MATCHED) == 0)
			remaining++;
	}

	/* Each matched section can supply symbol values that let more
	 * sections match; keep sweeping until a pass makes no progress. */
	while (remaining > 0) {
		progress = false;
		for (sect = pack->helper_sections;
		     sect < pack->helper_sections_end; sect++) {
			if ((sect->flags & KSPLICE_SECTION_MATCHED) != 0)
				continue;
			if ((!consider_data_sections &&
			     (sect->flags & KSPLICE_SECTION_DATA) != 0) ||
			    (sect->flags & KSPLICE_SECTION_STRING) != 0)
				continue;
			ret = find_section(pack, sect);
			if (ret == OK) {
				sect->flags |= KSPLICE_SECTION_MATCHED;
				if ((sect->flags & KSPLICE_SECTION_DATA) == 0)
					remaining--;
				progress = true;
			} else if (ret != NO_MATCH) {
				return ret;
			}
		}

		if (progress)
			continue;

		/* Stuck: report every still-unmatched section and fail. */
		for (sect = pack->helper_sections;
		     sect < pack->helper_sections_end; sect++) {
			if ((sect->flags & KSPLICE_SECTION_MATCHED) != 0 ||
			    (sect->flags & KSPLICE_SECTION_STRING) != 0)
				continue;
			ksdebug(pack, "run-pre: could not match %s "
				"section %s\n",
				(sect->flags & KSPLICE_SECTION_DATA) != 0 ?
				"data" :
				(sect->flags & KSPLICE_SECTION_RODATA) != 0 ?
				"rodata" : "text", sect->symbol->label);
		}
		ksdebug(pack, "Aborted. run-pre: could not match some "
			"sections.\n");
		return NO_MATCH;
	}
	return OK;
}
/*
 * Search for the section in the running kernel. Returns OK if and
 * only if it finds precisely one address in the kernel matching the
 * section.
 */
static abort_t find_section(struct ksplice_pack *pack,
			    struct ksplice_section *sect)
{
	int i;
	abort_t ret;
	unsigned long run_addr;
	LIST_HEAD(vals);
	struct candidate_val *v, *n;

#ifdef KSPLICE_STANDALONE
	ret = add_system_map_candidates(pack, pack->helper_system_map,
					pack->helper_system_map_end,
					sect->symbol->label, &vals);
	if (ret != OK) {
		release_vals(&vals);
		return ret;
	}
#endif /* KSPLICE_STANDALONE */
	ret = lookup_symbol(pack, sect->symbol, &vals);
	if (ret != OK) {
		release_vals(&vals);
		return ret;
	}

	ksdebug(pack, "run-pre: starting sect search for %s\n",
		sect->symbol->label);

	/* First pass: drop every candidate address that fails a trial
	 * (RUN_PRE_INITIAL) match. */
	list_for_each_entry_safe(v, n, &vals, list) {
		run_addr = v->val;

		yield();
		ret = try_addr(pack, sect, run_addr, NULL, RUN_PRE_INITIAL);
		if (ret == NO_MATCH) {
			list_del(&v->list);
			kfree(v);
		} else if (ret != OK) {
			release_vals(&vals);
			return ret;
		}
	}

#if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
	/* Without kallsyms, fall back to brute-force scanning for text
	 * sections when no candidate survived. */
	if (list_empty(&vals) && (sect->flags & KSPLICE_SECTION_DATA) == 0) {
		ret = brute_search_all(pack, sect, &vals);
		if (ret != OK) {
			release_vals(&vals);
			return ret;
		}
		/*
		 * Make sure run-pre matching output is displayed if
		 * brute_search succeeds.
		 */
		if (singular(&vals)) {
			run_addr = list_entry(vals.next, struct candidate_val,
					      list)->val;
			ret = try_addr(pack, sect, run_addr, NULL,
				       RUN_PRE_INITIAL);
			if (ret != OK) {
				ksdebug(pack, "run-pre: Debug run failed for "
					"sect %s:\n", sect->symbol->label);
				release_vals(&vals);
				return ret;
			}
		}
	}
#endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */

	/* Exactly one survivor: do the final, safety-record-creating
	 * match.  More than one: ambiguous, report up to 5 and fail. */
	if (singular(&vals)) {
		LIST_HEAD(safety_records);
		run_addr = list_entry(vals.next, struct candidate_val,
				      list)->val;
		ret = try_addr(pack, sect, run_addr, &safety_records,
			       RUN_PRE_FINAL);
		release_vals(&vals);
		if (ret != OK) {
			clear_list(&safety_records, struct safety_record, list);
			ksdebug(pack, "run-pre: Final run failed for sect "
				"%s:\n", sect->symbol->label);
		} else {
			list_splice(&safety_records, &pack->safety_records);
		}
		return ret;
	} else if (!list_empty(&vals)) {
		struct candidate_val *val;
		ksdebug(pack, "run-pre: multiple candidates for sect %s:\n",
			sect->symbol->label);
		i = 0;
		list_for_each_entry(val, &vals, list) {
			i++;
			ksdebug(pack, "%lx\n", val->val);
			if (i > 5) {
				ksdebug(pack, "...\n");
				break;
			}
		}
		release_vals(&vals);
		return NO_MATCH;
	}
	release_vals(&vals);
	return NO_MATCH;
}
/*
 * try_addr is the interface to run-pre matching. Its primary
 * purpose is to manage debugging information for run-pre matching;
 * all the hard work is in run_pre_cmp.
 */
static abort_t try_addr(struct ksplice_pack *pack,
			struct ksplice_section *sect,
			unsigned long run_addr,
			struct list_head *safety_records,
			enum run_pre_mode mode)
{
	abort_t ret;
	const struct module *run_module = __module_address(run_addr);

	/* A candidate inside our own primary module is nonsensical. */
	if (run_module == pack->primary) {
		ksdebug(pack, "run-pre: unexpected address %lx in primary "
			"module %s for sect %s\n", run_addr, run_module->name,
			sect->symbol->label);
		return UNEXPECTED;
	}
	if (!patches_module(run_module, pack->target)) {
		ksdebug(pack, "run-pre: ignoring address %lx in other module "
			"%s for sect %s\n", run_addr, run_module == NULL ?
			"vmlinux" : run_module->name, sect->symbol->label);
		return NO_MATCH;
	}

	/* Record the candidate tentatively; promoted to VAL only on a
	 * successful final match. */
	ret = create_labelval(pack, sect->symbol, run_addr, TEMP);
	if (ret != OK)
		return ret;

#ifdef CONFIG_FUNCTION_DATA_SECTIONS
	ret = run_pre_cmp(pack, sect, run_addr, safety_records, mode);
#else /* !CONFIG_FUNCTION_DATA_SECTIONS */
	/* Without -ffunction-sections, text sections need the
	 * architecture-specific comparison. */
	if ((sect->flags & KSPLICE_SECTION_TEXT) != 0)
		ret = arch_run_pre_cmp(pack, sect, run_addr, safety_records,
				       mode);
	else
		ret = run_pre_cmp(pack, sect, run_addr, safety_records, mode);
#endif /* CONFIG_FUNCTION_DATA_SECTIONS */
	if (ret == NO_MATCH && mode != RUN_PRE_FINAL) {
		set_temp_labelvals(pack, NOVAL);
		ksdebug(pack, "run-pre: %s sect %s does not match (r_a=%lx "
			"p_a=%lx s=%lx)\n",
			(sect->flags & KSPLICE_SECTION_RODATA) != 0 ? "rodata" :
			(sect->flags & KSPLICE_SECTION_DATA) != 0 ? "data" :
			"text", sect->symbol->label, run_addr, sect->address,
			sect->size);
		ksdebug(pack, "run-pre: ");
		/* At debug >= 1, rerun the comparison in RUN_PRE_DEBUG
		 * mode to emit a byte-by-byte trace. */
		if (pack->update->debug >= 1) {
#ifdef CONFIG_FUNCTION_DATA_SECTIONS
			ret = run_pre_cmp(pack, sect, run_addr, safety_records,
					  RUN_PRE_DEBUG);
#else /* !CONFIG_FUNCTION_DATA_SECTIONS */
			if ((sect->flags & KSPLICE_SECTION_TEXT) != 0)
				ret = arch_run_pre_cmp(pack, sect, run_addr,
						       safety_records,
						       RUN_PRE_DEBUG);
			else
				ret = run_pre_cmp(pack, sect, run_addr,
						  safety_records,
						  RUN_PRE_DEBUG);
#endif /* CONFIG_FUNCTION_DATA_SECTIONS */
			set_temp_labelvals(pack, NOVAL);
		}
		ksdebug(pack, "\n");
		return ret;
	} else if (ret != OK) {
		set_temp_labelvals(pack, NOVAL);
		return ret;
	}

	if (mode != RUN_PRE_FINAL) {
		set_temp_labelvals(pack, NOVAL);
		ksdebug(pack, "run-pre: candidate for sect %s=%lx\n",
			sect->symbol->label, run_addr);
		return OK;
	}

	/* Final match succeeded: commit the temporary labelvals. */
	set_temp_labelvals(pack, VAL);
	ksdebug(pack, "run-pre: found sect %s=%lx\n", sect->symbol->label,
		run_addr);
	return OK;
}
2059 * run_pre_cmp is the primary run-pre matching function; it determines
2060 * whether the given ksplice_section matches the code or data in the
2061 * running kernel starting at run_addr.
2063 * If run_pre_mode is RUN_PRE_FINAL, a safety record for the matched
2064 * section is created.
2066 * The run_pre_mode is also used to determine what debugging
2067 * information to display.
static abort_t run_pre_cmp(struct ksplice_pack *pack,
			   const struct ksplice_section *sect,
			   unsigned long run_addr,
			   struct list_head *safety_records,
			   enum run_pre_mode mode)
{
	int matched = 0;
	abort_t ret;
	const struct ksplice_reloc *r, *finger;
	const unsigned char *pre, *run, *pre_start, *run_start;
	unsigned char runval;

	pre_start = (const unsigned char *)sect->address;
	run_start = (const unsigned char *)run_addr;

	finger = init_reloc_search(pack, sect);

	pre = pre_start;
	run = run_start;
	while (pre < pre_start + sect->size) {
		unsigned long offset = pre - pre_start;
		/* Relocated words are compared symbolically, not bytewise. */
		ret = lookup_reloc(pack, &finger, (unsigned long)pre, &r);
		if (ret == OK) {
			ret = handle_reloc(pack, sect, r, (unsigned long)run,
					   mode);
			if (ret != OK) {
				if (mode == RUN_PRE_INITIAL)
					ksdebug(pack, "reloc in sect does not "
						"match after %lx/%lx bytes\n",
						offset, sect->size);
				return ret;
			}
			if (mode == RUN_PRE_DEBUG)
				print_bytes(pack, run, r->howto->size, pre,
					    r->howto->size);
			/* Skip past the relocated word in both images. */
			pre += r->howto->size;
			run += r->howto->size;
			finger++;
			continue;
		} else if (ret != NO_MATCH) {
			return ret;
		}

		if ((sect->flags & KSPLICE_SECTION_TEXT) != 0) {
			/* Paravirt patch sites may legitimately differ. */
			ret = handle_paravirt(pack, (unsigned long)pre,
					      (unsigned long)run, &matched);
			if (ret != OK)
				return ret;
			if (matched != 0) {
				if (mode == RUN_PRE_DEBUG)
					print_bytes(pack, run, matched, pre,
						    matched);
				pre += matched;
				run += matched;
				continue;
			}
		}

		if (probe_kernel_read(&runval, (void *)run, 1) == -EFAULT) {
			if (mode == RUN_PRE_INITIAL)
				ksdebug(pack, "sect unmapped after %lx/%lx "
					"bytes\n", offset, sect->size);
			return NO_MATCH;
		}

		/* Data sections may differ byte-for-byte without failing. */
		if (runval != *pre &&
		    (sect->flags & KSPLICE_SECTION_DATA) == 0) {
			if (mode == RUN_PRE_INITIAL)
				ksdebug(pack, "sect does not match after "
					"%lx/%lx bytes\n", offset, sect->size);
			if (mode == RUN_PRE_DEBUG) {
				print_bytes(pack, run, 1, pre, 1);
				ksdebug(pack, "[p_o=%lx] ! ", offset);
				print_bytes(pack, run + 1, 2, pre + 1, 2);
			}
			return NO_MATCH;
		}
		if (mode == RUN_PRE_DEBUG)
			print_bytes(pack, run, 1, pre, 1);
		pre++;
		run++;
	}
	return create_safety_record(pack, sect, safety_records, run_addr,
				    run - run_start);
}
/*
 * Print run and pre bytes side by side for debugging.  Positions where
 * the two agree print once; disagreements print as run/pre pairs, and
 * leftover bytes on either side print with the missing half blank.
 */
static void print_bytes(struct ksplice_pack *pack,
			const unsigned char *run, int runc,
			const unsigned char *pre, int prec)
{
	int i;
	int common = runc < prec ? runc : prec;

	for (i = 0; i < common; i++) {
		if (run[i] != pre[i])
			ksdebug(pack, "%02x/%02x ", run[i], pre[i]);
		else
			ksdebug(pack, "%02x ", run[i]);
	}
	for (i = common; i < runc; i++)
		ksdebug(pack, "%02x/ ", run[i]);
	for (i = common; i < prec; i++)
		ksdebug(pack, "/%02x ", pre[i]);
}
2173 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
/*
 * Linearly scan [start, start + len) for possible matches of sect.  A
 * candidate first has its first byte compared cheaply against the pre
 * code; only on a byte hit is the full run-pre comparison attempted.
 * Every OK candidate address is appended to vals.
 */
static abort_t brute_search(struct ksplice_pack *pack,
			    struct ksplice_section *sect,
			    const void *start, unsigned long len,
			    struct list_head *vals)
{
	unsigned long addr;
	char run, pre;
	abort_t ret;

	for (addr = (unsigned long)start; addr < (unsigned long)start + len;
	     addr++) {
		/* Don't hog the CPU during a long scan. */
		if (addr % 100000 == 0)
			yield();

		/* Hitting an unmapped page ends this region's search only. */
		if (probe_kernel_read(&run, (void *)addr, 1) == -EFAULT)
			return OK;

		pre = *(const unsigned char *)(sect->address);

		if (run != pre)
			continue;

		ret = try_addr(pack, sect, addr, NULL, RUN_PRE_INITIAL);
		if (ret == OK) {
			ret = add_candidate_val(pack, vals, addr);
			if (ret != OK)
				return ret;
		} else if (ret != NO_MATCH) {
			return ret;
		}
	}

	return OK;
}
/*
 * Brute-force scan for sect across every loaded module that could be a
 * patch target, and then across the core kernel image.  Debug output is
 * suppressed for the duration, since the scan would produce huge volumes.
 */
static abort_t brute_search_all(struct ksplice_pack *pack,
				struct ksplice_section *sect,
				struct list_head *vals)
{
	struct module *m;
	abort_t ret = OK;
	int saved_debug;

	ksdebug(pack, "brute_search: searching for %s\n", sect->symbol->label);
	saved_debug = pack->update->debug;
	pack->update->debug = 0;

	/*
	 * NOTE(review): walks the global modules list without a visible
	 * lock here; presumably the caller holds module_mutex or an RCU
	 * read lock -- confirm at the call site.
	 */
	list_for_each_entry(m, &modules, list) {
		if (!patches_module(m, pack->target) || m == pack->primary)
			continue;
		ret = brute_search(pack, sect, m->module_core, m->core_size,
				   vals);
		if (ret != OK)
			goto out;
		ret = brute_search(pack, sect, m->module_init, m->init_size,
				   vals);
		if (ret != OK)
			goto out;
	}

	/* Finally scan the core kernel text itself. */
	ret = brute_search(pack, sect, (const void *)init_mm.start_code,
			   init_mm.end_code - init_mm.start_code, vals);

out:
	pack->update->debug = saved_debug;
	return ret;
}
2241 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
/* A [address, address + size) span, used as the bsearch key for relocs. */
struct range {
	unsigned long address;
	unsigned long size;
};
2248 static int reloc_bsearch_compare(const void *key, const void *elt)
2250 const struct range *range = key;
2251 const struct ksplice_reloc *r = elt;
2252 if (range->address + range->size <= r->blank_addr)
2253 return -1;
2254 if (range->address > r->blank_addr)
2255 return 1;
2256 return 0;
/*
 * Binary-search [start, end) for a relocation whose blank_addr falls in
 * [address, address + size); returns the FIRST such relocation, or NULL
 * if there is none.
 */
static const struct ksplice_reloc *find_reloc(const struct ksplice_reloc *start,
					      const struct ksplice_reloc *end,
					      unsigned long address,
					      unsigned long size)
{
	const struct ksplice_reloc *r;
	struct range range = { address, size };
	r = bsearch((void *)&range, start, end - start, sizeof(*r),
		    reloc_bsearch_compare);
	if (r == NULL)
		return NULL;
	/* bsearch may land anywhere in the range; rewind to the first hit. */
	while (r > start && (r - 1)->blank_addr >= address)
		r--;
	return r;
}
2275 static const struct ksplice_reloc *
2276 init_reloc_search(struct ksplice_pack *pack, const struct ksplice_section *sect)
2278 const struct ksplice_reloc *r;
2279 r = find_reloc(pack->helper_relocs, pack->helper_relocs_end,
2280 sect->address, sect->size);
2281 if (r == NULL)
2282 return pack->helper_relocs_end;
2283 return r;
2287 * lookup_reloc implements an amortized O(1) lookup for the next
2288 * helper relocation. It must be called with a strictly increasing
2289 * sequence of addresses.
2291 * The fingerp is private data for lookup_reloc, and needs to have
2292 * been initialized as a pointer to the result of find_reloc (or
2293 * init_reloc_search).
static abort_t lookup_reloc(struct ksplice_pack *pack,
			    const struct ksplice_reloc **fingerp,
			    unsigned long addr,
			    const struct ksplice_reloc **relocp)
{
	const struct ksplice_reloc *r = *fingerp;
	int canary_ret;

	/*
	 * Advance the finger past relocations ending at or before addr.
	 * Zero-size relocations located exactly at addr must not be skipped.
	 */
	while (r < pack->helper_relocs_end &&
	       addr >= r->blank_addr + r->howto->size &&
	       !(addr == r->blank_addr && r->howto->size == 0))
		r++;
	*fingerp = r;
	if (r == pack->helper_relocs_end)
		return NO_MATCH;
	if (addr < r->blank_addr)
		return NO_MATCH;
	*relocp = r;
	if (r->howto->type != KSPLICE_HOWTO_RELOC)
		return OK;

	/*
	 * A missing canary means the value was already filled in (an
	 * altinstr site); such a relocation cannot be matched.
	 */
	canary_ret = contains_canary(pack, r->blank_addr, r->howto);
	if (canary_ret < 0)
		return UNEXPECTED;
	if (canary_ret == 0) {
		ksdebug(pack, "run-pre: reloc skipped at p_a=%lx to %s+%lx "
			"(altinstr)\n", r->blank_addr, r->symbol->label,
			r->target_addend);
		return NO_MATCH;
	}

	/* A true relocation must be queried at exactly its blank_addr. */
	if (addr != r->blank_addr) {
		ksdebug(pack, "Invalid nonzero relocation offset\n");
		return UNEXPECTED;
	}
	return OK;
}
2332 static abort_t handle_reloc(struct ksplice_pack *pack,
2333 const struct ksplice_section *sect,
2334 const struct ksplice_reloc *r,
2335 unsigned long run_addr, enum run_pre_mode mode)
2337 switch (r->howto->type) {
2338 case KSPLICE_HOWTO_RELOC:
2339 return handle_howto_reloc(pack, sect, r, run_addr, mode);
2340 case KSPLICE_HOWTO_DATE:
2341 case KSPLICE_HOWTO_TIME:
2342 return handle_howto_date(pack, sect, r, run_addr, mode);
2343 case KSPLICE_HOWTO_BUG:
2344 return handle_bug(pack, r, run_addr);
2345 case KSPLICE_HOWTO_EXTABLE:
2346 return handle_extable(pack, r, run_addr);
2347 default:
2348 ksdebug(pack, "Unexpected howto type %d\n", r->howto->type);
2349 return UNEXPECTED;
2354 * For date/time relocations, we check that the sequence of bytes
2355 * matches the format of a date or time.
static abort_t handle_howto_date(struct ksplice_pack *pack,
				 const struct ksplice_section *sect,
				 const struct ksplice_reloc *r,
				 unsigned long run_addr, enum run_pre_mode mode)
{
	abort_t ret;
	char *buf = kmalloc(r->howto->size, GFP_KERNEL);

	if (buf == NULL)
		return OUT_OF_MEMORY;
	if (probe_kernel_read(buf, (void *)run_addr, r->howto->size) == -EFAULT) {
		ret = NO_MATCH;
		goto out;
	}

	switch (r->howto->type) {
	case KSPLICE_HOWTO_TIME:
		/* __TIME__ layout: "HH:MM:SS" */
		if (isdigit(buf[0]) && isdigit(buf[1]) && buf[2] == ':' &&
		    isdigit(buf[3]) && isdigit(buf[4]) && buf[5] == ':' &&
		    isdigit(buf[6]) && isdigit(buf[7]))
			ret = OK;
		else
			ret = NO_MATCH;
		break;
	case KSPLICE_HOWTO_DATE:
		/* __DATE__ layout: "Mmm dd yyyy" (day may be space-padded) */
		if (isalpha(buf[0]) && isalpha(buf[1]) && isalpha(buf[2]) &&
		    buf[3] == ' ' && (buf[4] == ' ' || isdigit(buf[4])) &&
		    isdigit(buf[5]) && buf[6] == ' ' && isdigit(buf[7]) &&
		    isdigit(buf[8]) && isdigit(buf[9]) && isdigit(buf[10]))
			ret = OK;
		else
			ret = NO_MATCH;
		break;
	default:
		ret = UNEXPECTED;
	}
	if (ret == NO_MATCH && mode == RUN_PRE_INITIAL)
		ksdebug(pack, "%s string: \"%.*s\" does not match format\n",
			r->howto->type == KSPLICE_HOWTO_DATE ? "date" : "time",
			r->howto->size, buf);

	if (ret != OK)
		goto out;
	/* The format matched; tentatively bind the symbol to run_addr. */
	ret = create_labelval(pack, r->symbol, run_addr, TEMP);
out:
	kfree(buf);
	return ret;
}
2407 * Extract the value of a symbol used in a relocation in the pre code
2408 * during run-pre matching, giving an error if it conflicts with a
2409 * previously found value of that symbol
static abort_t handle_howto_reloc(struct ksplice_pack *pack,
				  const struct ksplice_section *sect,
				  const struct ksplice_reloc *r,
				  unsigned long run_addr,
				  enum run_pre_mode mode)
{
	struct ksplice_section *sym_sect = symbol_section(pack, r->symbol);
	unsigned long offset = r->target_addend;
	unsigned long val;
	abort_t ret;

	ret = read_reloc_value(pack, r, run_addr, &val);
	if (ret != OK)
		return ret;
	if (r->howto->pcrel)
		/* Convert the PC-relative value to an absolute address. */
		val += run_addr;

#ifdef KSPLICE_STANDALONE
	/* The match_map is only used in KSPLICE_STANDALONE */
	if (sym_sect == NULL || sym_sect->match_map == NULL || offset == 0) {
		;
	} else if (offset < 0 || offset >= sym_sect->size) {
		/*
		 * NOTE(review): offset is unsigned long, so "offset < 0" is
		 * always false; only the upper-bound test is live here.
		 */
		ksdebug(pack, "Out of range relocation: %s+%lx -> %s+%lx",
			sect->symbol->label, r->blank_addr - sect->address,
			r->symbol->label, offset);
		return NO_MATCH;
	} else if (sect == sym_sect && sect->match_map[offset] == NULL) {
		/* First reference into this section: record the mapping. */
		sym_sect->match_map[offset] =
		    (const unsigned char *)r->symbol->value + offset;
	} else if (sect == sym_sect && (unsigned long)sect->match_map[offset] ==
		   r->symbol->value + offset) {
		;
	} else if (sect == sym_sect) {
		ksdebug(pack, "Relocations to nonmatching locations within "
			"section %s: %lx does not match %lx\n",
			sect->symbol->label, offset,
			(unsigned long)sect->match_map[offset] -
			r->symbol->value);
		return NO_MATCH;
	} else if ((sym_sect->flags & KSPLICE_SECTION_MATCHED) == 0) {
		if (mode == RUN_PRE_INITIAL)
			ksdebug(pack, "Delaying matching of %s due to reloc "
				"from to unmatching section: %s+%lx\n",
				sect->symbol->label, r->symbol->label, offset);
		return NO_MATCH;
	} else if (sym_sect->match_map[offset] == NULL) {
		if (mode == RUN_PRE_INITIAL)
			ksdebug(pack, "Relocation not to instruction boundary: "
				"%s+%lx -> %s+%lx", sect->symbol->label,
				r->blank_addr - sect->address, r->symbol->label,
				offset);
		return NO_MATCH;
	} else if ((unsigned long)sym_sect->match_map[offset] !=
		   r->symbol->value + offset) {
		/* The target section moved; shift val to compensate. */
		if (mode == RUN_PRE_INITIAL)
			ksdebug(pack, "Match map shift %s+%lx: %lx != %lx\n",
				r->symbol->label, offset,
				r->symbol->value + offset,
				(unsigned long)sym_sect->match_map[offset]);
		val += r->symbol->value + offset -
		    (unsigned long)sym_sect->match_map[offset];
	}
#endif /* KSPLICE_STANDALONE */

	if (mode == RUN_PRE_INITIAL)
		ksdebug(pack, "run-pre: reloc at r_a=%lx p_a=%lx to %s+%lx: "
			"found %s = %lx\n", run_addr, r->blank_addr,
			r->symbol->label, offset, r->symbol->label, val);

	if (contains_canary(pack, run_addr, r->howto) != 0) {
		ksdebug(pack, "Aborted. Unexpected canary in run code at %lx"
			"\n", run_addr);
		return UNEXPECTED;
	}

	if ((sect->flags & KSPLICE_SECTION_DATA) != 0 &&
	    sect->symbol == r->symbol)
		return OK;
	ret = create_labelval(pack, r->symbol, val, TEMP);
	if (ret == NO_MATCH && mode == RUN_PRE_INITIAL)
		ksdebug(pack, "run-pre: reloc at r_a=%lx p_a=%lx: labelval %s "
			"= %lx does not match expected %lx\n", run_addr,
			r->blank_addr, r->symbol->label, r->symbol->value, val);

	if (ret != OK)
		return ret;
	/* Recursively match a string section referenced by this reloc. */
	if (sym_sect != NULL && (sym_sect->flags & KSPLICE_SECTION_MATCHED) == 0
	    && (sym_sect->flags & KSPLICE_SECTION_STRING) != 0) {
		if (mode == RUN_PRE_INITIAL)
			ksdebug(pack, "Recursively comparing string section "
				"%s\n", sym_sect->symbol->label);
		else if (mode == RUN_PRE_DEBUG)
			ksdebug(pack, "[str start] ");
		ret = run_pre_cmp(pack, sym_sect, val, NULL, mode);
		if (mode == RUN_PRE_DEBUG)
			ksdebug(pack, "[str end] ");
		if (ret == OK && mode == RUN_PRE_INITIAL)
			ksdebug(pack, "Successfully matched string section %s"
				"\n", sym_sect->symbol->label);
		else if (mode == RUN_PRE_INITIAL)
			ksdebug(pack, "Failed to match string section %s\n",
				sym_sect->symbol->label);
	}
	return ret;
}
2517 static int symbol_section_bsearch_compare(const void *a, const void *b)
2519 const struct ksplice_symbol *sym = a;
2520 const struct ksplice_section *sect = b;
2521 return strcmp(sym->label, sect->symbol->label);
2524 static int compare_section_labels(const void *va, const void *vb)
2526 const struct ksplice_section *a = va, *b = vb;
2527 return strcmp(a->symbol->label, b->symbol->label);
2530 static struct ksplice_section *symbol_section(struct ksplice_pack *pack,
2531 const struct ksplice_symbol *sym)
2533 return bsearch(sym, pack->helper_sections, pack->helper_sections_end -
2534 pack->helper_sections, sizeof(struct ksplice_section),
2535 symbol_section_bsearch_compare);
/* Find the relocation for the oldaddr of a ksplice_patch */
static const struct ksplice_reloc *patch_reloc(struct ksplice_pack *pack,
					       const struct ksplice_patch *p)
{
	unsigned long addr = (unsigned long)&p->oldaddr;
	const struct ksplice_reloc *r =
	    find_reloc(pack->primary_relocs, pack->primary_relocs_end, addr,
		       sizeof(addr));
	/* The relocation must lie entirely within the oldaddr field. */
	if (r == NULL || r->blank_addr < addr ||
	    r->blank_addr >= addr + sizeof(addr))
		return NULL;
	return r;
}
2553 * Populates vals with the possible values for ksym from the various
2554 * sources Ksplice uses to resolve symbols
static abort_t lookup_symbol(struct ksplice_pack *pack,
			     const struct ksplice_symbol *ksym,
			     struct list_head *vals)
{
	abort_t ret;

#ifdef KSPLICE_STANDALONE
	if (!bootstrapped)
		return OK;
#endif /* KSPLICE_STANDALONE */

	if (ksym->vals == NULL) {
		/* Already resolved to a single value: it replaces vals. */
		release_vals(vals);
		ksdebug(pack, "using detected sym %s=%lx\n", ksym->label,
			ksym->value);
		return add_candidate_val(pack, vals, ksym->value);
	}

#ifdef CONFIG_MODULE_UNLOAD
	/* The target module's exit routine is a candidate for cleanup_module. */
	if (strcmp(ksym->label, "cleanup_module") == 0 && pack->target != NULL
	    && pack->target->exit != NULL) {
		ret = add_candidate_val(pack, vals,
					(unsigned long)pack->target->exit);
		if (ret != OK)
			return ret;
	}
#endif

	if (ksym->name != NULL) {
		struct candidate_val *val;
		list_for_each_entry(val, ksym->vals, list) {
			ret = add_candidate_val(pack, vals, val->val);
			if (ret != OK)
				return ret;
		}

		/* Also consider symbols newly exported by this update. */
		ret = new_export_lookup(pack, ksym->name, vals);
		if (ret != OK)
			return ret;
	}

	return OK;
}
2600 #ifdef KSPLICE_STANDALONE
/*
 * Add each System.map candidate address for label to vals, correcting
 * for any constant kernel relocation offset.
 */
static abort_t
add_system_map_candidates(struct ksplice_pack *pack,
			  const struct ksplice_system_map *start,
			  const struct ksplice_system_map *end,
			  const char *label, struct list_head *vals)
{
	abort_t ret;
	long off;
	int i;
	const struct ksplice_system_map *smap;

	/* Some Fedora kernel releases have System.map files whose symbol
	 * addresses disagree with the running kernel by a constant address
	 * offset because of the CONFIG_PHYSICAL_START and CONFIG_PHYSICAL_ALIGN
	 * values used to compile these kernels. This constant address offset
	 * is always a multiple of 0x100000.
	 *
	 * If we observe an offset that is NOT a multiple of 0x100000, then the
	 * user provided us with an incorrect System.map file, and we should
	 * abort.
	 * If we observe an offset that is a multiple of 0x100000, then we can
	 * adjust the System.map address values accordingly and proceed.
	 */
	off = (unsigned long)printk - pack->map_printk;
	if (off & 0xfffff) {
		ksdebug(pack, "Aborted. System.map does not match kernel.\n");
		return BAD_SYSTEM_MAP;
	}

	smap = bsearch(label, start, end - start, sizeof(*smap),
		       system_map_bsearch_compare);
	if (smap == NULL)
		return OK;

	/* Offer every candidate for this label, shifted by the offset. */
	for (i = 0; i < smap->nr_candidates; i++) {
		ret = add_candidate_val(pack, vals, smap->candidates[i] + off);
		if (ret != OK)
			return ret;
	}
	return OK;
}
2643 static int system_map_bsearch_compare(const void *key, const void *elt)
2645 const struct ksplice_system_map *map = elt;
2646 const char *label = key;
2647 return strcmp(label, map->label);
2649 #endif /* !KSPLICE_STANDALONE */
 * An update could cause one module to export a symbol and at the same
 * time change another module to use that symbol. This violates the
 * normal situation where the packs can be handled independently.
2656 * new_export_lookup obtains symbol values from the changes to the
2657 * exported symbol table made by other packs.
static abort_t new_export_lookup(struct ksplice_pack *ipack, const char *name,
				 struct list_head *vals)
{
	struct ksplice_pack *pack;
	struct ksplice_patch *p;
	list_for_each_entry(pack, &ipack->update->packs, list) {
		for (p = pack->patches; p < pack->patches_end; p++) {
			const struct kernel_symbol *sym;
			const struct ksplice_reloc *r;
			if (p->type != KSPLICE_PATCH_EXPORT ||
			    strcmp(name, *(const char **)p->contents) != 0)
				continue;

			/* Check that the p->oldaddr reloc has been resolved. */
			r = patch_reloc(pack, p);
			if (r == NULL ||
			    contains_canary(pack, r->blank_addr, r->howto) != 0)
				continue;
			sym = (const struct kernel_symbol *)r->symbol->value;

			/*
			 * Check that the sym->value reloc has been resolved,
			 * if there is a Ksplice relocation there.
			 */
			r = find_reloc(pack->primary_relocs,
				       pack->primary_relocs_end,
				       (unsigned long)&sym->value,
				       sizeof(&sym->value));
			if (r != NULL &&
			    r->blank_addr == (unsigned long)&sym->value &&
			    contains_canary(pack, r->blank_addr, r->howto) != 0)
				continue;
			/* Fully resolved: sym->value is a usable candidate. */
			return add_candidate_val(ipack, vals, sym->value);
		}
	}
	return OK;
}
2698 * When apply_patches is called, the update should be fully prepared.
2699 * apply_patches will try to actually insert trampolines for the
2700 * update.
static abort_t apply_patches(struct update *update)
{
	int i;
	abort_t ret;
	struct ksplice_pack *pack;

	ret = map_trampoline_pages(update);
	if (ret != OK)
		return ret;

	/* Run pre-apply hooks; any nonzero return aborts the update. */
	list_for_each_entry(pack, &update->packs, list) {
		const typeof(int (*)(void)) *f;
		for (f = pack->pre_apply; f < pack->pre_apply_end; f++) {
			if ((*f)() != 0) {
				ret = CALL_FAILED;
				goto out;
			}
		}
	}

	/* Retry up to 5 times, sleeping 1s, while the old code is busy. */
	for (i = 0; i < 5; i++) {
		cleanup_conflicts(update);
#ifdef KSPLICE_STANDALONE
		bust_spinlocks(1);
#endif /* KSPLICE_STANDALONE */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
		ret = (__force abort_t)stop_machine(__apply_patches, update,
						    NULL);
#else /* LINUX_VERSION_CODE < */
/* 9b1a4d38373a5581a4e01032a3ccdd94cd93477b was after 2.6.26 */
		ret = (__force abort_t)stop_machine_run(__apply_patches, update,
							NR_CPUS);
#endif /* LINUX_VERSION_CODE */
#ifdef KSPLICE_STANDALONE
		bust_spinlocks(0);
#endif /* KSPLICE_STANDALONE */
		if (ret != CODE_BUSY)
			break;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(1000));
	}
out:
	unmap_trampoline_pages(update);

	if (ret == CODE_BUSY) {
		print_conflicts(update);
		_ksdebug(update, "Aborted %s. stack check: to-be-replaced "
			 "code is busy.\n", update->kid);
	} else if (ret == ALREADY_REVERSED) {
		_ksdebug(update, "Aborted %s. Ksplice update %s is already "
			 "reversed.\n", update->kid, update->kid);
	}

	if (ret != OK) {
		/* Failure: run fail-apply hooks before reporting. */
		list_for_each_entry(pack, &update->packs, list) {
			const typeof(void (*)(void)) *f;
			for (f = pack->fail_apply; f < pack->fail_apply_end;
			     f++)
				(*f)();
		}

		return ret;
	}

	/* Success: run post-apply hooks. */
	list_for_each_entry(pack, &update->packs, list) {
		const typeof(void (*)(void)) *f;
		for (f = pack->post_apply; f < pack->post_apply_end; f++)
			(*f)();
	}

	_ksdebug(update, "Atomic patch insertion for %s complete\n",
		 update->kid);
	return OK;
}
/*
 * Atomically reverse an applied update.  Mirrors apply_patches: run
 * pre-reverse hooks, retry __reverse_patches under stop_machine while
 * the to-be-reversed code is busy, then run post- or fail-reverse hooks.
 */
static abort_t reverse_patches(struct update *update)
{
	int i;
	abort_t ret;
	struct ksplice_pack *pack;

	clear_debug_buf(update);
	ret = init_debug_buf(update);
	if (ret != OK)
		return ret;

	_ksdebug(update, "Preparing to reverse %s\n", update->kid);

	ret = map_trampoline_pages(update);
	if (ret != OK)
		return ret;

	/* Run pre-reverse hooks; any nonzero return aborts the reversal. */
	list_for_each_entry(pack, &update->packs, list) {
		const typeof(int (*)(void)) *f;
		for (f = pack->pre_reverse; f < pack->pre_reverse_end; f++) {
			if ((*f)() != 0) {
				ret = CALL_FAILED;
				goto out;
			}
		}
	}

	/* Retry up to 5 times, sleeping 1s, while the old code is busy. */
	for (i = 0; i < 5; i++) {
		cleanup_conflicts(update);
		clear_list(&update->conflicts, struct conflict, list);
#ifdef KSPLICE_STANDALONE
		bust_spinlocks(1);
#endif /* KSPLICE_STANDALONE */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
		ret = (__force abort_t)stop_machine(__reverse_patches, update,
						    NULL);
#else /* LINUX_VERSION_CODE < */
/* 9b1a4d38373a5581a4e01032a3ccdd94cd93477b was after 2.6.26 */
		ret = (__force abort_t)stop_machine_run(__reverse_patches,
							update, NR_CPUS);
#endif /* LINUX_VERSION_CODE */
#ifdef KSPLICE_STANDALONE
		bust_spinlocks(0);
#endif /* KSPLICE_STANDALONE */
		if (ret != CODE_BUSY)
			break;
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(1000));
	}
out:
	unmap_trampoline_pages(update);

	if (ret == CODE_BUSY) {
		print_conflicts(update);
		_ksdebug(update, "Aborted %s. stack check: to-be-reversed "
			 "code is busy.\n", update->kid);
	} else if (ret == MODULE_BUSY) {
		_ksdebug(update, "Update %s is in use by another module\n",
			 update->kid);
	}

	if (ret != OK) {
		/* Failure: run fail-reverse hooks. */
		list_for_each_entry(pack, &update->packs, list) {
			const typeof(void (*)(void)) *f;
			for (f = pack->fail_reverse; f < pack->fail_reverse_end;
			     f++)
				(*f)();
		}

		return ret;
	}

	/* Success: run post-reverse hooks and drop stale safety records. */
	list_for_each_entry(pack, &update->packs, list) {
		const typeof(void (*)(void)) *f;
		for (f = pack->post_reverse; f < pack->post_reverse_end; f++)
			(*f)();
	}

	list_for_each_entry(pack, &update->packs, list)
		clear_list(&pack->safety_records, struct safety_record, list);

	_ksdebug(update, "Atomic patch removal for %s complete\n", update->kid);
	return OK;
}
/* Atomically insert the update; run from within stop_machine */
static int __apply_patches(void *updateptr)
{
	struct update *update = updateptr;
	struct ksplice_pack *pack;
	struct ksplice_module_list_entry *entry;
	struct ksplice_patch *p;
	abort_t ret;

	if (update->stage == STAGE_APPLIED)
		return (__force int)OK;

	if (update->stage != STAGE_PREPARING)
		return (__force int)UNEXPECTED;

	/* No thread may be running inside to-be-replaced code. */
	ret = check_each_task(update);
	if (ret != OK)
		return (__force int)ret;

	/* Pin every primary module; on failure undo the gets taken so far. */
	list_for_each_entry(pack, &update->packs, list) {
		if (try_module_get(pack->primary) != 1) {
			struct ksplice_pack *pack1;
			list_for_each_entry(pack1, &update->packs, list) {
				if (pack1 == pack)
					break;
				module_put(pack1->primary);
			}
			module_put(THIS_MODULE);
			return (__force int)UNEXPECTED;
		}
	}

	list_for_each_entry(pack, &update->packs, list) {
		const typeof(int (*)(void)) *f;
		for (f = pack->check_apply; f < pack->check_apply_end; f++)
			if ((*f)() != 0)
				return (__force int)CALL_FAILED;
	}

	/* Commit point: the update application will succeed. */

	update->stage = STAGE_APPLIED;
#ifdef TAINT_KSPLICE
	add_taint(TAINT_KSPLICE);
#endif

	list_for_each_entry(entry, &update->ksplice_module_list, update_list)
		list_add(&entry->list, &ksplice_module_list);

	list_for_each_entry(pack, &update->packs, list) {
		for (p = pack->patches; p < pack->patches_end; p++)
			insert_trampoline(p);
	}

	list_for_each_entry(pack, &update->packs, list) {
		const typeof(void (*)(void)) *f;
		for (f = pack->apply; f < pack->apply_end; f++)
			(*f)();
	}

	return (__force int)OK;
}
2925 /* Atomically remove the update; run from within stop_machine */
2926 static int __reverse_patches(void *updateptr)
2928 struct update *update = updateptr;
2929 struct ksplice_pack *pack;
2930 struct ksplice_module_list_entry *entry;
2931 const struct ksplice_patch *p;
2932 abort_t ret;
2934 if (update->stage != STAGE_APPLIED)
2935 return (__force int)OK;
2937 #ifdef CONFIG_MODULE_UNLOAD
2938 list_for_each_entry(pack, &update->packs, list) {
2939 if (module_refcount(pack->primary) != 1)
2940 return (__force int)MODULE_BUSY;
2942 #endif /* CONFIG_MODULE_UNLOAD */
2944 list_for_each_entry(entry, &update->ksplice_module_list, update_list) {
2945 if (!entry->applied && find_module(entry->target_name) != NULL)
2946 return COLD_UPDATE_LOADED;
2949 ret = check_each_task(update);
2950 if (ret != OK)
2951 return (__force int)ret;
2953 list_for_each_entry(pack, &update->packs, list) {
2954 for (p = pack->patches; p < pack->patches_end; p++) {
2955 ret = verify_trampoline(pack, p);
2956 if (ret != OK)
2957 return (__force int)ret;
2961 list_for_each_entry(pack, &update->packs, list) {
2962 const typeof(int (*)(void)) *f;
2963 for (f = pack->check_reverse; f < pack->check_reverse_end; f++)
2964 if ((*f)() != 0)
2965 return (__force int)CALL_FAILED;
2968 /* Commit point: the update reversal will succeed. */
2970 update->stage = STAGE_REVERSED;
2972 list_for_each_entry(pack, &update->packs, list)
2973 module_put(pack->primary);
2975 list_for_each_entry(entry, &update->ksplice_module_list, update_list)
2976 list_del(&entry->list);
2978 list_for_each_entry(pack, &update->packs, list) {
2979 const typeof(void (*)(void)) *f;
2980 for (f = pack->reverse; f < pack->reverse_end; f++)
2981 (*f)();
2984 list_for_each_entry(pack, &update->packs, list) {
2985 for (p = pack->patches; p < pack->patches_end; p++)
2986 remove_trampoline(p);
2989 return (__force int)OK;
2993 * Check whether any thread's instruction pointer or any address of
2994 * its stack is contained in one of the safety_records associated with
2995 * the update.
2997 * check_each_task must be called from inside stop_machine, because it
2998 * does not take tasklist_lock (which cannot be held by anyone else
2999 * during stop_machine).
static abort_t check_each_task(struct update *update)
{
	const struct task_struct *g, *p;
	abort_t status = OK, ret;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
/* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
	read_lock(&tasklist_lock);
#endif /* LINUX_VERSION_CODE */
	do_each_thread(g, p) {
		/* do_each_thread is a double loop! */
		ret = check_task(update, p, false);
		if (ret != OK) {
			/* Re-run to record the conflict for later reporting. */
			check_task(update, p, true);
			status = ret;
		}
		/* Anything worse than CODE_BUSY aborts the scan entirely. */
		if (ret != OK && ret != CODE_BUSY)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
/* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
			goto out;
#else /* LINUX_VERSION_CODE < */
			return ret;
#endif /* LINUX_VERSION_CODE */
	} while_each_thread(g, p);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
/* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
out:
	read_unlock(&tasklist_lock);
#endif /* LINUX_VERSION_CODE */
	return status;
}
/*
 * Check one task's instruction pointer and stack against the update's
 * safety records.  When rerun is true, the check is repeated solely to
 * record a conflict entry for debugging output.
 */
static abort_t check_task(struct update *update,
			  const struct task_struct *t, bool rerun)
{
	abort_t status, ret;
	struct conflict *conf = NULL;

	if (rerun) {
		/* GFP_ATOMIC: we are inside stop_machine and cannot sleep. */
		conf = kmalloc(sizeof(*conf), GFP_ATOMIC);
		if (conf == NULL)
			return OUT_OF_MEMORY;
		conf->process_name = kstrdup(t->comm, GFP_ATOMIC);
		if (conf->process_name == NULL) {
			kfree(conf);
			return OUT_OF_MEMORY;
		}
		conf->pid = t->pid;
		INIT_LIST_HEAD(&conf->stack);
		list_add(&conf->list, &update->conflicts);
	}

	status = check_address(update, conf, KSPLICE_IP(t));
	if (t == current) {
		/* Walk our own live stack from the current frame. */
		ret = check_stack(update, conf, task_thread_info(t),
				  (unsigned long *)__builtin_frame_address(0));
		if (status == OK)
			status = ret;
	} else if (!task_curr(t)) {
		/* A sleeping task: walk from its saved stack pointer. */
		ret = check_stack(update, conf, task_thread_info(t),
				  (unsigned long *)KSPLICE_SP(t));
		if (status == OK)
			status = ret;
	} else if (!is_stop_machine(t)) {
		/* Only stop_machine threads may be on-CPU right now. */
		status = UNEXPECTED_RUNNING_TASK;
	}
	return status;
}
3069 static abort_t check_stack(struct update *update, struct conflict *conf,
3070 const struct thread_info *tinfo,
3071 const unsigned long *stack)
3073 abort_t status = OK, ret;
3074 unsigned long addr;
3076 while (valid_stack_ptr(tinfo, stack)) {
3077 addr = *stack++;
3078 ret = check_address(update, conf, addr);
3079 if (ret != OK)
3080 status = ret;
3082 return status;
/*
 * Check one address (an instruction pointer or a word from a stack)
 * against every safety record of the update; when conf is non-NULL,
 * also record it on the conflict list for later reporting.
 */
static abort_t check_address(struct update *update,
			     struct conflict *conf, unsigned long addr)
{
	abort_t status = OK, ret;
	const struct safety_record *rec;
	struct ksplice_pack *pack;
	struct conflict_addr *ca = NULL;

	if (conf != NULL) {
		ca = kmalloc(sizeof(*ca), GFP_ATOMIC);
		if (ca == NULL)
			return OUT_OF_MEMORY;
		ca->addr = addr;
		ca->has_conflict = false;
		ca->label = NULL;
		list_add(&ca->list, &conf->stack);
	}

	list_for_each_entry(pack, &update->packs, list) {
		/* Compare the address the trampoline ultimately targets. */
		unsigned long tramp_addr = follow_trampolines(pack, addr);
		list_for_each_entry(rec, &pack->safety_records, list) {
			ret = check_record(ca, rec, tramp_addr);
			if (ret != OK)
				status = ret;
		}
	}
	return status;
}
3114 static abort_t check_record(struct conflict_addr *ca,
3115 const struct safety_record *rec, unsigned long addr)
3117 if (addr >= rec->addr && addr < rec->addr + rec->size) {
3118 if (ca != NULL) {
3119 ca->label = rec->label;
3120 ca->has_conflict = true;
3122 return CODE_BUSY;
3124 return OK;
/* Is the task one of the stop_machine tasks? */
static bool is_stop_machine(const struct task_struct *t)
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
	const char *kstop_prefix = "kstop/";
#else /* LINUX_VERSION_CODE < */
	/* c9583e55fa2b08a230c549bd1e3c0bde6c50d9cc was after 2.6.27 */
	const char *kstop_prefix = "kstop";
#endif /* LINUX_VERSION_CODE */
	const char *num;
	/* 2.6.27+: per-cpu kthreads named "kstop<N>" or "kstop/<N>". */
	if (!starts_with(t->comm, kstop_prefix))
		return false;
	num = t->comm + strlen(kstop_prefix);
	/* Require the remainder to be all digits (the CPU number). */
	return num[strspn(num, "0123456789")] == '\0';
#else /* LINUX_VERSION_CODE < */
	/* ffdb5976c47609c862917d4c186ecbb5706d2dda was after 2.6.26 */
	return strcmp(t->comm, "kstopmachine") == 0;
#endif /* LINUX_VERSION_CODE */
}
/*
 * Free every conflict recorded on the update, including each
 * conflict's per-address stack entries and its kstrdup'd process name.
 */
static void cleanup_conflicts(struct update *update)
{
	struct conflict *conf;
	list_for_each_entry(conf, &update->conflicts, list) {
		clear_list(&conf->stack, struct conflict_addr, list);
		kfree(conf->process_name);
	}
	clear_list(&update->conflicts, struct conflict, list);
}
/*
 * Dump every recorded conflict (pid, comm, and each stack word, with
 * conflicting words flagged) to the update's debug log.
 */
static void print_conflicts(struct update *update)
{
	const struct conflict *conf;
	const struct conflict_addr *ca;
	list_for_each_entry(conf, &update->conflicts, list) {
		_ksdebug(update, "stack check: pid %d (%s):", conf->pid,
			 conf->process_name);
		list_for_each_entry(ca, &conf->stack, list) {
			_ksdebug(update, " %lx", ca->addr);
			if (ca->has_conflict)
				_ksdebug(update, " [<-CONFLICT]");
		}
		_ksdebug(update, "\n");
	}
}
/*
 * Overwrite the start of the old function with the trampoline,
 * saving the original bytes in p->saved so it can be reversed.
 * set_fs(KERNEL_DS) lets the copy target kernel text addresses.
 */
static void insert_trampoline(struct ksplice_patch *p)
{
	mm_segment_t old_fs = get_fs();
	set_fs(KERNEL_DS);
	memcpy(p->saved, p->vaddr, p->size);
	memcpy(p->vaddr, p->contents, p->size);
	/* Keep the I-cache coherent with the just-modified text. */
	flush_icache_range(p->oldaddr, p->oldaddr + p->size);
	set_fs(old_fs);
}
/*
 * Confirm the trampoline bytes are still in place (nothing else has
 * rewritten that text since we inserted it).
 */
static abort_t verify_trampoline(struct ksplice_pack *pack,
				 const struct ksplice_patch *p)
{
	if (memcmp(p->vaddr, p->contents, p->size) != 0) {
		ksdebug(pack, "Aborted. Trampoline at %lx has been "
			"overwritten.\n", p->oldaddr);
		return CODE_BUSY;
	}
	return OK;
}
/* Restore the original bytes saved by insert_trampoline. */
static void remove_trampoline(const struct ksplice_patch *p)
{
	mm_segment_t old_fs = get_fs();
	set_fs(KERNEL_DS);
	memcpy(p->vaddr, p->saved, p->size);
	flush_icache_range(p->oldaddr, p->oldaddr + p->size);
	set_fs(old_fs);
}
/* Returns NO_MATCH if there's already a labelval with a different value */
static abort_t create_labelval(struct ksplice_pack *pack,
			       struct ksplice_symbol *ksym,
			       unsigned long val, int status)
{
	val = follow_trampolines(pack, val);
	/* vals == NULL means the symbol already has a committed value. */
	if (ksym->vals == NULL)
		return ksym->value == val ? OK : NO_MATCH;

	ksym->value = val;
	if (status == TEMP) {
		/*
		 * Remember the candidate list so set_temp_labelvals can
		 * restore it if this tentative value is rejected.
		 */
		struct labelval *lv = kmalloc(sizeof(*lv), GFP_KERNEL);
		if (lv == NULL)
			return OUT_OF_MEMORY;
		lv->symbol = ksym;
		lv->saved_vals = ksym->vals;
		list_add(&lv->list, &pack->temp_labelvals);
	}
	ksym->vals = NULL;
	return OK;
}
/*
 * Creates a new safety_record for a helper section based on its
 * ksplice_section and run-pre matching information.
 */
static abort_t create_safety_record(struct ksplice_pack *pack,
				    const struct ksplice_section *sect,
				    struct list_head *record_list,
				    unsigned long run_addr,
				    unsigned long run_size)
{
	struct safety_record *rec;
	struct ksplice_patch *p;

	if (record_list == NULL)
		return OK;

	/* Only sections that are actually patched need safety records. */
	for (p = pack->patches; p < pack->patches_end; p++) {
		const struct ksplice_reloc *r = patch_reloc(pack, p);
		if (strcmp(sect->symbol->label, r->symbol->label) == 0)
			break;
	}
	if (p >= pack->patches_end)
		return OK;

	rec = kmalloc(sizeof(*rec), GFP_KERNEL);
	if (rec == NULL)
		return OUT_OF_MEMORY;
	/*
	 * The helper might be unloaded when checking reversing
	 * patches, so we need to kstrdup the label here.
	 */
	rec->label = kstrdup(sect->symbol->label, GFP_KERNEL);
	if (rec->label == NULL) {
		kfree(rec);
		return OUT_OF_MEMORY;
	}
	rec->addr = run_addr;
	rec->size = run_size;

	list_add(&rec->list, record_list);
	return OK;
}
/*
 * Add val to the list of candidate values for a symbol, de-duplicating
 * against values already on the list.
 */
static abort_t add_candidate_val(struct ksplice_pack *pack,
				 struct list_head *vals, unsigned long val)
{
	struct candidate_val *tmp, *new;

/*
 * Careful: follow trampolines before comparing values so that we do
 * not mistake the obsolete function for another copy of the function.
 */
	val = follow_trampolines(pack, val);

	list_for_each_entry(tmp, vals, list) {
		if (tmp->val == val)
			return OK;
	}
	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (new == NULL)
		return OUT_OF_MEMORY;
	new->val = val;
	list_add(&new->list, vals);
	return OK;
}
/* Free an entire candidate-value list. */
static void release_vals(struct list_head *vals)
{
	clear_list(vals, struct candidate_val, list);
}
/*
 * The temp_labelvals list is used to cache those temporary labelvals
 * that have been created to cross-check the symbol values obtained
 * from different relocations within a single section being matched.
 *
 * If status is VAL, commit the temp_labelvals as final values.
 *
 * If status is NOVAL, restore the list of possible values to the
 * ksplice_symbol, so that it no longer has a known value.
 */
static void set_temp_labelvals(struct ksplice_pack *pack, int status)
{
	struct labelval *lv, *n;
	list_for_each_entry_safe(lv, n, &pack->temp_labelvals, list) {
		if (status == NOVAL) {
			/* Reject: give the symbol its candidate list back. */
			lv->symbol->vals = lv->saved_vals;
		} else {
			/* Commit: the saved candidates are no longer needed. */
			release_vals(lv->saved_vals);
			kfree(lv->saved_vals);
		}
		list_del(&lv->list);
		kfree(lv);
	}
}
/* Is there a Ksplice canary with given howto at blank_addr? */
static int contains_canary(struct ksplice_pack *pack, unsigned long blank_addr,
			   const struct ksplice_reloc_howto *howto)
{
	/*
	 * Read exactly howto->size bytes and compare the dst_mask bits
	 * against the canary pattern.  Returns 1/0, or -1 on a bogus size.
	 */
	switch (howto->size) {
	case 1:
		return (*(uint8_t *)blank_addr & howto->dst_mask) ==
		    (KSPLICE_CANARY & howto->dst_mask);
	case 2:
		return (*(uint16_t *)blank_addr & howto->dst_mask) ==
		    (KSPLICE_CANARY & howto->dst_mask);
	case 4:
		return (*(uint32_t *)blank_addr & howto->dst_mask) ==
		    (KSPLICE_CANARY & howto->dst_mask);
#if BITS_PER_LONG >= 64
	case 8:
		return (*(uint64_t *)blank_addr & howto->dst_mask) ==
		    (KSPLICE_CANARY & howto->dst_mask);
#endif /* BITS_PER_LONG */
	default:
		ksdebug(pack, "Aborted. Invalid relocation size.\n");
		return -1;
	}
}
/*
 * Compute the address of the code you would actually run if you were
 * to call the function at addr (i.e., follow the sequence of jumps
 * starting at addr)
 */
static unsigned long follow_trampolines(struct ksplice_pack *pack,
					unsigned long addr)
{
	unsigned long new_addr;
	struct module *m;

	while (1) {
#ifdef KSPLICE_STANDALONE
		if (!bootstrapped)
			return addr;
#endif /* KSPLICE_STANDALONE */
		if (!__kernel_text_address(addr) ||
		    trampoline_target(pack, addr, &new_addr) != OK)
			return addr;
		m = __module_text_address(new_addr);
		/*
		 * Only follow jumps that land in a Ksplice primary module
		 * (not the target itself) -- i.e. our own trampolines.
		 */
		if (m == NULL || m == pack->target ||
		    !starts_with(m->name, "ksplice"))
			return addr;
		addr = new_addr;
	}
}
/* Does module a patch module b? */
static bool patches_module(const struct module *a, const struct module *b)
{
#ifdef KSPLICE_NO_KERNEL_SUPPORT
	/*
	 * Without kernel support, infer the relationship from the primary
	 * module's name: "ksplice_<kid>_<target>".  b == NULL means vmlinux.
	 */
	const char *name;
	if (a == b)
		return true;
	if (a == NULL || !starts_with(a->name, "ksplice_"))
		return false;
	name = a->name + strlen("ksplice_");
	name += strcspn(name, "_");
	if (name[0] != '_')
		return false;
	name++;
	return strcmp(name, b == NULL ? "vmlinux" : b->name) == 0;
#else /* !KSPLICE_NO_KERNEL_SUPPORT */
	struct ksplice_module_list_entry *entry;
	if (a == b)
		return true;
	/* NOTE(review): this branch dereferences b->name; unlike the branch
	 * above it does not special-case b == NULL -- confirm callers never
	 * pass NULL here. */
	list_for_each_entry(entry, &ksplice_module_list, list) {
		if (strcmp(entry->target_name, b->name) == 0 &&
		    strcmp(entry->primary_name, a->name) == 0)
			return true;
	}
	return false;
#endif /* KSPLICE_NO_KERNEL_SUPPORT */
}
/* Return true if str begins with prefix (every string starts with ""). */
static bool starts_with(const char *str, const char *prefix)
{
	const char *s = str, *p = prefix;
	while (*p != '\0') {
		if (*s != *p)
			return false;
		s++;
		p++;
	}
	return true;
}
3407 static bool singular(struct list_head *list)
3409 return !list_empty(list) && list->next->next == list;
/*
 * Generic binary search over a sorted array of n elements of the given
 * size.  cmp is called as cmp(key, element) and must return <0/0/>0.
 * Returns a pointer to a matching element, or NULL if none.
 *
 * Uses size_t half-open bounds so that (a) arrays longer than INT_MAX
 * elements are handled and (b) the midpoint computation cannot overflow
 * (the classic (start + end) / 2 bug).  Element addressing goes through
 * char * rather than arithmetic on void * (a GNU extension).
 */
static void *bsearch(const void *key, const void *base, size_t n,
		     size_t size, int (*cmp)(const void *key, const void *elt))
{
	size_t lo = 0, hi = n;
	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;
		const char *elt = (const char *)base + mid * size;
		int result = cmp(key, elt);
		if (result < 0)
			hi = mid;
		else if (result > 0)
			lo = mid + 1;
		else
			return (void *)elt;
	}
	return NULL;
}
3431 static int compare_relocs(const void *a, const void *b)
3433 const struct ksplice_reloc *ra = a, *rb = b;
3434 if (ra->blank_addr > rb->blank_addr)
3435 return 1;
3436 else if (ra->blank_addr < rb->blank_addr)
3437 return -1;
3438 else
3439 return ra->howto->size - rb->howto->size;
#ifdef KSPLICE_STANDALONE
/* Ordering for sort/bsearch over System.map entries, by label. */
static int compare_system_map(const void *a, const void *b)
{
	const struct ksplice_system_map *sa = a, *sb = b;
	return strcmp(sa->label, sb->label);
}
#endif /* KSPLICE_STANDALONE */
#ifdef CONFIG_DEBUG_FS
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
/* Old kernels don't have debugfs_create_blob */
/* read() handler: copy the wrapped blob out to userspace. */
static ssize_t read_file_blob(struct file *file, char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct debugfs_blob_wrapper *blob = file->private_data;
	return simple_read_from_buffer(user_buf, count, ppos, blob->data,
				       blob->size);
}
/* open() handler: stash the blob wrapper from the inode for read(). */
static int blob_open(struct inode *inode, struct file *file)
{
	if (inode->i_private)
		file->private_data = inode->i_private;
	return 0;
}
/* File operations for the backported debugfs blob file. */
static struct file_operations fops_blob = {
	.read = read_file_blob,
	.open = blob_open,
};
/* Minimal backport of debugfs_create_blob for kernels before 2.6.17. */
static struct dentry *debugfs_create_blob(const char *name, mode_t mode,
					  struct dentry *parent,
					  struct debugfs_blob_wrapper *blob)
{
	return debugfs_create_file(name, mode, parent, blob, &fops_blob);
}
#endif /* LINUX_VERSION_CODE */
/*
 * Expose the update's debug log as a root-readable debugfs blob named
 * after the update.  The backing buffer starts empty and grows in
 * _ksdebug.
 */
static abort_t init_debug_buf(struct update *update)
{
	update->debug_blob.size = 0;
	update->debug_blob.data = NULL;
	update->debugfs_dentry =
	    debugfs_create_blob(update->name, S_IFREG | S_IRUSR, NULL,
				&update->debug_blob);
	if (update->debugfs_dentry == NULL)
		return OUT_OF_MEMORY;
	return OK;
}
/* Tear down the debugfs entry and free the debug log buffer. */
static void clear_debug_buf(struct update *update)
{
	if (update->debugfs_dentry == NULL)
		return;
	debugfs_remove(update->debugfs_dentry);
	update->debugfs_dentry = NULL;
	update->debug_blob.size = 0;
	vfree(update->debug_blob.data);
	update->debug_blob.data = NULL;
}
/*
 * Append a printf-style message to the update's debug blob, growing the
 * vmalloc'd buffer geometrically (powers of two, min PAGE_SIZE) as
 * needed.  No-op when debugging is disabled.
 */
static int _ksdebug(struct update *update, const char *fmt, ...)
{
	va_list args;
	unsigned long size, old_size, new_size;

	if (update->debug == 0)
		return 0;

	/* size includes the trailing '\0' */
	va_start(args, fmt);
	size = 1 + vsnprintf(update->debug_blob.data, 0, fmt, args);
	va_end(args);
	old_size = update->debug_blob.size == 0 ? 0 :
	    max(PAGE_SIZE, roundup_pow_of_two(update->debug_blob.size));
	new_size = update->debug_blob.size + size == 0 ? 0 :
	    max(PAGE_SIZE, roundup_pow_of_two(update->debug_blob.size + size));
	if (new_size > old_size) {
		/* Grow: copy the old contents into a bigger buffer. */
		char *buf = vmalloc(new_size);
		if (buf == NULL)
			return -ENOMEM;
		memcpy(buf, update->debug_blob.data, update->debug_blob.size);
		vfree(update->debug_blob.data);
		update->debug_blob.data = buf;
	}
	/* Format again, this time into the (possibly new) buffer. */
	va_start(args, fmt);
	update->debug_blob.size += vsnprintf(update->debug_blob.data +
					     update->debug_blob.size,
					     size, fmt, args);
	va_end(args);
	return 0;
}
#else /* CONFIG_DEBUG_FS */
/* Without debugfs there is no log buffer to set up. */
static abort_t init_debug_buf(struct update *update)
{
	return OK;
}
/* Without debugfs there is no log buffer to tear down. */
static void clear_debug_buf(struct update *update)
{
	return;
}
/*
 * debugfs-less fallback: route debug messages to printk(KERN_DEBUG),
 * prefixing "ksplice: " only at the start of each logical line (state
 * tracked in update->debug_continue_line).
 */
static int _ksdebug(struct update *update, const char *fmt, ...)
{
	va_list args;

	if (update->debug == 0)
		return 0;

	if (!update->debug_continue_line)
		printk(KERN_DEBUG "ksplice: ");

	va_start(args, fmt);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
	vprintk(fmt, args);
#else /* LINUX_VERSION_CODE < */
	/* 683b229286b429244f35726b3c18caec429233bd was after 2.6.8 */
	{
		char *buf = kvasprintf(GFP_KERNEL, fmt, args);
		printk("%s", buf);
		kfree(buf);
	}
#endif /* LINUX_VERSION_CODE */
	va_end(args);

	/* The next call continues this line unless we ended with '\n'. */
	update->debug_continue_line =
	    fmt[0] == '\0' || fmt[strlen(fmt) - 1] != '\n';
	return 0;
}
3575 #ifdef KSPLICE_NO_KERNEL_SUPPORT
3576 #ifdef CONFIG_KALLSYMS
3577 static int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
3578 struct module *, unsigned long),
3579 void *data)
3581 char namebuf[KSYM_NAME_LEN];
3582 unsigned long i;
3583 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3584 unsigned int off;
3585 #endif /* LINUX_VERSION_CODE */
3586 int ret;
3588 /* kallsyms compression was added by 5648d78927ca65e74aadc88a2b1d6431e55e78ec
3589 * 2.6.10 was the first release after this commit
3591 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3592 for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
3593 off = kallsyms_expand_symbol(off, namebuf);
3594 ret = fn(data, namebuf, NULL, kallsyms_addresses[i]);
3595 if (ret != 0)
3596 return ret;
3598 #else /* LINUX_VERSION_CODE < */
3599 char *knames;
3601 for (i = 0, knames = kallsyms_names; i < kallsyms_num_syms; i++) {
3602 unsigned prefix = *knames++;
3604 strlcpy(namebuf + prefix, knames, KSYM_NAME_LEN - prefix);
3606 ret = fn(data, namebuf, NULL, kallsyms_addresses[i]);
3607 if (ret != OK)
3608 return ret;
3610 knames += strlen(knames) + 1;
3612 #endif /* LINUX_VERSION_CODE */
3613 return module_kallsyms_on_each_symbol(fn, data);
/* kallsyms compression was added by 5648d78927ca65e74aadc88a2b1d6431e55e78ec
 * 2.6.10 was the first release after this commit
 */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
extern u8 kallsyms_token_table[];
extern u16 kallsyms_token_index[];

/*
 * Decompress one kallsyms name at table offset off into result;
 * returns the offset of the next entry.  The first decoded character
 * is the symbol-type byte, which is skipped (skipped_first).
 */
static unsigned int kallsyms_expand_symbol(unsigned int off, char *result)
{
	long len, skipped_first = 0;
	const u8 *tptr, *data;

	/* Entry layout: one length byte, then len token indices. */
	data = &kallsyms_names[off];
	len = *data;
	data++;

	off += len + 1;

	while (len) {
		tptr = &kallsyms_token_table[kallsyms_token_index[*data]];
		data++;
		len--;

		while (*tptr) {
			if (skipped_first) {
				*result = *tptr;
				result++;
			} else
				skipped_first = 1;
			tptr++;
		}
	}

	*result = '\0';

	return off;
}
#endif /* LINUX_VERSION_CODE */
/*
 * Backport of module_kallsyms_on_each_symbol: invoke fn on every symbol
 * of every loaded module, stopping on the first nonzero return.
 * Callers hold module_mutex while walking the modules list.
 */
static int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
						    struct module *,
						    unsigned long),
					  void *data)
{
	struct module *mod;
	unsigned int i;
	int ret;

	list_for_each_entry(mod, &modules, list) {
		for (i = 0; i < mod->num_symtab; i++) {
			ret = fn(data, mod->strtab + mod->symtab[i].st_name,
				 mod, mod->symtab[i].st_value);
			if (ret != 0)
				return ret;
		}
	}
	return 0;
}
#endif /* CONFIG_KALLSYMS */
/* Backport of find_module: look up a loaded module by name. */
static struct module *find_module(const char *name)
{
	struct module *mod;

	list_for_each_entry(mod, &modules, list) {
		if (strcmp(mod->name, name) == 0)
			return mod;
	}
	return NULL;
}
#ifdef CONFIG_MODULE_UNLOAD
/* One edge in the module dependency graph (backported from module.c). */
struct module_use {
	struct list_head list;
	struct module *module_which_uses;
};
/* I'm not yet certain whether we need the strong form of this. */
static inline int strong_try_module_get(struct module *mod)
{
	/* Refuse modules that are still initializing or going away. */
	if (mod && mod->state != MODULE_STATE_LIVE)
		return -EBUSY;
	if (try_module_get(mod))
		return 0;
	return -ENOENT;
}
/* Does a already use b? */
static int already_uses(struct module *a, struct module *b)
{
	struct module_use *use;
	list_for_each_entry(use, &b->modules_which_use_me, list) {
		if (use->module_which_uses == a)
			return 1;
	}
	return 0;
}
/* Make it so module a uses b.  Must be holding module_mutex */
static int use_module(struct module *a, struct module *b)
{
	struct module_use *use;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
	/* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
	int no_warn;
#endif /* LINUX_VERSION_CODE */
	/* Nothing to do for the NULL (vmlinux) target or a known edge. */
	if (b == NULL || already_uses(a, b))
		return 1;

	if (strong_try_module_get(b) < 0)
		return 0;

	use = kmalloc(sizeof(*use), GFP_ATOMIC);
	if (!use) {
		module_put(b);
		return 0;
	}
	use->module_which_uses = a;
	list_add(&use->list, &b->modules_which_use_me);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
	/* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
	no_warn = sysfs_create_link(b->holders_dir, &a->mkobj.kobj, a->name);
#endif /* LINUX_VERSION_CODE */
	return 1;
}
#else /* CONFIG_MODULE_UNLOAD */
/* Without unload support there is no usage tracking; always succeed. */
static int use_module(struct module *a, struct module *b)
{
	return 1;
}
#endif /* CONFIG_MODULE_UNLOAD */
/* CRC lookup for a symbol table entry; NULL when modversions is off. */
#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
#else
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif
/*
 * Invoke fn on every symbol in each of the arrsize symbol sections,
 * returning true as soon as fn does.
 */
static bool each_symbol_in_section(const struct symsearch *arr,
				   unsigned int arrsize,
				   struct module *owner,
				   bool (*fn)(const struct symsearch *syms,
					      struct module *owner,
					      unsigned int symnum, void *data),
				   void *data)
{
	unsigned int i, j;

	for (j = 0; j < arrsize; j++) {
		for (i = 0; i < arr[j].stop - arr[j].start; i++)
			if (fn(&arr[j], owner, i, data))
				return true;
	}

	return false;
}
3773 /* Returns true as soon as fn returns true, otherwise false. */
3774 static bool each_symbol(bool (*fn)(const struct symsearch *arr,
3775 struct module *owner,
3776 unsigned int symnum, void *data),
3777 void *data)
3779 struct module *mod;
3780 const struct symsearch arr[] = {
3781 { __start___ksymtab, __stop___ksymtab, __start___kcrctab,
3782 NOT_GPL_ONLY, false },
3783 { __start___ksymtab_gpl, __stop___ksymtab_gpl,
3784 __start___kcrctab_gpl,
3785 GPL_ONLY, false },
3786 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
3787 { __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
3788 __start___kcrctab_gpl_future,
3789 WILL_BE_GPL_ONLY, false },
3790 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
3791 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
3792 { __start___ksymtab_unused, __stop___ksymtab_unused,
3793 __start___kcrctab_unused,
3794 NOT_GPL_ONLY, true },
3795 { __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
3796 __start___kcrctab_unused_gpl,
3797 GPL_ONLY, true },
3798 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
3801 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
3802 return 1;
3804 list_for_each_entry(mod, &modules, list) {
3805 struct symsearch module_arr[] = {
3806 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
3807 NOT_GPL_ONLY, false },
3808 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
3809 mod->gpl_crcs,
3810 GPL_ONLY, false },
3811 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
3812 { mod->gpl_future_syms,
3813 mod->gpl_future_syms + mod->num_gpl_future_syms,
3814 mod->gpl_future_crcs,
3815 WILL_BE_GPL_ONLY, false },
3816 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
3817 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
3818 { mod->unused_syms,
3819 mod->unused_syms + mod->num_unused_syms,
3820 mod->unused_crcs,
3821 NOT_GPL_ONLY, true },
3822 { mod->unused_gpl_syms,
3823 mod->unused_gpl_syms + mod->num_unused_gpl_syms,
3824 mod->unused_gpl_crcs,
3825 GPL_ONLY, true },
3826 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
3829 if (each_symbol_in_section(module_arr, ARRAY_SIZE(module_arr),
3830 mod, fn, data))
3831 return true;
3833 return false;
/* In/out argument bundle threaded through each_symbol for find_symbol. */
struct find_symbol_arg {
	/* Input */
	const char *name;
	bool gplok;
	bool warn;

	/* Output */
	struct module *owner;
	const unsigned long *crc;
	const struct kernel_symbol *sym;
};
/*
 * each_symbol callback: match one exported symbol against the lookup in
 * data, honoring GPL-only licensing and emitting the standard warnings
 * for soon-to-be-GPL-only and unused symbols.  Fills the output fields
 * of find_symbol_arg and returns true on a match.
 */
static bool find_symbol_in_section(const struct symsearch *syms,
				   struct module *owner,
				   unsigned int symnum, void *data)
{
	struct find_symbol_arg *fsa = data;

	if (strcmp(syms->start[symnum].name, fsa->name) != 0)
		return false;

	if (!fsa->gplok) {
		if (syms->licence == GPL_ONLY)
			return false;
		if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
			printk(KERN_WARNING "Symbol %s is being used "
			       "by a non-GPL module, which will not "
			       "be allowed in the future\n", fsa->name);
			printk(KERN_WARNING "Please see the file "
			       "Documentation/feature-removal-schedule.txt "
			       "in the kernel source tree for more details.\n");
		}
	}

#ifdef CONFIG_UNUSED_SYMBOLS
	if (syms->unused && fsa->warn) {
		printk(KERN_WARNING "Symbol %s is marked as UNUSED, "
		       "however this module is using it.\n", fsa->name);
		printk(KERN_WARNING
		       "This symbol will go away in the future.\n");
		printk(KERN_WARNING
		       "Please evalute if this is the right api to use and if "
		       "it really is, submit a report the linux kernel "
		       "mailinglist together with submitting your code for "
		       "inclusion.\n");
	}
#endif

	fsa->owner = owner;
	fsa->crc = symversion(syms->crcs, symnum);
	fsa->sym = &syms->start[symnum];
	return true;
}
/* Find a symbol and return it, along with, (optional) crc and
 * (optional) module which owns it */
static const struct kernel_symbol *find_symbol(const char *name,
					       struct module **owner,
					       const unsigned long **crc,
					       bool gplok, bool warn)
{
	struct find_symbol_arg fsa;

	fsa.name = name;
	fsa.gplok = gplok;
	fsa.warn = warn;

	if (each_symbol(find_symbol_in_section, &fsa)) {
		if (owner)
			*owner = fsa.owner;
		if (crc)
			*crc = fsa.crc;
		return fsa.sym;
	}

	return NULL;
}
/* Is addr inside the module's core (post-init) region? */
static inline int within_module_core(unsigned long addr, struct module *mod)
{
	return (unsigned long)mod->module_core <= addr &&
	    addr < (unsigned long)mod->module_core + mod->core_size;
}
/* Is addr inside the module's init (discardable) region? */
static inline int within_module_init(unsigned long addr, struct module *mod)
{
	return (unsigned long)mod->module_init <= addr &&
	    addr < (unsigned long)mod->module_init + mod->init_size;
}
/*
 * Backport of __module_address: map an address to the module containing
 * it, or NULL.  Reads the modules list with RCU where the kernel
 * supports it.
 */
static struct module *__module_address(unsigned long addr)
{
	struct module *mod;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
	list_for_each_entry_rcu(mod, &modules, list)
#else
	/* d72b37513cdfbd3f53f3d485a8c403cc96d2c95f was after 2.6.27 */
	list_for_each_entry(mod, &modules, list)
#endif
		if (within_module_core(addr, mod) ||
		    within_module_init(addr, mod))
			return mod;
	return NULL;
}
#endif /* KSPLICE_NO_KERNEL_SUPPORT */
/* A sysfs attribute on an update kobject, with typed show/store. */
struct update_attribute {
	struct attribute attr;
	ssize_t (*show)(struct update *update, char *buf);
	ssize_t (*store)(struct update *update, const char *buf, size_t len);
};
/* sysfs_ops dispatch: route a read to the attribute's show method. */
static ssize_t update_attr_show(struct kobject *kobj, struct attribute *attr,
				char *buf)
{
	struct update_attribute *attribute =
	    container_of(attr, struct update_attribute, attr);
	struct update *update = container_of(kobj, struct update, kobj);
	if (attribute->show == NULL)
		return -EIO;
	return attribute->show(update, buf);
}
/* sysfs_ops dispatch: route a write to the attribute's store method. */
static ssize_t update_attr_store(struct kobject *kobj, struct attribute *attr,
				 const char *buf, size_t len)
{
	struct update_attribute *attribute =
	    container_of(attr, struct update_attribute, attr);
	struct update *update = container_of(kobj, struct update, kobj);
	if (attribute->store == NULL)
		return -EIO;
	return attribute->store(update, buf, len);
}
/* sysfs operations shared by all update attributes. */
static struct sysfs_ops update_sysfs_ops = {
	.show = update_attr_show,
	.store = update_attr_store,
};
/* kobject release: free the update when its last reference drops. */
static void update_release(struct kobject *kobj)
{
	struct update *update;
	update = container_of(kobj, struct update, kobj);
	cleanup_ksplice_update(update);
}
/* sysfs "stage" read: report the update's current lifecycle stage. */
static ssize_t stage_show(struct update *update, char *buf)
{
	switch (update->stage) {
	case STAGE_PREPARING:
		return snprintf(buf, PAGE_SIZE, "preparing\n");
	case STAGE_APPLIED:
		return snprintf(buf, PAGE_SIZE, "applied\n");
	case STAGE_REVERSED:
		return snprintf(buf, PAGE_SIZE, "reversed\n");
	}
	return 0;
}
/* sysfs "abort_cause" read: report why the last stage change failed. */
static ssize_t abort_cause_show(struct update *update, char *buf)
{
	switch (update->abort_cause) {
	case OK:
		return snprintf(buf, PAGE_SIZE, "ok\n");
	case NO_MATCH:
		return snprintf(buf, PAGE_SIZE, "no_match\n");
#ifdef KSPLICE_STANDALONE
	case BAD_SYSTEM_MAP:
		return snprintf(buf, PAGE_SIZE, "bad_system_map\n");
#endif /* KSPLICE_STANDALONE */
	case CODE_BUSY:
		return snprintf(buf, PAGE_SIZE, "code_busy\n");
	case MODULE_BUSY:
		return snprintf(buf, PAGE_SIZE, "module_busy\n");
	case OUT_OF_MEMORY:
		return snprintf(buf, PAGE_SIZE, "out_of_memory\n");
	case FAILED_TO_FIND:
		return snprintf(buf, PAGE_SIZE, "failed_to_find\n");
	case ALREADY_REVERSED:
		return snprintf(buf, PAGE_SIZE, "already_reversed\n");
	case MISSING_EXPORT:
		return snprintf(buf, PAGE_SIZE, "missing_export\n");
	case UNEXPECTED_RUNNING_TASK:
		return snprintf(buf, PAGE_SIZE, "unexpected_running_task\n");
	case TARGET_NOT_LOADED:
		return snprintf(buf, PAGE_SIZE, "target_not_loaded\n");
	case CALL_FAILED:
		return snprintf(buf, PAGE_SIZE, "call_failed\n");
	case COLD_UPDATE_LOADED:
		return snprintf(buf, PAGE_SIZE, "cold_update_loaded\n");
	case UNEXPECTED:
		return snprintf(buf, PAGE_SIZE, "unexpected\n");
	default:
		return snprintf(buf, PAGE_SIZE, "unknown\n");
	}
	/* Not reachable: every case above returns. */
	return 0;
}
4035 static ssize_t conflict_show(struct update *update, char *buf)
4037 const struct conflict *conf;
4038 const struct conflict_addr *ca;
4039 int used = 0;
4040 mutex_lock(&module_mutex);
4041 list_for_each_entry(conf, &update->conflicts, list) {
4042 used += snprintf(buf + used, PAGE_SIZE - used, "%s %d",
4043 conf->process_name, conf->pid);
4044 list_for_each_entry(ca, &conf->stack, list) {
4045 if (!ca->has_conflict)
4046 continue;
4047 used += snprintf(buf + used, PAGE_SIZE - used, " %s",
4048 ca->label);
4050 used += snprintf(buf + used, PAGE_SIZE - used, "\n");
4052 mutex_unlock(&module_mutex);
4053 return used;
/* Used to pass maybe_cleanup_ksplice_update to kthread_run */
static int maybe_cleanup_ksplice_update_wrapper(void *updateptr)
{
	struct update *update = updateptr;
	mutex_lock(&module_mutex);
	maybe_cleanup_ksplice_update(update);
	mutex_unlock(&module_mutex);
	return 0;
}
/*
 * sysfs "stage" write: drive the update through its lifecycle.
 * Accepts "applied", "reversed", or "cleanup" (each with or without a
 * trailing newline); any other input is silently ignored.  Cleanup is
 * deferred to a kthread because it may remove this very kobject.
 */
static ssize_t stage_store(struct update *update, const char *buf, size_t len)
{
	enum stage old_stage;
	mutex_lock(&module_mutex);
	old_stage = update->stage;
	if ((strncmp(buf, "applied", len) == 0 ||
	     strncmp(buf, "applied\n", len) == 0) &&
	    update->stage == STAGE_PREPARING)
		update->abort_cause = apply_update(update);
	else if ((strncmp(buf, "reversed", len) == 0 ||
		  strncmp(buf, "reversed\n", len) == 0) &&
		 update->stage == STAGE_APPLIED)
		update->abort_cause = reverse_patches(update);
	else if ((strncmp(buf, "cleanup", len) == 0 ||
		  strncmp(buf, "cleanup\n", len) == 0) &&
		 update->stage == STAGE_REVERSED)
		kthread_run(maybe_cleanup_ksplice_update_wrapper, update,
			    "ksplice_cleanup_%s", update->kid);

	if (old_stage != STAGE_REVERSED && update->abort_cause == OK)
		printk(KERN_INFO "ksplice: Update %s %s successfully\n",
		       update->kid,
		       update->stage == STAGE_APPLIED ? "applied" : "reversed");
	mutex_unlock(&module_mutex);
	return len;
}
/* sysfs "debug" read: current debug level. */
static ssize_t debug_show(struct update *update, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", update->debug);
}
/* sysfs "debug" write: parse a decimal debug level. */
static ssize_t debug_store(struct update *update, const char *buf, size_t len)
{
	unsigned long l;
	int ret = strict_strtoul(buf, 10, &l);
	if (ret != 0)
		return ret;
	update->debug = l;
	return len;
}
/* sysfs "partial" read: whether partial application is enabled. */
static ssize_t partial_show(struct update *update, char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", update->partial);
}
/* sysfs "partial" write: parse a decimal flag value. */
static ssize_t partial_store(struct update *update, const char *buf, size_t len)
{
	unsigned long l;
	int ret = strict_strtoul(buf, 10, &l);
	if (ret != 0)
		return ret;
	update->partial = l;
	return len;
}
/* The sysfs attributes exposed on each update's kobject directory. */
static struct update_attribute stage_attribute =
	__ATTR(stage, 0600, stage_show, stage_store);
static struct update_attribute abort_cause_attribute =
	__ATTR(abort_cause, 0400, abort_cause_show, NULL);
static struct update_attribute debug_attribute =
	__ATTR(debug, 0600, debug_show, debug_store);
static struct update_attribute partial_attribute =
	__ATTR(partial, 0600, partial_show, partial_store);
static struct update_attribute conflict_attribute =
	__ATTR(conflicts, 0400, conflict_show, NULL);

static struct attribute *update_attrs[] = {
	&stage_attribute.attr,
	&abort_cause_attribute.attr,
	&debug_attribute.attr,
	&partial_attribute.attr,
	&conflict_attribute.attr,
	NULL
};
/* kobject type tying together the update's sysfs ops and attributes. */
static struct kobj_type update_ktype = {
	.sysfs_ops = &update_sysfs_ops,
	.release = update_release,
	.default_attrs = update_attrs,
};
#ifdef KSPLICE_STANDALONE
/* Module parameter: initial debug level for the bootstrap update. */
static int debug;
module_param(debug, int, 0600);
MODULE_PARM_DESC(debug, "Debug level");

extern struct ksplice_system_map ksplice_system_map[], ksplice_system_map_end[];

/*
 * Synthetic pack used at load time to bootstrap the standalone build
 * (resolve this module's own relocations against the running kernel).
 */
static struct ksplice_pack bootstrap_pack = {
	.name = "ksplice_" __stringify(KSPLICE_KID),
	.kid = "init_" __stringify(KSPLICE_KID),
	.target_name = NULL,
	.target = NULL,
	.map_printk = MAP_PRINTK,
	.primary = THIS_MODULE,
	.primary_system_map = ksplice_system_map,
	.primary_system_map_end = ksplice_system_map_end,
};
#endif /* KSPLICE_STANDALONE */
/*
 * Module init.  Standalone: bootstrap by applying this module's own
 * init relocations (sets `bootstrapped` on success).  In-kernel
 * support: just create the /sys/kernel/ksplice directory.
 */
static int init_ksplice(void)
{
#ifdef KSPLICE_STANDALONE
	struct ksplice_pack *pack = &bootstrap_pack;
	pack->update = init_ksplice_update(pack->kid);
	/* System.map entries must be sorted before bsearch lookups. */
	sort(pack->primary_system_map,
	     pack->primary_system_map_end - pack->primary_system_map,
	     sizeof(struct ksplice_system_map), compare_system_map, NULL);
	if (pack->update == NULL)
		return -ENOMEM;
	add_to_update(pack, pack->update);
	pack->update->debug = debug;
	pack->update->abort_cause =
	    apply_relocs(pack, ksplice_init_relocs, ksplice_init_relocs_end);
	if (pack->update->abort_cause == OK)
		bootstrapped = true;
	cleanup_ksplice_update(bootstrap_pack.update);
#else /* !KSPLICE_STANDALONE */
	ksplice_kobj = kobject_create_and_add("ksplice", kernel_kobj);
	if (ksplice_kobj == NULL)
		return -ENOMEM;
#endif /* KSPLICE_STANDALONE */
	return 0;
}
/* Module exit: drop the /sys/kernel/ksplice kobject (non-standalone). */
static void cleanup_ksplice(void)
{
#ifndef KSPLICE_STANDALONE
	kobject_put(ksplice_kobj);
#endif /* KSPLICE_STANDALONE */
}
module_init(init_ksplice);
module_exit(cleanup_ksplice);

MODULE_AUTHOR("Ksplice, Inc.");
MODULE_DESCRIPTION("Ksplice rebootless update system");
#ifdef KSPLICE_VERSION
MODULE_VERSION(KSPLICE_VERSION);
#endif
MODULE_LICENSE("GPL v2");