Clean up the KSPLICE_STANDALONE version of init_ksplice.
[ksplice.git] / kmodsrc / ksplice.c
blob e133dbbf0a5c4777e0942759e082b809c719ebda
1 /* Copyright (C) 2007-2008 Ksplice, Inc.
2 * Authors: Jeff Arnold, Anders Kaseorg, Tim Abbott
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License, version 2.
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
15 * 02110-1301, USA.
18 #include <linux/module.h>
19 #include <linux/version.h>
20 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
21 #include <linux/bug.h>
22 #else /* LINUX_VERSION_CODE */
23 /* 7664c5a1da4711bb6383117f51b94c8dc8f3f1cd was after 2.6.19 */
24 #endif /* LINUX_VERSION_CODE */
25 #include <linux/ctype.h>
26 #if defined CONFIG_DEBUG_FS || LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
27 #include <linux/debugfs.h>
28 #else /* CONFIG_DEBUG_FS */
29 /* a7a76cefc4b12bb6508afa4c77f11c2752cc365d was after 2.6.11 */
30 #endif /* CONFIG_DEBUG_FS */
31 #include <linux/errno.h>
32 #include <linux/kallsyms.h>
33 #include <linux/kobject.h>
34 #include <linux/kthread.h>
35 #include <linux/pagemap.h>
36 #include <linux/sched.h>
37 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
38 #include <linux/sort.h>
39 #else /* LINUX_VERSION_CODE < */
40 /* 8c63b6d337534a6b5fb111dc27d0850f535118c0 was after 2.6.11 */
41 #endif /* LINUX_VERSION_CODE */
42 #include <linux/stop_machine.h>
43 #include <linux/sysfs.h>
44 #include <linux/time.h>
45 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
46 #include <linux/uaccess.h>
47 #else /* LINUX_VERSION_CODE < */
48 /* linux/uaccess.h doesn't exist in kernels before 2.6.18 */
49 #include <asm/uaccess.h>
50 #endif /* LINUX_VERSION_CODE */
51 #include <linux/vmalloc.h>
52 #ifdef KSPLICE_STANDALONE
53 #include "ksplice.h"
54 #else /* !KSPLICE_STANDALONE */
55 #include <linux/ksplice.h>
56 #endif /* KSPLICE_STANDALONE */
57 #ifdef KSPLICE_NEED_PARAINSTRUCTIONS
58 #include <asm/alternative.h>
59 #endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
61 #if defined(KSPLICE_STANDALONE) && \
62 !defined(CONFIG_KSPLICE) && !defined(CONFIG_KSPLICE_MODULE)
63 #define KSPLICE_NO_KERNEL_SUPPORT 1
64 #endif /* KSPLICE_STANDALONE && !CONFIG_KSPLICE && !CONFIG_KSPLICE_MODULE */
66 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
67 /* 6e21828743247270d09a86756a0c11702500dbfb was after 2.6.18 */
68 #define bool _Bool
69 #define false 0
70 #define true 1
71 #endif /* LINUX_VERSION_CODE */
73 enum stage {
74 STAGE_PREPARING, /* the update is not yet applied */
75 STAGE_APPLIED, /* the update is applied */
76 STAGE_REVERSED, /* the update has been applied and reversed */
79 /* parameter to modify run-pre matching */
80 enum run_pre_mode {
81 RUN_PRE_INITIAL, /* dry run (only change temp_labelvals) */
82 RUN_PRE_DEBUG, /* dry run with byte-by-byte debugging */
83 RUN_PRE_FINAL, /* finalizes the matching */
84 #ifdef KSPLICE_STANDALONE
85 RUN_PRE_SILENT,
86 #endif /* KSPLICE_STANDALONE */
89 enum { NOVAL, TEMP, VAL };
91 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
92 /* 5d7b32de9935c65ca8285ac6ec2382afdbb5d479 was after 2.6.8 */
93 #define __bitwise__
94 #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
95 /* af4ca457eaf2d6682059c18463eb106e2ce58198 was after 2.6.14 */
96 #define __bitwise__ __bitwise
97 #endif
99 typedef int __bitwise__ abort_t;
101 #define OK ((__force abort_t) 0)
102 #define NO_MATCH ((__force abort_t) 1)
103 #define CODE_BUSY ((__force abort_t) 2)
104 #define MODULE_BUSY ((__force abort_t) 3)
105 #define OUT_OF_MEMORY ((__force abort_t) 4)
106 #define FAILED_TO_FIND ((__force abort_t) 5)
107 #define ALREADY_REVERSED ((__force abort_t) 6)
108 #define MISSING_EXPORT ((__force abort_t) 7)
109 #define UNEXPECTED_RUNNING_TASK ((__force abort_t) 8)
110 #define UNEXPECTED ((__force abort_t) 9)
111 #define TARGET_NOT_LOADED ((__force abort_t) 10)
112 #define CALL_FAILED ((__force abort_t) 11)
113 #ifdef KSPLICE_STANDALONE
114 #define BAD_SYSTEM_MAP ((__force abort_t) 12)
115 #endif /* KSPLICE_STANDALONE */
117 struct update {
118 const char *kid;
119 const char *name;
120 struct kobject kobj;
121 enum stage stage;
122 abort_t abort_cause;
123 int debug;
124 #ifdef CONFIG_DEBUG_FS
125 struct debugfs_blob_wrapper debug_blob;
126 struct dentry *debugfs_dentry;
127 #else /* !CONFIG_DEBUG_FS */
128 bool debug_continue_line;
129 #endif /* CONFIG_DEBUG_FS */
130 bool partial; /* is it OK if some target mods aren't loaded */
131 struct list_head packs; /* packs for loaded target mods */
132 struct list_head unused_packs; /* packs for non-loaded target mods */
133 struct list_head conflicts;
134 struct list_head list;
137 /* a process conflicting with an update */
138 struct conflict {
139 const char *process_name;
140 pid_t pid;
141 struct list_head stack;
142 struct list_head list;
145 /* an address on the stack of a conflict */
146 struct conflict_addr {
147 unsigned long addr; /* the address on the stack */
148 bool has_conflict; /* does this address in particular conflict? */
149 const char *label; /* the label of the conflicting safety_record */
150 struct list_head list;
153 #if defined(CONFIG_DEBUG_FS) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
154 /* Old kernels don't have debugfs_create_blob */
155 struct debugfs_blob_wrapper {
156 void *data;
157 unsigned long size;
159 #endif /* CONFIG_DEBUG_FS && LINUX_VERSION_CODE */
161 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
162 /* 930631edd4b1fe2781d9fe90edbe35d89dfc94cc was after 2.6.18 */
163 #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
164 #endif
166 struct labelval {
167 struct list_head list;
168 struct ksplice_symbol *symbol;
169 struct list_head *saved_vals;
172 /* region to be checked for conflicts in the stack check */
173 struct safety_record {
174 struct list_head list;
175 const char *label;
176 unsigned long addr; /* the address to be checked for conflicts
177 * (e.g. an obsolete function's starting addr)
179 unsigned long size; /* the size of the region to be checked */
182 /* possible value for a symbol */
183 struct candidate_val {
184 struct list_head list;
185 unsigned long val;
188 /* private struct used by init_symbol_array */
189 struct ksplice_lookup {
190 /* input */
191 struct ksplice_pack *pack;
192 struct ksplice_symbol **arr;
193 size_t size;
194 /* output */
195 abort_t ret;
198 #ifdef KSPLICE_NO_KERNEL_SUPPORT
199 struct symsearch {
200 const struct kernel_symbol *start, *stop;
201 const unsigned long *crcs;
202 enum {
203 NOT_GPL_ONLY,
204 GPL_ONLY,
205 WILL_BE_GPL_ONLY,
206 } licence;
207 bool unused;
209 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
211 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
212 /* c33fa9f5609e918824446ef9a75319d4a802f1f4 was after 2.6.25 */
214 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
215 /* 2fff0a48416af891dce38fd425246e337831e0bb was after 2.6.19 */
/* Return true if @addr lies in a mapped kernel page: attempt a safe
 * one-byte probe and treat -EFAULT as "not mapped". */
static bool virtual_address_mapped(unsigned long addr)
{
	char scratch;
	return probe_kernel_address(addr, scratch) != -EFAULT;
}
221 #else /* LINUX_VERSION_CODE < */
222 static bool virtual_address_mapped(unsigned long addr);
223 #endif /* LINUX_VERSION_CODE */
/* Copy @size bytes from kernel address @src into @dst, returning 0 on
 * success or -EFAULT if either end of the source range is unmapped.
 * Checking only the first and last byte assumes the range does not
 * straddle an unmapped hole between two mapped pages. */
static long probe_kernel_read(void *dst, void *src, size_t size)
{
	unsigned long first = (unsigned long)src;
	unsigned long last = first + size - 1;

	if (size == 0)
		return 0;
	if (!virtual_address_mapped(first) || !virtual_address_mapped(last))
		return -EFAULT;
	memcpy(dst, src, size);
	return 0;
}
236 #endif /* LINUX_VERSION_CODE */
238 static LIST_HEAD(updates);
239 #ifdef KSPLICE_STANDALONE
240 #if defined(CONFIG_KSPLICE) || defined(CONFIG_KSPLICE_MODULE)
241 extern struct list_head ksplice_module_list;
242 #else /* !CONFIG_KSPLICE */
243 LIST_HEAD(ksplice_module_list);
244 #endif /* CONFIG_KSPLICE */
245 #else /* !KSPLICE_STANDALONE */
246 LIST_HEAD(ksplice_module_list);
247 EXPORT_SYMBOL_GPL(ksplice_module_list);
248 static struct kobject *ksplice_kobj;
249 #endif /* KSPLICE_STANDALONE */
251 static struct kobj_type ksplice_ktype;
253 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
254 /* Old kernels do not have kcalloc
255 * e629946abd0bb8266e9c3d0fd1bff2ef8dec5443 was after 2.6.8
257 static void *kcalloc(size_t n, size_t size, typeof(GFP_KERNEL) flags)
259 char *mem;
260 if (n != 0 && size > ULONG_MAX / n)
261 return NULL;
262 mem = kmalloc(n * size, flags);
263 if (mem)
264 memset(mem, 0, n * size);
265 return mem;
267 #endif /* LINUX_VERSION_CODE */
269 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
270 /* 8c63b6d337534a6b5fb111dc27d0850f535118c0 was after 2.6.11 */
271 static void u32_swap(void *a, void *b, int size)
273 u32 t = *(u32 *)a;
274 *(u32 *)a = *(u32 *)b;
275 *(u32 *)b = t;
/* Byte-wise swap of two equally sized elements; works for any element
 * size >= 1 (the do/while assumes size > 0, as sort() guarantees). */
static void generic_swap(void *a, void *b, int size)
{
	char *x = a;
	char *y = b;

	do {
		char t = *x;
		*x++ = *y;
		*y++ = t;
	} while (--size > 0);
}
290 * sort - sort an array of elements
291 * @base: pointer to data to sort
292 * @num: number of elements
293 * @size: size of each element
294 * @cmp: pointer to comparison function
295 * @swap: pointer to swap function or NULL
297 * This function does a heapsort on the given array. You may provide a
298 * swap function optimized to your element type.
300 * Sorting time is O(n log n) both on average and worst-case. While
301 * qsort is about 20% faster on average, it suffers from exploitable
302 * O(n*n) worst-case behavior and extra memory requirements that make
303 * it less suitable for kernel use.
306 void sort(void *base, size_t num, size_t size,
307 int (*cmp)(const void *, const void *),
308 void (*swap)(void *, void *, int size))
310 /* pre-scale counters for performance */
311 int i = (num / 2 - 1) * size, n = num * size, c, r;
313 if (!swap)
314 swap = (size == 4 ? u32_swap : generic_swap);
316 /* heapify */
317 for (; i >= 0; i -= size) {
318 for (r = i; r * 2 + size < n; r = c) {
319 c = r * 2 + size;
320 if (c < n - size && cmp(base + c, base + c + size) < 0)
321 c += size;
322 if (cmp(base + r, base + c) >= 0)
323 break;
324 swap(base + r, base + c, size);
328 /* sort */
329 for (i = n - size; i > 0; i -= size) {
330 swap(base, base + i, size);
331 for (r = 0; r * 2 + size < i; r = c) {
332 c = r * 2 + size;
333 if (c < i - size && cmp(base + c, base + c + size) < 0)
334 c += size;
335 if (cmp(base + r, base + c) >= 0)
336 break;
337 swap(base + r, base + c, size);
341 #endif /* LINUX_VERSION_CODE < */
343 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13)
344 /* Old kernels do not have kstrdup
345 * 543537bd922692bc978e2e356fcd8bfc9c2ee7d5 was 2.6.13-rc4
347 static char *kstrdup(const char *s, typeof(GFP_KERNEL) gfp)
349 size_t len;
350 char *buf;
352 if (!s)
353 return NULL;
355 len = strlen(s) + 1;
356 buf = kmalloc(len, gfp);
357 if (buf)
358 memcpy(buf, s, len);
359 return buf;
361 #endif /* LINUX_VERSION_CODE */
363 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
364 /* Old kernels use semaphore instead of mutex
365 * 97d1f15b7ef52c1e9c28dc48b454024bb53a5fd2 was after 2.6.16
367 #define mutex semaphore
368 #define mutex_lock down
369 #define mutex_unlock up
370 #endif /* LINUX_VERSION_CODE */
372 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
373 /* 11443ec7d9286dd25663516436a14edfb5f43857 was after 2.6.21 */
374 static char * __attribute_used__
375 kvasprintf(typeof(GFP_KERNEL) gfp, const char *fmt, va_list ap)
377 unsigned int len;
378 char *p, dummy[1];
379 va_list aq;
381 va_copy(aq, ap);
382 len = vsnprintf(dummy, 0, fmt, aq);
383 va_end(aq);
385 p = kmalloc(len + 1, gfp);
386 if (!p)
387 return NULL;
389 vsnprintf(p, len + 1, fmt, ap);
391 return p;
393 #endif
395 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
396 /* e905914f96e11862b130dd229f73045dad9a34e8 was after 2.6.17 */
397 static char * __attribute__((format (printf, 2, 3)))
398 kasprintf(typeof(GFP_KERNEL) gfp, const char *fmt, ...)
400 va_list ap;
401 char *p;
403 va_start(ap, fmt);
404 p = kvasprintf(gfp, fmt, ap);
405 va_end(ap);
407 return p;
409 #endif
411 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
412 /* 06b2a76d25d3cfbd14680021c1d356c91be6904e was after 2.6.24 */
/* Compat strict_strtoul for pre-2.6.25 kernels: parse @cp as an
 * unsigned long in @base, accepting only a fully consumed string (an
 * optional single trailing newline is tolerated).  On success stores
 * the value in *@res and returns 0; otherwise returns -EINVAL with
 * *@res set to 0. */
static int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
{
	char *end;
	unsigned long parsed;
	size_t input_len;

	*res = 0;
	input_len = strlen(cp);
	if (input_len == 0)
		return -EINVAL;

	parsed = simple_strtoul(cp, &end, base);
	if (*end == '\0' ||
	    (*end == '\n' && input_len == (size_t)(end - cp) + 1)) {
		*res = parsed;
		return 0;
	}
	return -EINVAL;
}
433 #endif
435 #ifndef task_thread_info
436 #define task_thread_info(task) (task)->thread_info
437 #endif /* !task_thread_info */
439 #ifdef KSPLICE_STANDALONE
441 static bool bootstrapped = false;
443 #ifdef CONFIG_KALLSYMS
444 extern unsigned long kallsyms_addresses[], kallsyms_num_syms;
445 extern u8 kallsyms_names[];
446 #endif /* CONFIG_KALLSYMS */
448 /* defined by ksplice-create */
449 extern const struct ksplice_reloc ksplice_init_relocs[],
450 ksplice_init_relocs_end[];
452 /* Obtained via System.map */
453 extern struct list_head modules;
454 extern struct mutex module_mutex;
455 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) && defined(CONFIG_UNUSED_SYMBOLS)
456 /* f71d20e961474dde77e6558396efb93d6ac80a4b was after 2.6.17 */
457 #define KSPLICE_KSYMTAB_UNUSED_SUPPORT 1
458 #endif /* LINUX_VERSION_CODE */
459 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
460 /* 9f28bb7e1d0188a993403ab39b774785892805e1 was after 2.6.16 */
461 #define KSPLICE_KSYMTAB_FUTURE_SUPPORT 1
462 #endif /* LINUX_VERSION_CODE */
463 extern const struct kernel_symbol __start___ksymtab[];
464 extern const struct kernel_symbol __stop___ksymtab[];
465 extern const unsigned long __start___kcrctab[];
466 extern const struct kernel_symbol __start___ksymtab_gpl[];
467 extern const struct kernel_symbol __stop___ksymtab_gpl[];
468 extern const unsigned long __start___kcrctab_gpl[];
469 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
470 extern const struct kernel_symbol __start___ksymtab_unused[];
471 extern const struct kernel_symbol __stop___ksymtab_unused[];
472 extern const unsigned long __start___kcrctab_unused[];
473 extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
474 extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
475 extern const unsigned long __start___kcrctab_unused_gpl[];
476 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
477 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
478 extern const struct kernel_symbol __start___ksymtab_gpl_future[];
479 extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
480 extern const unsigned long __start___kcrctab_gpl_future[];
481 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
483 #endif /* KSPLICE_STANDALONE */
485 static struct update *init_ksplice_update(const char *kid);
486 static void cleanup_ksplice_update(struct update *update);
487 static void maybe_cleanup_ksplice_update(struct update *update);
488 static void add_to_update(struct ksplice_pack *pack, struct update *update);
489 static int ksplice_sysfs_init(struct update *update);
491 /* Preparing the relocations and patches for application */
492 static abort_t apply_update(struct update *update);
493 static abort_t prepare_pack(struct ksplice_pack *pack);
494 static abort_t finalize_pack(struct ksplice_pack *pack);
495 static abort_t finalize_patches(struct ksplice_pack *pack);
496 static abort_t add_dependency_on_address(struct ksplice_pack *pack,
497 unsigned long addr);
498 static abort_t map_trampoline_pages(struct update *update);
499 static void unmap_trampoline_pages(struct update *update);
500 static void *map_writable(void *addr, size_t len);
501 static abort_t apply_relocs(struct ksplice_pack *pack,
502 const struct ksplice_reloc *relocs,
503 const struct ksplice_reloc *relocs_end);
504 static abort_t apply_reloc(struct ksplice_pack *pack,
505 const struct ksplice_reloc *r);
506 static abort_t apply_howto_reloc(struct ksplice_pack *pack,
507 const struct ksplice_reloc *r);
508 static abort_t apply_howto_date(struct ksplice_pack *pack,
509 const struct ksplice_reloc *r);
510 static abort_t read_reloc_value(struct ksplice_pack *pack,
511 const struct ksplice_reloc *r,
512 unsigned long addr, unsigned long *valp);
513 static abort_t write_reloc_value(struct ksplice_pack *pack,
514 const struct ksplice_reloc *r,
515 unsigned long addr, unsigned long sym_addr);
516 static void __attribute__((noreturn)) ksplice_deleted(void);
518 /* run-pre matching */
519 static abort_t match_pack_sections(struct ksplice_pack *pack,
520 bool consider_data_sections);
521 static abort_t find_section(struct ksplice_pack *pack,
522 struct ksplice_section *sect);
523 static abort_t try_addr(struct ksplice_pack *pack,
524 struct ksplice_section *sect,
525 unsigned long run_addr,
526 struct list_head *safety_records,
527 enum run_pre_mode mode);
528 static abort_t run_pre_cmp(struct ksplice_pack *pack,
529 const struct ksplice_section *sect,
530 unsigned long run_addr,
531 struct list_head *safety_records,
532 enum run_pre_mode mode);
533 #ifndef CONFIG_FUNCTION_DATA_SECTIONS
534 /* defined in arch/ARCH/kernel/ksplice-arch.c */
535 static abort_t arch_run_pre_cmp(struct ksplice_pack *pack,
536 struct ksplice_section *sect,
537 unsigned long run_addr,
538 struct list_head *safety_records,
539 enum run_pre_mode mode);
540 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
541 static void print_bytes(struct ksplice_pack *pack,
542 const unsigned char *run, int runc,
543 const unsigned char *pre, int prec);
544 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
545 static abort_t brute_search(struct ksplice_pack *pack,
546 struct ksplice_section *sect,
547 const void *start, unsigned long len,
548 struct list_head *vals);
549 static abort_t brute_search_all(struct ksplice_pack *pack,
550 struct ksplice_section *sect,
551 struct list_head *vals);
552 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
553 static const struct ksplice_reloc *
554 init_reloc_search(struct ksplice_pack *pack,
555 const struct ksplice_section *sect);
556 static const struct ksplice_reloc *find_reloc(const struct ksplice_reloc *start,
557 const struct ksplice_reloc *end,
558 unsigned long address,
559 unsigned long size);
560 static abort_t lookup_reloc(struct ksplice_pack *pack,
561 const struct ksplice_reloc **fingerp,
562 unsigned long addr,
563 const struct ksplice_reloc **relocp);
564 static abort_t handle_reloc(struct ksplice_pack *pack,
565 const struct ksplice_section *sect,
566 const struct ksplice_reloc *r,
567 unsigned long run_addr, enum run_pre_mode mode);
568 static abort_t handle_howto_date(struct ksplice_pack *pack,
569 const struct ksplice_section *sect,
570 const struct ksplice_reloc *r,
571 unsigned long run_addr,
572 enum run_pre_mode mode);
573 static abort_t handle_howto_reloc(struct ksplice_pack *pack,
574 const struct ksplice_section *sect,
575 const struct ksplice_reloc *r,
576 unsigned long run_addr,
577 enum run_pre_mode mode);
578 static struct ksplice_section *symbol_section(struct ksplice_pack *pack,
579 const struct ksplice_symbol *sym);
580 static int compare_section_labels(const void *va, const void *vb);
581 static int symbol_section_bsearch_compare(const void *a, const void *b);
582 static const struct ksplice_reloc *patch_reloc(struct ksplice_pack *pack,
583 const struct ksplice_patch *p);
585 /* Computing possible addresses for symbols */
586 static abort_t lookup_symbol(struct ksplice_pack *pack,
587 const struct ksplice_symbol *ksym,
588 struct list_head *vals);
589 static void cleanup_symbol_arrays(struct ksplice_pack *pack);
590 static abort_t init_symbol_arrays(struct ksplice_pack *pack);
591 static abort_t init_symbol_array(struct ksplice_pack *pack,
592 struct ksplice_symbol *start,
593 struct ksplice_symbol *end);
594 static abort_t uniquify_symbols(struct ksplice_pack *pack);
595 static abort_t add_matching_values(struct ksplice_lookup *lookup,
596 const char *sym_name, unsigned long sym_val);
597 static bool add_export_values(const struct symsearch *syms,
598 struct module *owner,
599 unsigned int symnum, void *data);
600 static int symbolp_bsearch_compare(const void *key, const void *elt);
601 static int compare_symbolp_names(const void *a, const void *b);
602 static int compare_symbolp_labels(const void *a, const void *b);
603 #ifdef CONFIG_KALLSYMS
604 static int add_kallsyms_values(void *data, const char *name,
605 struct module *owner, unsigned long val);
606 #endif /* CONFIG_KALLSYMS */
607 #ifdef KSPLICE_STANDALONE
608 static abort_t
609 add_system_map_candidates(struct ksplice_pack *pack,
610 const struct ksplice_system_map *start,
611 const struct ksplice_system_map *end,
612 const char *label, struct list_head *vals);
613 static int compare_system_map(const void *a, const void *b);
614 static int system_map_bsearch_compare(const void *key, const void *elt);
615 #endif /* KSPLICE_STANDALONE */
616 static abort_t new_export_lookup(struct ksplice_pack *ipack, const char *name,
617 struct list_head *vals);
619 /* Atomic update trampoline insertion and removal */
620 static abort_t apply_patches(struct update *update);
621 static abort_t reverse_patches(struct update *update);
622 static int __apply_patches(void *update);
623 static int __reverse_patches(void *update);
624 static abort_t check_each_task(struct update *update);
625 static abort_t check_task(struct update *update,
626 const struct task_struct *t, bool rerun);
627 static abort_t check_stack(struct update *update, struct conflict *conf,
628 const struct thread_info *tinfo,
629 const unsigned long *stack);
630 static abort_t check_address(struct update *update,
631 struct conflict *conf, unsigned long addr);
632 static abort_t check_record(struct conflict_addr *ca,
633 const struct safety_record *rec,
634 unsigned long addr);
635 static bool is_stop_machine(const struct task_struct *t);
636 static void cleanup_conflicts(struct update *update);
637 static void print_conflicts(struct update *update);
638 static void insert_trampoline(struct ksplice_patch *p);
639 static abort_t verify_trampoline(struct ksplice_pack *pack,
640 const struct ksplice_patch *p);
641 static void remove_trampoline(const struct ksplice_patch *p);
643 static abort_t create_labelval(struct ksplice_pack *pack,
644 struct ksplice_symbol *ksym,
645 unsigned long val, int status);
646 static abort_t create_safety_record(struct ksplice_pack *pack,
647 const struct ksplice_section *sect,
648 struct list_head *record_list,
649 unsigned long run_addr,
650 unsigned long run_size);
651 static abort_t add_candidate_val(struct ksplice_pack *pack,
652 struct list_head *vals, unsigned long val);
653 static void release_vals(struct list_head *vals);
654 static void set_temp_labelvals(struct ksplice_pack *pack, int status_val);
656 static int contains_canary(struct ksplice_pack *pack, unsigned long blank_addr,
657 const struct ksplice_reloc_howto *howto);
658 static unsigned long follow_trampolines(struct ksplice_pack *pack,
659 unsigned long addr);
660 static bool patches_module(const struct module *a, const struct module *b);
661 static bool starts_with(const char *str, const char *prefix);
662 static bool singular(struct list_head *list);
663 static void *bsearch(const void *key, const void *base, size_t n,
664 size_t size, int (*cmp)(const void *key, const void *elt));
665 static int compare_relocs(const void *a, const void *b);
666 static int reloc_bsearch_compare(const void *key, const void *elt);
668 /* Debugging */
669 static abort_t init_debug_buf(struct update *update);
670 static void clear_debug_buf(struct update *update);
671 static int __attribute__((format(printf, 2, 3)))
672 _ksdebug(struct update *update, const char *fmt, ...);
673 #define ksdebug(pack, fmt, ...) \
674 _ksdebug(pack->update, fmt, ## __VA_ARGS__)
676 #ifdef KSPLICE_NO_KERNEL_SUPPORT
677 /* Functions defined here that will be exported in later kernels */
678 #ifdef CONFIG_KALLSYMS
679 static int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
680 struct module *, unsigned long),
681 void *data);
682 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
683 static unsigned int kallsyms_expand_symbol(unsigned int off, char *result);
684 #endif /* LINUX_VERSION_CODE */
685 static int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
686 struct module *,
687 unsigned long),
688 void *data);
689 #endif /* CONFIG_KALLSYMS */
690 static struct module *find_module(const char *name);
691 static int use_module(struct module *a, struct module *b);
692 static const struct kernel_symbol *find_symbol(const char *name,
693 struct module **owner,
694 const unsigned long **crc,
695 bool gplok, bool warn);
696 static bool each_symbol(bool (*fn)(const struct symsearch *arr,
697 struct module *owner,
698 unsigned int symnum, void *data),
699 void *data);
700 static struct module *__module_data_address(unsigned long addr);
701 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
703 /* Architecture-specific functions defined in arch/ARCH/kernel/ksplice-arch.c */
705 /* Prepare a trampoline for the given patch */
706 static abort_t prepare_trampoline(struct ksplice_pack *pack,
707 struct ksplice_patch *p);
708 /* What address does the trampoline at addr jump to? */
709 static abort_t trampoline_target(struct ksplice_pack *pack, unsigned long addr,
710 unsigned long *new_addr);
711 /* Hook to handle pc-relative jumps inserted by parainstructions */
712 static abort_t handle_paravirt(struct ksplice_pack *pack, unsigned long pre,
713 unsigned long run, int *matched);
714 /* Called for relocations of type KSPLICE_HOWTO_BUG */
715 static abort_t handle_bug(struct ksplice_pack *pack,
716 const struct ksplice_reloc *r,
717 unsigned long run_addr);
718 /* Called for relocations of type KSPLICE_HOWTO_EXTABLE */
719 static abort_t handle_extable(struct ksplice_pack *pack,
720 const struct ksplice_reloc *r,
721 unsigned long run_addr);
722 /* Is address p on the stack of the given thread? */
723 static bool valid_stack_ptr(const struct thread_info *tinfo, const void *p);
725 #ifndef KSPLICE_STANDALONE
726 #include "ksplice-arch.c"
727 #elif defined CONFIG_X86
728 #include "x86/ksplice-arch.c"
729 #elif defined CONFIG_ARM
730 #include "arm/ksplice-arch.c"
731 #endif /* KSPLICE_STANDALONE */
733 #define clear_list(head, type, member) \
734 do { \
735 struct list_head *_pos, *_n; \
736 list_for_each_safe(_pos, _n, head) { \
737 list_del(_pos); \
738 kfree(list_entry(_pos, type, member)); \
740 } while (0)
743 * init_ksplice_pack() - Initializes a ksplice pack
744 * @pack: The pack to be initialized. All of the public fields of the
745 * pack and its associated data structures should be populated
746 * before this function is called. The values of the private
747 * fields will be ignored.
749 int init_ksplice_pack(struct ksplice_pack *pack)
751 struct update *update;
752 struct ksplice_patch *p;
753 struct ksplice_section *s;
754 int ret = 0;
/* In standalone mode the core must have bootstrapped (resolved its own
 * init relocations) before any pack can be registered. */
756 #ifdef KSPLICE_STANDALONE
757 if (!bootstrapped)
758 return -1;
759 #endif /* KSPLICE_STANDALONE */
761 INIT_LIST_HEAD(&pack->temp_labelvals);
762 INIT_LIST_HEAD(&pack->safety_records);
/* Sort the reloc/section (and, standalone-only, System.map) tables so
 * later lookups can binary-search them with the matching compare_*
 * functions. */
764 sort(pack->helper_relocs,
765 pack->helper_relocs_end - pack->helper_relocs,
766 sizeof(*pack->helper_relocs), compare_relocs, NULL);
767 sort(pack->primary_relocs,
768 pack->primary_relocs_end - pack->primary_relocs,
769 sizeof(*pack->primary_relocs), compare_relocs, NULL);
770 sort(pack->helper_sections,
771 pack->helper_sections_end - pack->helper_sections,
772 sizeof(*pack->helper_sections), compare_section_labels, NULL);
773 #ifdef KSPLICE_STANDALONE
774 sort(pack->primary_system_map,
775 pack->primary_system_map_end - pack->primary_system_map,
776 sizeof(*pack->primary_system_map), compare_system_map, NULL);
777 sort(pack->helper_system_map,
778 pack->helper_system_map_end - pack->helper_system_map,
779 sizeof(*pack->helper_system_map), compare_system_map, NULL);
780 #endif /* KSPLICE_STANDALONE */
/* Reset per-run private state on every patch and helper section. */
782 for (p = pack->patches; p < pack->patches_end; p++)
783 p->vaddr = NULL;
784 for (s = pack->helper_sections; s < pack->helper_sections_end; s++)
785 s->match_map = NULL;
786 for (p = pack->patches; p < pack->patches_end; p++) {
787 const struct ksplice_reloc *r = patch_reloc(pack, p);
788 if (r == NULL)
789 return -ENOENT;
790 if (p->type == KSPLICE_PATCH_DATA) {
791 s = symbol_section(pack, r->symbol);
792 if (s == NULL)
793 return -ENOENT;
794 /* Ksplice creates KSPLICE_PATCH_DATA patches in order
795 * to modify rodata sections that have been explicitly
796 * marked for patching using the ksplice-patch.h macro
797 * ksplice_assume_rodata. Here we modify the section
798 * flags appropriately.
800 if (s->flags & KSPLICE_SECTION_DATA)
801 s->flags = (s->flags & ~KSPLICE_SECTION_DATA) |
802 KSPLICE_SECTION_RODATA;
/* Join an existing update with the same kid, or create and register a
 * new one; module_mutex serializes access to the global updates list.
 * Joining is refused (-EPERM) once an update has left STAGE_PREPARING. */
806 mutex_lock(&module_mutex);
807 list_for_each_entry(update, &updates, list) {
808 if (strcmp(pack->kid, update->kid) == 0) {
809 if (update->stage != STAGE_PREPARING) {
810 ret = -EPERM;
811 goto out;
813 add_to_update(pack, update);
814 ret = 0;
815 goto out;
818 update = init_ksplice_update(pack->kid);
819 if (update == NULL) {
820 ret = -ENOMEM;
821 goto out;
823 ret = ksplice_sysfs_init(update);
824 if (ret != 0) {
825 cleanup_ksplice_update(update);
826 goto out;
828 add_to_update(pack, update);
829 out:
830 mutex_unlock(&module_mutex);
831 return ret;
833 EXPORT_SYMBOL_GPL(init_ksplice_pack);
836 * cleanup_ksplice_pack() - Cleans up a pack
837 * @pack: The pack to be cleaned up
/* A pack that never joined an update has nothing to release. */
839 void cleanup_ksplice_pack(struct ksplice_pack *pack)
841 if (pack->update == NULL)
842 return;
844 mutex_lock(&module_mutex);
845 if (pack->update->stage == STAGE_APPLIED) {
846 /* If the pack wasn't actually applied (because we
847 * only applied this update to loaded modules and this
848 * target was not loaded), then unregister the pack
849 * from the list of unused packs.
851 struct ksplice_pack *p;
852 bool found = false;
/* Only unlink if the pack is genuinely on the unused list;
 * applied packs must stay registered with the update. */
854 list_for_each_entry(p, &pack->update->unused_packs, list) {
855 if (p == pack)
856 found = true;
858 if (found)
859 list_del(&pack->list);
860 mutex_unlock(&module_mutex);
861 return;
/* Not applied: drop the pack, and if the update is still preparing
 * and now empty, tear the update down too. */
863 list_del(&pack->list);
864 if (pack->update->stage == STAGE_PREPARING)
865 maybe_cleanup_ksplice_update(pack->update);
866 pack->update = NULL;
867 mutex_unlock(&module_mutex);
869 EXPORT_SYMBOL_GPL(cleanup_ksplice_pack);
/* Allocate and register a new update named "ksplice_<kid>".  Returns
 * NULL on any allocation failure; each failure path manually unwinds
 * everything acquired so far (no goto-cleanup ladder is used here). */
871 static struct update *init_ksplice_update(const char *kid)
873 struct update *update;
874 update = kcalloc(1, sizeof(struct update), GFP_KERNEL);
875 if (update == NULL)
876 return NULL;
877 update->name = kasprintf(GFP_KERNEL, "ksplice_%s", kid);
878 if (update->name == NULL) {
879 kfree(update);
880 return NULL;
882 update->kid = kstrdup(kid, GFP_KERNEL);
883 if (update->kid == NULL) {
884 kfree(update->name);
885 kfree(update);
886 return NULL;
/* Pin this module for the lifetime of the update; released in
 * cleanup_ksplice_update(). */
888 if (try_module_get(THIS_MODULE) != 1) {
889 kfree(update->kid);
890 kfree(update->name);
891 kfree(update);
892 return NULL;
894 INIT_LIST_HEAD(&update->packs);
895 INIT_LIST_HEAD(&update->unused_packs);
896 if (init_debug_buf(update) != OK) {
897 module_put(THIS_MODULE);
898 kfree(update->kid);
899 kfree(update->name);
900 kfree(update);
901 return NULL;
/* Fully constructed: publish on the global updates list in the
 * initial PREPARING stage with no recorded failure. */
903 list_add(&update->list, &updates);
904 update->stage = STAGE_PREPARING;
905 update->abort_cause = OK;
906 update->partial = 0;
907 INIT_LIST_HEAD(&update->conflicts);
908 return update;
/* Unregister and free an update: remove it from the global updates
 * list, release its conflict records and debug buffer, free its owned
 * strings, and drop the module reference taken in init_ksplice_update. */
911 static void cleanup_ksplice_update(struct update *update)
913 list_del(&update->list);
914 cleanup_conflicts(update);
915 clear_debug_buf(update);
916 kfree(update->kid);
917 kfree(update->name);
918 kfree(update);
919 module_put(THIS_MODULE);
922 /* Clean up the update if it no longer has any packs */
923 static void maybe_cleanup_ksplice_update(struct update *update)
925 if (list_empty(&update->packs) && list_empty(&update->unused_packs))
/* Drop the sysfs kobject reference; presumably ksplice_ktype's
 * release callback performs the actual teardown — verify against
 * the ksplice_ktype definition. */
926 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
927 kobject_put(&update->kobj);
928 #else /* LINUX_VERSION_CODE < */
929 /* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
930 kobject_unregister(&update->kobj);
931 #endif /* LINUX_VERSION_CODE */
934 static void add_to_update(struct ksplice_pack *pack, struct update *update)
936 pack->update = update;
937 list_add(&pack->list, &update->unused_packs);
938 pack->module_list_entry.primary = pack->primary;
/*
 * Register the update's kobject in sysfs and emit a KOBJ_ADD uevent.
 * Returns 0 on success or a negative errno from the kobject layer.
 * The heavy #ifdef'ing tracks kobject API changes across kernel
 * versions; the KSPLICE_STANDALONE variant parents the kobject on this
 * module's own kobject instead of the shared ksplice_kobj.
 */
static int ksplice_sysfs_init(struct update *update)
{
	int ret = 0;
	memset(&update->kobj, 0, sizeof(update->kobj));
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
#ifndef KSPLICE_STANDALONE
	ret = kobject_init_and_add(&update->kobj, &ksplice_ktype,
				   ksplice_kobj, "%s", update->kid);
#else /* KSPLICE_STANDALONE */
/* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
	ret = kobject_init_and_add(&update->kobj, &ksplice_ktype,
				   &THIS_MODULE->mkobj.kobj, "ksplice");
#endif /* KSPLICE_STANDALONE */
#else /* LINUX_VERSION_CODE < */
	/* Pre-2.6.25 kernels: set name/parent/ktype by hand, then
	 * register. */
	ret = kobject_set_name(&update->kobj, "%s", "ksplice");
	if (ret != 0)
		return ret;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
	update->kobj.parent = &THIS_MODULE->mkobj.kobj;
#else /* LINUX_VERSION_CODE < */
/* b86ab02803095190d6b72bcc18dcf620bf378df9 was after 2.6.10 */
	update->kobj.parent = &THIS_MODULE->mkobj->kobj;
#endif /* LINUX_VERSION_CODE */
	update->kobj.ktype = &ksplice_ktype;
	ret = kobject_register(&update->kobj);
#endif /* LINUX_VERSION_CODE */
	if (ret != 0)
		return ret;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
	kobject_uevent(&update->kobj, KOBJ_ADD);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
/* 312c004d36ce6c739512bac83b452f4c20ab1f62 was after 2.6.14 */
/* 12025235884570ba7f02a6f427f973ac6be7ec54 was after 2.6.9 */
	kobject_uevent(&update->kobj, KOBJ_ADD, NULL);
#endif /* LINUX_VERSION_CODE */
	return 0;
}
/*
 * Drive the whole application of an update:
 *  1. Resolve each unused pack's target module (moving it from
 *     unused_packs to packs), taking a module-use dependency on it.
 *  2. Create safety records covering every primary section.
 *  3. Run symbol resolution and run-pre matching per pack.
 *  4. Insert the patches (apply_patches).
 * On any failure, cleanup under "out" frees per-pack scratch state.
 */
static abort_t apply_update(struct update *update)
{
	struct ksplice_pack *pack, *n;
	abort_t ret;
	int retval;

	list_for_each_entry_safe(pack, n, &update->unused_packs, list) {
		if (strcmp(pack->target_name, "vmlinux") == 0) {
			/* NULL target means the running kernel itself. */
			pack->target = NULL;
		} else if (pack->target == NULL) {
			pack->target = find_module(pack->target_name);
			if (pack->target == NULL ||
			    !module_is_live(pack->target)) {
				/* In partial mode, packs whose target is not
				 * loaded simply stay on unused_packs. */
				if (update->partial) {
					continue;
				} else {
					ret = TARGET_NOT_LOADED;
					goto out;
				}
			}
			retval = use_module(pack->primary, pack->target);
			if (retval != 1) {
				ret = UNEXPECTED;
				goto out;
			}
		}
		list_del(&pack->list);
		list_add_tail(&pack->list, &update->packs);
		pack->module_list_entry.target = pack->target;

#ifdef KSPLICE_NEED_PARAINSTRUCTIONS
		if (pack->target == NULL) {
			apply_paravirt(pack->primary_parainstructions,
				       pack->primary_parainstructions_end);
			apply_paravirt(pack->helper_parainstructions,
				       pack->helper_parainstructions_end);
		}
#endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
	}

	/* One safety record per primary section, so the patched code's
	 * address ranges are known for conflict checking. */
	list_for_each_entry(pack, &update->packs, list) {
		const struct ksplice_section *sect;
		for (sect = pack->primary_sections;
		     sect < pack->primary_sections_end; sect++) {
			struct safety_record *rec = kmalloc(sizeof(*rec),
							    GFP_KERNEL);
			if (rec == NULL) {
				ret = OUT_OF_MEMORY;
				goto out;
			}
			rec->addr = sect->address;
			rec->size = sect->size;
			rec->label = sect->symbol->label;
			list_add(&rec->list, &pack->safety_records);
		}
	}

	list_for_each_entry(pack, &update->packs, list) {
		ret = init_symbol_arrays(pack);
		if (ret != OK) {
			cleanup_symbol_arrays(pack);
			goto out;
		}
		ret = prepare_pack(pack);
		/* Symbol arrays are scratch state for prepare_pack only. */
		cleanup_symbol_arrays(pack);
		if (ret != OK)
			goto out;
	}
	ret = apply_patches(update);
out:
	list_for_each_entry(pack, &update->packs, list) {
		struct ksplice_section *s;
		/* Still PREPARING here means we failed before the stage
		 * advanced; drop the safety records we created above. */
		if (update->stage == STAGE_PREPARING)
			clear_list(&pack->safety_records, struct safety_record,
				   list);
		for (s = pack->helper_sections; s < pack->helper_sections_end;
		     s++) {
			if (s->match_map != NULL) {
				vfree(s->match_map);
				s->match_map = NULL;
			}
		}
	}
	return ret;
}
1065 static int compare_symbolp_names(const void *a, const void *b)
1067 const struct ksplice_symbol *const *sympa = a, *const *sympb = b;
1068 if ((*sympa)->name == NULL && (*sympb)->name == NULL)
1069 return 0;
1070 if ((*sympa)->name == NULL)
1071 return -1;
1072 if ((*sympb)->name == NULL)
1073 return 1;
1074 return strcmp((*sympa)->name, (*sympb)->name);
1077 static int compare_symbolp_labels(const void *a, const void *b)
1079 const struct ksplice_symbol *const *sympa = a, *const *sympb = b;
1080 return strcmp((*sympa)->label, (*sympb)->label);
1083 static int symbolp_bsearch_compare(const void *key, const void *elt)
1085 const char *name = key;
1086 const struct ksplice_symbol *const *symp = elt;
1087 const struct ksplice_symbol *sym = *symp;
1088 if (sym->name == NULL)
1089 return 1;
1090 return strcmp(name, sym->name);
/*
 * Record sym_val as a candidate value for every symbol in lookup->arr
 * whose name equals sym_name.  lookup->arr is sorted by name
 * (compare_symbolp_names), so equal names form a contiguous run.
 */
static abort_t add_matching_values(struct ksplice_lookup *lookup,
				   const char *sym_name, unsigned long sym_val)
{
	struct ksplice_symbol **symp;
	abort_t ret;

	symp = bsearch(sym_name, lookup->arr, lookup->size,
		       sizeof(*lookup->arr), symbolp_bsearch_compare);
	if (symp == NULL)
		return OK;

	/* bsearch may land anywhere within a run of equal names; rewind
	 * to the first match before scanning forward. */
	while (symp > lookup->arr &&
	       symbolp_bsearch_compare(sym_name, symp - 1) == 0)
		symp--;

	for (; symp < lookup->arr + lookup->size; symp++) {
		struct ksplice_symbol *sym = *symp;
		/* Stop at the end of the run of matching names. */
		if (sym->name == NULL || strcmp(sym_name, sym->name) != 0)
			break;
		ret = add_candidate_val(lookup->pack, sym->vals, sym_val);
		if (ret != OK)
			return ret;
	}
	return OK;
}
#ifdef CONFIG_KALLSYMS
/*
 * kallsyms_on_each_symbol callback: feed each kallsyms entry into
 * add_matching_values, skipping symbols from our own primary module
 * and from modules unrelated to the pack's target.  abort_t is smuggled
 * through the int return type (__force casts).
 */
static int add_kallsyms_values(void *data, const char *name,
			       struct module *owner, unsigned long val)
{
	struct ksplice_lookup *lookup = data;

	if (owner != lookup->pack->primary &&
	    patches_module(owner, lookup->pack->target))
		return (__force int)add_matching_values(lookup, name, val);
	return (__force int)OK;
}
#endif /* CONFIG_KALLSYMS */
1131 static bool add_export_values(const struct symsearch *syms,
1132 struct module *owner,
1133 unsigned int symnum, void *data)
1135 struct ksplice_lookup *lookup = data;
1136 abort_t ret;
1138 ret = add_matching_values(lookup, syms->start[symnum].name,
1139 syms->start[symnum].value);
1140 if (ret != OK) {
1141 lookup->ret = ret;
1142 return true;
1144 return false;
1147 static void cleanup_symbol_arrays(struct ksplice_pack *pack)
1149 struct ksplice_symbol *sym;
1150 for (sym = pack->primary_symbols; sym < pack->primary_symbols_end;
1151 sym++) {
1152 if (sym->vals != NULL) {
1153 clear_list(sym->vals, struct candidate_val, list);
1154 kfree(sym->vals);
1155 sym->vals = NULL;
1158 for (sym = pack->helper_symbols; sym < pack->helper_symbols_end; sym++) {
1159 if (sym->vals != NULL) {
1160 clear_list(sym->vals, struct candidate_val, list);
1161 kfree(sym->vals);
1162 sym->vals = NULL;
/*
 * The primary and helper modules each have their own independent
 * ksplice_symbol structures.  uniquify_symbols unifies these separate
 * pieces of kernel symbol information by replacing all references to
 * the helper copy of symbols with references to the primary copy.
 */
static abort_t uniquify_symbols(struct ksplice_pack *pack)
{
	struct ksplice_reloc *r;
	struct ksplice_section *s;
	struct ksplice_symbol *sym, **sym_arr, **symp;
	size_t size = pack->primary_symbols_end - pack->primary_symbols;

	if (size == 0)
		return OK;

	/* Build an array of pointers to the primary symbols so it can be
	 * sorted by label and binary-searched. */
	sym_arr = vmalloc(sizeof(*sym_arr) * size);
	if (sym_arr == NULL)
		return OUT_OF_MEMORY;

	for (symp = sym_arr, sym = pack->primary_symbols;
	     symp < sym_arr + size && sym < pack->primary_symbols_end;
	     sym++, symp++)
		*symp = sym;

	sort(sym_arr, size, sizeof(*sym_arr), compare_symbolp_labels, NULL);

	/* Redirect helper relocations to the primary symbol with the same
	 * label, donating the helper's name if the primary has none. */
	for (r = pack->helper_relocs; r < pack->helper_relocs_end; r++) {
		symp = bsearch(&r->symbol, sym_arr, size, sizeof(*sym_arr),
			       compare_symbolp_labels);
		if (symp != NULL) {
			if ((*symp)->name == NULL)
				(*symp)->name = r->symbol->name;
			r->symbol = *symp;
		}
	}

	/* Same redirection for helper section symbols. */
	for (s = pack->helper_sections; s < pack->helper_sections_end; s++) {
		symp = bsearch(&s->symbol, sym_arr, size, sizeof(*sym_arr),
			       compare_symbolp_labels);
		if (symp != NULL) {
			if ((*symp)->name == NULL)
				(*symp)->name = s->symbol->name;
			s->symbol = *symp;
		}
	}

	vfree(sym_arr);
	return OK;
}
1219 * Initialize the ksplice_symbol structures in the given array using
1220 * the kallsyms and exported symbol tables.
1222 static abort_t init_symbol_array(struct ksplice_pack *pack,
1223 struct ksplice_symbol *start,
1224 struct ksplice_symbol *end)
1226 struct ksplice_symbol *sym, **sym_arr, **symp;
1227 struct ksplice_lookup lookup;
1228 size_t size = end - start;
1229 abort_t ret;
1231 if (size == 0)
1232 return OK;
1234 for (sym = start; sym < end; sym++) {
1235 if (starts_with(sym->label, "__ksymtab")) {
1236 const struct kernel_symbol *ksym;
1237 const char *colon = strchr(sym->label, ':');
1238 const char *name = colon + 1;
1239 if (colon == NULL)
1240 continue;
1241 ksym = find_symbol(name, NULL, NULL, true, false);
1242 if (ksym == NULL) {
1243 ksdebug(pack, "Could not find kernel_symbol "
1244 "structure for %s\n", name);
1245 continue;
1247 sym->value = (unsigned long)ksym;
1248 sym->vals = NULL;
1249 continue;
1252 sym->vals = kmalloc(sizeof(*sym->vals), GFP_KERNEL);
1253 if (sym->vals == NULL)
1254 return OUT_OF_MEMORY;
1255 INIT_LIST_HEAD(sym->vals);
1256 sym->value = 0;
1259 sym_arr = vmalloc(sizeof(*sym_arr) * size);
1260 if (sym_arr == NULL)
1261 return OUT_OF_MEMORY;
1263 for (symp = sym_arr, sym = start; symp < sym_arr + size && sym < end;
1264 sym++, symp++)
1265 *symp = sym;
1267 sort(sym_arr, size, sizeof(*sym_arr), compare_symbolp_names, NULL);
1269 lookup.pack = pack;
1270 lookup.arr = sym_arr;
1271 lookup.size = size;
1272 lookup.ret = OK;
1274 each_symbol(add_export_values, &lookup);
1275 ret = lookup.ret;
1276 #ifdef CONFIG_KALLSYMS
1277 if (ret == OK)
1278 ret = (__force abort_t)
1279 kallsyms_on_each_symbol(add_kallsyms_values, &lookup);
1280 #endif /* CONFIG_KALLSYMS */
1281 vfree(sym_arr);
1282 return ret;
1285 /* Prepare the pack's ksplice_symbol structures for run-pre matching */
1286 static abort_t init_symbol_arrays(struct ksplice_pack *pack)
1288 abort_t ret;
1290 ret = uniquify_symbols(pack);
1291 if (ret != OK)
1292 return ret;
1294 ret = init_symbol_array(pack, pack->helper_symbols,
1295 pack->helper_symbols_end);
1296 if (ret != OK)
1297 return ret;
1299 ret = init_symbol_array(pack, pack->primary_symbols,
1300 pack->primary_symbols_end);
1301 if (ret != OK)
1302 return ret;
1304 return OK;
/*
 * Run-pre match the pack's sections and finalize its patches.
 * Returns OK when every section has been matched and the replacement
 * code is ready; otherwise propagates the failure.
 */
static abort_t prepare_pack(struct ksplice_pack *pack)
{
	abort_t ret;

	ksdebug(pack, "Preparing and checking %s\n", pack->name);
	ret = match_pack_sections(pack, false);
	if (ret == NO_MATCH) {
		/* It is possible that by using relocations from .data sections
		 * we can successfully run-pre match the rest of the sections.
		 * To avoid using any symbols obtained from .data sections
		 * (which may be unreliable) in the post code, we first prepare
		 * the post code and then try to run-pre match the remaining
		 * sections with the help of .data sections.
		 */
		ksdebug(pack, "Continuing without some sections; we might "
			"find them later.\n");
		ret = finalize_pack(pack);
		if (ret != OK) {
			ksdebug(pack, "Aborted. Unable to continue without "
				"the unmatched sections.\n");
			return ret;
		}

		ksdebug(pack, "run-pre: Considering .data sections to find the "
			"unmatched sections\n");
		/* Second pass: .data sections are now fair game. */
		ret = match_pack_sections(pack, true);
		if (ret != OK)
			return ret;

		ksdebug(pack, "run-pre: Found all previously unmatched "
			"sections\n");
		return OK;
	} else if (ret != OK) {
		return ret;
	}

	return finalize_pack(pack);
}
1347 * Finish preparing the pack for insertion into the kernel.
1348 * Afterwards, the replacement code should be ready to run and the
1349 * ksplice_patches should all be ready for trampoline insertion.
1351 static abort_t finalize_pack(struct ksplice_pack *pack)
1353 abort_t ret;
1354 ret = apply_relocs(pack, pack->primary_relocs,
1355 pack->primary_relocs_end);
1356 if (ret != OK)
1357 return ret;
1359 ret = finalize_patches(pack);
1360 if (ret != OK)
1361 return ret;
1363 return OK;
/*
 * Validate every patch against the pack's safety records and prepare
 * its trampoline.  A patch must fall inside a safety record (except
 * KSPLICE_PATCH_EXPORT), and the record must cover the whole patch.
 */
static abort_t finalize_patches(struct ksplice_pack *pack)
{
	struct ksplice_patch *p;
	struct safety_record *rec;
	abort_t ret;

	for (p = pack->patches; p < pack->patches_end; p++) {
		bool found = false;
		list_for_each_entry(rec, &pack->safety_records, list) {
			if (rec->addr <= p->oldaddr &&
			    p->oldaddr < rec->addr + rec->size) {
				found = true;
				/* NOTE: rec is used after the loop below;
				 * valid only when found is true. */
				break;
			}
		}
		if (!found && p->type != KSPLICE_PATCH_EXPORT) {
			const struct ksplice_reloc *r = patch_reloc(pack, p);
			if (r == NULL) {
				ksdebug(pack, "A patch with no ksplice_reloc at"
					" its oldaddr has no safety record\n");
				return NO_MATCH;
			}
			ksdebug(pack, "No safety record for patch with oldaddr "
				"%s+%lx\n", r->symbol->label, r->target_addend);
			return NO_MATCH;
		}
		if (p->type == KSPLICE_PATCH_TEXT) {
			ret = prepare_trampoline(pack, p);
			if (ret != OK)
				return ret;
		}
		/* The safety record must cover the entire patched range,
		 * not just its start address. */
		if (found && rec->addr + rec->size < p->oldaddr + p->size) {
			ksdebug(pack, "Safety record %s is too short for "
				"patch\n", rec->label);
			return UNEXPECTED;
		}
		if (p->type == KSPLICE_PATCH_TEXT) {
			/* Functions deleted by the update jump to
			 * ksplice_deleted instead of new code. */
			if (p->repladdr == 0)
				p->repladdr = (unsigned long)ksplice_deleted;
		}
	}
	return OK;
}
/*
 * Create writable shadow mappings (p->vaddr) for every patch site in
 * the update, so trampolines can be written inside stop_machine where
 * vmap is not allowed.  On failure, all mappings made so far are torn
 * down via unmap_trampoline_pages.
 */
static abort_t map_trampoline_pages(struct update *update)
{
	struct ksplice_pack *pack;
	list_for_each_entry(pack, &update->packs, list) {
		struct ksplice_patch *p;
		for (p = pack->patches; p < pack->patches_end; p++) {
			p->vaddr = map_writable((void *)p->oldaddr, p->size);
			if (p->vaddr == NULL) {
				ksdebug(pack, "Unable to map oldaddr read/write"
					"\n");
				unmap_trampoline_pages(update);
				return UNEXPECTED;
			}
		}
	}
	return OK;
}
/* Undo map_trampoline_pages: vunmap each shadow mapping.  The mask
 * recovers the page-aligned base that vmap originally returned, since
 * map_writable added the sub-page offset back in. */
static void unmap_trampoline_pages(struct update *update)
{
	struct ksplice_pack *pack;
	list_for_each_entry(pack, &update->packs, list) {
		struct ksplice_patch *p;
		for (p = pack->patches; p < pack->patches_end; p++) {
			vunmap((void *)((unsigned long)p->vaddr & PAGE_MASK));
			p->vaddr = NULL;
		}
	}
}
1444 * map_writable creates a shadow page mapping of the range
1445 * [addr, addr + len) so that we can write to code mapped read-only.
1447 * It is similar to a generalized version of x86's text_poke. But
1448 * because one cannot use vmalloc/vfree() inside stop_machine, we use
1449 * map_writable to map the pages before stop_machine, then use the
1450 * mapping inside stop_machine, and unmap the pages afterwards.
1452 static void *map_writable(void *addr, size_t len)
1454 void *vaddr;
1455 int nr_pages = DIV_ROUND_UP(offset_in_page(addr) + len, PAGE_SIZE);
1456 struct page **pages = kmalloc(nr_pages * sizeof(*pages), GFP_KERNEL);
1457 void *page_addr = (void *)((unsigned long)addr & PAGE_MASK);
1458 int i;
1460 if (pages == NULL)
1461 return NULL;
1463 for (i = 0; i < nr_pages; i++) {
1464 if (__module_text_address((unsigned long)page_addr) == NULL &&
1465 __module_data_address((unsigned long)page_addr) == NULL) {
1466 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) || !defined(CONFIG_X86_64)
1467 pages[i] = virt_to_page(page_addr);
1468 #else /* LINUX_VERSION_CODE < && CONFIG_X86_64 */
1469 /* e3ebadd95cb621e2c7436f3d3646447ac9d5c16d was after 2.6.21 */
1470 pages[i] =
1471 pfn_to_page(__pa_symbol(page_addr) >> PAGE_SHIFT);
1472 #endif /* LINUX_VERSION_CODE || !CONFIG_X86_64 */
1473 WARN_ON(!PageReserved(pages[i]));
1474 } else {
1475 pages[i] = vmalloc_to_page(addr);
1477 if (pages[i] == NULL) {
1478 kfree(pages);
1479 return NULL;
1481 page_addr += PAGE_SIZE;
1483 vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
1484 kfree(pages);
1485 if (vaddr == NULL)
1486 return NULL;
1487 return vaddr + offset_in_page(addr);
1491 * Ksplice adds a dependency on any symbol address used to resolve relocations
1492 * in the primary module.
1494 * Be careful to follow_trampolines so that we always depend on the
1495 * latest version of the target function, since that's the code that
1496 * will run if we call addr.
1498 static abort_t add_dependency_on_address(struct ksplice_pack *pack,
1499 unsigned long addr)
1501 struct ksplice_pack *p;
1502 struct module *m =
1503 __module_text_address(follow_trampolines(pack, addr));
1504 if (m == NULL)
1505 return OK;
1506 list_for_each_entry(p, &pack->update->packs, list) {
1507 if (m == p->primary)
1508 return OK;
1510 if (use_module(pack->primary, m) != 1)
1511 return MODULE_BUSY;
1512 return OK;
1515 static abort_t apply_relocs(struct ksplice_pack *pack,
1516 const struct ksplice_reloc *relocs,
1517 const struct ksplice_reloc *relocs_end)
1519 const struct ksplice_reloc *r;
1520 for (r = relocs; r < relocs_end; r++) {
1521 abort_t ret = apply_reloc(pack, r);
1522 if (ret != OK)
1523 return ret;
1525 return OK;
/* Dispatch a relocation to the handler for its howto type. */
static abort_t apply_reloc(struct ksplice_pack *pack,
			   const struct ksplice_reloc *r)
{
	switch (r->howto->type) {
	case KSPLICE_HOWTO_RELOC:
	case KSPLICE_HOWTO_RELOC_PATCH:
		return apply_howto_reloc(pack, r);
	case KSPLICE_HOWTO_DATE:
	case KSPLICE_HOWTO_TIME:
		return apply_howto_date(pack, r);
	default:
		ksdebug(pack, "Unexpected howto type %d\n", r->howto->type);
		return UNEXPECTED;
	}
}
/*
 * Applies a relocation.  Aborts if the symbol referenced in it has
 * not been uniquely resolved.
 */
static abort_t apply_howto_reloc(struct ksplice_pack *pack,
				 const struct ksplice_reloc *r)
{
	abort_t ret;
	int canary_ret;
	unsigned long sym_addr;
	LIST_HEAD(vals);

	/* A missing canary means the blank was already overwritten
	 * (e.g. by an alternative instruction); nothing to do. */
	canary_ret = contains_canary(pack, r->blank_addr, r->howto);
	if (canary_ret < 0)
		return UNEXPECTED;
	if (canary_ret == 0) {
		ksdebug(pack, "reloc: skipped %lx to %s+%lx (altinstr)\n",
			r->blank_addr, r->symbol->label, r->target_addend);
		return OK;
	}

#ifdef KSPLICE_STANDALONE
	/* Before bootstrapping, kallsyms may be unavailable; fall back
	 * to the System.map shipped with the update. */
	if (!bootstrapped) {
		ret = add_system_map_candidates(pack,
						pack->primary_system_map,
						pack->primary_system_map_end,
						r->symbol->label, &vals);
		if (ret != OK) {
			release_vals(&vals);
			return ret;
		}
	}
#endif /* KSPLICE_STANDALONE */
	ret = lookup_symbol(pack, r->symbol, &vals);
	if (ret != OK) {
		release_vals(&vals);
		return ret;
	}
	/*
	 * Relocations for the oldaddr fields of patches must have
	 * been resolved via run-pre matching.
	 */
	if (!singular(&vals) || (r->symbol->vals != NULL &&
				 r->howto->type == KSPLICE_HOWTO_RELOC_PATCH)) {
		release_vals(&vals);
		ksdebug(pack, "Failed to find %s for reloc\n",
			r->symbol->label);
		return FAILED_TO_FIND;
	}
	sym_addr = list_entry(vals.next, struct candidate_val, list)->val;
	release_vals(&vals);

	/* PC-relative relocations store the displacement from the
	 * storage unit rather than the absolute address. */
	ret = write_reloc_value(pack, r, r->blank_addr,
				r->howto->pcrel ? sym_addr - r->blank_addr :
				sym_addr);
	if (ret != OK)
		return ret;

	ksdebug(pack, "reloc: %lx to %s+%lx (S=%lx ", r->blank_addr,
		r->symbol->label, r->target_addend, sym_addr);
	switch (r->howto->size) {
	case 1:
		ksdebug(pack, "aft=%02x)\n", *(uint8_t *)r->blank_addr);
		break;
	case 2:
		ksdebug(pack, "aft=%04x)\n", *(uint16_t *)r->blank_addr);
		break;
	case 4:
		ksdebug(pack, "aft=%08x)\n", *(uint32_t *)r->blank_addr);
		break;
#if BITS_PER_LONG >= 64
	case 8:
		ksdebug(pack, "aft=%016llx)\n", *(uint64_t *)r->blank_addr);
		break;
#endif /* BITS_PER_LONG */
	default:
		ksdebug(pack, "Aborted. Invalid relocation size.\n");
		return UNEXPECTED;
	}
#ifdef KSPLICE_STANDALONE
	if (!bootstrapped)
		return OK;
#endif /* KSPLICE_STANDALONE */

	/*
	 * Create labelvals so that we can verify our choices in the
	 * second round of run-pre matching that considers data sections.
	 */
	ret = create_labelval(pack, r->symbol, sym_addr, VAL);
	if (ret != OK)
		return ret;

	return add_dependency_on_address(pack, sym_addr);
}
1640 * Date relocations are created wherever __DATE__ or __TIME__ is used
1641 * in the kernel; we resolve them by simply copying in the date/time
1642 * obtained from run-pre matching the relevant compilation unit.
1644 static abort_t apply_howto_date(struct ksplice_pack *pack,
1645 const struct ksplice_reloc *r)
1647 if (r->symbol->vals != NULL) {
1648 ksdebug(pack, "Failed to find %s for date\n", r->symbol->label);
1649 return FAILED_TO_FIND;
1651 memcpy((unsigned char *)r->blank_addr,
1652 (const unsigned char *)r->symbol->value, r->howto->size);
1653 return OK;
/*
 * Given a relocation and its run address, compute the address of the
 * symbol the relocation referenced, and store it in *valp.
 */
static abort_t read_reloc_value(struct ksplice_pack *pack,
				const struct ksplice_reloc *r,
				unsigned long addr, unsigned long *valp)
{
	unsigned char bytes[sizeof(long)];
	unsigned long val;
	const struct ksplice_reloc_howto *howto = r->howto;

	if (howto->size <= 0 || howto->size > sizeof(long)) {
		ksdebug(pack, "Aborted. Invalid relocation size.\n");
		return UNEXPECTED;
	}

	/* The run address may be unmapped; read through a fault-safe
	 * copy rather than dereferencing directly. */
	if (probe_kernel_read(bytes, (void *)addr, howto->size) == -EFAULT)
		return NO_MATCH;

	switch (howto->size) {
	case 1:
		val = *(uint8_t *)bytes;
		break;
	case 2:
		val = *(uint16_t *)bytes;
		break;
	case 4:
		val = *(uint32_t *)bytes;
		break;
#if BITS_PER_LONG >= 64
	case 8:
		val = *(uint64_t *)bytes;
		break;
#endif /* BITS_PER_LONG */
	default:
		ksdebug(pack, "Aborted. Invalid relocation size.\n");
		return UNEXPECTED;
	}

	/* Invert write_reloc_value: mask out the relocation field,
	 * sign-extend it if the addend is signed (the OR propagates the
	 * field's top bit upward), undo the right shift, and strip the
	 * instruction and target addends to recover the symbol address. */
	val &= howto->dst_mask;
	if (howto->signed_addend)
		val |= -(val & (howto->dst_mask & ~(howto->dst_mask >> 1)));
	val <<= howto->rightshift;
	val -= r->insn_addend + r->target_addend;
	*valp = val;
	return OK;
}
/*
 * Given a relocation, the address of its storage unit, and the
 * address of the symbol the relocation references, write the
 * relocation's final value into the storage unit.
 */
static abort_t write_reloc_value(struct ksplice_pack *pack,
				 const struct ksplice_reloc *r,
				 unsigned long addr, unsigned long sym_addr)
{
	unsigned long val = sym_addr + r->target_addend + r->insn_addend;
	const struct ksplice_reloc_howto *howto = r->howto;
	val >>= howto->rightshift;
	/* Merge the shifted value into the dst_mask field of the storage
	 * unit, preserving the bits outside the mask. */
	switch (howto->size) {
	case 1:
		*(uint8_t *)addr = (*(uint8_t *)addr & ~howto->dst_mask) |
		    (val & howto->dst_mask);
		break;
	case 2:
		*(uint16_t *)addr = (*(uint16_t *)addr & ~howto->dst_mask) |
		    (val & howto->dst_mask);
		break;
	case 4:
		*(uint32_t *)addr = (*(uint32_t *)addr & ~howto->dst_mask) |
		    (val & howto->dst_mask);
		break;
#if BITS_PER_LONG >= 64
	case 8:
		*(uint64_t *)addr = (*(uint64_t *)addr & ~howto->dst_mask) |
		    (val & howto->dst_mask);
		break;
#endif /* BITS_PER_LONG */
	default:
		ksdebug(pack, "Aborted. Invalid relocation size.\n");
		return UNEXPECTED;
	}

	/* Round-trip check: re-reading the field must reproduce sym_addr,
	 * otherwise the value overflowed the relocation field. */
	if (read_reloc_value(pack, r, addr, &val) != OK || val != sym_addr) {
		ksdebug(pack, "Aborted. Relocation overflow.\n");
		return UNEXPECTED;
	}

	return OK;
}
/* Replacement address used for functions deleted by the patch */
static void __attribute__((noreturn)) ksplice_deleted(void)
{
	printk(KERN_CRIT "Called a kernel function deleted by Ksplice!\n");
	BUG();
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
/* 91768d6c2bad0d2766a166f13f2f57e197de3458 was after 2.6.19 */
	/* Older kernels' BUG() is not marked noreturn; loop forever so
	 * the noreturn attribute above is honest. */
	for (;;);
#endif
}
/* Floodfill to run-pre match the sections within a pack. */
static abort_t match_pack_sections(struct ksplice_pack *pack,
				   bool consider_data_sections)
{
	struct ksplice_section *sect;
	abort_t ret;
	int remaining = 0;
	bool progress;

	/* Count the text/rodata sections still to be matched; data and
	 * string sections never contribute to "remaining". */
	for (sect = pack->helper_sections; sect < pack->helper_sections_end;
	     sect++) {
		if ((sect->flags & KSPLICE_SECTION_DATA) == 0 &&
		    (sect->flags & KSPLICE_SECTION_STRING) == 0 &&
		    (sect->flags & KSPLICE_SECTION_MATCHED) == 0)
			remaining++;
	}

	/* Fixpoint iteration: each pass may resolve symbols that let
	 * further sections match on the next pass. */
	while (remaining > 0) {
		progress = false;
		for (sect = pack->helper_sections;
		     sect < pack->helper_sections_end; sect++) {
			if ((sect->flags & KSPLICE_SECTION_MATCHED) != 0)
				continue;
			/* Skip data sections on the first (conservative)
			 * round, and string sections always. */
			if ((!consider_data_sections &&
			     (sect->flags & KSPLICE_SECTION_DATA) != 0) ||
			    (sect->flags & KSPLICE_SECTION_STRING) != 0)
				continue;
			ret = find_section(pack, sect);
			if (ret == OK) {
				sect->flags |= KSPLICE_SECTION_MATCHED;
				if ((sect->flags & KSPLICE_SECTION_DATA) == 0)
					remaining--;
				progress = true;
			} else if (ret != NO_MATCH) {
				return ret;
			}
		}

		if (progress)
			continue;

		/* No progress this pass: report every unmatched section
		 * and give up. */
		for (sect = pack->helper_sections;
		     sect < pack->helper_sections_end; sect++) {
			if ((sect->flags & KSPLICE_SECTION_MATCHED) != 0 ||
			    (sect->flags & KSPLICE_SECTION_STRING) != 0)
				continue;
			ksdebug(pack, "run-pre: could not match %s "
				"section %s\n",
				(sect->flags & KSPLICE_SECTION_DATA) != 0 ?
				"data" :
				(sect->flags & KSPLICE_SECTION_RODATA) != 0 ?
				"rodata" : "text", sect->symbol->label);
		}
		ksdebug(pack, "Aborted. run-pre: could not match some "
			"sections.\n");
		return NO_MATCH;
	}
	return OK;
}
/*
 * Search for the section in the running kernel.  Returns OK if and
 * only if it finds precisely one address in the kernel matching the
 * section.
 */
static abort_t find_section(struct ksplice_pack *pack,
			    struct ksplice_section *sect)
{
	int i;
	abort_t ret;
	unsigned long run_addr;
	LIST_HEAD(vals);
	struct candidate_val *v, *n;

#ifdef KSPLICE_STANDALONE
	/* Standalone builds can seed candidates from the System.map
	 * shipped with the update. */
	ret = add_system_map_candidates(pack, pack->helper_system_map,
					pack->helper_system_map_end,
					sect->symbol->label, &vals);
	if (ret != OK) {
		release_vals(&vals);
		return ret;
	}
#endif /* KSPLICE_STANDALONE */
	ret = lookup_symbol(pack, sect->symbol, &vals);
	if (ret != OK) {
		release_vals(&vals);
		return ret;
	}

	ksdebug(pack, "run-pre: starting sect search for %s\n",
		sect->symbol->label);

	/* First pass: prune candidates that fail a trial run-pre match. */
	list_for_each_entry_safe(v, n, &vals, list) {
		run_addr = v->val;

		yield();
		ret = try_addr(pack, sect, run_addr, NULL, RUN_PRE_INITIAL);
		if (ret == NO_MATCH) {
			list_del(&v->list);
			kfree(v);
		} else if (ret != OK) {
			release_vals(&vals);
			return ret;
		}
	}

#if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
	/* Without kallsyms, fall back to scanning kernel memory for the
	 * section's byte pattern. */
	if (list_empty(&vals) && (sect->flags & KSPLICE_SECTION_DATA) == 0) {
		ret = brute_search_all(pack, sect, &vals);
		if (ret != OK) {
			release_vals(&vals);
			return ret;
		}
		/*
		 * Make sure run-pre matching output is displayed if
		 * brute_search succeeds.
		 */
		if (singular(&vals)) {
			run_addr = list_entry(vals.next, struct candidate_val,
					      list)->val;
			ret = try_addr(pack, sect, run_addr, NULL,
				       RUN_PRE_INITIAL);
			if (ret != OK) {
				ksdebug(pack, "run-pre: Debug run failed for "
					"sect %s:\n", sect->symbol->label);
				release_vals(&vals);
				return ret;
			}
		}
	}
#endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */

	if (singular(&vals)) {
		/* Exactly one surviving candidate: do the final match,
		 * which also records safety records for the section. */
		LIST_HEAD(safety_records);
		run_addr = list_entry(vals.next, struct candidate_val,
				      list)->val;
		ret = try_addr(pack, sect, run_addr, &safety_records,
			       RUN_PRE_FINAL);
		release_vals(&vals);
		if (ret != OK) {
			clear_list(&safety_records, struct safety_record, list);
			ksdebug(pack, "run-pre: Final run failed for sect "
				"%s:\n", sect->symbol->label);
		} else {
			list_splice(&safety_records, &pack->safety_records);
		}
		return ret;
	} else if (!list_empty(&vals)) {
		/* Ambiguous: list up to 5 candidates for debugging. */
		struct candidate_val *val;
		ksdebug(pack, "run-pre: multiple candidates for sect %s:\n",
			sect->symbol->label);
		i = 0;
		list_for_each_entry(val, &vals, list) {
			i++;
			ksdebug(pack, "%lx\n", val->val);
			if (i > 5) {
				ksdebug(pack, "...\n");
				break;
			}
		}
		release_vals(&vals);
		return NO_MATCH;
	}
	release_vals(&vals);
	return NO_MATCH;
}
/*
 * try_addr is the interface to run-pre matching.  Its primary
 * purpose is to manage debugging information for run-pre matching;
 * all the hard work is in run_pre_cmp.
 */
static abort_t try_addr(struct ksplice_pack *pack,
			struct ksplice_section *sect,
			unsigned long run_addr,
			struct list_head *safety_records,
			enum run_pre_mode mode)
{
	abort_t ret;
	const struct module *run_module;

	/* Identify which module (or NULL for vmlinux) owns run_addr,
	 * using the data lookup for data/rodata sections. */
	if ((sect->flags & KSPLICE_SECTION_RODATA) != 0 ||
	    (sect->flags & KSPLICE_SECTION_DATA) != 0)
		run_module = __module_data_address(run_addr);
	else
		run_module = __module_text_address(run_addr);
	if (run_module == pack->primary) {
		/* A candidate inside our own replacement code means the
		 * symbol resolution went badly wrong. */
		ksdebug(pack, "run-pre: unexpected address %lx in primary "
			"module %s for sect %s\n", run_addr, run_module->name,
			sect->symbol->label);
		return UNEXPECTED;
	}
	if (!patches_module(run_module, pack->target)) {
		ksdebug(pack, "run-pre: ignoring address %lx in other module "
			"%s for sect %s\n", run_addr, run_module == NULL ?
			"vmlinux" : run_module->name, sect->symbol->label);
		return NO_MATCH;
	}

	/* Tentatively bind the section symbol to run_addr; confirmed
	 * (VAL) or rolled back (NOVAL) depending on the outcome. */
	ret = create_labelval(pack, sect->symbol, run_addr, TEMP);
	if (ret != OK)
		return ret;

#ifdef CONFIG_FUNCTION_DATA_SECTIONS
	ret = run_pre_cmp(pack, sect, run_addr, safety_records, mode);
#else /* !CONFIG_FUNCTION_DATA_SECTIONS */
	/* Without -ffunction-sections, text sections need the
	 * architecture-specific comparison. */
	if ((sect->flags & KSPLICE_SECTION_TEXT) != 0)
		ret = arch_run_pre_cmp(pack, sect, run_addr, safety_records,
				       mode);
	else
		ret = run_pre_cmp(pack, sect, run_addr, safety_records, mode);
#endif /* CONFIG_FUNCTION_DATA_SECTIONS */
	if (ret == NO_MATCH && mode != RUN_PRE_FINAL) {
		set_temp_labelvals(pack, NOVAL);
		ksdebug(pack, "run-pre: %s sect %s does not match (r_a=%lx "
			"p_a=%lx s=%lx)\n",
			(sect->flags & KSPLICE_SECTION_RODATA) != 0 ? "rodata" :
			(sect->flags & KSPLICE_SECTION_DATA) != 0 ? "data" :
			"text", sect->symbol->label, run_addr, sect->address,
			sect->size);
		ksdebug(pack, "run-pre: ");
		/* At debug level >= 1, rerun the comparison in debug mode
		 * to print a byte-by-byte trace of the mismatch. */
		if (pack->update->debug >= 1) {
#ifdef CONFIG_FUNCTION_DATA_SECTIONS
			ret = run_pre_cmp(pack, sect, run_addr, safety_records,
					  RUN_PRE_DEBUG);
#else /* !CONFIG_FUNCTION_DATA_SECTIONS */
			if ((sect->flags & KSPLICE_SECTION_TEXT) != 0)
				ret = arch_run_pre_cmp(pack, sect, run_addr,
						       safety_records,
						       RUN_PRE_DEBUG);
			else
				ret = run_pre_cmp(pack, sect, run_addr,
						  safety_records,
						  RUN_PRE_DEBUG);
#endif /* CONFIG_FUNCTION_DATA_SECTIONS */
			set_temp_labelvals(pack, NOVAL);
		}
		ksdebug(pack, "\n");
		return ret;
	} else if (ret != OK) {
		set_temp_labelvals(pack, NOVAL);
		return ret;
	}

	if (mode != RUN_PRE_FINAL) {
		/* Trial run: discard the tentative labelvals but report
		 * the candidate as viable. */
		set_temp_labelvals(pack, NOVAL);
		ksdebug(pack, "run-pre: candidate for sect %s=%lx\n",
			sect->symbol->label, run_addr);
		return OK;
	}

	/* Final run: commit the labelvals. */
	set_temp_labelvals(pack, VAL);
	ksdebug(pack, "run-pre: found sect %s=%lx\n", sect->symbol->label,
		run_addr);
	return OK;
}
2018 * run_pre_cmp is the primary run-pre matching function; it determines
2019 * whether the given ksplice_section matches the code or data in the
2020 * running kernel starting at run_addr.
2022 * If run_pre_mode is RUN_PRE_FINAL, a safety record for the matched
2023 * section is created.
2025 * The run_pre_mode is also used to determine what debugging
2026 * information to display.
2028 static abort_t run_pre_cmp(struct ksplice_pack *pack,
2029 const struct ksplice_section *sect,
2030 unsigned long run_addr,
2031 struct list_head *safety_records,
2032 enum run_pre_mode mode)
2034 int matched = 0;
2035 abort_t ret;
2036 const struct ksplice_reloc *r, *finger;
2037 const unsigned char *pre, *run, *pre_start, *run_start;
2038 unsigned char runval;
2040 pre_start = (const unsigned char *)sect->address;
2041 run_start = (const unsigned char *)run_addr;
2043 finger = init_reloc_search(pack, sect);
2045 pre = pre_start;
2046 run = run_start;
2047 while (pre < pre_start + sect->size) {
2048 unsigned long offset = pre - pre_start;
2049 ret = lookup_reloc(pack, &finger, (unsigned long)pre, &r);
2050 if (ret == OK) {
2051 ret = handle_reloc(pack, sect, r, (unsigned long)run,
2052 mode);
2053 if (ret != OK) {
2054 if (mode == RUN_PRE_INITIAL)
2055 ksdebug(pack, "reloc in sect does not "
2056 "match after %lx/%lx bytes\n",
2057 offset, sect->size);
2058 return ret;
2060 if (mode == RUN_PRE_DEBUG)
2061 print_bytes(pack, run, r->howto->size, pre,
2062 r->howto->size);
2063 pre += r->howto->size;
2064 run += r->howto->size;
2065 finger++;
2066 continue;
2067 } else if (ret != NO_MATCH) {
2068 return ret;
2071 if ((sect->flags & KSPLICE_SECTION_TEXT) != 0) {
2072 ret = handle_paravirt(pack, (unsigned long)pre,
2073 (unsigned long)run, &matched);
2074 if (ret != OK)
2075 return ret;
2076 if (matched != 0) {
2077 if (mode == RUN_PRE_DEBUG)
2078 print_bytes(pack, run, matched, pre,
2079 matched);
2080 pre += matched;
2081 run += matched;
2082 continue;
2086 if (probe_kernel_read(&runval, (void *)run, 1) == -EFAULT) {
2087 if (mode == RUN_PRE_INITIAL)
2088 ksdebug(pack, "sect unmapped after %lx/%lx "
2089 "bytes\n", offset, sect->size);
2090 return NO_MATCH;
2093 if (runval != *pre &&
2094 (sect->flags & KSPLICE_SECTION_DATA) == 0) {
2095 if (mode == RUN_PRE_INITIAL)
2096 ksdebug(pack, "sect does not match after "
2097 "%lx/%lx bytes\n", offset, sect->size);
2098 if (mode == RUN_PRE_DEBUG) {
2099 print_bytes(pack, run, 1, pre, 1);
2100 ksdebug(pack, "[p_o=%lx] ! ", offset);
2101 print_bytes(pack, run + 1, 2, pre + 1, 2);
2103 return NO_MATCH;
2105 if (mode == RUN_PRE_DEBUG)
2106 print_bytes(pack, run, 1, pre, 1);
2107 pre++;
2108 run++;
2110 return create_safety_record(pack, sect, safety_records, run_addr,
2111 run - run_start);
/*
 * Debug helper: print a side-by-side hex dump of the run and pre byte
 * ranges, showing "run/pre" pairs where the two differ and one-sided
 * "xx/" or "/xx" entries where one range is longer than the other.
 */
static void print_bytes(struct ksplice_pack *pack,
			const unsigned char *run, int runc,
			const unsigned char *pre, int prec)
{
	int o;
	int matched = min(runc, prec);
	for (o = 0; o < matched; o++) {
		if (run[o] == pre[o])
			ksdebug(pack, "%02x ", run[o]);
		else
			ksdebug(pack, "%02x/%02x ", run[o], pre[o]);
	}
	for (o = matched; o < runc; o++)
		ksdebug(pack, "%02x/ ", run[o]);
	for (o = matched; o < prec; o++)
		ksdebug(pack, "/%02x ", pre[o]);
}
2132 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
2133 static abort_t brute_search(struct ksplice_pack *pack,
2134 struct ksplice_section *sect,
2135 const void *start, unsigned long len,
2136 struct list_head *vals)
2138 unsigned long addr;
2139 char run, pre;
2140 abort_t ret;
2142 for (addr = (unsigned long)start; addr < (unsigned long)start + len;
2143 addr++) {
2144 if (addr % 100000 == 0)
2145 yield();
2147 if (probe_kernel_read(&run, (void *)addr, 1) == -EFAULT)
2148 return OK;
2150 pre = *(const unsigned char *)(sect->address);
2152 if (run != pre)
2153 continue;
2155 ret = try_addr(pack, sect, addr, NULL, RUN_PRE_INITIAL);
2156 if (ret == OK) {
2157 ret = add_candidate_val(pack, vals, addr);
2158 if (ret != OK)
2159 return ret;
2160 } else if (ret != NO_MATCH) {
2161 return ret;
2165 return OK;
2168 static abort_t brute_search_all(struct ksplice_pack *pack,
2169 struct ksplice_section *sect,
2170 struct list_head *vals)
2172 struct module *m;
2173 abort_t ret = OK;
2174 int saved_debug;
2176 ksdebug(pack, "brute_search: searching for %s\n", sect->symbol->label);
2177 saved_debug = pack->update->debug;
2178 pack->update->debug = 0;
2180 list_for_each_entry(m, &modules, list) {
2181 if (!patches_module(m, pack->target) || m == pack->primary)
2182 continue;
2183 ret = brute_search(pack, sect, m->module_core, m->core_size,
2184 vals);
2185 if (ret != OK)
2186 goto out;
2187 ret = brute_search(pack, sect, m->module_init, m->init_size,
2188 vals);
2189 if (ret != OK)
2190 goto out;
2193 ret = brute_search(pack, sect, (const void *)init_mm.start_code,
2194 init_mm.end_code - init_mm.start_code, vals);
2196 out:
2197 pack->update->debug = saved_debug;
2198 return ret;
2200 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
2202 struct range {
2203 unsigned long address;
2204 unsigned long size;
2207 static int reloc_bsearch_compare(const void *key, const void *elt)
2209 const struct range *range = key;
2210 const struct ksplice_reloc *r = elt;
2211 if (range->address + range->size <= r->blank_addr)
2212 return -1;
2213 if (range->address > r->blank_addr)
2214 return 1;
2215 return 0;
2218 static const struct ksplice_reloc *find_reloc(const struct ksplice_reloc *start,
2219 const struct ksplice_reloc *end,
2220 unsigned long address,
2221 unsigned long size)
2223 const struct ksplice_reloc *r;
2224 struct range range = { address, size };
2225 r = bsearch((void *)&range, start, end - start, sizeof(*r),
2226 reloc_bsearch_compare);
2227 if (r == NULL)
2228 return NULL;
2229 while (r > start && (r - 1)->blank_addr >= address)
2230 r--;
2231 return r;
2234 static const struct ksplice_reloc *
2235 init_reloc_search(struct ksplice_pack *pack, const struct ksplice_section *sect)
2237 const struct ksplice_reloc *r;
2238 r = find_reloc(pack->helper_relocs, pack->helper_relocs_end,
2239 sect->address, sect->size);
2240 if (r == NULL)
2241 return pack->helper_relocs_end;
2242 return r;
2246 * lookup_reloc implements an amortized O(1) lookup for the next
2247 * helper relocation. It must be called with a strictly increasing
2248 * sequence of addresses.
2250 * The fingerp is private data for lookup_reloc, and needs to have
2251 * been initialized as a pointer to the result of find_reloc (or
2252 * init_reloc_search).
2254 static abort_t lookup_reloc(struct ksplice_pack *pack,
2255 const struct ksplice_reloc **fingerp,
2256 unsigned long addr,
2257 const struct ksplice_reloc **relocp)
2259 const struct ksplice_reloc *r = *fingerp;
2260 int canary_ret;
2262 while (r < pack->helper_relocs_end &&
2263 addr >= r->blank_addr + r->howto->size &&
2264 !(addr == r->blank_addr && r->howto->size == 0))
2265 r++;
2266 *fingerp = r;
2267 if (r == pack->helper_relocs_end)
2268 return NO_MATCH;
2269 if (addr < r->blank_addr)
2270 return NO_MATCH;
2271 *relocp = r;
2272 if (r->howto->type != KSPLICE_HOWTO_RELOC)
2273 return OK;
2275 canary_ret = contains_canary(pack, r->blank_addr, r->howto);
2276 if (canary_ret < 0)
2277 return UNEXPECTED;
2278 if (canary_ret == 0) {
2279 ksdebug(pack, "run-pre: reloc skipped at p_a=%lx to %s+%lx "
2280 "(altinstr)\n", r->blank_addr, r->symbol->label,
2281 r->target_addend);
2282 return NO_MATCH;
2284 if (addr != r->blank_addr) {
2285 ksdebug(pack, "Invalid nonzero relocation offset\n");
2286 return UNEXPECTED;
2288 return OK;
2291 static abort_t handle_reloc(struct ksplice_pack *pack,
2292 const struct ksplice_section *sect,
2293 const struct ksplice_reloc *r,
2294 unsigned long run_addr, enum run_pre_mode mode)
2296 switch (r->howto->type) {
2297 case KSPLICE_HOWTO_RELOC:
2298 return handle_howto_reloc(pack, sect, r, run_addr, mode);
2299 case KSPLICE_HOWTO_DATE:
2300 case KSPLICE_HOWTO_TIME:
2301 return handle_howto_date(pack, sect, r, run_addr, mode);
2302 case KSPLICE_HOWTO_BUG:
2303 return handle_bug(pack, r, run_addr);
2304 case KSPLICE_HOWTO_EXTABLE:
2305 return handle_extable(pack, r, run_addr);
2306 default:
2307 ksdebug(pack, "Unexpected howto type %d\n", r->howto->type);
2308 return UNEXPECTED;
2313 * For date/time relocations, we check that the sequence of bytes
2314 * matches the format of a date or time.
2316 static abort_t handle_howto_date(struct ksplice_pack *pack,
2317 const struct ksplice_section *sect,
2318 const struct ksplice_reloc *r,
2319 unsigned long run_addr, enum run_pre_mode mode)
2321 abort_t ret;
2322 char *buf = kmalloc(r->howto->size, GFP_KERNEL);
2324 if (buf == NULL)
2325 return OUT_OF_MEMORY;
2326 if (probe_kernel_read(buf, (void *)run_addr, r->howto->size) == -EFAULT) {
2327 ret = NO_MATCH;
2328 goto out;
2331 switch (r->howto->type) {
2332 case KSPLICE_HOWTO_TIME:
2333 if (isdigit(buf[0]) && isdigit(buf[1]) && buf[2] == ':' &&
2334 isdigit(buf[3]) && isdigit(buf[4]) && buf[5] == ':' &&
2335 isdigit(buf[6]) && isdigit(buf[7]))
2336 ret = OK;
2337 else
2338 ret = NO_MATCH;
2339 break;
2340 case KSPLICE_HOWTO_DATE:
2341 if (isalpha(buf[0]) && isalpha(buf[1]) && isalpha(buf[2]) &&
2342 buf[3] == ' ' && (buf[4] == ' ' || isdigit(buf[4])) &&
2343 isdigit(buf[5]) && buf[6] == ' ' && isdigit(buf[7]) &&
2344 isdigit(buf[8]) && isdigit(buf[9]) && isdigit(buf[10]))
2345 ret = OK;
2346 else
2347 ret = NO_MATCH;
2348 break;
2349 default:
2350 ret = UNEXPECTED;
2352 if (ret == NO_MATCH && mode == RUN_PRE_INITIAL)
2353 ksdebug(pack, "%s string: \"%.*s\" does not match format\n",
2354 r->howto->type == KSPLICE_HOWTO_DATE ? "date" : "time",
2355 r->howto->size, buf);
2357 if (ret != OK)
2358 goto out;
2359 ret = create_labelval(pack, r->symbol, run_addr, TEMP);
2360 out:
2361 kfree(buf);
2362 return ret;
2366 * Extract the value of a symbol used in a relocation in the pre code
2367 * during run-pre matching, giving an error if it conflicts with a
2368 * previously found value of that symbol
2370 static abort_t handle_howto_reloc(struct ksplice_pack *pack,
2371 const struct ksplice_section *sect,
2372 const struct ksplice_reloc *r,
2373 unsigned long run_addr,
2374 enum run_pre_mode mode)
2376 struct ksplice_section *sym_sect = symbol_section(pack, r->symbol);
2377 unsigned long offset = r->target_addend;
2378 unsigned long val;
2379 abort_t ret;
2381 ret = read_reloc_value(pack, r, run_addr, &val);
2382 if (ret != OK)
2383 return ret;
2384 if (r->howto->pcrel)
2385 val += run_addr;
2387 #ifdef KSPLICE_STANDALONE
2388 /* The match_map is only used in KSPLICE_STANDALONE */
2389 if (sym_sect == NULL || sym_sect->match_map == NULL || offset == 0) {
2391 } else if (offset < 0 || offset >= sym_sect->size) {
2392 ksdebug(pack, "Out of range relocation: %s+%lx -> %s+%lx",
2393 sect->symbol->label, r->blank_addr - sect->address,
2394 r->symbol->label, offset);
2395 return NO_MATCH;
2396 } else if (sect == sym_sect && sect->match_map[offset] == NULL) {
2397 sym_sect->match_map[offset] =
2398 (const unsigned char *)r->symbol->value + offset;
2399 } else if (sect == sym_sect && (unsigned long)sect->match_map[offset] ==
2400 r->symbol->value + offset) {
2402 } else if (sect == sym_sect) {
2403 ksdebug(pack, "Relocations to nonmatching locations within "
2404 "section %s: %lx does not match %lx\n",
2405 sect->symbol->label, offset,
2406 (unsigned long)sect->match_map[offset] -
2407 r->symbol->value);
2408 return NO_MATCH;
2409 } else if ((sym_sect->flags & KSPLICE_SECTION_MATCHED) == 0) {
2410 if (mode == RUN_PRE_INITIAL)
2411 ksdebug(pack, "Delaying matching of %s due to reloc "
2412 "from to unmatching section: %s+%lx\n",
2413 sect->symbol->label, r->symbol->label, offset);
2414 return NO_MATCH;
2415 } else if (sym_sect->match_map[offset] == NULL) {
2416 if (mode == RUN_PRE_INITIAL)
2417 ksdebug(pack, "Relocation not to instruction boundary: "
2418 "%s+%lx -> %s+%lx", sect->symbol->label,
2419 r->blank_addr - sect->address, r->symbol->label,
2420 offset);
2421 return NO_MATCH;
2422 } else if ((unsigned long)sym_sect->match_map[offset] !=
2423 r->symbol->value + offset) {
2424 if (mode == RUN_PRE_INITIAL)
2425 ksdebug(pack, "Match map shift %s+%lx: %lx != %lx\n",
2426 r->symbol->label, offset,
2427 r->symbol->value + offset,
2428 (unsigned long)sym_sect->match_map[offset]);
2429 val += r->symbol->value + offset -
2430 (unsigned long)sym_sect->match_map[offset];
2432 #endif /* KSPLICE_STANDALONE */
2434 if (mode == RUN_PRE_INITIAL)
2435 ksdebug(pack, "run-pre: reloc at r_a=%lx p_a=%lx to %s+%lx: "
2436 "found %s = %lx\n", run_addr, r->blank_addr,
2437 r->symbol->label, offset, r->symbol->label, val);
2439 if (contains_canary(pack, run_addr, r->howto) != 0) {
2440 ksdebug(pack, "Aborted. Unexpected canary in run code at %lx"
2441 "\n", run_addr);
2442 return UNEXPECTED;
2445 if ((sect->flags & KSPLICE_SECTION_DATA) != 0 &&
2446 sect->symbol == r->symbol)
2447 return OK;
2448 ret = create_labelval(pack, r->symbol, val, TEMP);
2449 if (ret == NO_MATCH && mode == RUN_PRE_INITIAL)
2450 ksdebug(pack, "run-pre: reloc at r_a=%lx p_a=%lx: labelval %s "
2451 "= %lx does not match expected %lx\n", run_addr,
2452 r->blank_addr, r->symbol->label, r->symbol->value, val);
2454 if (ret != OK)
2455 return ret;
2456 if (sym_sect != NULL && (sym_sect->flags & KSPLICE_SECTION_MATCHED) == 0
2457 && (sym_sect->flags & KSPLICE_SECTION_STRING) != 0) {
2458 if (mode == RUN_PRE_INITIAL)
2459 ksdebug(pack, "Recursively comparing string section "
2460 "%s\n", sym_sect->symbol->label);
2461 else if (mode == RUN_PRE_DEBUG)
2462 ksdebug(pack, "[str start] ");
2463 ret = run_pre_cmp(pack, sym_sect, val, NULL, mode);
2464 if (mode == RUN_PRE_DEBUG)
2465 ksdebug(pack, "[str end] ");
2466 if (ret == OK && mode == RUN_PRE_INITIAL)
2467 ksdebug(pack, "Successfully matched string section %s"
2468 "\n", sym_sect->symbol->label);
2469 else if (mode == RUN_PRE_INITIAL)
2470 ksdebug(pack, "Failed to match string section %s\n",
2471 sym_sect->symbol->label);
2473 return ret;
2476 static int symbol_section_bsearch_compare(const void *a, const void *b)
2478 const struct ksplice_symbol *sym = a;
2479 const struct ksplice_section *sect = b;
2480 return strcmp(sym->label, sect->symbol->label);
2483 static int compare_section_labels(const void *va, const void *vb)
2485 const struct ksplice_section *a = va, *b = vb;
2486 return strcmp(a->symbol->label, b->symbol->label);
2489 static struct ksplice_section *symbol_section(struct ksplice_pack *pack,
2490 const struct ksplice_symbol *sym)
2492 return bsearch(sym, pack->helper_sections, pack->helper_sections_end -
2493 pack->helper_sections, sizeof(struct ksplice_section),
2494 symbol_section_bsearch_compare);
2497 /* Find the relocation for the oldaddr of a ksplice_patch */
2498 static const struct ksplice_reloc *patch_reloc(struct ksplice_pack *pack,
2499 const struct ksplice_patch *p)
2501 unsigned long addr = (unsigned long)&p->oldaddr;
2502 const struct ksplice_reloc *r =
2503 find_reloc(pack->primary_relocs, pack->primary_relocs_end, addr,
2504 sizeof(addr));
2505 if (r == NULL || r->blank_addr < addr ||
2506 r->blank_addr >= addr + sizeof(addr))
2507 return NULL;
2508 return r;
2512 * Populates vals with the possible values for ksym from the various
2513 * sources Ksplice uses to resolve symbols
2515 static abort_t lookup_symbol(struct ksplice_pack *pack,
2516 const struct ksplice_symbol *ksym,
2517 struct list_head *vals)
2519 abort_t ret;
2521 #ifdef KSPLICE_STANDALONE
2522 if (!bootstrapped)
2523 return OK;
2524 #endif /* KSPLICE_STANDALONE */
2526 if (ksym->vals == NULL) {
2527 release_vals(vals);
2528 ksdebug(pack, "using detected sym %s=%lx\n", ksym->label,
2529 ksym->value);
2530 return add_candidate_val(pack, vals, ksym->value);
2533 #ifdef CONFIG_MODULE_UNLOAD
2534 if (strcmp(ksym->label, "cleanup_module") == 0 && pack->target != NULL
2535 && pack->target->exit != NULL) {
2536 ret = add_candidate_val(pack, vals,
2537 (unsigned long)pack->target->exit);
2538 if (ret != OK)
2539 return ret;
2541 #endif
2543 if (ksym->name != NULL) {
2544 struct candidate_val *val;
2545 list_for_each_entry(val, ksym->vals, list) {
2546 ret = add_candidate_val(pack, vals, val->val);
2547 if (ret != OK)
2548 return ret;
2551 ret = new_export_lookup(pack, ksym->name, vals);
2552 if (ret != OK)
2553 return ret;
2556 return OK;
2559 #ifdef KSPLICE_STANDALONE
2560 static abort_t
2561 add_system_map_candidates(struct ksplice_pack *pack,
2562 const struct ksplice_system_map *start,
2563 const struct ksplice_system_map *end,
2564 const char *label, struct list_head *vals)
2566 abort_t ret;
2567 long off;
2568 int i;
2569 const struct ksplice_system_map *smap;
2571 /* Some Fedora kernel releases have System.map files whose symbol
2572 * addresses disagree with the running kernel by a constant address
2573 * offset because of the CONFIG_PHYSICAL_START and CONFIG_PHYSICAL_ALIGN
2574 * values used to compile these kernels. This constant address offset
2575 * is always a multiple of 0x100000.
2577 * If we observe an offset that is NOT a multiple of 0x100000, then the
2578 * user provided us with an incorrect System.map file, and we should
2579 * abort.
2580 * If we observe an offset that is a multiple of 0x100000, then we can
2581 * adjust the System.map address values accordingly and proceed.
2583 off = (unsigned long)printk - pack->map_printk;
2584 if (off & 0xfffff) {
2585 ksdebug(pack, "Aborted. System.map does not match kernel.\n");
2586 return BAD_SYSTEM_MAP;
2589 smap = bsearch(label, start, end - start, sizeof(*smap),
2590 system_map_bsearch_compare);
2591 if (smap == NULL)
2592 return OK;
2594 for (i = 0; i < smap->nr_candidates; i++) {
2595 ret = add_candidate_val(pack, vals, smap->candidates[i] + off);
2596 if (ret != OK)
2597 return ret;
2599 return OK;
2602 static int system_map_bsearch_compare(const void *key, const void *elt)
2604 const struct ksplice_system_map *map = elt;
2605 const char *label = key;
2606 return strcmp(label, map->label);
2608 #endif /* !KSPLICE_STANDALONE */
2611 * An update could one module to export a symbol and at the same time
2612 * change another module to use that symbol. This violates the normal
2613 * situation where the packs can be handled independently.
2615 * new_export_lookup obtains symbol values from the changes to the
2616 * exported symbol table made by other packs.
2618 static abort_t new_export_lookup(struct ksplice_pack *ipack, const char *name,
2619 struct list_head *vals)
2621 struct ksplice_pack *pack;
2622 struct ksplice_patch *p;
2623 list_for_each_entry(pack, &ipack->update->packs, list) {
2624 for (p = pack->patches; p < pack->patches_end; p++) {
2625 const struct kernel_symbol *sym;
2626 const struct ksplice_reloc *r;
2627 if (p->type != KSPLICE_PATCH_EXPORT ||
2628 strcmp(name, *(const char **)p->contents) != 0)
2629 continue;
2631 /* Check that the p->oldaddr reloc has been resolved. */
2632 r = patch_reloc(pack, p);
2633 if (r == NULL ||
2634 contains_canary(pack, r->blank_addr, r->howto) != 0)
2635 continue;
2636 sym = (const struct kernel_symbol *)r->symbol->value;
2639 * Check that the sym->value reloc has been resolved,
2640 * if there is a Ksplice relocation there.
2642 r = find_reloc(pack->primary_relocs,
2643 pack->primary_relocs_end,
2644 (unsigned long)&sym->value,
2645 sizeof(&sym->value));
2646 if (r != NULL &&
2647 r->blank_addr == (unsigned long)&sym->value &&
2648 contains_canary(pack, r->blank_addr, r->howto) != 0)
2649 continue;
2650 return add_candidate_val(ipack, vals, sym->value);
2653 return OK;
2657 * When apply_patches is called, the update should be fully prepared.
2658 * apply_patches will try to actually insert trampolines for the
2659 * update.
2661 static abort_t apply_patches(struct update *update)
2663 int i;
2664 abort_t ret;
2665 struct ksplice_pack *pack;
2667 ret = map_trampoline_pages(update);
2668 if (ret != OK)
2669 return ret;
2671 list_for_each_entry(pack, &update->packs, list) {
2672 const typeof(int (*)(void)) *f;
2673 for (f = pack->pre_apply; f < pack->pre_apply_end; f++) {
2674 if ((*f)() != 0) {
2675 ret = CALL_FAILED;
2676 goto out;
2681 for (i = 0; i < 5; i++) {
2682 cleanup_conflicts(update);
2683 #ifdef KSPLICE_STANDALONE
2684 bust_spinlocks(1);
2685 #endif /* KSPLICE_STANDALONE */
2686 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
2687 ret = (__force abort_t)stop_machine(__apply_patches, update,
2688 NULL);
2689 #else /* LINUX_VERSION_CODE < */
2690 /* 9b1a4d38373a5581a4e01032a3ccdd94cd93477b was after 2.6.26 */
2691 ret = (__force abort_t)stop_machine_run(__apply_patches, update,
2692 NR_CPUS);
2693 #endif /* LINUX_VERSION_CODE */
2694 #ifdef KSPLICE_STANDALONE
2695 bust_spinlocks(0);
2696 #endif /* KSPLICE_STANDALONE */
2697 if (ret != CODE_BUSY)
2698 break;
2699 set_current_state(TASK_INTERRUPTIBLE);
2700 schedule_timeout(msecs_to_jiffies(1000));
2702 out:
2703 unmap_trampoline_pages(update);
2705 if (ret == CODE_BUSY) {
2706 print_conflicts(update);
2707 _ksdebug(update, "Aborted %s. stack check: to-be-replaced "
2708 "code is busy.\n", update->kid);
2709 } else if (ret == ALREADY_REVERSED) {
2710 _ksdebug(update, "Aborted %s. Ksplice update %s is already "
2711 "reversed.\n", update->kid, update->kid);
2714 if (ret != OK) {
2715 list_for_each_entry(pack, &update->packs, list) {
2716 const typeof(void (*)(void)) *f;
2717 for (f = pack->fail_apply; f < pack->fail_apply_end;
2718 f++)
2719 (*f)();
2722 return ret;
2725 list_for_each_entry(pack, &update->packs, list) {
2726 const typeof(void (*)(void)) *f;
2727 for (f = pack->post_apply; f < pack->post_apply_end; f++)
2728 (*f)();
2731 _ksdebug(update, "Atomic patch insertion for %s complete\n",
2732 update->kid);
2733 return OK;
2736 static abort_t reverse_patches(struct update *update)
2738 int i;
2739 abort_t ret;
2740 struct ksplice_pack *pack;
2742 clear_debug_buf(update);
2743 ret = init_debug_buf(update);
2744 if (ret != OK)
2745 return ret;
2747 _ksdebug(update, "Preparing to reverse %s\n", update->kid);
2749 ret = map_trampoline_pages(update);
2750 if (ret != OK)
2751 return ret;
2753 list_for_each_entry(pack, &update->packs, list) {
2754 const typeof(int (*)(void)) *f;
2755 for (f = pack->pre_reverse; f < pack->pre_reverse_end; f++) {
2756 if ((*f)() != 0) {
2757 ret = CALL_FAILED;
2758 goto out;
2763 for (i = 0; i < 5; i++) {
2764 cleanup_conflicts(update);
2765 clear_list(&update->conflicts, struct conflict, list);
2766 #ifdef KSPLICE_STANDALONE
2767 bust_spinlocks(1);
2768 #endif /* KSPLICE_STANDALONE */
2769 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
2770 ret = (__force abort_t)stop_machine(__reverse_patches, update,
2771 NULL);
2772 #else /* LINUX_VERSION_CODE < */
2773 /* 9b1a4d38373a5581a4e01032a3ccdd94cd93477b was after 2.6.26 */
2774 ret = (__force abort_t)stop_machine_run(__reverse_patches,
2775 update, NR_CPUS);
2776 #endif /* LINUX_VERSION_CODE */
2777 #ifdef KSPLICE_STANDALONE
2778 bust_spinlocks(0);
2779 #endif /* KSPLICE_STANDALONE */
2780 if (ret != CODE_BUSY)
2781 break;
2782 set_current_state(TASK_INTERRUPTIBLE);
2783 schedule_timeout(msecs_to_jiffies(1000));
2785 out:
2786 unmap_trampoline_pages(update);
2788 if (ret == CODE_BUSY) {
2789 print_conflicts(update);
2790 _ksdebug(update, "Aborted %s. stack check: to-be-reversed "
2791 "code is busy.\n", update->kid);
2792 } else if (ret == MODULE_BUSY) {
2793 _ksdebug(update, "Update %s is in use by another module\n",
2794 update->kid);
2797 if (ret != OK) {
2798 list_for_each_entry(pack, &update->packs, list) {
2799 const typeof(void (*)(void)) *f;
2800 for (f = pack->fail_reverse; f < pack->fail_reverse_end;
2801 f++)
2802 (*f)();
2805 return ret;
2808 list_for_each_entry(pack, &update->packs, list) {
2809 const typeof(void (*)(void)) *f;
2810 for (f = pack->post_reverse; f < pack->post_reverse_end; f++)
2811 (*f)();
2814 list_for_each_entry(pack, &update->packs, list)
2815 clear_list(&pack->safety_records, struct safety_record, list);
2817 _ksdebug(update, "Atomic patch removal for %s complete\n", update->kid);
2818 return OK;
2821 /* Atomically insert the update; run from within stop_machine */
2822 static int __apply_patches(void *updateptr)
2824 struct update *update = updateptr;
2825 struct ksplice_pack *pack;
2826 struct ksplice_patch *p;
2827 abort_t ret;
2829 if (update->stage == STAGE_APPLIED)
2830 return (__force int)OK;
2832 if (update->stage != STAGE_PREPARING)
2833 return (__force int)UNEXPECTED;
2835 ret = check_each_task(update);
2836 if (ret != OK)
2837 return (__force int)ret;
2839 list_for_each_entry(pack, &update->packs, list) {
2840 if (try_module_get(pack->primary) != 1) {
2841 struct ksplice_pack *pack1;
2842 list_for_each_entry(pack1, &update->packs, list) {
2843 if (pack1 == pack)
2844 break;
2845 module_put(pack1->primary);
2847 module_put(THIS_MODULE);
2848 return (__force int)UNEXPECTED;
2852 list_for_each_entry(pack, &update->packs, list) {
2853 const typeof(int (*)(void)) *f;
2854 for (f = pack->check_apply; f < pack->check_apply_end; f++)
2855 if ((*f)() != 0)
2856 return (__force int)CALL_FAILED;
2859 /* Commit point: the update application will succeed. */
2861 update->stage = STAGE_APPLIED;
2862 #ifdef TAINT_KSPLICE
2863 add_taint(TAINT_KSPLICE);
2864 #endif
2866 list_for_each_entry(pack, &update->packs, list)
2867 list_add(&pack->module_list_entry.list, &ksplice_module_list);
2869 list_for_each_entry(pack, &update->packs, list) {
2870 for (p = pack->patches; p < pack->patches_end; p++)
2871 insert_trampoline(p);
2874 list_for_each_entry(pack, &update->packs, list) {
2875 const typeof(void (*)(void)) *f;
2876 for (f = pack->apply; f < pack->apply_end; f++)
2877 (*f)();
2880 return (__force int)OK;
2883 /* Atomically remove the update; run from within stop_machine */
2884 static int __reverse_patches(void *updateptr)
2886 struct update *update = updateptr;
2887 struct ksplice_pack *pack;
2888 const struct ksplice_patch *p;
2889 abort_t ret;
2891 if (update->stage != STAGE_APPLIED)
2892 return (__force int)OK;
2894 #ifdef CONFIG_MODULE_UNLOAD
2895 list_for_each_entry(pack, &update->packs, list) {
2896 if (module_refcount(pack->primary) != 1)
2897 return (__force int)MODULE_BUSY;
2899 #endif /* CONFIG_MODULE_UNLOAD */
2901 ret = check_each_task(update);
2902 if (ret != OK)
2903 return (__force int)ret;
2905 list_for_each_entry(pack, &update->packs, list) {
2906 for (p = pack->patches; p < pack->patches_end; p++) {
2907 ret = verify_trampoline(pack, p);
2908 if (ret != OK)
2909 return (__force int)ret;
2913 list_for_each_entry(pack, &update->packs, list) {
2914 const typeof(int (*)(void)) *f;
2915 for (f = pack->check_reverse; f < pack->check_reverse_end; f++)
2916 if ((*f)() != 0)
2917 return (__force int)CALL_FAILED;
2920 /* Commit point: the update reversal will succeed. */
2922 update->stage = STAGE_REVERSED;
2924 list_for_each_entry(pack, &update->packs, list)
2925 module_put(pack->primary);
2927 list_for_each_entry(pack, &update->packs, list)
2928 list_del(&pack->module_list_entry.list);
2930 list_for_each_entry(pack, &update->packs, list) {
2931 const typeof(void (*)(void)) *f;
2932 for (f = pack->reverse; f < pack->reverse_end; f++)
2933 (*f)();
2936 list_for_each_entry(pack, &update->packs, list) {
2937 for (p = pack->patches; p < pack->patches_end; p++)
2938 remove_trampoline(p);
2941 return (__force int)OK;
2945 * Check whether any thread's instruction pointer or any address of
2946 * its stack is contained in one of the safety_records associated with
2947 * the update.
2949 * check_each_task must be called from inside stop_machine, because it
2950 * does not take tasklist_lock (which cannot be held by anyone else
2951 * during stop_machine).
2953 static abort_t check_each_task(struct update *update)
2955 const struct task_struct *g, *p;
2956 abort_t status = OK, ret;
2957 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
2958 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
2959 read_lock(&tasklist_lock);
2960 #endif /* LINUX_VERSION_CODE */
2961 do_each_thread(g, p) {
2962 /* do_each_thread is a double loop! */
2963 ret = check_task(update, p, false);
2964 if (ret != OK) {
2965 check_task(update, p, true);
2966 status = ret;
2968 if (ret != OK && ret != CODE_BUSY)
2969 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
2970 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
2971 goto out;
2972 #else /* LINUX_VERSION_CODE < */
2973 return ret;
2974 #endif /* LINUX_VERSION_CODE */
2975 } while_each_thread(g, p);
2976 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
2977 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
2978 out:
2979 read_unlock(&tasklist_lock);
2980 #endif /* LINUX_VERSION_CODE */
2981 return status;
2984 static abort_t check_task(struct update *update,
2985 const struct task_struct *t, bool rerun)
2987 abort_t status, ret;
2988 struct conflict *conf = NULL;
2990 if (rerun) {
2991 conf = kmalloc(sizeof(*conf), GFP_ATOMIC);
2992 if (conf == NULL)
2993 return OUT_OF_MEMORY;
2994 conf->process_name = kstrdup(t->comm, GFP_ATOMIC);
2995 if (conf->process_name == NULL) {
2996 kfree(conf);
2997 return OUT_OF_MEMORY;
2999 conf->pid = t->pid;
3000 INIT_LIST_HEAD(&conf->stack);
3001 list_add(&conf->list, &update->conflicts);
3004 status = check_address(update, conf, KSPLICE_IP(t));
3005 if (t == current) {
3006 ret = check_stack(update, conf, task_thread_info(t),
3007 (unsigned long *)__builtin_frame_address(0));
3008 if (status == OK)
3009 status = ret;
3010 } else if (!task_curr(t)) {
3011 ret = check_stack(update, conf, task_thread_info(t),
3012 (unsigned long *)KSPLICE_SP(t));
3013 if (status == OK)
3014 status = ret;
3015 } else if (!is_stop_machine(t)) {
3016 status = UNEXPECTED_RUNNING_TASK;
3018 return status;
3021 static abort_t check_stack(struct update *update, struct conflict *conf,
3022 const struct thread_info *tinfo,
3023 const unsigned long *stack)
3025 abort_t status = OK, ret;
3026 unsigned long addr;
3028 while (valid_stack_ptr(tinfo, stack)) {
3029 addr = *stack++;
3030 ret = check_address(update, conf, addr);
3031 if (ret != OK)
3032 status = ret;
3034 return status;
3037 static abort_t check_address(struct update *update,
3038 struct conflict *conf, unsigned long addr)
3040 abort_t status = OK, ret;
3041 const struct safety_record *rec;
3042 struct ksplice_pack *pack;
3043 struct conflict_addr *ca = NULL;
3045 if (conf != NULL) {
3046 ca = kmalloc(sizeof(*ca), GFP_ATOMIC);
3047 if (ca == NULL)
3048 return OUT_OF_MEMORY;
3049 ca->addr = addr;
3050 ca->has_conflict = false;
3051 ca->label = NULL;
3052 list_add(&ca->list, &conf->stack);
3055 list_for_each_entry(pack, &update->packs, list) {
3056 unsigned long tramp_addr = follow_trampolines(pack, addr);
3057 list_for_each_entry(rec, &pack->safety_records, list) {
3058 ret = check_record(ca, rec, tramp_addr);
3059 if (ret != OK)
3060 status = ret;
3063 return status;
3066 static abort_t check_record(struct conflict_addr *ca,
3067 const struct safety_record *rec, unsigned long addr)
3069 if (addr >= rec->addr && addr < rec->addr + rec->size) {
3070 if (ca != NULL) {
3071 ca->label = rec->label;
3072 ca->has_conflict = true;
3074 return CODE_BUSY;
3076 return OK;
3079 /* Is the task one of the stop_machine tasks? */
3080 static bool is_stop_machine(const struct task_struct *t)
3082 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
3083 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
3084 const char *kstop_prefix = "kstop/";
3085 #else /* LINUX_VERSION_CODE < */
3086 /* c9583e55fa2b08a230c549bd1e3c0bde6c50d9cc was after 2.6.27 */
3087 const char *kstop_prefix = "kstop";
3088 #endif /* LINUX_VERSION_CODE */
3089 const char *num;
3090 if (!starts_with(t->comm, kstop_prefix))
3091 return false;
3092 num = t->comm + strlen(kstop_prefix);
3093 return num[strspn(num, "0123456789")] == '\0';
3094 #else /* LINUX_VERSION_CODE < */
3095 /* ffdb5976c47609c862917d4c186ecbb5706d2dda was after 2.6.26 */
3096 return strcmp(t->comm, "kstopmachine") == 0;
3097 #endif /* LINUX_VERSION_CODE */
3100 static void cleanup_conflicts(struct update *update)
3102 struct conflict *conf;
3103 list_for_each_entry(conf, &update->conflicts, list) {
3104 clear_list(&conf->stack, struct conflict_addr, list);
3105 kfree(conf->process_name);
3107 clear_list(&update->conflicts, struct conflict, list);
3110 static void print_conflicts(struct update *update)
3112 const struct conflict *conf;
3113 const struct conflict_addr *ca;
3114 list_for_each_entry(conf, &update->conflicts, list) {
3115 _ksdebug(update, "stack check: pid %d (%s):", conf->pid,
3116 conf->process_name);
3117 list_for_each_entry(ca, &conf->stack, list) {
3118 _ksdebug(update, " %lx", ca->addr);
3119 if (ca->has_conflict)
3120 _ksdebug(update, " [<-CONFLICT]");
3122 _ksdebug(update, "\n");
3126 static void insert_trampoline(struct ksplice_patch *p)
3128 mm_segment_t old_fs = get_fs();
3129 set_fs(KERNEL_DS);
3130 memcpy(p->saved, p->vaddr, p->size);
3131 memcpy(p->vaddr, p->contents, p->size);
3132 flush_icache_range(p->oldaddr, p->oldaddr + p->size);
3133 set_fs(old_fs);
3136 static abort_t verify_trampoline(struct ksplice_pack *pack,
3137 const struct ksplice_patch *p)
3139 if (memcmp(p->vaddr, p->contents, p->size) != 0) {
3140 ksdebug(pack, "Aborted. Trampoline at %lx has been "
3141 "overwritten.\n", p->oldaddr);
3142 return CODE_BUSY;
3144 return OK;
3147 static void remove_trampoline(const struct ksplice_patch *p)
3149 mm_segment_t old_fs = get_fs();
3150 set_fs(KERNEL_DS);
3151 memcpy(p->vaddr, p->saved, p->size);
3152 flush_icache_range(p->oldaddr, p->oldaddr + p->size);
3153 set_fs(old_fs);
3156 /* Returns NO_MATCH if there's already a labelval with a different value */
3157 static abort_t create_labelval(struct ksplice_pack *pack,
3158 struct ksplice_symbol *ksym,
3159 unsigned long val, int status)
3161 val = follow_trampolines(pack, val);
3162 if (ksym->vals == NULL)
3163 return ksym->value == val ? OK : NO_MATCH;
3165 ksym->value = val;
3166 if (status == TEMP) {
3167 struct labelval *lv = kmalloc(sizeof(*lv), GFP_KERNEL);
3168 if (lv == NULL)
3169 return OUT_OF_MEMORY;
3170 lv->symbol = ksym;
3171 lv->saved_vals = ksym->vals;
3172 list_add(&lv->list, &pack->temp_labelvals);
3174 ksym->vals = NULL;
3175 return OK;
3179 * Creates a new safety_record for a helper section based on its
3180 * ksplice_section and run-pre matching information.
3182 static abort_t create_safety_record(struct ksplice_pack *pack,
3183 const struct ksplice_section *sect,
3184 struct list_head *record_list,
3185 unsigned long run_addr,
3186 unsigned long run_size)
3188 struct safety_record *rec;
3189 struct ksplice_patch *p;
3191 if (record_list == NULL)
3192 return OK;
3194 for (p = pack->patches; p < pack->patches_end; p++) {
3195 const struct ksplice_reloc *r = patch_reloc(pack, p);
3196 if (strcmp(sect->symbol->label, r->symbol->label) == 0)
3197 break;
3199 if (p >= pack->patches_end)
3200 return OK;
3202 rec = kmalloc(sizeof(*rec), GFP_KERNEL);
3203 if (rec == NULL)
3204 return OUT_OF_MEMORY;
3206 * The helper might be unloaded when checking reversing
3207 * patches, so we need to kstrdup the label here.
3209 rec->label = kstrdup(sect->symbol->label, GFP_KERNEL);
3210 if (rec->label == NULL) {
3211 kfree(rec);
3212 return OUT_OF_MEMORY;
3214 rec->addr = run_addr;
3215 rec->size = run_size;
3217 list_add(&rec->list, record_list);
3218 return OK;
3221 static abort_t add_candidate_val(struct ksplice_pack *pack,
3222 struct list_head *vals, unsigned long val)
3224 struct candidate_val *tmp, *new;
3227 * Careful: follow trampolines before comparing values so that we do
3228 * not mistake the obsolete function for another copy of the function.
3230 val = follow_trampolines(pack, val);
3232 list_for_each_entry(tmp, vals, list) {
3233 if (tmp->val == val)
3234 return OK;
3236 new = kmalloc(sizeof(*new), GFP_KERNEL);
3237 if (new == NULL)
3238 return OUT_OF_MEMORY;
3239 new->val = val;
3240 list_add(&new->list, vals);
3241 return OK;
3244 static void release_vals(struct list_head *vals)
3246 clear_list(vals, struct candidate_val, list);
3250 * The temp_labelvals list is used to cache those temporary labelvals
3251 * that have been created to cross-check the symbol values obtained
3252 * from different relocations within a single section being matched.
3254 * If status is VAL, commit the temp_labelvals as final values.
3256 * If status is NOVAL, restore the list of possible values to the
3257 * ksplice_symbol, so that it no longer has a known value.
3259 static void set_temp_labelvals(struct ksplice_pack *pack, int status)
3261 struct labelval *lv, *n;
3262 list_for_each_entry_safe(lv, n, &pack->temp_labelvals, list) {
3263 if (status == NOVAL) {
3264 lv->symbol->vals = lv->saved_vals;
3265 } else {
3266 release_vals(lv->saved_vals);
3267 kfree(lv->saved_vals);
3269 list_del(&lv->list);
3270 kfree(lv);
3274 /* Is there a Ksplice canary with given howto at blank_addr? */
3275 static int contains_canary(struct ksplice_pack *pack, unsigned long blank_addr,
3276 const struct ksplice_reloc_howto *howto)
3278 switch (howto->size) {
3279 case 1:
3280 return (*(uint8_t *)blank_addr & howto->dst_mask) ==
3281 (KSPLICE_CANARY & howto->dst_mask);
3282 case 2:
3283 return (*(uint16_t *)blank_addr & howto->dst_mask) ==
3284 (KSPLICE_CANARY & howto->dst_mask);
3285 case 4:
3286 return (*(uint32_t *)blank_addr & howto->dst_mask) ==
3287 (KSPLICE_CANARY & howto->dst_mask);
3288 #if BITS_PER_LONG >= 64
3289 case 8:
3290 return (*(uint64_t *)blank_addr & howto->dst_mask) ==
3291 (KSPLICE_CANARY & howto->dst_mask);
3292 #endif /* BITS_PER_LONG */
3293 default:
3294 ksdebug(pack, "Aborted. Invalid relocation size.\n");
3295 return -1;
3300 * Compute the address of the code you would actually run if you were
3301 * to call the function at addr (i.e., follow the sequence of jumps
3302 * starting at addr)
3304 static unsigned long follow_trampolines(struct ksplice_pack *pack,
3305 unsigned long addr)
3307 unsigned long new_addr;
3308 struct module *m;
3310 while (1) {
3311 #ifdef KSPLICE_STANDALONE
3312 if (!bootstrapped)
3313 return addr;
3314 #endif /* KSPLICE_STANDALONE */
3315 if (!__kernel_text_address(addr) ||
3316 trampoline_target(pack, addr, &new_addr) != OK)
3317 return addr;
3318 m = __module_text_address(new_addr);
3319 if (m == NULL || m == pack->target ||
3320 !starts_with(m->name, "ksplice"))
3321 return addr;
3322 addr = new_addr;
3326 /* Does module a patch module b? */
3327 static bool patches_module(const struct module *a, const struct module *b)
3329 #ifdef KSPLICE_NO_KERNEL_SUPPORT
3330 const char *name;
3331 if (a == b)
3332 return true;
3333 if (a == NULL || !starts_with(a->name, "ksplice_"))
3334 return false;
3335 name = a->name + strlen("ksplice_");
3336 name += strcspn(name, "_");
3337 if (name[0] != '_')
3338 return false;
3339 name++;
3340 return strcmp(name, b == NULL ? "vmlinux" : b->name) == 0;
3341 #else /* !KSPLICE_NO_KERNEL_SUPPORT */
3342 struct ksplice_module_list_entry *entry;
3343 if (a == b)
3344 return true;
3345 list_for_each_entry(entry, &ksplice_module_list, list) {
3346 if (entry->target == b && entry->primary == a)
3347 return true;
3349 return false;
3350 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
/* Does str begin with prefix? */
static bool starts_with(const char *str, const char *prefix)
{
	return strncmp(str, prefix, strlen(prefix)) == 0;
}
3358 static bool singular(struct list_head *list)
3360 return !list_empty(list) && list->next->next == list;
/* Binary search over a sorted array of n elements of the given size;
 * returns a pointer to a matching element or NULL.  (Local replacement
 * for the library bsearch, which is unavailable here.) */
static void *bsearch(const void *key, const void *base, size_t n,
		     size_t size, int (*cmp)(const void *key, const void *elt))
{
	int start = 0, end = n - 1, mid, result;
	if (n == 0)
		return NULL;
	while (start <= end) {
		mid = (start + end) / 2;
		result = cmp(key, base + mid * size);
		if (result < 0)
			end = mid - 1;
		else if (result > 0)
			start = mid + 1;
		else
			return (void *)base + mid * size;
	}
	return NULL;
}
3382 static int compare_relocs(const void *a, const void *b)
3384 const struct ksplice_reloc *ra = a, *rb = b;
3385 if (ra->blank_addr > rb->blank_addr)
3386 return 1;
3387 else if (ra->blank_addr < rb->blank_addr)
3388 return -1;
3389 else
3390 return ra->howto->size - rb->howto->size;
#ifdef KSPLICE_STANDALONE
/* Sort/search comparator: order System.map entries by label. */
static int compare_system_map(const void *a, const void *b)
{
	const struct ksplice_system_map *sa = a, *sb = b;
	return strcmp(sa->label, sb->label);
}
#endif /* KSPLICE_STANDALONE */
3401 #ifdef CONFIG_DEBUG_FS
3402 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
3403 /* Old kernels don't have debugfs_create_blob */
3404 static ssize_t read_file_blob(struct file *file, char __user *user_buf,
3405 size_t count, loff_t *ppos)
3407 struct debugfs_blob_wrapper *blob = file->private_data;
3408 return simple_read_from_buffer(user_buf, count, ppos, blob->data,
3409 blob->size);
3412 static int blob_open(struct inode *inode, struct file *file)
3414 if (inode->i_private)
3415 file->private_data = inode->i_private;
3416 return 0;
3419 static struct file_operations fops_blob = {
3420 .read = read_file_blob,
3421 .open = blob_open,
3424 static struct dentry *debugfs_create_blob(const char *name, mode_t mode,
3425 struct dentry *parent,
3426 struct debugfs_blob_wrapper *blob)
3428 return debugfs_create_file(name, mode, parent, blob, &fops_blob);
3430 #endif /* LINUX_VERSION_CODE */
3432 static abort_t init_debug_buf(struct update *update)
3434 update->debug_blob.size = 0;
3435 update->debug_blob.data = NULL;
3436 update->debugfs_dentry =
3437 debugfs_create_blob(update->name, S_IFREG | S_IRUSR, NULL,
3438 &update->debug_blob);
3439 if (update->debugfs_dentry == NULL)
3440 return OUT_OF_MEMORY;
3441 return OK;
3444 static void clear_debug_buf(struct update *update)
3446 if (update->debugfs_dentry == NULL)
3447 return;
3448 debugfs_remove(update->debugfs_dentry);
3449 update->debugfs_dentry = NULL;
3450 update->debug_blob.size = 0;
3451 vfree(update->debug_blob.data);
3452 update->debug_blob.data = NULL;
3455 static int _ksdebug(struct update *update, const char *fmt, ...)
3457 va_list args;
3458 unsigned long size, old_size, new_size;
3460 if (update->debug == 0)
3461 return 0;
3463 /* size includes the trailing '\0' */
3464 va_start(args, fmt);
3465 size = 1 + vsnprintf(update->debug_blob.data, 0, fmt, args);
3466 va_end(args);
3467 old_size = update->debug_blob.size == 0 ? 0 :
3468 max(PAGE_SIZE, roundup_pow_of_two(update->debug_blob.size));
3469 new_size = update->debug_blob.size + size == 0 ? 0 :
3470 max(PAGE_SIZE, roundup_pow_of_two(update->debug_blob.size + size));
3471 if (new_size > old_size) {
3472 char *buf = vmalloc(new_size);
3473 if (buf == NULL)
3474 return -ENOMEM;
3475 memcpy(buf, update->debug_blob.data, update->debug_blob.size);
3476 vfree(update->debug_blob.data);
3477 update->debug_blob.data = buf;
3479 va_start(args, fmt);
3480 update->debug_blob.size += vsnprintf(update->debug_blob.data +
3481 update->debug_blob.size,
3482 size, fmt, args);
3483 va_end(args);
3484 return 0;
3486 #else /* CONFIG_DEBUG_FS */
3487 static abort_t init_debug_buf(struct update *update)
3489 return OK;
3492 static void clear_debug_buf(struct update *update)
3494 return;
3497 static int _ksdebug(struct update *update, const char *fmt, ...)
3499 va_list args;
3501 if (update->debug == 0)
3502 return 0;
3504 if (!update->debug_continue_line)
3505 printk(KERN_DEBUG "ksplice: ");
3507 va_start(args, fmt);
3508 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
3509 vprintk(fmt, args);
3510 #else /* LINUX_VERSION_CODE < */
3511 /* 683b229286b429244f35726b3c18caec429233bd was after 2.6.8 */
3513 char *buf = kvasprintf(GFP_KERNEL, fmt, args);
3514 printk("%s", buf);
3515 kfree(buf);
3517 #endif /* LINUX_VERSION_CODE */
3518 va_end(args);
3520 update->debug_continue_line =
3521 fmt[0] == '\0' || fmt[strlen(fmt) - 1] != '\n';
3522 return 0;
3524 #endif /* CONFIG_DEBUG_FS */
3526 #ifdef KSPLICE_NO_KERNEL_SUPPORT
3527 #ifdef CONFIG_KALLSYMS
3528 static int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
3529 struct module *, unsigned long),
3530 void *data)
3532 char namebuf[KSYM_NAME_LEN];
3533 unsigned long i;
3534 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3535 unsigned int off;
3536 #endif /* LINUX_VERSION_CODE */
3537 int ret;
3539 /* kallsyms compression was added by 5648d78927ca65e74aadc88a2b1d6431e55e78ec
3540 * 2.6.10 was the first release after this commit
3542 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3543 for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
3544 off = kallsyms_expand_symbol(off, namebuf);
3545 ret = fn(data, namebuf, NULL, kallsyms_addresses[i]);
3546 if (ret != 0)
3547 return ret;
3549 #else /* LINUX_VERSION_CODE < */
3550 char *knames;
3552 for (i = 0, knames = kallsyms_names; i < kallsyms_num_syms; i++) {
3553 unsigned prefix = *knames++;
3555 strlcpy(namebuf + prefix, knames, KSYM_NAME_LEN - prefix);
3557 ret = fn(data, namebuf, NULL, kallsyms_addresses[i]);
3558 if (ret != OK)
3559 return ret;
3561 knames += strlen(knames) + 1;
3563 #endif /* LINUX_VERSION_CODE */
3564 return module_kallsyms_on_each_symbol(fn, data);
3567 /* kallsyms compression was added by 5648d78927ca65e74aadc88a2b1d6431e55e78ec
3568 * 2.6.10 was the first release after this commit
3570 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3571 extern u8 kallsyms_token_table[];
3572 extern u16 kallsyms_token_index[];
3574 static unsigned int kallsyms_expand_symbol(unsigned int off, char *result)
3576 long len, skipped_first = 0;
3577 const u8 *tptr, *data;
3579 data = &kallsyms_names[off];
3580 len = *data;
3581 data++;
3583 off += len + 1;
3585 while (len) {
3586 tptr = &kallsyms_token_table[kallsyms_token_index[*data]];
3587 data++;
3588 len--;
3590 while (*tptr) {
3591 if (skipped_first) {
3592 *result = *tptr;
3593 result++;
3594 } else
3595 skipped_first = 1;
3596 tptr++;
3600 *result = '\0';
3602 return off;
3604 #endif /* LINUX_VERSION_CODE */
3606 static int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
3607 struct module *,
3608 unsigned long),
3609 void *data)
3611 struct module *mod;
3612 unsigned int i;
3613 int ret;
3615 list_for_each_entry(mod, &modules, list) {
3616 for (i = 0; i < mod->num_symtab; i++) {
3617 ret = fn(data, mod->strtab + mod->symtab[i].st_name,
3618 mod, mod->symtab[i].st_value);
3619 if (ret != 0)
3620 return ret;
3623 return 0;
3625 #endif /* CONFIG_KALLSYMS */
3627 static struct module *find_module(const char *name)
3629 struct module *mod;
3631 list_for_each_entry(mod, &modules, list) {
3632 if (strcmp(mod->name, name) == 0)
3633 return mod;
3635 return NULL;
3638 #ifdef CONFIG_MODULE_UNLOAD
3639 struct module_use {
3640 struct list_head list;
3641 struct module *module_which_uses;
3644 /* I'm not yet certain whether we need the strong form of this. */
3645 static inline int strong_try_module_get(struct module *mod)
3647 if (mod && mod->state != MODULE_STATE_LIVE)
3648 return -EBUSY;
3649 if (try_module_get(mod))
3650 return 0;
3651 return -ENOENT;
3654 /* Does a already use b? */
3655 static int already_uses(struct module *a, struct module *b)
3657 struct module_use *use;
3658 list_for_each_entry(use, &b->modules_which_use_me, list) {
3659 if (use->module_which_uses == a)
3660 return 1;
3662 return 0;
3665 /* Make it so module a uses b. Must be holding module_mutex */
3666 static int use_module(struct module *a, struct module *b)
3668 struct module_use *use;
3669 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
3670 /* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
3671 int no_warn;
3672 #endif /* LINUX_VERSION_CODE */
3673 if (b == NULL || already_uses(a, b))
3674 return 1;
3676 if (strong_try_module_get(b) < 0)
3677 return 0;
3679 use = kmalloc(sizeof(*use), GFP_ATOMIC);
3680 if (!use) {
3681 module_put(b);
3682 return 0;
3684 use->module_which_uses = a;
3685 list_add(&use->list, &b->modules_which_use_me);
3686 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
3687 /* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
3688 no_warn = sysfs_create_link(b->holders_dir, &a->mkobj.kobj, a->name);
3689 #endif /* LINUX_VERSION_CODE */
3690 return 1;
3692 #else /* CONFIG_MODULE_UNLOAD */
3693 static int use_module(struct module *a, struct module *b)
3695 return 1;
3697 #endif /* CONFIG_MODULE_UNLOAD */
#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
#else
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif
3705 static bool each_symbol_in_section(const struct symsearch *arr,
3706 unsigned int arrsize,
3707 struct module *owner,
3708 bool (*fn)(const struct symsearch *syms,
3709 struct module *owner,
3710 unsigned int symnum, void *data),
3711 void *data)
3713 unsigned int i, j;
3715 for (j = 0; j < arrsize; j++) {
3716 for (i = 0; i < arr[j].stop - arr[j].start; i++)
3717 if (fn(&arr[j], owner, i, data))
3718 return true;
3721 return false;
3724 /* Returns true as soon as fn returns true, otherwise false. */
3725 static bool each_symbol(bool (*fn)(const struct symsearch *arr,
3726 struct module *owner,
3727 unsigned int symnum, void *data),
3728 void *data)
3730 struct module *mod;
3731 const struct symsearch arr[] = {
3732 { __start___ksymtab, __stop___ksymtab, __start___kcrctab,
3733 NOT_GPL_ONLY, false },
3734 { __start___ksymtab_gpl, __stop___ksymtab_gpl,
3735 __start___kcrctab_gpl,
3736 GPL_ONLY, false },
3737 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
3738 { __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
3739 __start___kcrctab_gpl_future,
3740 WILL_BE_GPL_ONLY, false },
3741 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
3742 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
3743 { __start___ksymtab_unused, __stop___ksymtab_unused,
3744 __start___kcrctab_unused,
3745 NOT_GPL_ONLY, true },
3746 { __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
3747 __start___kcrctab_unused_gpl,
3748 GPL_ONLY, true },
3749 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
3752 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
3753 return 1;
3755 list_for_each_entry(mod, &modules, list) {
3756 struct symsearch module_arr[] = {
3757 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
3758 NOT_GPL_ONLY, false },
3759 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
3760 mod->gpl_crcs,
3761 GPL_ONLY, false },
3762 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
3763 { mod->gpl_future_syms,
3764 mod->gpl_future_syms + mod->num_gpl_future_syms,
3765 mod->gpl_future_crcs,
3766 WILL_BE_GPL_ONLY, false },
3767 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
3768 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
3769 { mod->unused_syms,
3770 mod->unused_syms + mod->num_unused_syms,
3771 mod->unused_crcs,
3772 NOT_GPL_ONLY, true },
3773 { mod->unused_gpl_syms,
3774 mod->unused_gpl_syms + mod->num_unused_gpl_syms,
3775 mod->unused_gpl_crcs,
3776 GPL_ONLY, true },
3777 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
3780 if (each_symbol_in_section(module_arr, ARRAY_SIZE(module_arr),
3781 mod, fn, data))
3782 return true;
3784 return false;
/* In/out argument bundle for the find_symbol_in_section callback. */
struct find_symbol_arg {
	/* Input */
	const char *name;
	bool gplok;
	bool warn;

	/* Output */
	struct module *owner;
	const unsigned long *crc;
	const struct kernel_symbol *sym;
};
3799 static bool find_symbol_in_section(const struct symsearch *syms,
3800 struct module *owner,
3801 unsigned int symnum, void *data)
3803 struct find_symbol_arg *fsa = data;
3805 if (strcmp(syms->start[symnum].name, fsa->name) != 0)
3806 return false;
3808 if (!fsa->gplok) {
3809 if (syms->licence == GPL_ONLY)
3810 return false;
3811 if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
3812 printk(KERN_WARNING "Symbol %s is being used "
3813 "by a non-GPL module, which will not "
3814 "be allowed in the future\n", fsa->name);
3815 printk(KERN_WARNING "Please see the file "
3816 "Documentation/feature-removal-schedule.txt "
3817 "in the kernel source tree for more details.\n");
3821 #ifdef CONFIG_UNUSED_SYMBOLS
3822 if (syms->unused && fsa->warn) {
3823 printk(KERN_WARNING "Symbol %s is marked as UNUSED, "
3824 "however this module is using it.\n", fsa->name);
3825 printk(KERN_WARNING
3826 "This symbol will go away in the future.\n");
3827 printk(KERN_WARNING
3828 "Please evalute if this is the right api to use and if "
3829 "it really is, submit a report the linux kernel "
3830 "mailinglist together with submitting your code for "
3831 "inclusion.\n");
3833 #endif
3835 fsa->owner = owner;
3836 fsa->crc = symversion(syms->crcs, symnum);
3837 fsa->sym = &syms->start[symnum];
3838 return true;
3841 /* Find a symbol and return it, along with, (optional) crc and
3842 * (optional) module which owns it */
3843 static const struct kernel_symbol *find_symbol(const char *name,
3844 struct module **owner,
3845 const unsigned long **crc,
3846 bool gplok, bool warn)
3848 struct find_symbol_arg fsa;
3850 fsa.name = name;
3851 fsa.gplok = gplok;
3852 fsa.warn = warn;
3854 if (each_symbol(find_symbol_in_section, &fsa)) {
3855 if (owner)
3856 *owner = fsa.owner;
3857 if (crc)
3858 *crc = fsa.crc;
3859 return fsa.sym;
3862 return NULL;
3865 static struct module *__module_data_address(unsigned long addr)
3867 struct module *mod;
3869 list_for_each_entry(mod, &modules, list) {
3870 if (addr >= (unsigned long)mod->module_core +
3871 mod->core_text_size &&
3872 addr < (unsigned long)mod->module_core + mod->core_size)
3873 return mod;
3875 return NULL;
3877 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
3879 struct ksplice_attribute {
3880 struct attribute attr;
3881 ssize_t (*show)(struct update *update, char *buf);
3882 ssize_t (*store)(struct update *update, const char *buf, size_t len);
3885 static ssize_t ksplice_attr_show(struct kobject *kobj, struct attribute *attr,
3886 char *buf)
3888 struct ksplice_attribute *attribute =
3889 container_of(attr, struct ksplice_attribute, attr);
3890 struct update *update = container_of(kobj, struct update, kobj);
3891 if (attribute->show == NULL)
3892 return -EIO;
3893 return attribute->show(update, buf);
3896 static ssize_t ksplice_attr_store(struct kobject *kobj, struct attribute *attr,
3897 const char *buf, size_t len)
3899 struct ksplice_attribute *attribute =
3900 container_of(attr, struct ksplice_attribute, attr);
3901 struct update *update = container_of(kobj, struct update, kobj);
3902 if (attribute->store == NULL)
3903 return -EIO;
3904 return attribute->store(update, buf, len);
3907 static struct sysfs_ops ksplice_sysfs_ops = {
3908 .show = ksplice_attr_show,
3909 .store = ksplice_attr_store,
3912 static void ksplice_release(struct kobject *kobj)
3914 struct update *update;
3915 update = container_of(kobj, struct update, kobj);
3916 cleanup_ksplice_update(update);
3919 static ssize_t stage_show(struct update *update, char *buf)
3921 switch (update->stage) {
3922 case STAGE_PREPARING:
3923 return snprintf(buf, PAGE_SIZE, "preparing\n");
3924 case STAGE_APPLIED:
3925 return snprintf(buf, PAGE_SIZE, "applied\n");
3926 case STAGE_REVERSED:
3927 return snprintf(buf, PAGE_SIZE, "reversed\n");
3929 return 0;
3932 static ssize_t abort_cause_show(struct update *update, char *buf)
3934 switch (update->abort_cause) {
3935 case OK:
3936 return snprintf(buf, PAGE_SIZE, "ok\n");
3937 case NO_MATCH:
3938 return snprintf(buf, PAGE_SIZE, "no_match\n");
3939 #ifdef KSPLICE_STANDALONE
3940 case BAD_SYSTEM_MAP:
3941 return snprintf(buf, PAGE_SIZE, "bad_system_map\n");
3942 #endif /* KSPLICE_STANDALONE */
3943 case CODE_BUSY:
3944 return snprintf(buf, PAGE_SIZE, "code_busy\n");
3945 case MODULE_BUSY:
3946 return snprintf(buf, PAGE_SIZE, "module_busy\n");
3947 case OUT_OF_MEMORY:
3948 return snprintf(buf, PAGE_SIZE, "out_of_memory\n");
3949 case FAILED_TO_FIND:
3950 return snprintf(buf, PAGE_SIZE, "failed_to_find\n");
3951 case ALREADY_REVERSED:
3952 return snprintf(buf, PAGE_SIZE, "already_reversed\n");
3953 case MISSING_EXPORT:
3954 return snprintf(buf, PAGE_SIZE, "missing_export\n");
3955 case UNEXPECTED_RUNNING_TASK:
3956 return snprintf(buf, PAGE_SIZE, "unexpected_running_task\n");
3957 case TARGET_NOT_LOADED:
3958 return snprintf(buf, PAGE_SIZE, "target_not_loaded\n");
3959 case CALL_FAILED:
3960 return snprintf(buf, PAGE_SIZE, "call_failed\n");
3961 case UNEXPECTED:
3962 return snprintf(buf, PAGE_SIZE, "unexpected\n");
3963 default:
3964 return snprintf(buf, PAGE_SIZE, "unknown\n");
3966 return 0;
3969 static ssize_t conflict_show(struct update *update, char *buf)
3971 const struct conflict *conf;
3972 const struct conflict_addr *ca;
3973 int used = 0;
3974 mutex_lock(&module_mutex);
3975 list_for_each_entry(conf, &update->conflicts, list) {
3976 used += snprintf(buf + used, PAGE_SIZE - used, "%s %d",
3977 conf->process_name, conf->pid);
3978 list_for_each_entry(ca, &conf->stack, list) {
3979 if (!ca->has_conflict)
3980 continue;
3981 used += snprintf(buf + used, PAGE_SIZE - used, " %s",
3982 ca->label);
3984 used += snprintf(buf + used, PAGE_SIZE - used, "\n");
3986 mutex_unlock(&module_mutex);
3987 return used;
3990 /* Used to pass maybe_cleanup_ksplice_update to kthread_run */
3991 static int maybe_cleanup_ksplice_update_wrapper(void *updateptr)
3993 struct update *update = updateptr;
3994 mutex_lock(&module_mutex);
3995 maybe_cleanup_ksplice_update(update);
3996 mutex_unlock(&module_mutex);
3997 return 0;
4000 static ssize_t stage_store(struct update *update, const char *buf, size_t len)
4002 enum stage old_stage;
4003 mutex_lock(&module_mutex);
4004 old_stage = update->stage;
4005 if ((strncmp(buf, "applied", len) == 0 ||
4006 strncmp(buf, "applied\n", len) == 0) &&
4007 update->stage == STAGE_PREPARING)
4008 update->abort_cause = apply_update(update);
4009 else if ((strncmp(buf, "reversed", len) == 0 ||
4010 strncmp(buf, "reversed\n", len) == 0) &&
4011 update->stage == STAGE_APPLIED)
4012 update->abort_cause = reverse_patches(update);
4013 else if ((strncmp(buf, "cleanup", len) == 0 ||
4014 strncmp(buf, "cleanup\n", len) == 0) &&
4015 update->stage == STAGE_REVERSED)
4016 kthread_run(maybe_cleanup_ksplice_update_wrapper, update,
4017 "ksplice_cleanup_%s", update->kid);
4019 if (old_stage != STAGE_REVERSED && update->abort_cause == OK)
4020 printk(KERN_INFO "ksplice: Update %s %s successfully\n",
4021 update->kid,
4022 update->stage == STAGE_APPLIED ? "applied" : "reversed");
4023 mutex_unlock(&module_mutex);
4024 return len;
4027 static ssize_t debug_show(struct update *update, char *buf)
4029 return snprintf(buf, PAGE_SIZE, "%d\n", update->debug);
4032 static ssize_t debug_store(struct update *update, const char *buf, size_t len)
4034 unsigned long l;
4035 int ret = strict_strtoul(buf, 10, &l);
4036 if (ret != 0)
4037 return ret;
4038 update->debug = l;
4039 return len;
4042 static ssize_t partial_show(struct update *update, char *buf)
4044 return snprintf(buf, PAGE_SIZE, "%d\n", update->partial);
4047 static ssize_t partial_store(struct update *update, const char *buf, size_t len)
4049 unsigned long l;
4050 int ret = strict_strtoul(buf, 10, &l);
4051 if (ret != 0)
4052 return ret;
4053 update->partial = l;
4054 return len;
4057 static struct ksplice_attribute stage_attribute =
4058 __ATTR(stage, 0600, stage_show, stage_store);
4059 static struct ksplice_attribute abort_cause_attribute =
4060 __ATTR(abort_cause, 0400, abort_cause_show, NULL);
4061 static struct ksplice_attribute debug_attribute =
4062 __ATTR(debug, 0600, debug_show, debug_store);
4063 static struct ksplice_attribute partial_attribute =
4064 __ATTR(partial, 0600, partial_show, partial_store);
4065 static struct ksplice_attribute conflict_attribute =
4066 __ATTR(conflicts, 0400, conflict_show, NULL);
4068 static struct attribute *ksplice_attrs[] = {
4069 &stage_attribute.attr,
4070 &abort_cause_attribute.attr,
4071 &debug_attribute.attr,
4072 &partial_attribute.attr,
4073 &conflict_attribute.attr,
4074 NULL
4077 static struct kobj_type ksplice_ktype = {
4078 .sysfs_ops = &ksplice_sysfs_ops,
4079 .release = ksplice_release,
4080 .default_attrs = ksplice_attrs,
#ifdef KSPLICE_STANDALONE
static int debug;
module_param(debug, int, 0600);
MODULE_PARM_DESC(debug, "Debug level");

extern struct ksplice_system_map ksplice_system_map[], ksplice_system_map_end[];

/* Synthetic pack used at module init to bootstrap the standalone build:
 * it patches nothing, but carries the System.map data needed to resolve
 * the init-time relocations. */
static struct ksplice_pack bootstrap_pack = {
	.name = "ksplice_" __stringify(KSPLICE_KID),
	.kid = "init_" __stringify(KSPLICE_KID),
	.target_name = NULL,
	.target = NULL,
	.map_printk = MAP_PRINTK,
	.primary = THIS_MODULE,
	.primary_system_map = ksplice_system_map,
	.primary_system_map_end = ksplice_system_map_end,
};
#endif /* KSPLICE_STANDALONE */
4102 static int init_ksplice(void)
4104 #ifdef KSPLICE_STANDALONE
4105 struct ksplice_pack *pack = &bootstrap_pack;
4106 pack->update = init_ksplice_update(pack->kid);
4107 sort(pack->primary_system_map,
4108 pack->primary_system_map_end - pack->primary_system_map,
4109 sizeof(struct ksplice_system_map), compare_system_map, NULL);
4110 if (pack->update == NULL)
4111 return -ENOMEM;
4112 add_to_update(pack, pack->update);
4113 pack->update->debug = debug;
4114 pack->update->abort_cause =
4115 apply_relocs(pack, ksplice_init_relocs, ksplice_init_relocs_end);
4116 if (pack->update->abort_cause == OK)
4117 bootstrapped = true;
4118 cleanup_ksplice_update(bootstrap_pack.update);
4119 #else /* !KSPLICE_STANDALONE */
4120 ksplice_kobj = kobject_create_and_add("ksplice", kernel_kobj);
4121 if (ksplice_kobj == NULL)
4122 return -ENOMEM;
4123 #endif /* KSPLICE_STANDALONE */
4124 return 0;
4127 static void cleanup_ksplice(void)
4129 #ifndef KSPLICE_STANDALONE
4130 kobject_put(ksplice_kobj);
4131 #endif /* KSPLICE_STANDALONE */
4134 module_init(init_ksplice);
4135 module_exit(cleanup_ksplice);
4137 MODULE_AUTHOR("Ksplice, Inc.");
4138 MODULE_DESCRIPTION("Ksplice rebootless update system");
4139 #ifdef KSPLICE_VERSION
4140 MODULE_VERSION(KSPLICE_VERSION);
4141 #endif
4142 MODULE_LICENSE("GPL v2");