/* kmodsrc/ksplice.c — from ksplice.git
 * (commit subject: "Handle PAGE_SIZE truncation of sysfs reads more
 * gracefully."; blob 4242b95fbe808b8b929058574da04bc4615883ad)
 */
/* Copyright (C) 2007-2009  Ksplice, Inc.
 * Authors: Jeff Arnold, Anders Kaseorg, Tim Abbott
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */
18 #include <linux/module.h>
19 #include <linux/version.h>
20 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
21 #include <linux/bug.h>
22 #else /* LINUX_VERSION_CODE */
23 /* 7664c5a1da4711bb6383117f51b94c8dc8f3f1cd was after 2.6.19 */
24 #endif /* LINUX_VERSION_CODE */
25 #include <linux/ctype.h>
26 #if defined CONFIG_DEBUG_FS || LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
27 #include <linux/debugfs.h>
28 #else /* CONFIG_DEBUG_FS */
29 /* a7a76cefc4b12bb6508afa4c77f11c2752cc365d was after 2.6.11 */
30 #endif /* CONFIG_DEBUG_FS */
31 #include <linux/errno.h>
32 #include <linux/kallsyms.h>
33 #include <linux/kobject.h>
34 #include <linux/kthread.h>
35 #include <linux/pagemap.h>
36 #include <linux/sched.h>
37 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
38 #include <linux/sort.h>
39 #else /* LINUX_VERSION_CODE < */
40 /* 8c63b6d337534a6b5fb111dc27d0850f535118c0 was after 2.6.11 */
41 #endif /* LINUX_VERSION_CODE */
42 #include <linux/stop_machine.h>
43 #include <linux/sysfs.h>
44 #include <linux/time.h>
45 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
46 #include <linux/uaccess.h>
47 #else /* LINUX_VERSION_CODE < */
48 /* linux/uaccess.h doesn't exist in kernels before 2.6.18 */
49 #include <asm/uaccess.h>
50 #endif /* LINUX_VERSION_CODE */
51 #include <linux/vmalloc.h>
52 #ifdef KSPLICE_STANDALONE
53 #include "ksplice.h"
54 #else /* !KSPLICE_STANDALONE */
55 #include <linux/ksplice.h>
56 #endif /* KSPLICE_STANDALONE */
57 #ifdef KSPLICE_NEED_PARAINSTRUCTIONS
58 #include <asm/alternative.h>
59 #endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
61 #ifdef KSPLICE_STANDALONE
62 #if !defined(CONFIG_KSPLICE) && !defined(CONFIG_KSPLICE_MODULE)
63 #define KSPLICE_NO_KERNEL_SUPPORT 1
64 #endif /* !CONFIG_KSPLICE && !CONFIG_KSPLICE_MODULE */
66 #ifndef __used
67 #define __used __attribute_used__
68 #endif
70 #define EXTRACT_SYMBOL(sym) \
71 static const typeof(&sym) PASTE(__ksplice_extract_, __LINE__) \
72 __used __attribute__((section(".ksplice_extract"))) = &sym
73 #endif /* KSPLICE_STANDALONE */
/* Lifecycle stage of an update, as exposed through sysfs. */
enum stage {
	STAGE_PREPARING,	/* the update is not yet applied */
	STAGE_APPLIED,		/* the update is applied */
	STAGE_REVERSED,		/* the update has been applied and reversed */
};

/* parameter to modify run-pre matching */
enum run_pre_mode {
	RUN_PRE_INITIAL,	/* dry run (only change temp_labelvals) */
	RUN_PRE_DEBUG,		/* dry run with byte-by-byte debugging */
	RUN_PRE_FINAL,		/* finalizes the matching */
#ifndef CONFIG_FUNCTION_DATA_SECTIONS
	RUN_PRE_SILENT,
#endif /* !CONFIG_FUNCTION_DATA_SECTIONS */
};

/* labelval status: no value, tentative value, committed value */
enum { NOVAL, TEMP, VAL };
93 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
94 /* 5d7b32de9935c65ca8285ac6ec2382afdbb5d479 was after 2.6.8 */
95 #define __bitwise__
96 #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
97 /* af4ca457eaf2d6682059c18463eb106e2ce58198 was after 2.6.14 */
98 #define __bitwise__ __bitwise
99 #endif
101 typedef int __bitwise__ abort_t;
103 #define OK ((__force abort_t) 0)
104 #define NO_MATCH ((__force abort_t) 1)
105 #define CODE_BUSY ((__force abort_t) 2)
106 #define MODULE_BUSY ((__force abort_t) 3)
107 #define OUT_OF_MEMORY ((__force abort_t) 4)
108 #define FAILED_TO_FIND ((__force abort_t) 5)
109 #define ALREADY_REVERSED ((__force abort_t) 6)
110 #define MISSING_EXPORT ((__force abort_t) 7)
111 #define UNEXPECTED_RUNNING_TASK ((__force abort_t) 8)
112 #define UNEXPECTED ((__force abort_t) 9)
113 #define TARGET_NOT_LOADED ((__force abort_t) 10)
114 #define CALL_FAILED ((__force abort_t) 11)
115 #define COLD_UPDATE_LOADED ((__force abort_t) 12)
116 #ifdef KSPLICE_STANDALONE
117 #define BAD_SYSTEM_MAP ((__force abort_t) 13)
118 #endif /* KSPLICE_STANDALONE */
120 struct update {
121 const char *kid;
122 const char *name;
123 struct kobject kobj;
124 enum stage stage;
125 abort_t abort_cause;
126 int debug;
127 #ifdef CONFIG_DEBUG_FS
128 struct debugfs_blob_wrapper debug_blob;
129 struct dentry *debugfs_dentry;
130 #else /* !CONFIG_DEBUG_FS */
131 bool debug_continue_line;
132 #endif /* CONFIG_DEBUG_FS */
133 bool partial; /* is it OK if some target mods aren't loaded */
134 struct list_head changes, /* changes for loaded target mods */
135 unused_changes; /* changes for non-loaded target mods */
136 struct list_head conflicts;
137 struct list_head list;
138 struct list_head ksplice_module_list;
141 /* a process conflicting with an update */
142 struct conflict {
143 const char *process_name;
144 pid_t pid;
145 struct list_head stack;
146 struct list_head list;
149 /* an address on the stack of a conflict */
150 struct conflict_addr {
151 unsigned long addr; /* the address on the stack */
152 bool has_conflict; /* does this address in particular conflict? */
153 const char *label; /* the label of the conflicting safety_record */
154 struct list_head list;
157 #if defined(CONFIG_DEBUG_FS) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
158 /* Old kernels don't have debugfs_create_blob */
159 struct debugfs_blob_wrapper {
160 void *data;
161 unsigned long size;
163 #endif /* CONFIG_DEBUG_FS && LINUX_VERSION_CODE */
165 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
166 /* 930631edd4b1fe2781d9fe90edbe35d89dfc94cc was after 2.6.18 */
167 #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
168 #endif
170 struct labelval {
171 struct list_head list;
172 struct ksplice_symbol *symbol;
173 struct list_head *saved_vals;
176 /* region to be checked for conflicts in the stack check */
177 struct safety_record {
178 struct list_head list;
179 const char *label;
180 unsigned long addr; /* the address to be checked for conflicts
181 * (e.g. an obsolete function's starting addr)
183 unsigned long size; /* the size of the region to be checked */
186 /* possible value for a symbol */
187 struct candidate_val {
188 struct list_head list;
189 unsigned long val;
192 /* private struct used by init_symbol_array */
193 struct ksplice_lookup {
194 /* input */
195 struct ksplice_mod_change *change;
196 struct ksplice_symbol **arr;
197 size_t size;
198 /* output */
199 abort_t ret;
202 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
203 /* c6b37801911d7f4663c99cad8aa230bc934cea82 was after 2.6.29 */
204 struct symsearch {
205 const struct kernel_symbol *start, *stop;
206 const unsigned long *crcs;
207 enum {
208 NOT_GPL_ONLY,
209 GPL_ONLY,
210 WILL_BE_GPL_ONLY,
211 } licence;
212 bool unused;
214 #endif /* LINUX_VERSION_CODE */
216 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
217 /* c33fa9f5609e918824446ef9a75319d4a802f1f4 was after 2.6.25 */
219 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
220 /* 2fff0a48416af891dce38fd425246e337831e0bb was after 2.6.19 */
221 static bool virtual_address_mapped(unsigned long addr)
223 char retval;
224 return probe_kernel_address(addr, retval) != -EFAULT;
226 #else /* LINUX_VERSION_CODE < */
227 static bool virtual_address_mapped(unsigned long addr);
228 #endif /* LINUX_VERSION_CODE */
230 static long probe_kernel_read(void *dst, void *src, size_t size)
232 if (size == 0)
233 return 0;
234 if (!virtual_address_mapped((unsigned long)src) ||
235 !virtual_address_mapped((unsigned long)src + size - 1))
236 return -EFAULT;
238 memcpy(dst, src, size);
239 return 0;
241 #endif /* LINUX_VERSION_CODE */
243 static LIST_HEAD(updates);
244 #ifdef KSPLICE_STANDALONE
245 #if defined(CONFIG_KSPLICE) || defined(CONFIG_KSPLICE_MODULE)
246 extern struct list_head ksplice_modules;
247 #else /* !CONFIG_KSPLICE */
248 LIST_HEAD(ksplice_modules);
249 #endif /* CONFIG_KSPLICE */
250 #else /* !KSPLICE_STANDALONE */
251 LIST_HEAD(ksplice_modules);
252 EXPORT_SYMBOL_GPL(ksplice_modules);
253 static struct kobject *ksplice_kobj;
254 #endif /* KSPLICE_STANDALONE */
256 static struct kobj_type update_ktype;
258 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
259 /* Old kernels do not have kcalloc
260 * e629946abd0bb8266e9c3d0fd1bff2ef8dec5443 was after 2.6.8
262 static void *kcalloc(size_t n, size_t size, typeof(GFP_KERNEL) flags)
264 char *mem;
265 if (n != 0 && size > ULONG_MAX / n)
266 return NULL;
267 mem = kmalloc(n * size, flags);
268 if (mem)
269 memset(mem, 0, n * size);
270 return mem;
272 #endif /* LINUX_VERSION_CODE */
274 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
275 /* 8c63b6d337534a6b5fb111dc27d0850f535118c0 was after 2.6.11 */
276 static void u32_swap(void *a, void *b, int size)
278 u32 t = *(u32 *)a;
279 *(u32 *)a = *(u32 *)b;
280 *(u32 *)b = t;
283 static void generic_swap(void *a, void *b, int size)
285 char t;
287 do {
288 t = *(char *)a;
289 *(char *)a++ = *(char *)b;
290 *(char *)b++ = t;
291 } while (--size > 0);
295 * sort - sort an array of elements
296 * @base: pointer to data to sort
297 * @num: number of elements
298 * @size: size of each element
299 * @cmp: pointer to comparison function
300 * @swap: pointer to swap function or NULL
302 * This function does a heapsort on the given array. You may provide a
303 * swap function optimized to your element type.
305 * Sorting time is O(n log n) both on average and worst-case. While
306 * qsort is about 20% faster on average, it suffers from exploitable
307 * O(n*n) worst-case behavior and extra memory requirements that make
308 * it less suitable for kernel use.
311 void sort(void *base, size_t num, size_t size,
312 int (*cmp)(const void *, const void *),
313 void (*swap)(void *, void *, int size))
315 /* pre-scale counters for performance */
316 int i = (num / 2 - 1) * size, n = num * size, c, r;
318 if (!swap)
319 swap = (size == 4 ? u32_swap : generic_swap);
321 /* heapify */
322 for (; i >= 0; i -= size) {
323 for (r = i; r * 2 + size < n; r = c) {
324 c = r * 2 + size;
325 if (c < n - size && cmp(base + c, base + c + size) < 0)
326 c += size;
327 if (cmp(base + r, base + c) >= 0)
328 break;
329 swap(base + r, base + c, size);
333 /* sort */
334 for (i = n - size; i > 0; i -= size) {
335 swap(base, base + i, size);
336 for (r = 0; r * 2 + size < i; r = c) {
337 c = r * 2 + size;
338 if (c < i - size && cmp(base + c, base + c + size) < 0)
339 c += size;
340 if (cmp(base + r, base + c) >= 0)
341 break;
342 swap(base + r, base + c, size);
346 #endif /* LINUX_VERSION_CODE < */
348 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13)
349 /* Old kernels do not have kstrdup
350 * 543537bd922692bc978e2e356fcd8bfc9c2ee7d5 was after 2.6.12
352 #define kstrdup ksplice_kstrdup
353 static char *kstrdup(const char *s, typeof(GFP_KERNEL) gfp)
355 size_t len;
356 char *buf;
358 if (!s)
359 return NULL;
361 len = strlen(s) + 1;
362 buf = kmalloc(len, gfp);
363 if (buf)
364 memcpy(buf, s, len);
365 return buf;
367 #endif /* LINUX_VERSION_CODE */
369 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
370 /* Old kernels use semaphore instead of mutex
371 * 97d1f15b7ef52c1e9c28dc48b454024bb53a5fd2 was after 2.6.16
373 #define mutex semaphore
374 #define mutex_lock down
375 #define mutex_unlock up
376 #endif /* LINUX_VERSION_CODE */
378 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
379 /* 11443ec7d9286dd25663516436a14edfb5f43857 was after 2.6.21 */
380 static char * __attribute_used__
381 kvasprintf(typeof(GFP_KERNEL) gfp, const char *fmt, va_list ap)
383 unsigned int len;
384 char *p, dummy[1];
385 va_list aq;
387 va_copy(aq, ap);
388 len = vsnprintf(dummy, 0, fmt, aq);
389 va_end(aq);
391 p = kmalloc(len + 1, gfp);
392 if (!p)
393 return NULL;
395 vsnprintf(p, len + 1, fmt, ap);
397 return p;
399 #endif
401 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
402 /* e905914f96e11862b130dd229f73045dad9a34e8 was after 2.6.17 */
403 static char * __attribute__((format (printf, 2, 3)))
404 kasprintf(typeof(GFP_KERNEL) gfp, const char *fmt, ...)
406 va_list ap;
407 char *p;
409 va_start(ap, fmt);
410 p = kvasprintf(gfp, fmt, ap);
411 va_end(ap);
413 return p;
415 #endif
417 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
418 /* 06b2a76d25d3cfbd14680021c1d356c91be6904e was after 2.6.24 */
419 static int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
421 char *tail;
422 unsigned long val;
423 size_t len;
425 *res = 0;
426 len = strlen(cp);
427 if (len == 0)
428 return -EINVAL;
430 val = simple_strtoul(cp, &tail, base);
431 if ((*tail == '\0') ||
432 ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
433 *res = val;
434 return 0;
437 return -EINVAL;
439 #endif
441 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
442 /* 9b1a4d38373a5581a4e01032a3ccdd94cd93477b was after 2.6.26 */
443 /* Assume cpus == NULL. */
444 #define stop_machine(fn, data, cpus) stop_machine_run(fn, data, NR_CPUS);
445 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
446 /* ee527cd3a20c2aeaac17d939e5d011f7a76d69f5 was after 2.6.21 */
447 EXTRACT_SYMBOL(stop_machine_run);
448 #endif /* LINUX_VERSION_CODE */
449 #endif /* LINUX_VERSION_CODE */
451 #ifndef task_thread_info
452 #define task_thread_info(task) (task)->thread_info
453 #endif /* !task_thread_info */
#ifdef KSPLICE_STANDALONE

#ifdef do_each_thread_ve /* OpenVZ kernels define this */
#define do_each_thread do_each_thread_all
#define while_each_thread while_each_thread_all
#endif

/* Set once the standalone bootstrap (init reloc resolution) succeeds. */
static bool bootstrapped = false;

/* defined by ksplice-create */
extern const struct ksplice_reloc ksplice_init_relocs[],
    ksplice_init_relocs_end[];

#endif /* KSPLICE_STANDALONE */
470 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
471 /* c6b37801911d7f4663c99cad8aa230bc934cea82 was after 2.6.29 */
472 extern struct list_head modules;
473 EXTRACT_SYMBOL(modules);
474 extern struct mutex module_mutex;
475 EXTRACT_SYMBOL(module_mutex);
476 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) && defined(CONFIG_UNUSED_SYMBOLS)
477 /* f71d20e961474dde77e6558396efb93d6ac80a4b was after 2.6.17 */
478 #define KSPLICE_KSYMTAB_UNUSED_SUPPORT 1
479 #endif /* LINUX_VERSION_CODE */
480 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
481 /* 9f28bb7e1d0188a993403ab39b774785892805e1 was after 2.6.16 */
482 #define KSPLICE_KSYMTAB_FUTURE_SUPPORT 1
483 #endif /* LINUX_VERSION_CODE */
484 extern const struct kernel_symbol __start___ksymtab[];
485 EXTRACT_SYMBOL(__start___ksymtab);
486 extern const struct kernel_symbol __stop___ksymtab[];
487 EXTRACT_SYMBOL(__stop___ksymtab);
488 extern const unsigned long __start___kcrctab[];
489 EXTRACT_SYMBOL(__start___kcrctab);
490 extern const struct kernel_symbol __start___ksymtab_gpl[];
491 EXTRACT_SYMBOL(__start___ksymtab_gpl);
492 extern const struct kernel_symbol __stop___ksymtab_gpl[];
493 EXTRACT_SYMBOL(__stop___ksymtab_gpl);
494 extern const unsigned long __start___kcrctab_gpl[];
495 EXTRACT_SYMBOL(__start___kcrctab_gpl);
496 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
497 extern const struct kernel_symbol __start___ksymtab_unused[];
498 EXTRACT_SYMBOL(__start___ksymtab_unused);
499 extern const struct kernel_symbol __stop___ksymtab_unused[];
500 EXTRACT_SYMBOL(__stop___ksymtab_unused);
501 extern const unsigned long __start___kcrctab_unused[];
502 EXTRACT_SYMBOL(__start___kcrctab_unused);
503 extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
504 EXTRACT_SYMBOL(__start___ksymtab_unused_gpl);
505 extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
506 EXTRACT_SYMBOL(__stop___ksymtab_unused_gpl);
507 extern const unsigned long __start___kcrctab_unused_gpl[];
508 EXTRACT_SYMBOL(__start___kcrctab_unused_gpl);
509 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
510 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
511 extern const struct kernel_symbol __start___ksymtab_gpl_future[];
512 EXTRACT_SYMBOL(__start___ksymtab_gpl_future);
513 extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
514 EXTRACT_SYMBOL(__stop___ksymtab_gpl_future);
515 extern const unsigned long __start___kcrctab_gpl_future[];
516 EXTRACT_SYMBOL(__start___kcrctab_gpl_future);
517 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
518 #endif /* LINUX_VERSION_CODE */
520 static struct update *init_ksplice_update(const char *kid);
521 static void cleanup_ksplice_update(struct update *update);
522 static void maybe_cleanup_ksplice_update(struct update *update);
523 static void add_to_update(struct ksplice_mod_change *change,
524 struct update *update);
525 static int ksplice_sysfs_init(struct update *update);
527 /* Preparing the relocations and patches for application */
528 static abort_t apply_update(struct update *update);
529 static abort_t reverse_update(struct update *update);
530 static abort_t prepare_change(struct ksplice_mod_change *change);
531 static abort_t finalize_change(struct ksplice_mod_change *change);
532 static abort_t finalize_patches(struct ksplice_mod_change *change);
533 static abort_t add_dependency_on_address(struct ksplice_mod_change *change,
534 unsigned long addr);
535 static abort_t map_trampoline_pages(struct update *update);
536 static void unmap_trampoline_pages(struct update *update);
537 static void *map_writable(void *addr, size_t len);
538 static abort_t apply_relocs(struct ksplice_mod_change *change,
539 const struct ksplice_reloc *relocs,
540 const struct ksplice_reloc *relocs_end);
541 static abort_t apply_reloc(struct ksplice_mod_change *change,
542 const struct ksplice_reloc *r);
543 static abort_t apply_howto_reloc(struct ksplice_mod_change *change,
544 const struct ksplice_reloc *r);
545 static abort_t apply_howto_date(struct ksplice_mod_change *change,
546 const struct ksplice_reloc *r);
547 static abort_t read_reloc_value(struct ksplice_mod_change *change,
548 const struct ksplice_reloc *r,
549 unsigned long addr, unsigned long *valp);
550 static abort_t write_reloc_value(struct ksplice_mod_change *change,
551 const struct ksplice_reloc *r,
552 unsigned long addr, unsigned long sym_addr);
553 static abort_t create_module_list_entry(struct ksplice_mod_change *change,
554 bool to_be_applied);
555 static void cleanup_module_list_entries(struct update *update);
556 static void __attribute__((noreturn)) ksplice_deleted(void);
558 /* run-pre matching */
559 static abort_t match_change_sections(struct ksplice_mod_change *change,
560 bool consider_data_sections);
561 static abort_t find_section(struct ksplice_mod_change *change,
562 struct ksplice_section *sect);
563 static abort_t try_addr(struct ksplice_mod_change *change,
564 struct ksplice_section *sect,
565 unsigned long run_addr,
566 struct list_head *safety_records,
567 enum run_pre_mode mode);
568 static abort_t run_pre_cmp(struct ksplice_mod_change *change,
569 const struct ksplice_section *sect,
570 unsigned long run_addr,
571 struct list_head *safety_records,
572 enum run_pre_mode mode);
573 #ifndef CONFIG_FUNCTION_DATA_SECTIONS
574 /* defined in arch/ARCH/kernel/ksplice-arch.c */
575 static abort_t arch_run_pre_cmp(struct ksplice_mod_change *change,
576 struct ksplice_section *sect,
577 unsigned long run_addr,
578 struct list_head *safety_records,
579 enum run_pre_mode mode);
580 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
581 static void print_bytes(struct ksplice_mod_change *change,
582 const unsigned char *run, int runc,
583 const unsigned char *pre, int prec);
584 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
585 static abort_t brute_search(struct ksplice_mod_change *change,
586 struct ksplice_section *sect,
587 const void *start, unsigned long len,
588 struct list_head *vals);
589 static abort_t brute_search_all(struct ksplice_mod_change *change,
590 struct ksplice_section *sect,
591 struct list_head *vals);
592 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
593 static const struct ksplice_reloc *
594 init_reloc_search(struct ksplice_mod_change *change,
595 const struct ksplice_section *sect);
596 static const struct ksplice_reloc *find_reloc(const struct ksplice_reloc *start,
597 const struct ksplice_reloc *end,
598 unsigned long address,
599 unsigned long size);
600 static abort_t lookup_reloc(struct ksplice_mod_change *change,
601 const struct ksplice_reloc **fingerp,
602 unsigned long addr,
603 const struct ksplice_reloc **relocp);
604 static abort_t handle_reloc(struct ksplice_mod_change *change,
605 const struct ksplice_section *sect,
606 const struct ksplice_reloc *r,
607 unsigned long run_addr, enum run_pre_mode mode);
608 static abort_t handle_howto_date(struct ksplice_mod_change *change,
609 const struct ksplice_section *sect,
610 const struct ksplice_reloc *r,
611 unsigned long run_addr,
612 enum run_pre_mode mode);
613 static abort_t handle_howto_reloc(struct ksplice_mod_change *change,
614 const struct ksplice_section *sect,
615 const struct ksplice_reloc *r,
616 unsigned long run_addr,
617 enum run_pre_mode mode);
618 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
619 #ifdef CONFIG_BUG
620 static abort_t handle_bug(struct ksplice_mod_change *change,
621 const struct ksplice_reloc *r,
622 unsigned long run_addr);
623 #endif /* CONFIG_BUG */
624 #else /* LINUX_VERSION_CODE < */
625 /* 7664c5a1da4711bb6383117f51b94c8dc8f3f1cd was after 2.6.19 */
626 #endif /* LINUX_VERSION_CODE */
627 static abort_t handle_extable(struct ksplice_mod_change *change,
628 const struct ksplice_reloc *r,
629 unsigned long run_addr);
630 static struct ksplice_section *symbol_section(struct ksplice_mod_change *change,
631 const struct ksplice_symbol *sym);
632 static int compare_section_labels(const void *va, const void *vb);
633 static int symbol_section_bsearch_compare(const void *a, const void *b);
634 static const struct ksplice_reloc *
635 patch_reloc(struct ksplice_mod_change *change,
636 const struct ksplice_patch *p);
638 /* Computing possible addresses for symbols */
639 static abort_t lookup_symbol(struct ksplice_mod_change *change,
640 const struct ksplice_symbol *ksym,
641 struct list_head *vals);
642 static void cleanup_symbol_arrays(struct ksplice_mod_change *change);
643 static abort_t init_symbol_arrays(struct ksplice_mod_change *change);
644 static abort_t init_symbol_array(struct ksplice_mod_change *change,
645 struct ksplice_symbol *start,
646 struct ksplice_symbol *end);
647 static abort_t uniquify_symbols(struct ksplice_mod_change *change);
648 static abort_t add_matching_values(struct ksplice_lookup *lookup,
649 const char *sym_name, unsigned long sym_val);
650 static bool add_export_values(const struct symsearch *syms,
651 struct module *owner,
652 unsigned int symnum, void *data);
653 static int symbolp_bsearch_compare(const void *key, const void *elt);
654 static int compare_symbolp_names(const void *a, const void *b);
655 static int compare_symbolp_labels(const void *a, const void *b);
656 #ifdef CONFIG_KALLSYMS
657 static int add_kallsyms_values(void *data, const char *name,
658 struct module *owner, unsigned long val);
659 #endif /* CONFIG_KALLSYMS */
660 #ifdef KSPLICE_STANDALONE
661 static abort_t
662 add_system_map_candidates(struct ksplice_mod_change *change,
663 const struct ksplice_system_map *start,
664 const struct ksplice_system_map *end,
665 const char *label, struct list_head *vals);
666 static int compare_system_map(const void *a, const void *b);
667 static int system_map_bsearch_compare(const void *key, const void *elt);
668 #endif /* KSPLICE_STANDALONE */
669 static abort_t new_export_lookup(struct ksplice_mod_change *ichange,
670 const char *name, struct list_head *vals);
672 /* Atomic update trampoline insertion and removal */
673 static abort_t patch_action(struct update *update, enum ksplice_action action);
674 static int __apply_patches(void *update);
675 static int __reverse_patches(void *update);
676 static abort_t check_each_task(struct update *update);
677 static abort_t check_task(struct update *update,
678 const struct task_struct *t, bool rerun);
679 static abort_t check_stack(struct update *update, struct conflict *conf,
680 const struct thread_info *tinfo,
681 const unsigned long *stack);
682 static abort_t check_address(struct update *update,
683 struct conflict *conf, unsigned long addr);
684 static abort_t check_record(struct conflict_addr *ca,
685 const struct safety_record *rec,
686 unsigned long addr);
687 static bool is_stop_machine(const struct task_struct *t);
688 static void cleanup_conflicts(struct update *update);
689 static void print_conflicts(struct update *update);
690 static void insert_trampoline(struct ksplice_patch *p);
691 static abort_t verify_trampoline(struct ksplice_mod_change *change,
692 const struct ksplice_patch *p);
693 static void remove_trampoline(const struct ksplice_patch *p);
695 static abort_t create_labelval(struct ksplice_mod_change *change,
696 struct ksplice_symbol *ksym,
697 unsigned long val, int status);
698 static abort_t create_safety_record(struct ksplice_mod_change *change,
699 const struct ksplice_section *sect,
700 struct list_head *record_list,
701 unsigned long run_addr,
702 unsigned long run_size);
703 static abort_t add_candidate_val(struct ksplice_mod_change *change,
704 struct list_head *vals, unsigned long val);
705 static void release_vals(struct list_head *vals);
706 static void set_temp_labelvals(struct ksplice_mod_change *change, int status);
708 static int contains_canary(struct ksplice_mod_change *change,
709 unsigned long blank_addr,
710 const struct ksplice_reloc_howto *howto);
711 static unsigned long follow_trampolines(struct ksplice_mod_change *change,
712 unsigned long addr);
713 static bool patches_module(const struct module *a, const struct module *b);
714 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
715 /* 66f92cf9d415e96a5bdd6c64de8dd8418595d2fc was after 2.6.29 */
716 static bool strstarts(const char *str, const char *prefix);
717 #endif /* LINUX_VERSION_CODE */
718 static bool singular(struct list_head *list);
719 static void *bsearch(const void *key, const void *base, size_t n,
720 size_t size, int (*cmp)(const void *key, const void *elt));
721 static int compare_relocs(const void *a, const void *b);
722 static int reloc_bsearch_compare(const void *key, const void *elt);
724 /* Debugging */
725 static abort_t init_debug_buf(struct update *update);
726 static void clear_debug_buf(struct update *update);
727 static int __attribute__((format(printf, 2, 3)))
728 _ksdebug(struct update *update, const char *fmt, ...);
729 #define ksdebug(change, fmt, ...) \
730 _ksdebug(change->update, fmt, ## __VA_ARGS__)
732 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) && defined(CONFIG_KALLSYMS)
733 /* 75a66614db21007bcc8c37f9c5d5b922981387b9 was after 2.6.29 */
734 static int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
735 struct module *, unsigned long),
736 void *data);
737 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
738 static unsigned int kallsyms_expand_symbol(unsigned int off, char *result);
739 #endif /* LINUX_VERSION_CODE */
740 static int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
741 struct module *,
742 unsigned long),
743 void *data);
744 #endif /* LINUX_VERSION_CODE && CONFIG_KALLSYMS */
746 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
747 /* c6b37801911d7f4663c99cad8aa230bc934cea82 was after 2.6.29 */
748 static struct module *find_module(const char *name);
749 static int use_module(struct module *a, struct module *b);
750 static const struct kernel_symbol *find_symbol(const char *name,
751 struct module **owner,
752 const unsigned long **crc,
753 bool gplok, bool warn);
754 static bool each_symbol(bool (*fn)(const struct symsearch *arr,
755 struct module *owner,
756 unsigned int symnum, void *data),
757 void *data);
758 static struct module *__module_address(unsigned long addr);
759 #endif /* LINUX_VERSION_CODE */
761 /* Architecture-specific functions defined in arch/ARCH/kernel/ksplice-arch.c */
763 /* Prepare a trampoline for the given patch */
764 static abort_t prepare_trampoline(struct ksplice_mod_change *change,
765 struct ksplice_patch *p);
766 /* What address does the trampoline at addr jump to? */
767 static abort_t trampoline_target(struct ksplice_mod_change *change,
768 unsigned long addr, unsigned long *new_addr);
769 /* Hook to handle pc-relative jumps inserted by parainstructions */
770 static abort_t handle_paravirt(struct ksplice_mod_change *change,
771 unsigned long pre, unsigned long run,
772 int *matched);
773 /* Is address p on the stack of the given thread? */
774 static bool valid_stack_ptr(const struct thread_info *tinfo, const void *p);
776 #ifndef KSPLICE_STANDALONE
777 #include "ksplice-arch.c"
778 #elif defined CONFIG_X86
779 #include "x86/ksplice-arch.c"
780 #elif defined CONFIG_ARM
781 #include "arm/ksplice-arch.c"
782 #endif /* KSPLICE_STANDALONE */
784 #define clear_list(head, type, member) \
785 do { \
786 struct list_head *_pos, *_n; \
787 list_for_each_safe(_pos, _n, head) { \
788 list_del(_pos); \
789 kfree(list_entry(_pos, type, member)); \
791 } while (0)
794 * init_ksplice_mod_change() - Initializes a ksplice change
795 * @change: The change to be initialized. All of the public fields of the
796 * change and its associated data structures should be populated
797 * before this function is called. The values of the private
798 * fields will be ignored.
800 int init_ksplice_mod_change(struct ksplice_mod_change *change)
802 struct update *update;
803 struct ksplice_patch *p;
804 struct ksplice_section *s;
805 int ret = 0;
807 #ifdef KSPLICE_STANDALONE
808 if (!bootstrapped)
809 return -1;
810 #endif /* KSPLICE_STANDALONE */
812 INIT_LIST_HEAD(&change->temp_labelvals);
813 INIT_LIST_HEAD(&change->safety_records);
815 sort(change->old_code.relocs,
816 change->old_code.relocs_end - change->old_code.relocs,
817 sizeof(*change->old_code.relocs), compare_relocs, NULL);
818 sort(change->new_code.relocs,
819 change->new_code.relocs_end - change->new_code.relocs,
820 sizeof(*change->new_code.relocs), compare_relocs, NULL);
821 sort(change->old_code.sections,
822 change->old_code.sections_end - change->old_code.sections,
823 sizeof(*change->old_code.sections), compare_section_labels, NULL);
824 #ifdef KSPLICE_STANDALONE
825 sort(change->new_code.system_map,
826 change->new_code.system_map_end - change->new_code.system_map,
827 sizeof(*change->new_code.system_map), compare_system_map, NULL);
828 sort(change->old_code.system_map,
829 change->old_code.system_map_end - change->old_code.system_map,
830 sizeof(*change->old_code.system_map), compare_system_map, NULL);
831 #endif /* KSPLICE_STANDALONE */
833 for (p = change->patches; p < change->patches_end; p++)
834 p->vaddr = NULL;
835 for (s = change->old_code.sections; s < change->old_code.sections_end;
836 s++)
837 s->match_map = NULL;
838 for (p = change->patches; p < change->patches_end; p++) {
839 const struct ksplice_reloc *r = patch_reloc(change, p);
840 if (r == NULL)
841 return -ENOENT;
842 if (p->type == KSPLICE_PATCH_DATA) {
843 s = symbol_section(change, r->symbol);
844 if (s == NULL)
845 return -ENOENT;
846 /* Ksplice creates KSPLICE_PATCH_DATA patches in order
847 * to modify rodata sections that have been explicitly
848 * marked for patching using the ksplice-patch.h macro
849 * ksplice_assume_rodata. Here we modify the section
850 * flags appropriately.
852 if (s->flags & KSPLICE_SECTION_DATA)
853 s->flags = (s->flags & ~KSPLICE_SECTION_DATA) |
854 KSPLICE_SECTION_RODATA;
858 mutex_lock(&module_mutex);
859 list_for_each_entry(update, &updates, list) {
860 if (strcmp(change->kid, update->kid) == 0) {
861 if (update->stage != STAGE_PREPARING) {
862 ret = -EPERM;
863 goto out;
865 add_to_update(change, update);
866 ret = 0;
867 goto out;
870 update = init_ksplice_update(change->kid);
871 if (update == NULL) {
872 ret = -ENOMEM;
873 goto out;
875 ret = ksplice_sysfs_init(update);
876 if (ret != 0) {
877 cleanup_ksplice_update(update);
878 goto out;
880 add_to_update(change, update);
881 out:
882 mutex_unlock(&module_mutex);
883 return ret;
885 EXPORT_SYMBOL_GPL(init_ksplice_mod_change);
888 * cleanup_ksplice_mod_change() - Cleans up a change if appropriate
889 * @change: The change to be cleaned up
891 * cleanup_ksplice_mod_change is ordinarily called twice for each
892 * Ksplice update: once when the old_code module is unloaded, and once
893 * when the new_code module is unloaded. By freeing what can be freed
894 * on each unload, we avoid leaks even in unusual scenarios, e.g. if
895 * several alternative old_code modules are loaded and unloaded
896 * successively.
898 void cleanup_ksplice_mod_change(struct ksplice_mod_change *change)
900 if (change->update == NULL)
901 return;
903 mutex_lock(&module_mutex);
904 if (change->update->stage == STAGE_APPLIED) {
905 struct ksplice_mod_change *c;
906 bool found = false;
908 list_for_each_entry(c, &change->update->unused_changes, list) {
909 if (c == change)
910 found = true;
912 if (found)
913 list_del(&change->list);
914 mutex_unlock(&module_mutex);
915 return;
917 list_del(&change->list);
918 if (change->update->stage == STAGE_PREPARING)
919 maybe_cleanup_ksplice_update(change->update);
920 change->update = NULL;
921 mutex_unlock(&module_mutex);
923 EXPORT_SYMBOL_GPL(cleanup_ksplice_mod_change);
925 static struct update *init_ksplice_update(const char *kid)
927 struct update *update;
928 update = kcalloc(1, sizeof(struct update), GFP_KERNEL);
929 if (update == NULL)
930 return NULL;
931 update->name = kasprintf(GFP_KERNEL, "ksplice_%s", kid);
932 if (update->name == NULL) {
933 kfree(update);
934 return NULL;
936 update->kid = kstrdup(kid, GFP_KERNEL);
937 if (update->kid == NULL) {
938 kfree(update->name);
939 kfree(update);
940 return NULL;
942 if (try_module_get(THIS_MODULE) != 1) {
943 kfree(update->kid);
944 kfree(update->name);
945 kfree(update);
946 return NULL;
948 INIT_LIST_HEAD(&update->changes);
949 INIT_LIST_HEAD(&update->unused_changes);
950 INIT_LIST_HEAD(&update->ksplice_module_list);
951 if (init_debug_buf(update) != OK) {
952 module_put(THIS_MODULE);
953 kfree(update->kid);
954 kfree(update->name);
955 kfree(update);
956 return NULL;
958 list_add(&update->list, &updates);
959 update->stage = STAGE_PREPARING;
960 update->abort_cause = OK;
961 update->partial = 0;
962 INIT_LIST_HEAD(&update->conflicts);
963 return update;
/*
 * Free an update and everything it owns: unlink it from the global
 * 'updates' list, release its conflict list, debug buffer, and module
 * list entries, then its strings and the struct itself.  Finally drop
 * the THIS_MODULE reference taken in init_ksplice_update.
 * NOTE(review): module_put comes last, after all frees — presumably so
 * the module cannot be unloaded while its memory is still being
 * touched; confirm against callers.
 */
static void cleanup_ksplice_update(struct update *update)
{
	list_del(&update->list);
	cleanup_conflicts(update);
	clear_debug_buf(update);
	cleanup_module_list_entries(update);
	kfree(update->kid);
	kfree(update->name);
	kfree(update);
	module_put(THIS_MODULE);
}
/* Clean up the update if it no longer has any changes */
static void maybe_cleanup_ksplice_update(struct update *update)
{
	/* Dropping the last kobject reference invokes the ktype's release
	 * hook, which presumably frees the update — TODO confirm against
	 * update_ktype's release function. */
	if (list_empty(&update->changes) && list_empty(&update->unused_changes))
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
		kobject_put(&update->kobj);
#else /* LINUX_VERSION_CODE < */
		/* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
		kobject_unregister(&update->kobj);
#endif /* LINUX_VERSION_CODE */
}
990 static void add_to_update(struct ksplice_mod_change *change,
991 struct update *update)
993 change->update = update;
994 list_add(&change->list, &update->unused_changes);
/*
 * Register the update's kobject in sysfs and announce it with a uevent.
 * Returns 0 on success or a negative errno from the kobject layer.
 * The three preprocessor arms cover the kobject API across kernel
 * versions: kobject_init_and_add (>= 2.6.25) vs. the older
 * kobject_set_name + kobject_register pair.
 */
static int ksplice_sysfs_init(struct update *update)
{
	int ret = 0;
	memset(&update->kobj, 0, sizeof(update->kobj));
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
#ifndef KSPLICE_STANDALONE
	ret = kobject_init_and_add(&update->kobj, &update_ktype,
				   ksplice_kobj, "%s", update->kid);
#else /* KSPLICE_STANDALONE */
	/* Standalone builds hang the kobject off this module's own
	 * sysfs directory instead of a shared ksplice directory. */
	ret = kobject_init_and_add(&update->kobj, &update_ktype,
				   &THIS_MODULE->mkobj.kobj, "ksplice");
#endif /* KSPLICE_STANDALONE */
#else /* LINUX_VERSION_CODE < */
	/* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
	ret = kobject_set_name(&update->kobj, "%s", "ksplice");
	if (ret != 0)
		return ret;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
	update->kobj.parent = &THIS_MODULE->mkobj.kobj;
#else /* LINUX_VERSION_CODE < */
	/* b86ab02803095190d6b72bcc18dcf620bf378df9 was after 2.6.10 */
	update->kobj.parent = &THIS_MODULE->mkobj->kobj;
#endif /* LINUX_VERSION_CODE */
	update->kobj.ktype = &update_ktype;
	ret = kobject_register(&update->kobj);
#endif /* LINUX_VERSION_CODE */
	if (ret != 0)
		return ret;
	/* Tell udev/userspace that the new sysfs node exists. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
	kobject_uevent(&update->kobj, KOBJ_ADD);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
	/* 312c004d36ce6c739512bac83b452f4c20ab1f62 was after 2.6.14 */
	/* 12025235884570ba7f02a6f427f973ac6be7ec54 was after 2.6.9 */
	kobject_uevent(&update->kobj, KOBJ_ADD, NULL);
#endif /* LINUX_VERSION_CODE */
	return 0;
}
1035 #ifdef KSPLICE_NEED_PARAINSTRUCTIONS
1036 EXTRACT_SYMBOL(apply_paravirt);
1037 #endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
/*
 * Drive the whole apply pipeline for an update:
 *   1. create module-list entries for already-active changes;
 *   2. resolve each unused change's target module, take a module
 *      reference on it, and move the change onto the active list
 *      (changes whose target is absent are tolerated only when
 *      update->partial is set);
 *   3. seed safety records from the new code's sections;
 *   4. run symbol resolution and run-pre matching per change;
 *   5. hand off to patch_action(KS_APPLY).
 * On any failure before the update leaves STAGE_PREPARING, the 'out'
 * path tears down the partially built state.
 */
static abort_t apply_update(struct update *update)
{
	struct ksplice_mod_change *change, *n;
	abort_t ret;
	int retval;

	list_for_each_entry(change, &update->changes, list) {
		ret = create_module_list_entry(change, true);
		if (ret != OK)
			goto out;
	}

	/* _safe iteration: changes are moved off unused_changes below. */
	list_for_each_entry_safe(change, n, &update->unused_changes, list) {
		if (strcmp(change->target_name, "vmlinux") == 0) {
			/* NULL target means the change patches the core
			 * kernel rather than a module. */
			change->target = NULL;
		} else if (change->target == NULL) {
			change->target = find_module(change->target_name);
			if (change->target == NULL ||
			    !module_is_live(change->target)) {
				/* Missing target is fatal unless this is a
				 * partial update; then just record it as
				 * not-applied and leave it unused. */
				if (!update->partial) {
					ret = TARGET_NOT_LOADED;
					goto out;
				}
				ret = create_module_list_entry(change, false);
				if (ret != OK)
					goto out;
				continue;
			}
			/* Make the new_code module depend on its target so
			 * the target cannot be unloaded underneath us. */
			retval = use_module(change->new_code_mod,
					    change->target);
			if (retval != 1) {
				ret = UNEXPECTED;
				goto out;
			}
		}
		ret = create_module_list_entry(change, true);
		if (ret != OK)
			goto out;
		list_del(&change->list);
		list_add_tail(&change->list, &update->changes);

#ifdef KSPLICE_NEED_PARAINSTRUCTIONS
		if (change->target == NULL) {
			apply_paravirt(change->new_code.parainstructions,
				       change->new_code.parainstructions_end);
			apply_paravirt(change->old_code.parainstructions,
				       change->old_code.parainstructions_end);
		}
#endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
	}

	/* Every new-code section gets a safety record so the stack check
	 * can refuse to patch code that is currently executing. */
	list_for_each_entry(change, &update->changes, list) {
		const struct ksplice_section *sect;
		for (sect = change->new_code.sections;
		     sect < change->new_code.sections_end; sect++) {
			struct safety_record *rec = kmalloc(sizeof(*rec),
							    GFP_KERNEL);
			if (rec == NULL) {
				ret = OUT_OF_MEMORY;
				goto out;
			}
			rec->addr = sect->address;
			rec->size = sect->size;
			rec->label = sect->symbol->label;
			list_add(&rec->list, &change->safety_records);
		}
	}

	list_for_each_entry(change, &update->changes, list) {
		ret = init_symbol_arrays(change);
		if (ret != OK) {
			cleanup_symbol_arrays(change);
			goto out;
		}
		ret = prepare_change(change);
		cleanup_symbol_arrays(change);
		if (ret != OK)
			goto out;
	}
	ret = patch_action(update, KS_APPLY);
out:
	list_for_each_entry(change, &update->changes, list) {
		struct ksplice_section *s;
		/* Still PREPARING here means the apply failed; drop the
		 * safety records built above. */
		if (update->stage == STAGE_PREPARING)
			clear_list(&change->safety_records,
				   struct safety_record, list);
		/* match_maps are scratch state from run-pre matching. */
		for (s = change->old_code.sections;
		     s < change->old_code.sections_end; s++) {
			if (s->match_map != NULL) {
				vfree(s->match_map);
				s->match_map = NULL;
			}
		}
	}
	if (update->stage == STAGE_PREPARING)
		cleanup_module_list_entries(update);

	if (ret == OK)
		printk(KERN_INFO "ksplice: Update %s applied successfully\n",
		       update->kid);
	return ret;
}
1142 static abort_t reverse_update(struct update *update)
1144 abort_t ret;
1145 struct ksplice_mod_change *change;
1147 clear_debug_buf(update);
1148 ret = init_debug_buf(update);
1149 if (ret != OK)
1150 return ret;
1152 _ksdebug(update, "Preparing to reverse %s\n", update->kid);
1154 ret = patch_action(update, KS_REVERSE);
1155 if (ret != OK)
1156 return ret;
1158 list_for_each_entry(change, &update->changes, list)
1159 clear_list(&change->safety_records, struct safety_record, list);
1161 printk(KERN_INFO "ksplice: Update %s reversed successfully\n",
1162 update->kid);
1163 return OK;
1166 static int compare_symbolp_names(const void *a, const void *b)
1168 const struct ksplice_symbol *const *sympa = a, *const *sympb = b;
1169 if ((*sympa)->name == NULL && (*sympb)->name == NULL)
1170 return 0;
1171 if ((*sympa)->name == NULL)
1172 return -1;
1173 if ((*sympb)->name == NULL)
1174 return 1;
1175 return strcmp((*sympa)->name, (*sympb)->name);
1178 static int compare_symbolp_labels(const void *a, const void *b)
1180 const struct ksplice_symbol *const *sympa = a, *const *sympb = b;
1181 return strcmp((*sympa)->label, (*sympb)->label);
1184 static int symbolp_bsearch_compare(const void *key, const void *elt)
1186 const char *name = key;
1187 const struct ksplice_symbol *const *symp = elt;
1188 const struct ksplice_symbol *sym = *symp;
1189 if (sym->name == NULL)
1190 return 1;
1191 return strcmp(name, sym->name);
1194 static abort_t add_matching_values(struct ksplice_lookup *lookup,
1195 const char *sym_name, unsigned long sym_val)
1197 struct ksplice_symbol **symp;
1198 abort_t ret;
1200 symp = bsearch(sym_name, lookup->arr, lookup->size,
1201 sizeof(*lookup->arr), symbolp_bsearch_compare);
1202 if (symp == NULL)
1203 return OK;
1205 while (symp > lookup->arr &&
1206 symbolp_bsearch_compare(sym_name, symp - 1) == 0)
1207 symp--;
1209 for (; symp < lookup->arr + lookup->size; symp++) {
1210 struct ksplice_symbol *sym = *symp;
1211 if (sym->name == NULL || strcmp(sym_name, sym->name) != 0)
1212 break;
1213 ret = add_candidate_val(lookup->change,
1214 sym->candidate_vals, sym_val);
1215 if (ret != OK)
1216 return ret;
1218 return OK;
#ifdef CONFIG_KALLSYMS
/*
 * kallsyms_on_each_symbol callback: feed each kallsyms entry into the
 * candidate-value lookup, skipping our own new_code module and any
 * module that does not patch the change's target.
 */
static int add_kallsyms_values(void *data, const char *name,
			       struct module *owner, unsigned long val)
{
	struct ksplice_lookup *lookup = data;

	if (owner == lookup->change->new_code_mod)
		return (__force int)OK;
	if (!patches_module(owner, lookup->change->target))
		return (__force int)OK;
	return (__force int)add_matching_values(lookup, name, val);
}
#endif /* CONFIG_KALLSYMS */
1233 static bool add_export_values(const struct symsearch *syms,
1234 struct module *owner,
1235 unsigned int symnum, void *data)
1237 struct ksplice_lookup *lookup = data;
1238 abort_t ret;
1240 ret = add_matching_values(lookup, syms->start[symnum].name,
1241 syms->start[symnum].value);
1242 if (ret != OK) {
1243 lookup->ret = ret;
1244 return true;
1246 return false;
1249 static void cleanup_symbol_arrays(struct ksplice_mod_change *change)
1251 struct ksplice_symbol *sym;
1252 for (sym = change->new_code.symbols; sym < change->new_code.symbols_end;
1253 sym++) {
1254 if (sym->candidate_vals != NULL) {
1255 clear_list(sym->candidate_vals, struct candidate_val,
1256 list);
1257 kfree(sym->candidate_vals);
1258 sym->candidate_vals = NULL;
1261 for (sym = change->old_code.symbols; sym < change->old_code.symbols_end;
1262 sym++) {
1263 if (sym->candidate_vals != NULL) {
1264 clear_list(sym->candidate_vals, struct candidate_val,
1265 list);
1266 kfree(sym->candidate_vals);
1267 sym->candidate_vals = NULL;
/*
 * The new_code and old_code modules each have their own independent
 * ksplice_symbol structures.  uniquify_symbols unifies these separate
 * pieces of kernel symbol information by replacing all references to
 * the old_code copy of symbols with references to the new_code copy.
 */
static abort_t uniquify_symbols(struct ksplice_mod_change *change)
{
	struct ksplice_reloc *r;
	struct ksplice_section *s;
	struct ksplice_symbol *sym, **sym_arr, **symp;
	size_t size = change->new_code.symbols_end - change->new_code.symbols;

	if (size == 0)
		return OK;

	/* Build an array of pointers to the new_code symbols, sorted by
	 * label, so old_code references can be matched via bsearch. */
	sym_arr = vmalloc(sizeof(*sym_arr) * size);
	if (sym_arr == NULL)
		return OUT_OF_MEMORY;

	for (symp = sym_arr, sym = change->new_code.symbols;
	     symp < sym_arr + size && sym < change->new_code.symbols_end;
	     sym++, symp++)
		*symp = sym;

	sort(sym_arr, size, sizeof(*sym_arr), compare_symbolp_labels, NULL);

	/* Redirect each old_code reloc's symbol pointer to the matching
	 * new_code symbol, copying over the name if it was missing. */
	for (r = change->old_code.relocs; r < change->old_code.relocs_end;
	     r++) {
		symp = bsearch(&r->symbol, sym_arr, size, sizeof(*sym_arr),
			       compare_symbolp_labels);
		if (symp != NULL) {
			if ((*symp)->name == NULL)
				(*symp)->name = r->symbol->name;
			r->symbol = *symp;
		}
	}

	/* Same redirection for the old_code sections. */
	for (s = change->old_code.sections; s < change->old_code.sections_end;
	     s++) {
		symp = bsearch(&s->symbol, sym_arr, size, sizeof(*sym_arr),
			       compare_symbolp_labels);
		if (symp != NULL) {
			if ((*symp)->name == NULL)
				(*symp)->name = s->symbol->name;
			s->symbol = *symp;
		}
	}

	vfree(sym_arr);
	return OK;
}
1326 * Initialize the ksplice_symbol structures in the given array using
1327 * the kallsyms and exported symbol tables.
1329 static abort_t init_symbol_array(struct ksplice_mod_change *change,
1330 struct ksplice_symbol *start,
1331 struct ksplice_symbol *end)
1333 struct ksplice_symbol *sym, **sym_arr, **symp;
1334 struct ksplice_lookup lookup;
1335 size_t size = end - start;
1336 abort_t ret;
1338 if (size == 0)
1339 return OK;
1341 for (sym = start; sym < end; sym++) {
1342 if (strstarts(sym->label, "__ksymtab")) {
1343 const struct kernel_symbol *ksym;
1344 const char *colon = strchr(sym->label, ':');
1345 const char *name = colon + 1;
1346 if (colon == NULL)
1347 continue;
1348 ksym = find_symbol(name, NULL, NULL, true, false);
1349 if (ksym == NULL) {
1350 ksdebug(change, "Could not find kernel_symbol "
1351 "structure for %s\n", name);
1352 continue;
1354 sym->value = (unsigned long)ksym;
1355 sym->candidate_vals = NULL;
1356 continue;
1359 sym->candidate_vals = kmalloc(sizeof(*sym->candidate_vals),
1360 GFP_KERNEL);
1361 if (sym->candidate_vals == NULL)
1362 return OUT_OF_MEMORY;
1363 INIT_LIST_HEAD(sym->candidate_vals);
1364 sym->value = 0;
1367 sym_arr = vmalloc(sizeof(*sym_arr) * size);
1368 if (sym_arr == NULL)
1369 return OUT_OF_MEMORY;
1371 for (symp = sym_arr, sym = start; symp < sym_arr + size && sym < end;
1372 sym++, symp++)
1373 *symp = sym;
1375 sort(sym_arr, size, sizeof(*sym_arr), compare_symbolp_names, NULL);
1377 lookup.change = change;
1378 lookup.arr = sym_arr;
1379 lookup.size = size;
1380 lookup.ret = OK;
1382 each_symbol(add_export_values, &lookup);
1383 ret = lookup.ret;
1384 #ifdef CONFIG_KALLSYMS
1385 if (ret == OK)
1386 ret = (__force abort_t)
1387 kallsyms_on_each_symbol(add_kallsyms_values, &lookup);
1388 #endif /* CONFIG_KALLSYMS */
1389 vfree(sym_arr);
1390 return ret;
1394 * Prepare the change's ksplice_symbol structures for run-pre matching
1396 * noinline to prevent garbage on the stack from confusing check_stack
1398 static noinline abort_t init_symbol_arrays(struct ksplice_mod_change *change)
1400 abort_t ret;
1402 ret = uniquify_symbols(change);
1403 if (ret != OK)
1404 return ret;
1406 ret = init_symbol_array(change, change->old_code.symbols,
1407 change->old_code.symbols_end);
1408 if (ret != OK)
1409 return ret;
1411 ret = init_symbol_array(change, change->new_code.symbols,
1412 change->new_code.symbols_end);
1413 if (ret != OK)
1414 return ret;
1416 return OK;
/* noinline to prevent garbage on the stack from confusing check_stack */
static noinline abort_t prepare_change(struct ksplice_mod_change *change)
{
	abort_t ret;

	ksdebug(change, "Preparing and checking %s\n", change->name);
	/* First pass: run-pre match without trusting .data sections. */
	ret = match_change_sections(change, false);
	if (ret == NO_MATCH) {
		/* It is possible that by using relocations from .data sections
		 * we can successfully run-pre match the rest of the sections.
		 * To avoid using any symbols obtained from .data sections
		 * (which may be unreliable) in the post code, we first prepare
		 * the post code and then try to run-pre match the remaining
		 * sections with the help of .data sections.
		 */
		ksdebug(change, "Continuing without some sections; we might "
			"find them later.\n");
		ret = finalize_change(change);
		if (ret != OK) {
			ksdebug(change, "Aborted. Unable to continue without "
				"the unmatched sections.\n");
			return ret;
		}

		/* Second pass: now allow .data sections to help. */
		ksdebug(change, "run-pre: Considering .data sections to find "
			"the unmatched sections\n");
		ret = match_change_sections(change, true);
		if (ret != OK)
			return ret;

		ksdebug(change, "run-pre: Found all previously unmatched "
			"sections\n");
		return OK;
	} else if (ret != OK) {
		return ret;
	}

	return finalize_change(change);
}
1460 * Finish preparing the change for insertion into the kernel.
1461 * Afterwards, the replacement code should be ready to run and the
1462 * ksplice_patches should all be ready for trampoline insertion.
1464 static abort_t finalize_change(struct ksplice_mod_change *change)
1466 abort_t ret;
1467 ret = apply_relocs(change, change->new_code.relocs,
1468 change->new_code.relocs_end);
1469 if (ret != OK)
1470 return ret;
1472 ret = finalize_patches(change);
1473 if (ret != OK)
1474 return ret;
1476 return OK;
/*
 * Validate every patch against the safety records and prepare its
 * trampoline.  A patch whose oldaddr is not covered by a safety record
 * is rejected (except KSPLICE_PATCH_EXPORT); records too short for the
 * patch, and patches with overlapping oldaddr ranges, are also
 * rejected.
 */
static abort_t finalize_patches(struct ksplice_mod_change *change)
{
	struct ksplice_patch *p;
	struct safety_record *rec;
	abort_t ret;

	for (p = change->patches; p < change->patches_end; p++) {
		bool found = false;
		/* NOTE: on break, 'rec' stays pointing at the covering
		 * record — the size check below relies on that. */
		list_for_each_entry(rec, &change->safety_records, list) {
			if (rec->addr <= p->oldaddr &&
			    p->oldaddr < rec->addr + rec->size) {
				found = true;
				break;
			}
		}
		if (!found && p->type != KSPLICE_PATCH_EXPORT) {
			const struct ksplice_reloc *r = patch_reloc(change, p);
			if (r == NULL) {
				ksdebug(change, "A patch with no reloc at its "
					"oldaddr has no safety record\n");
				return NO_MATCH;
			}
			ksdebug(change, "No safety record for patch with "
				"oldaddr %s+%lx\n", r->symbol->label,
				r->target_addend);
			return NO_MATCH;
		}

		if (p->type == KSPLICE_PATCH_TEXT) {
			ret = prepare_trampoline(change, p);
			if (ret != OK)
				return ret;
		}

		/* The covering record must span the whole patch, not just
		 * its first byte. */
		if (found && rec->addr + rec->size < p->oldaddr + p->size) {
			ksdebug(change, "Safety record %s is too short for "
				"patch\n", rec->label);
			return UNEXPECTED;
		}

		if (p->type == KSPLICE_PATCH_TEXT) {
			/* No replacement means the function was deleted;
			 * route callers to the ksplice_deleted trap. */
			if (p->repladdr == 0)
				p->repladdr = (unsigned long)ksplice_deleted;
		}
	}

	/* O(n^2) pairwise overlap check; patch counts are small. */
	for (p = change->patches; p < change->patches_end; p++) {
		struct ksplice_patch *q;
		for (q = change->patches; q < change->patches_end; q++) {
			if (p != q && p->oldaddr <= q->oldaddr &&
			    p->oldaddr + p->size > q->oldaddr) {
				ksdebug(change, "Overlapping oldaddrs "
					"for patches\n");
				return UNEXPECTED;
			}
		}
	}

	return OK;
}
1540 /* noinline to prevent garbage on the stack from confusing check_stack */
1541 static noinline abort_t map_trampoline_pages(struct update *update)
1543 struct ksplice_mod_change *change;
1544 list_for_each_entry(change, &update->changes, list) {
1545 struct ksplice_patch *p;
1546 for (p = change->patches; p < change->patches_end; p++) {
1547 p->vaddr = map_writable((void *)p->oldaddr, p->size);
1548 if (p->vaddr == NULL) {
1549 ksdebug(change,
1550 "Unable to map oldaddr read/write\n");
1551 unmap_trampoline_pages(update);
1552 return UNEXPECTED;
1556 return OK;
1559 static void unmap_trampoline_pages(struct update *update)
1561 struct ksplice_mod_change *change;
1562 list_for_each_entry(change, &update->changes, list) {
1563 struct ksplice_patch *p;
1564 for (p = change->patches; p < change->patches_end; p++) {
1565 vunmap((void *)((unsigned long)p->vaddr & PAGE_MASK));
1566 p->vaddr = NULL;
1571 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) && defined(CONFIG_X86_64)
1572 /* e3ebadd95cb621e2c7436f3d3646447ac9d5c16d was after 2.6.21 */
1573 #define phys_base ({EXTRACT_SYMBOL(phys_base); phys_base;})
1574 #endif /* LINUX_VERSION_CODE && CONFIG_X86_64 */
1577 * map_writable creates a shadow page mapping of the range
1578 * [addr, addr + len) so that we can write to code mapped read-only.
1580 * It is similar to a generalized version of x86's text_poke. But
1581 * because one cannot use vmalloc/vfree() inside stop_machine, we use
1582 * map_writable to map the pages before stop_machine, then use the
1583 * mapping inside stop_machine, and unmap the pages afterwards.
1585 static void *map_writable(void *addr, size_t len)
1587 void *vaddr;
1588 int nr_pages = DIV_ROUND_UP(offset_in_page(addr) + len, PAGE_SIZE);
1589 struct page **pages = kmalloc(nr_pages * sizeof(*pages), GFP_KERNEL);
1590 void *page_addr = (void *)((unsigned long)addr & PAGE_MASK);
1591 int i;
1593 if (pages == NULL)
1594 return NULL;
1596 for (i = 0; i < nr_pages; i++) {
1597 if (__module_address((unsigned long)page_addr) == NULL) {
1598 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) || !defined(CONFIG_X86_64)
1599 pages[i] = virt_to_page(page_addr);
1600 #else /* LINUX_VERSION_CODE < && CONFIG_X86_64 */
1601 /* e3ebadd95cb621e2c7436f3d3646447ac9d5c16d was after 2.6.21
1602 * This works around a broken virt_to_page() from the RHEL 5 backport
1603 * of x86-64 relocatable kernel support.
1605 pages[i] =
1606 pfn_to_page(__pa_symbol(page_addr) >> PAGE_SHIFT);
1607 #endif /* LINUX_VERSION_CODE || !CONFIG_X86_64 */
1608 WARN_ON(!PageReserved(pages[i]));
1609 } else {
1610 pages[i] = vmalloc_to_page(addr);
1612 if (pages[i] == NULL) {
1613 kfree(pages);
1614 return NULL;
1616 page_addr += PAGE_SIZE;
1618 vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
1619 kfree(pages);
1620 if (vaddr == NULL)
1621 return NULL;
1622 return vaddr + offset_in_page(addr);
1625 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
1626 /* c6b37801911d7f4663c99cad8aa230bc934cea82 was after 2.6.29 */
1627 EXTRACT_SYMBOL(__module_text_address);
1628 #endif /* LINUX_VERSION_CODE */
1631 * Ksplice adds a dependency on any symbol address used to resolve
1632 * relocations in the new_code module.
1634 * Be careful to follow_trampolines so that we always depend on the
1635 * latest version of the target function, since that's the code that
1636 * will run if we call addr.
1638 static abort_t add_dependency_on_address(struct ksplice_mod_change *change,
1639 unsigned long addr)
1641 struct ksplice_mod_change *c;
1642 struct module *m =
1643 __module_text_address(follow_trampolines(change, addr));
1644 if (m == NULL)
1645 return OK;
1646 list_for_each_entry(c, &change->update->changes, list) {
1647 if (m == c->new_code_mod)
1648 return OK;
1650 if (use_module(change->new_code_mod, m) != 1)
1651 return MODULE_BUSY;
1652 return OK;
1655 static abort_t apply_relocs(struct ksplice_mod_change *change,
1656 const struct ksplice_reloc *relocs,
1657 const struct ksplice_reloc *relocs_end)
1659 const struct ksplice_reloc *r;
1660 for (r = relocs; r < relocs_end; r++) {
1661 abort_t ret = apply_reloc(change, r);
1662 if (ret != OK)
1663 return ret;
1665 return OK;
1668 static abort_t apply_reloc(struct ksplice_mod_change *change,
1669 const struct ksplice_reloc *r)
1671 switch (r->howto->type) {
1672 case KSPLICE_HOWTO_RELOC:
1673 case KSPLICE_HOWTO_RELOC_PATCH:
1674 return apply_howto_reloc(change, r);
1675 case KSPLICE_HOWTO_DATE:
1676 case KSPLICE_HOWTO_TIME:
1677 return apply_howto_date(change, r);
1678 default:
1679 ksdebug(change, "Unexpected howto type %d\n", r->howto->type);
1680 return UNEXPECTED;
/*
 * Applies a relocation.  Aborts if the symbol referenced in it has
 * not been uniquely resolved.
 */
static abort_t apply_howto_reloc(struct ksplice_mod_change *change,
				 const struct ksplice_reloc *r)
{
	abort_t ret;
	int canary_ret;
	unsigned long sym_addr;
	LIST_HEAD(vals);

	/* A missing canary means the site was already overwritten
	 * (e.g. by alternative-instruction patching); skip it. */
	canary_ret = contains_canary(change, r->blank_addr, r->howto);
	if (canary_ret < 0)
		return UNEXPECTED;
	if (canary_ret == 0) {
		ksdebug(change, "reloc: skipped %lx to %s+%lx (altinstr)\n",
			r->blank_addr, r->symbol->label, r->target_addend);
		return OK;
	}

#ifdef KSPLICE_STANDALONE
	if (!bootstrapped) {
		ret = add_system_map_candidates(change,
						change->new_code.system_map,
						change->new_code.system_map_end,
						r->symbol->label, &vals);
		if (ret != OK) {
			release_vals(&vals);
			return ret;
		}
	}
#endif /* KSPLICE_STANDALONE */
	ret = lookup_symbol(change, r->symbol, &vals);
	if (ret != OK) {
		release_vals(&vals);
		return ret;
	}
	/*
	 * Relocations for the oldaddr fields of patches must have
	 * been resolved via run-pre matching.
	 */
	if (!singular(&vals) || (r->symbol->candidate_vals != NULL &&
				 r->howto->type == KSPLICE_HOWTO_RELOC_PATCH)) {
		release_vals(&vals);
		ksdebug(change, "Failed to find %s for reloc\n",
			r->symbol->label);
		return FAILED_TO_FIND;
	}
	sym_addr = list_entry(vals.next, struct candidate_val, list)->val;
	release_vals(&vals);

	/* PC-relative howtos store the displacement from the site. */
	ret = write_reloc_value(change, r, r->blank_addr,
				r->howto->pcrel ? sym_addr - r->blank_addr :
				sym_addr);
	if (ret != OK)
		return ret;

	/* Debug trace: dump the bytes actually written, by size. */
	ksdebug(change, "reloc: %lx to %s+%lx (S=%lx ", r->blank_addr,
		r->symbol->label, r->target_addend, sym_addr);
	switch (r->howto->size) {
	case 1:
		ksdebug(change, "aft=%02x)\n", *(uint8_t *)r->blank_addr);
		break;
	case 2:
		ksdebug(change, "aft=%04x)\n", *(uint16_t *)r->blank_addr);
		break;
	case 4:
		ksdebug(change, "aft=%08x)\n", *(uint32_t *)r->blank_addr);
		break;
#if BITS_PER_LONG >= 64
	case 8:
		ksdebug(change, "aft=%016llx)\n", *(uint64_t *)r->blank_addr);
		break;
#endif /* BITS_PER_LONG */
	default:
		ksdebug(change, "Aborted. Invalid relocation size.\n");
		return UNEXPECTED;
	}
#ifdef KSPLICE_STANDALONE
	if (!bootstrapped)
		return OK;
#endif /* KSPLICE_STANDALONE */

	/*
	 * Create labelvals so that we can verify our choices in the
	 * second round of run-pre matching that considers data sections.
	 */
	ret = create_labelval(change, r->symbol, sym_addr, VAL);
	if (ret != OK)
		return ret;

	return add_dependency_on_address(change, sym_addr);
}
1780 * Date relocations are created wherever __DATE__ or __TIME__ is used
1781 * in the kernel; we resolve them by simply copying in the date/time
1782 * obtained from run-pre matching the relevant compilation unit.
1784 static abort_t apply_howto_date(struct ksplice_mod_change *change,
1785 const struct ksplice_reloc *r)
1787 if (r->symbol->candidate_vals != NULL) {
1788 ksdebug(change, "Failed to find %s for date\n",
1789 r->symbol->label);
1790 return FAILED_TO_FIND;
1792 memcpy((unsigned char *)r->blank_addr,
1793 (const unsigned char *)r->symbol->value, r->howto->size);
1794 return OK;
/*
 * Given a relocation and its run address, compute the address of the
 * symbol the relocation referenced, and store it in *valp.
 */
static abort_t read_reloc_value(struct ksplice_mod_change *change,
				const struct ksplice_reloc *r,
				unsigned long addr, unsigned long *valp)
{
	unsigned char bytes[sizeof(long)];
	unsigned long val;
	const struct ksplice_reloc_howto *howto = r->howto;

	if (howto->size <= 0 || howto->size > sizeof(long)) {
		ksdebug(change, "Aborted. Invalid relocation size.\n");
		return UNEXPECTED;
	}

	/* Read through probe_kernel_read so an unmapped addr yields a
	 * clean NO_MATCH instead of an oops. */
	if (probe_kernel_read(bytes, (void *)addr, howto->size) == -EFAULT)
		return NO_MATCH;

	switch (howto->size) {
	case 1:
		val = *(uint8_t *)bytes;
		break;
	case 2:
		val = *(uint16_t *)bytes;
		break;
	case 4:
		val = *(uint32_t *)bytes;
		break;
#if BITS_PER_LONG >= 64
	case 8:
		val = *(uint64_t *)bytes;
		break;
#endif /* BITS_PER_LONG */
	default:
		ksdebug(change, "Aborted. Invalid relocation size.\n");
		return UNEXPECTED;
	}

	/* Invert write_reloc_value: mask to the field, sign-extend if the
	 * addend is signed (the masked expression isolates the field's top
	 * bit), undo the right shift, and strip the addends. */
	val &= howto->dst_mask;
	if (howto->signed_addend)
		val |= -(val & (howto->dst_mask & ~(howto->dst_mask >> 1)));
	val <<= howto->rightshift;
	val -= r->insn_addend + r->target_addend;
	*valp = val;
	return OK;
}
/*
 * Given a relocation, the address of its storage unit, and the
 * address of the symbol the relocation references, write the
 * relocation's final value into the storage unit.
 */
static abort_t write_reloc_value(struct ksplice_mod_change *change,
				 const struct ksplice_reloc *r,
				 unsigned long addr, unsigned long sym_addr)
{
	unsigned long val = sym_addr + r->target_addend + r->insn_addend;
	const struct ksplice_reloc_howto *howto = r->howto;

	val >>= howto->rightshift;
	/* Merge the field bits into the storage unit, preserving the
	 * bits outside dst_mask. */
	switch (howto->size) {
	case 1:
		*(uint8_t *)addr = (*(uint8_t *)addr & ~howto->dst_mask) |
		    (val & howto->dst_mask);
		break;
	case 2:
		*(uint16_t *)addr = (*(uint16_t *)addr & ~howto->dst_mask) |
		    (val & howto->dst_mask);
		break;
	case 4:
		*(uint32_t *)addr = (*(uint32_t *)addr & ~howto->dst_mask) |
		    (val & howto->dst_mask);
		break;
#if BITS_PER_LONG >= 64
	case 8:
		*(uint64_t *)addr = (*(uint64_t *)addr & ~howto->dst_mask) |
		    (val & howto->dst_mask);
		break;
#endif /* BITS_PER_LONG */
	default:
		ksdebug(change, "Aborted. Invalid relocation size.\n");
		return UNEXPECTED;
	}

	/* Round-trip check: reading the value back must reproduce
	 * sym_addr, otherwise the field overflowed. */
	if (read_reloc_value(change, r, addr, &val) != OK || val != sym_addr) {
		ksdebug(change, "Aborted. Relocation overflow.\n");
		return UNEXPECTED;
	}

	return OK;
}
1890 static abort_t create_module_list_entry(struct ksplice_mod_change *change,
1891 bool to_be_applied)
1893 struct ksplice_module_list_entry *entry =
1894 kmalloc(sizeof(*entry), GFP_KERNEL);
1895 if (entry == NULL)
1896 return OUT_OF_MEMORY;
1897 entry->new_code_mod_name =
1898 kstrdup(change->new_code_mod->name, GFP_KERNEL);
1899 if (entry->new_code_mod_name == NULL) {
1900 kfree(entry);
1901 return OUT_OF_MEMORY;
1903 entry->target_mod_name = kstrdup(change->target_name, GFP_KERNEL);
1904 if (entry->target_mod_name == NULL) {
1905 kfree(entry->new_code_mod_name);
1906 kfree(entry);
1907 return OUT_OF_MEMORY;
1909 /* The update's kid is guaranteed to outlast the module_list_entry */
1910 entry->kid = change->update->kid;
1911 entry->applied = to_be_applied;
1912 list_add(&entry->update_list, &change->update->ksplice_module_list);
1913 return OK;
1916 static void cleanup_module_list_entries(struct update *update)
1918 struct ksplice_module_list_entry *entry;
1919 list_for_each_entry(entry, &update->ksplice_module_list, update_list) {
1920 kfree(entry->target_mod_name);
1921 kfree(entry->new_code_mod_name);
1923 clear_list(&update->ksplice_module_list,
1924 struct ksplice_module_list_entry, update_list);
/* Replacement address used for functions deleted by the patch */
static void __attribute__((noreturn)) ksplice_deleted(void)
{
	printk(KERN_CRIT "Called a kernel function deleted by Ksplice!\n");
	BUG();
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	/* 91768d6c2bad0d2766a166f13f2f57e197de3458 was after 2.6.19 */
	/* Older kernels' BUG() is not marked noreturn; loop forever so
	 * the noreturn attribute above holds. */
	for (;;);
#endif
}
/* Floodfill to run-pre match the sections within a change. */
static abort_t match_change_sections(struct ksplice_mod_change *change,
				     bool consider_data_sections)
{
	struct ksplice_section *sect;
	abort_t ret;
	int remaining = 0;
	bool progress;

	/* Count the non-data, non-string sections still to be matched;
	 * only these contribute to 'remaining' (data sections can be
	 * matched opportunistically but are never required here). */
	for (sect = change->old_code.sections;
	     sect < change->old_code.sections_end; sect++) {
		if ((sect->flags & KSPLICE_SECTION_DATA) == 0 &&
		    (sect->flags & KSPLICE_SECTION_STRING) == 0 &&
		    (sect->flags & KSPLICE_SECTION_MATCHED) == 0)
			remaining++;
	}

	/* Fixpoint loop: each matched section can supply symbol values
	 * that let further sections match on the next sweep. */
	while (remaining > 0) {
		progress = false;
		for (sect = change->old_code.sections;
		     sect < change->old_code.sections_end; sect++) {
			if ((sect->flags & KSPLICE_SECTION_MATCHED) != 0)
				continue;
			/* Skip strings always; skip data sections unless
			 * this pass is allowed to consider them. */
			if ((!consider_data_sections &&
			     (sect->flags & KSPLICE_SECTION_DATA) != 0) ||
			    (sect->flags & KSPLICE_SECTION_STRING) != 0)
				continue;
			ret = find_section(change, sect);
			if (ret == OK) {
				sect->flags |= KSPLICE_SECTION_MATCHED;
				if ((sect->flags & KSPLICE_SECTION_DATA) == 0)
					remaining--;
				progress = true;
			} else if (ret != NO_MATCH) {
				return ret;
			}
		}

		if (progress)
			continue;

		/* No sweep progress: report every still-unmatched section
		 * and give up. */
		for (sect = change->old_code.sections;
		     sect < change->old_code.sections_end; sect++) {
			if ((sect->flags & KSPLICE_SECTION_MATCHED) != 0 ||
			    (sect->flags & KSPLICE_SECTION_STRING) != 0)
				continue;
			ksdebug(change, "run-pre: could not match %s "
				"section %s\n",
				(sect->flags & KSPLICE_SECTION_DATA) != 0 ?
				"data" :
				(sect->flags & KSPLICE_SECTION_RODATA) != 0 ?
				"rodata" : "text", sect->symbol->label);
		}
		ksdebug(change, "Aborted. run-pre: could not match some "
			"sections.\n");
		return NO_MATCH;
	}
	return OK;
}
1999 * Search for the section in the running kernel. Returns OK if and
2000 * only if it finds precisely one address in the kernel matching the
2001 * section.
/*
 * Candidate addresses come from System.map (standalone builds), the
 * symbol lookup machinery, and — as a last resort without kallsyms —
 * a brute-force scan.  Candidates are winnowed with trial run-pre
 * matches; a final match in RUN_PRE_FINAL mode records safety records.
 */
2003 static abort_t find_section(struct ksplice_mod_change *change,
2004 struct ksplice_section *sect)
2006 int i;
2007 abort_t ret;
2008 unsigned long run_addr;
2009 LIST_HEAD(vals);
2010 struct candidate_val *v, *n;
2012 #ifdef KSPLICE_STANDALONE
2013 ret = add_system_map_candidates(change, change->old_code.system_map,
2014 change->old_code.system_map_end,
2015 sect->symbol->label, &vals);
2016 if (ret != OK) {
2017 release_vals(&vals);
2018 return ret;
2020 #endif /* KSPLICE_STANDALONE */
2021 ret = lookup_symbol(change, sect->symbol, &vals);
2022 if (ret != OK) {
2023 release_vals(&vals);
2024 return ret;
2027 ksdebug(change, "run-pre: starting sect search for %s\n",
2028 sect->symbol->label);
/* Winnow: trial-match every candidate, discarding those that fail. */
2030 list_for_each_entry_safe(v, n, &vals, list) {
2031 run_addr = v->val;
2033 yield();
2034 ret = try_addr(change, sect, run_addr, NULL, RUN_PRE_INITIAL);
2035 if (ret == NO_MATCH) {
2036 list_del(&v->list);
2037 kfree(v);
2038 } else if (ret != OK) {
2039 release_vals(&vals);
2040 return ret;
/* Last resort without kallsyms: byte-scan kernel and module text. */
2044 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
2045 if (list_empty(&vals) && (sect->flags & KSPLICE_SECTION_DATA) == 0) {
2046 ret = brute_search_all(change, sect, &vals);
2047 if (ret != OK) {
2048 release_vals(&vals);
2049 return ret;
2052 * Make sure run-pre matching output is displayed if
2053 * brute_search succeeds.
2055 if (singular(&vals)) {
2056 run_addr = list_entry(vals.next, struct candidate_val,
2057 list)->val;
2058 ret = try_addr(change, sect, run_addr, NULL,
2059 RUN_PRE_INITIAL);
2060 if (ret != OK) {
2061 ksdebug(change, "run-pre: Debug run failed for "
2062 "sect %s:\n", sect->symbol->label);
2063 release_vals(&vals);
2064 return ret;
2068 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
/* Exactly one survivor: redo the match in RUN_PRE_FINAL mode so that
 * safety records are collected and spliced into the change. */
2070 if (singular(&vals)) {
2071 LIST_HEAD(safety_records);
2072 run_addr = list_entry(vals.next, struct candidate_val,
2073 list)->val;
2074 ret = try_addr(change, sect, run_addr, &safety_records,
2075 RUN_PRE_FINAL);
2076 release_vals(&vals);
2077 if (ret != OK) {
2078 clear_list(&safety_records, struct safety_record, list);
2079 ksdebug(change, "run-pre: Final run failed for sect "
2080 "%s:\n", sect->symbol->label);
2081 } else {
2082 list_splice(&safety_records, &change->safety_records);
2084 return ret;
2085 } else if (!list_empty(&vals)) {
/* Ambiguous: several addresses matched.  Log up to 5 of them. */
2086 struct candidate_val *val;
2087 ksdebug(change, "run-pre: multiple candidates for sect %s:\n",
2088 sect->symbol->label);
2089 i = 0;
2090 list_for_each_entry(val, &vals, list) {
2091 i++;
2092 ksdebug(change, "%lx\n", val->val);
2093 if (i > 5) {
2094 ksdebug(change, "...\n");
2095 break;
2098 release_vals(&vals);
2099 return NO_MATCH;
2101 release_vals(&vals);
2102 return NO_MATCH;
2106 * try_addr is the interface to run-pre matching. Its primary
2107 * purpose is to manage debugging information for run-pre matching;
2108 * all the hard work is in run_pre_cmp.
/*
 * Tentatively binds sect's symbol to run_addr, runs the comparator,
 * and either commits (RUN_PRE_FINAL + OK) or discards the temporary
 * labelvals.  On NO_MATCH in non-final mode it optionally re-runs the
 * comparator in RUN_PRE_DEBUG mode for a byte-level trace.
 */
2110 static abort_t try_addr(struct ksplice_mod_change *change,
2111 struct ksplice_section *sect,
2112 unsigned long run_addr,
2113 struct list_head *safety_records,
2114 enum run_pre_mode mode)
2116 abort_t ret;
2117 const struct module *run_module = __module_address(run_addr);
/* Matching against our own new-code module would be self-referential. */
2119 if (run_module == change->new_code_mod) {
2120 ksdebug(change, "run-pre: unexpected address %lx in new_code "
2121 "module %s for sect %s\n", run_addr, run_module->name,
2122 sect->symbol->label);
2123 return UNEXPECTED;
/* Only addresses inside the target module (or vmlinux) are eligible. */
2125 if (!patches_module(run_module, change->target)) {
2126 ksdebug(change, "run-pre: ignoring address %lx in other module "
2127 "%s for sect %s\n", run_addr, run_module == NULL ?
2128 "vmlinux" : run_module->name, sect->symbol->label);
2129 return NO_MATCH;
/* Tentative binding: sect's symbol = run_addr, marked TEMP. */
2132 ret = create_labelval(change, sect->symbol, run_addr, TEMP);
2133 if (ret != OK)
2134 return ret;
/* Without -ffunction-sections/-fdata-sections, text sections go
 * through the arch-specific comparator. */
2136 #ifdef CONFIG_FUNCTION_DATA_SECTIONS
2137 ret = run_pre_cmp(change, sect, run_addr, safety_records, mode);
2138 #else /* !CONFIG_FUNCTION_DATA_SECTIONS */
2139 if ((sect->flags & KSPLICE_SECTION_TEXT) != 0)
2140 ret = arch_run_pre_cmp(change, sect, run_addr, safety_records,
2141 mode);
2142 else
2143 ret = run_pre_cmp(change, sect, run_addr, safety_records, mode);
2144 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
2145 if (ret == NO_MATCH && mode != RUN_PRE_FINAL) {
/* Discard the TEMP labelvals; optionally re-run in debug mode. */
2146 set_temp_labelvals(change, NOVAL);
2147 ksdebug(change, "run-pre: %s sect %s does not match (r_a=%lx "
2148 "p_a=%lx s=%lx)\n",
2149 (sect->flags & KSPLICE_SECTION_RODATA) != 0 ? "rodata" :
2150 (sect->flags & KSPLICE_SECTION_DATA) != 0 ? "data" :
2151 "text", sect->symbol->label, run_addr, sect->address,
2152 sect->size);
2153 ksdebug(change, "run-pre: ");
2154 if (change->update->debug >= 1) {
2156 #ifdef CONFIG_FUNCTION_DATA_SECTIONS
2156 ret = run_pre_cmp(change, sect, run_addr,
2157 safety_records, RUN_PRE_DEBUG);
2158 #else /* !CONFIG_FUNCTION_DATA_SECTIONS */
2159 if ((sect->flags & KSPLICE_SECTION_TEXT) != 0)
2160 ret = arch_run_pre_cmp(change, sect, run_addr,
2161 safety_records,
2162 RUN_PRE_DEBUG)\u003b
2163 else
2164 ret = run_pre_cmp(change, sect, run_addr,
2165 safety_records,
2166 RUN_PRE_DEBUG);
2167 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
2168 set_temp_labelvals(change, NOVAL);
2170 ksdebug(change, "\n");
2171 return ret;
2172 } else if (ret != OK) {
2173 set_temp_labelvals(change, NOVAL);
2174 return ret;
/* Trial match only: record nothing permanent, report the candidate. */
2177 if (mode != RUN_PRE_FINAL) {
2178 set_temp_labelvals(change, NOVAL);
2179 ksdebug(change, "run-pre: candidate for sect %s=%lx\n",
2180 sect->symbol->label, run_addr);
2181 return OK;
/* Final match: commit the TEMP labelvals as real values. */
2184 set_temp_labelvals(change, VAL);
2185 ksdebug(change, "run-pre: found sect %s=%lx\n", sect->symbol->label,
2186 run_addr);
2187 return OK;
2191 * run_pre_cmp is the primary run-pre matching function; it determines
2192 * whether the given ksplice_section matches the code or data in the
2193 * running kernel starting at run_addr.
2195 * If run_pre_mode is RUN_PRE_FINAL, a safety record for the matched
2196 * section is created.
2198 * The run_pre_mode is also used to determine what debugging
2199 * information to display.
2201 static abort_t run_pre_cmp(struct ksplice_mod_change *change,
2202 const struct ksplice_section *sect,
2203 unsigned long run_addr,
2204 struct list_head *safety_records,
2205 enum run_pre_mode mode)
2207 int matched = 0;
2208 abort_t ret;
2209 const struct ksplice_reloc *r, *finger;
2210 const unsigned char *pre, *run, *pre_start, *run_start;
2211 unsigned char runval;
2213 pre_start = (const unsigned char *)sect->address;
2214 run_start = (const unsigned char *)run_addr;
/* finger tracks our position in the (sorted) reloc table as pre
 * advances; see lookup_reloc's amortized O(1) contract. */
2216 finger = init_reloc_search(change, sect);
2218 pre = pre_start;
2219 run = run_start;
2220 while (pre < pre_start + sect->size) {
2221 unsigned long offset = pre - pre_start;
/* A reloc at this pre byte is compared via its howto handler
 * instead of byte equality, then both cursors skip its size. */
2222 ret = lookup_reloc(change, &finger, (unsigned long)pre, &r);
2223 if (ret == OK) {
2224 ret = handle_reloc(change, sect, r, (unsigned long)run,
2225 mode);
2226 if (ret != OK) {
2227 if (mode == RUN_PRE_INITIAL)
2228 ksdebug(change, "reloc in sect does "
2229 "not match after %lx/%lx "
2230 "bytes\n", offset, sect->size);
2231 return ret;
2233 if (mode == RUN_PRE_DEBUG)
2234 print_bytes(change, run, r->howto->size, pre,
2235 r->howto->size);
2236 pre += r->howto->size;
2237 run += r->howto->size;
2238 finger++;
2239 continue;
2240 } else if (ret != NO_MATCH) {
2241 return ret;
/* NOTE(review): handle_paravirt presumably accounts for bytes
 * rewritten by paravirt alternatives; it reports how many bytes
 * it consumed via *matched — confirm against its definition. */
2244 if ((sect->flags & KSPLICE_SECTION_TEXT) != 0) {
2245 ret = handle_paravirt(change, (unsigned long)pre,
2246 (unsigned long)run, &matched);
2247 if (ret != OK)
2248 return ret;
2249 if (matched != 0) {
2250 if (mode == RUN_PRE_DEBUG)
2251 print_bytes(change, run, matched, pre,
2252 matched);
2253 pre += matched;
2254 run += matched;
2255 continue;
/* An unmapped run page means "no match", not a hard error. */
2259 if (probe_kernel_read(&runval, (void *)run, 1) == -EFAULT) {
2260 if (mode == RUN_PRE_INITIAL)
2261 ksdebug(change, "sect unmapped after %lx/%lx "
2262 "bytes\n", offset, sect->size);
2263 return NO_MATCH;
/* Only non-data sections must match byte-for-byte; data bytes
 * may legitimately have changed at run time. */
2266 if (runval != *pre &&
2267 (sect->flags & KSPLICE_SECTION_DATA) == 0) {
2268 if (mode == RUN_PRE_INITIAL)
2269 ksdebug(change, "sect does not match after "
2270 "%lx/%lx bytes\n", offset, sect->size);
2271 if (mode == RUN_PRE_DEBUG) {
2272 print_bytes(change, run, 1, pre, 1);
2273 ksdebug(change, "[p_o=%lx] ! ", offset);
2274 print_bytes(change, run + 1, 2, pre + 1, 2);
2276 return NO_MATCH;
2278 if (mode == RUN_PRE_DEBUG)
2279 print_bytes(change, run, 1, pre, 1);
2280 pre++;
2281 run++;
2283 return create_safety_record(change, sect, safety_records, run_addr,
2284 run - run_start);
/* Debug helper: print up to runc bytes of run-kernel code alongside up
 * to prec bytes of pre code.  Equal bytes print once ("xx "); differing
 * or unpaired bytes print as "run/pre" with the absent side empty. */
static void print_bytes(struct ksplice_mod_change *change,
			const unsigned char *run, int runc,
			const unsigned char *pre, int prec)
{
	int i;
	int common = min(runc, prec);

	for (i = 0; i < common; i++) {
		if (run[i] != pre[i])
			ksdebug(change, "%02x/%02x ", run[i], pre[i]);
		else
			ksdebug(change, "%02x ", run[i]);
	}
	for (i = common; i < runc; i++)
		ksdebug(change, "%02x/ ", run[i]);
	for (i = common; i < prec; i++)
		ksdebug(change, "/%02x ", pre[i]);
}
2305 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
2306 static abort_t brute_search(struct ksplice_mod_change *change,
2307 struct ksplice_section *sect,
2308 const void *start, unsigned long len,
2309 struct list_head *vals)
2311 unsigned long addr;
2312 char run, pre;
2313 abort_t ret;
2315 for (addr = (unsigned long)start; addr < (unsigned long)start + len;
2316 addr++) {
2317 if (addr % 100000 == 0)
2318 yield();
2320 if (probe_kernel_read(&run, (void *)addr, 1) == -EFAULT)
2321 return OK;
2323 pre = *(const unsigned char *)(sect->address);
2325 if (run != pre)
2326 continue;
2328 ret = try_addr(change, sect, addr, NULL, RUN_PRE_INITIAL);
2329 if (ret == OK) {
2330 ret = add_candidate_val(change, vals, addr);
2331 if (ret != OK)
2332 return ret;
2333 } else if (ret != NO_MATCH) {
2334 return ret;
2338 return OK;
/* brute_search_all needs the kernel's (non-exported) module list and,
 * on newer kernels, init_mm for the base kernel's text bounds;
 * EXTRACT_SYMBOL arranges for these addresses to be resolved. */
2341 extern struct list_head modules;
2342 EXTRACT_SYMBOL(modules);
2343 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
2344 /* 3abf024d2abb79614d8c4cb25a70d5596f77d0ad was after 2.6.24 */
2345 EXTRACT_SYMBOL(init_mm);
2346 #endif /* LINUX_VERSION_CODE */
/* Run brute_search over every plausible text region: each loaded module
 * that could be the patch target, then the base kernel's text.  Debug
 * output is suppressed for the duration (restored on exit), since the
 * scan would otherwise flood the log with per-candidate messages. */
2348 static abort_t brute_search_all(struct ksplice_mod_change *change,
2349 struct ksplice_section *sect,
2350 struct list_head *vals)
2352 struct module *m;
2353 abort_t ret = OK;
2354 int saved_debug;
2356 ksdebug(change, "brute_search: searching for %s\n",
2357 sect->symbol->label);
2358 saved_debug = change->update->debug;
2359 change->update->debug = 0;
/* Scan both the core and init regions of each candidate module. */
2361 list_for_each_entry(m, &modules, list) {
2362 if (!patches_module(m, change->target) ||
2363 m == change->new_code_mod)
2364 continue;
2365 ret = brute_search(change, sect, m->module_core, m->core_size,
2366 vals);
2367 if (ret != OK)
2368 goto out;
2369 ret = brute_search(change, sect, m->module_init, m->init_size,
2370 vals);
2371 if (ret != OK)
2372 goto out;
/* Finally, the base kernel's own text. */
2375 ret = brute_search(change, sect, (const void *)init_mm.start_code,
2376 init_mm.end_code - init_mm.start_code, vals);
2378 out:
2379 change->update->debug = saved_debug;
2380 return ret;
2382 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
/* Half-open address interval [address, address + size); used as the
 * bsearch key when locating relocations by their blank_addr. */
2384 struct range {
2385 unsigned long address;
2386 unsigned long size;
2389 static int reloc_bsearch_compare(const void *key, const void *elt)
2391 const struct range *range = key;
2392 const struct ksplice_reloc *r = elt;
2393 if (range->address + range->size <= r->blank_addr)
2394 return -1;
2395 if (range->address > r->blank_addr)
2396 return 1;
2397 return 0;
2400 static const struct ksplice_reloc *find_reloc(const struct ksplice_reloc *start,
2401 const struct ksplice_reloc *end,
2402 unsigned long address,
2403 unsigned long size)
2405 const struct ksplice_reloc *r;
2406 struct range range = { address, size };
2407 r = bsearch((void *)&range, start, end - start, sizeof(*r),
2408 reloc_bsearch_compare);
2409 if (r == NULL)
2410 return NULL;
2411 while (r > start && (r - 1)->blank_addr >= address)
2412 r--;
2413 return r;
2416 static const struct ksplice_reloc *
2417 init_reloc_search(struct ksplice_mod_change *change,
2418 const struct ksplice_section *sect)
2420 const struct ksplice_reloc *r;
2421 r = find_reloc(change->old_code.relocs, change->old_code.relocs_end,
2422 sect->address, sect->size);
2423 if (r == NULL)
2424 return change->old_code.relocs_end;
2425 return r;
2429 * lookup_reloc implements an amortized O(1) lookup for the next
2430 * old_code relocation. It must be called with a strictly increasing
2431 * sequence of addresses.
2433 * The fingerp is private data for lookup_reloc, and needs to have
2434 * been initialized as a pointer to the result of find_reloc (or
2435 * init_reloc_search).
/*
 * Returns OK with *relocp set when a relocation covers addr, NO_MATCH
 * when none does (or the reloc's canary shows it was patched out), and
 * UNEXPECTED on malformed input.
 */
2437 static abort_t lookup_reloc(struct ksplice_mod_change *change,
2438 const struct ksplice_reloc **fingerp,
2439 unsigned long addr,
2440 const struct ksplice_reloc **relocp)
2442 const struct ksplice_reloc *r = *fingerp;
2443 int canary_ret;
/* Advance the finger past relocs that end at or before addr; a
 * zero-size reloc matches only when addr equals its blank_addr. */
2445 while (r < change->old_code.relocs_end &&
2446 addr >= r->blank_addr + r->howto->size &&
2447 !(addr == r->blank_addr && r->howto->size == 0))
2448 r++;
2449 *fingerp = r;
2450 if (r == change->old_code.relocs_end)
2451 return NO_MATCH;
2452 if (addr < r->blank_addr)
2453 return NO_MATCH;
2454 *relocp = r;
2455 if (r->howto->type != KSPLICE_HOWTO_RELOC)
2456 return OK;
/* No canary in the blank bytes means this reloc site was overwritten
 * (e.g. by an alternative instruction) and must be skipped. */
2458 canary_ret = contains_canary(change, r->blank_addr, r->howto);
2459 if (canary_ret < 0)
2460 return UNEXPECTED;
2461 if (canary_ret == 0) {
2462 ksdebug(change, "run-pre: reloc skipped at p_a=%lx to %s+%lx "
2463 "(altinstr)\n", r->blank_addr, r->symbol->label,
2464 r->target_addend);
2465 return NO_MATCH;
/* addr landed inside a reloc's footprint rather than at its start. */
2467 if (addr != r->blank_addr) {
2468 ksdebug(change, "Invalid nonzero relocation offset\n");
2469 return UNEXPECTED;
2471 return OK;
2474 static abort_t handle_howto_symbol(struct ksplice_mod_change *change,
2475 const struct ksplice_reloc *r,
2476 unsigned long run_addr,
2477 enum run_pre_mode mode)
2479 if (mode == RUN_PRE_INITIAL)
2480 ksdebug(change, "run-pre: symbol %s at %lx\n", r->symbol->label,
2481 run_addr);
2482 return create_labelval(change, r->symbol, run_addr, TEMP);
/* Dispatch a pre-code relocation to its howto-specific matcher.  Each
 * handler compares the bytes at run_addr against what the reloc
 * describes and records TEMP labelvals as appropriate. */
2485 static abort_t handle_reloc(struct ksplice_mod_change *change,
2486 const struct ksplice_section *sect,
2487 const struct ksplice_reloc *r,
2488 unsigned long run_addr, enum run_pre_mode mode)
2490 switch (r->howto->type) {
2491 case KSPLICE_HOWTO_RELOC:
2492 return handle_howto_reloc(change, sect, r, run_addr, mode);
2493 case KSPLICE_HOWTO_DATE:
2494 case KSPLICE_HOWTO_TIME:
2495 return handle_howto_date(change, sect, r, run_addr, mode);
/* Bug-table relocations only exist on kernels with struct bug_entry. */
2496 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
2497 #ifdef CONFIG_BUG
2498 case KSPLICE_HOWTO_BUG:
2499 return handle_bug(change, r, run_addr);
2500 #endif /* CONFIG_BUG */
2501 #else /* LINUX_VERSION_CODE < */
2502 /* 7664c5a1da4711bb6383117f51b94c8dc8f3f1cd was after 2.6.19 */
2503 #endif /* LINUX_VERSION_CODE */
2504 case KSPLICE_HOWTO_EXTABLE:
2505 return handle_extable(change, r, run_addr);
2506 case KSPLICE_HOWTO_SYMBOL:
2507 return handle_howto_symbol(change, r, run_addr, mode);
2508 default:
2509 ksdebug(change, "Unexpected howto type %d\n", r->howto->type);
2510 return UNEXPECTED;
2515 * For date/time relocations, we check that the sequence of bytes
2516 * matches the format of a date or time.
/*
 * __DATE__/__TIME__ strings embedded in the kernel differ between
 * builds, so they cannot be compared byte-for-byte; instead verify the
 * run bytes are *shaped* like a date or time, then bind r->symbol to
 * run_addr.  Returns NO_MATCH on format mismatch or unmapped memory,
 * OUT_OF_MEMORY, UNEXPECTED for a wrong howto type, or the result of
 * create_labelval.
 */
2518 static abort_t handle_howto_date(struct ksplice_mod_change *change,
2519 const struct ksplice_section *sect,
2520 const struct ksplice_reloc *r,
2521 unsigned long run_addr, enum run_pre_mode mode)
2523 abort_t ret;
2524 char *buf = kmalloc(r->howto->size, GFP_KERNEL);
2526 if (buf == NULL)
2527 return OUT_OF_MEMORY;
/* Copy the run bytes; an unmapped page simply means no match. */
2528 if (probe_kernel_read(buf, (void *)run_addr, r->howto->size) == -EFAULT) {
2529 ret = NO_MATCH;
2530 goto out;
2533 switch (r->howto->type) {
/* __TIME__ format: "HH:MM:SS" */
2534 case KSPLICE_HOWTO_TIME:
2535 if (isdigit(buf[0]) && isdigit(buf[1]) && buf[2] == ':' &&
2536 isdigit(buf[3]) && isdigit(buf[4]) && buf[5] == ':' &&
2537 isdigit(buf[6]) && isdigit(buf[7]))
2538 ret = OK;
2539 else
2540 ret = NO_MATCH;
2541 break;
/* __DATE__ format: "Mmm dd yyyy", day possibly space-padded. */
2542 case KSPLICE_HOWTO_DATE:
2543 if (isalpha(buf[0]) && isalpha(buf[1]) && isalpha(buf[2]) &&
2544 buf[3] == ' ' && (buf[4] == ' ' || isdigit(buf[4])) &&
2545 isdigit(buf[5]) && buf[6] == ' ' && isdigit(buf[7]) &&
2546 isdigit(buf[8]) && isdigit(buf[9]) && isdigit(buf[10]))
2547 ret = OK;
2548 else
2549 ret = NO_MATCH;
2550 break;
2551 default:
2552 ret = UNEXPECTED;
2554 if (ret == NO_MATCH && mode == RUN_PRE_INITIAL)
2555 ksdebug(change, "%s string: \"%.*s\" does not match format\n",
2556 r->howto->type == KSPLICE_HOWTO_DATE ? "date" : "time",
2557 r->howto->size, buf);
2559 if (ret != OK)
2560 goto out;
2561 ret = create_labelval(change, r->symbol, run_addr, TEMP);
2562 out:
2563 kfree(buf);
2564 return ret;
2568 * Extract the value of a symbol used in a relocation in the pre code
2569 * during run-pre matching, giving an error if it conflicts with a
2570 * previously found value of that symbol
2572 static abort_t handle_howto_reloc(struct ksplice_mod_change *change,
2573 const struct ksplice_section *sect,
2574 const struct ksplice_reloc *r,
2575 unsigned long run_addr,
2576 enum run_pre_mode mode)
2578 struct ksplice_section *sym_sect = symbol_section(change, r->symbol);
2579 unsigned long offset = r->target_addend;
2580 unsigned long val;
2581 abort_t ret;
2583 ret = read_reloc_value(change, r, run_addr, &val);
2584 if (ret != OK)
2585 return ret;
/* PC-relative values are stored relative to the reloc site; rebase
 * to an absolute address. */
2586 if (r->howto->pcrel)
2587 val += run_addr;
/* Without -ffunction-sections, relocations may target the middle of
 * a section; match_map records, per section offset, where that offset
 * was observed to match in the running kernel. */
2589 #ifndef CONFIG_FUNCTION_DATA_SECTIONS
2590 if (sym_sect == NULL || sym_sect->match_map == NULL || offset == 0) {
/* NOTE(review): offset is unsigned long, so "offset < 0" below can
 * never be true — the comparison is dead code. */
2592 } else if (offset < 0 || offset >= sym_sect->size) {
2593 ksdebug(change, "Out of range relocation: %s+%lx -> %s+%lx",
2594 sect->symbol->label, r->blank_addr - sect->address,
2595 r->symbol->label, offset);
2596 return NO_MATCH;
2597 } else if (sect == sym_sect && sect->match_map[offset] == NULL) {
/* First intra-section reference to this offset: record it. */
2598 sym_sect->match_map[offset] =
2599 (const unsigned char *)r->symbol->value + offset;
2600 sym_sect->unmatched++;
2601 } else if (sect == sym_sect && (unsigned long)sect->match_map[offset] ==
2602 r->symbol->value + offset) {
2604 } else if (sect == sym_sect) {
2605 ksdebug(change, "Relocations to nonmatching locations within "
2606 "section %s: %lx does not match %lx\n",
2607 sect->symbol->label, offset,
2608 (unsigned long)sect->match_map[offset] -
2609 r->symbol->value);
2610 return NO_MATCH;
2611 } else if ((sym_sect->flags & KSPLICE_SECTION_MATCHED) == 0) {
/* Cross-section reference into a not-yet-matched section: defer;
 * the floodfill in match_change_sections will retry later. */
2612 if (mode == RUN_PRE_INITIAL)
2613 ksdebug(change, "Delaying matching of %s due to reloc "
2614 "from to unmatching section: %s+%lx\n",
2615 sect->symbol->label, r->symbol->label, offset);
2616 return NO_MATCH;
2617 } else if (sym_sect->match_map[offset] == NULL) {
2618 if (mode == RUN_PRE_INITIAL)
2619 ksdebug(change, "Relocation not to instruction "
2620 "boundary: %s+%lx -> %s+%lx",
2621 sect->symbol->label, r->blank_addr -
2622 sect->address, r->symbol->label, offset);
2623 return NO_MATCH;
2624 } else if ((unsigned long)sym_sect->match_map[offset] !=
2625 r->symbol->value + offset) {
/* The target section matched at a shifted location; adjust the
 * extracted value by the recorded shift. */
2626 if (mode == RUN_PRE_INITIAL)
2627 ksdebug(change, "Match map shift %s+%lx: %lx != %lx\n",
2628 r->symbol->label, offset,
2629 r->symbol->value + offset,
2630 (unsigned long)sym_sect->match_map[offset]);
2631 val += r->symbol->value + offset -
2632 (unsigned long)sym_sect->match_map[offset];
2634 #endif /* !CONFIG_FUNCTION_DATA_SECTIONS */
2636 if (mode == RUN_PRE_INITIAL)
2637 ksdebug(change, "run-pre: reloc at r_a=%lx p_a=%lx to %s+%lx: "
2638 "found %s = %lx\n", run_addr, r->blank_addr,
2639 r->symbol->label, offset, r->symbol->label, val);
/* The canary should only ever appear in blank (pre) code; finding it
 * in the running kernel means something is badly wrong. */
2641 if (contains_canary(change, run_addr, r->howto) != 0) {
2642 ksdebug(change, "Aborted. Unexpected canary in run code at %lx"
2643 "\n", run_addr);
2644 return UNEXPECTED;
/* Self-referential data reloc: nothing further to record. */
2647 if ((sect->flags & KSPLICE_SECTION_DATA) != 0 &&
2648 sect->symbol == r->symbol)
2649 return OK;
2650 ret = create_labelval(change, r->symbol, val, TEMP);
2651 if (ret == NO_MATCH && mode == RUN_PRE_INITIAL)
2652 ksdebug(change, "run-pre: reloc at r_a=%lx p_a=%lx: labelval "
2653 "%s = %lx does not match expected %lx\n", run_addr,
2654 r->blank_addr, r->symbol->label, r->symbol->value, val);
2656 if (ret != OK)
2657 return ret;
/* String sections referenced by this reloc are matched recursively,
 * at the address the reloc just resolved to. */
2658 if (sym_sect != NULL && (sym_sect->flags & KSPLICE_SECTION_MATCHED) == 0
2659 && (sym_sect->flags & KSPLICE_SECTION_STRING) != 0) {
2660 if (mode == RUN_PRE_INITIAL)
2661 ksdebug(change, "Recursively comparing string section "
2662 "%s\n", sym_sect->symbol->label);
2663 else if (mode == RUN_PRE_DEBUG)
2664 ksdebug(change, "[str start] ");
2665 ret = run_pre_cmp(change, sym_sect, val, NULL, mode);
2666 if (mode == RUN_PRE_DEBUG)
2667 ksdebug(change, "[str end] ");
2668 if (ret == OK && mode == RUN_PRE_INITIAL)
2669 ksdebug(change, "Successfully matched string section %s"
2670 "\n", sym_sect->symbol->label);
2671 else if (mode == RUN_PRE_INITIAL)
2672 ksdebug(change, "Failed to match string section %s\n",
2673 sym_sect->symbol->label);
2675 return ret;
2678 #ifdef CONFIG_GENERIC_BUG
2679 #ifdef KSPLICE_NO_KERNEL_SUPPORT
2680 EXTRACT_SYMBOL(find_bug);
2681 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
2682 static abort_t handle_bug(struct ksplice_mod_change *change,
2683 const struct ksplice_reloc *r, unsigned long run_addr)
2685 const struct bug_entry *run_bug = find_bug(run_addr);
2686 struct ksplice_section *bug_sect = symbol_section(change, r->symbol);
2687 if (run_bug == NULL)
2688 return NO_MATCH;
2689 if (bug_sect == NULL)
2690 return UNEXPECTED;
2691 return create_labelval(change, bug_sect->symbol, (unsigned long)run_bug,
2692 TEMP);
2694 #endif /* CONFIG_GENERIC_BUG */
2696 #ifdef KSPLICE_NO_KERNEL_SUPPORT
2697 EXTRACT_SYMBOL(search_exception_tables);
2698 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
2700 static abort_t handle_extable(struct ksplice_mod_change *change,
2701 const struct ksplice_reloc *r,
2702 unsigned long run_addr)
2704 const struct exception_table_entry *run_ent =
2705 search_exception_tables(run_addr);
2706 struct ksplice_section *ex_sect = symbol_section(change, r->symbol);
2707 if (run_ent == NULL)
2708 return NO_MATCH;
2709 if (ex_sect == NULL)
2710 return UNEXPECTED;
2711 return create_labelval(change, ex_sect->symbol, (unsigned long)run_ent,
2712 TEMP);
2715 static int symbol_section_bsearch_compare(const void *a, const void *b)
2717 const struct ksplice_symbol *sym = a;
2718 const struct ksplice_section *sect = b;
2719 return strcmp(sym->label, sect->symbol->label);
2722 static int compare_section_labels(const void *va, const void *vb)
2724 const struct ksplice_section *a = va, *b = vb;
2725 return strcmp(a->symbol->label, b->symbol->label);
2728 static struct ksplice_section *symbol_section(struct ksplice_mod_change *change,
2729 const struct ksplice_symbol *sym)
2731 return bsearch(sym, change->old_code.sections,
2732 change->old_code.sections_end -
2733 change->old_code.sections,
2734 sizeof(struct ksplice_section),
2735 symbol_section_bsearch_compare);
2738 /* Find the relocation for the oldaddr of a ksplice_patch */
2739 static const struct ksplice_reloc *
2740 patch_reloc(struct ksplice_mod_change *change,
2741 const struct ksplice_patch *p)
2743 unsigned long addr = (unsigned long)&p->oldaddr;
2744 const struct ksplice_reloc *r =
2745 find_reloc(change->new_code.relocs, change->new_code.relocs_end,
2746 addr, sizeof(addr));
2747 if (r == NULL || r->blank_addr < addr ||
2748 r->blank_addr >= addr + sizeof(addr))
2749 return NULL;
2750 return r;
2754 * Populates vals with the possible values for ksym from the various
2755 * sources Ksplice uses to resolve symbols
2757 static abort_t lookup_symbol(struct ksplice_mod_change *change,
2758 const struct ksplice_symbol *ksym,
2759 struct list_head *vals)
2761 abort_t ret;
2763 #ifdef KSPLICE_STANDALONE
2764 if (!bootstrapped)
2765 return OK;
2766 #endif /* KSPLICE_STANDALONE */
/* No candidate list means the symbol's value was already detected;
 * it is authoritative, so discard any previously gathered candidates. */
2768 if (ksym->candidate_vals == NULL) {
2769 release_vals(vals);
2770 ksdebug(change, "using detected sym %s=%lx\n", ksym->label,
2771 ksym->value);
2772 return add_candidate_val(change, vals, ksym->value);
/* The target module's exit routine is a candidate for the symbol
 * named "cleanup_module". */
2775 #ifdef CONFIG_MODULE_UNLOAD
2776 if (strcmp(ksym->label, "cleanup_module") == 0 && change->target != NULL
2777 && change->target->exit != NULL) {
2778 ret = add_candidate_val(change, vals,
2779 (unsigned long)change->target->exit);
2780 if (ret != OK)
2781 return ret;
2783 #endif
/* Named symbols: gather the prelinked candidates, plus any exports
 * newly created by other changes in this update (new_export_lookup). */
2785 if (ksym->name != NULL) {
2786 struct candidate_val *val;
2787 list_for_each_entry(val, ksym->candidate_vals, list) {
2788 ret = add_candidate_val(change, vals, val->val);
2789 if (ret != OK)
2790 return ret;
2793 ret = new_export_lookup(change, ksym->name, vals);
2794 if (ret != OK)
2795 return ret;
2798 return OK;
2801 #ifdef KSPLICE_STANDALONE
/* Add every System.map candidate address recorded for label to vals,
 * after validating (and compensating for) any constant relocation
 * offset between the System.map and the running kernel. */
2802 static abort_t
2803 add_system_map_candidates(struct ksplice_mod_change *change,
2804 const struct ksplice_system_map *start,
2805 const struct ksplice_system_map *end,
2806 const char *label, struct list_head *vals)
2808 abort_t ret;
2809 long off;
2810 int i;
2811 const struct ksplice_system_map *smap;
2813 /* Some Fedora kernel releases have System.map files whose symbol
2814 * addresses disagree with the running kernel by a constant address
2815 * offset because of the CONFIG_PHYSICAL_START and CONFIG_PHYSICAL_ALIGN
2816 * values used to compile these kernels. This constant address offset
2817 * is always a multiple of 0x100000.
2819 * If we observe an offset that is NOT a multiple of 0x100000, then the
2820 * user provided us with an incorrect System.map file, and we should
2821 * abort.
2822 * If we observe an offset that is a multiple of 0x100000, then we can
2823 * adjust the System.map address values accordingly and proceed.
/* printk's runtime address vs. its System.map address measures the
 * offset for the whole map. */
2825 off = (unsigned long)printk - change->map_printk;
2826 if (off & 0xfffff) {
2827 ksdebug(change,
2828 "Aborted. System.map does not match kernel.\n");
2829 return BAD_SYSTEM_MAP;
/* The map entries must be sorted by label for this bsearch. */
2832 smap = bsearch(label, start, end - start, sizeof(*smap),
2833 system_map_bsearch_compare);
2834 if (smap == NULL)
2835 return OK;
2837 for (i = 0; i < smap->nr_candidates; i++) {
2838 ret = add_candidate_val(change, vals,
2839 smap->candidates[i] + off);
2840 if (ret != OK)
2841 return ret;
2843 return OK;
2846 static int system_map_bsearch_compare(const void *key, const void *elt)
2848 const struct ksplice_system_map *map = elt;
2849 const char *label = key;
2850 return strcmp(label, map->label);
2852 #endif /* KSPLICE_STANDALONE */
2855 * An update could cause one module to export a symbol and at the same
2856 * time change another module to use that symbol. This violates the normal
2857 * situation where the changes can be handled independently.
2859 * new_export_lookup obtains symbol values from the changes to the
2860 * exported symbol table made by other changes.
2862 static abort_t new_export_lookup(struct ksplice_mod_change *ichange,
2863 const char *name, struct list_head *vals)
2865 struct ksplice_mod_change *change;
2866 struct ksplice_patch *p;
2867 list_for_each_entry(change, &ichange->update->changes, list) {
2868 for (p = change->patches; p < change->patches_end; p++) {
2869 const struct kernel_symbol *sym;
2870 const struct ksplice_reloc *r;
/* Only export patches whose exported name matches. */
2871 if (p->type != KSPLICE_PATCH_EXPORT ||
2872 strcmp(name, *(const char **)p->contents) != 0)
2873 continue;
2875 /* Check that the p->oldaddr reloc has been resolved. */
2876 r = patch_reloc(change, p);
2877 if (r == NULL ||
2878 contains_canary(change, r->blank_addr,
2879 r->howto) != 0)
2880 continue;
2881 sym = (const struct kernel_symbol *)r->symbol->value;
2884 * Check that the sym->value reloc has been resolved,
2885 * if there is a Ksplice relocation there.
/* (An unresolved canary still in sym->value means the exported
 * value is not yet known; skip this patch for now.) */
2887 r = find_reloc(change->new_code.relocs,
2888 change->new_code.relocs_end,
2889 (unsigned long)&sym->value,
2890 sizeof(&sym->value));
2891 if (r != NULL &&
2892 r->blank_addr == (unsigned long)&sym->value &&
2893 contains_canary(change, r->blank_addr,
2894 r->howto) != 0)
2895 continue;
2896 return add_candidate_val(ichange, vals, sym->value);
2899 return OK;
2902 #ifdef KSPLICE_STANDALONE
2903 EXTRACT_SYMBOL(bust_spinlocks);
2904 #endif /* KSPLICE_STANDALONE */
2907 * When patch_action is called, the update should be fully prepared.
2908 * patch_action will try to actually insert or remove trampolines for
2909 * the update.
/*
 * Sequencing: pre hooks -> (up to 5) stop_machine attempts of the
 * atomic apply/reverse step -> fail hooks on error, post hooks on
 * success.  Trampoline pages are mapped for the duration and unmapped
 * before hooks that run on the result.
 */
2911 static abort_t patch_action(struct update *update, enum ksplice_action action)
2913 static int (*const __patch_actions[KS_ACTIONS])(void *) = {
2914 [KS_APPLY] = __apply_patches,
2915 [KS_REVERSE] = __reverse_patches,
2917 int i;
2918 abort_t ret;
2919 struct ksplice_mod_change *change;
/* Set up the trampoline page mappings; undone via
 * unmap_trampoline_pages below. */
2921 ret = map_trampoline_pages(update);
2922 if (ret != OK)
2923 return ret;
/* User-supplied pre hooks; any nonzero return aborts the action. */
2925 list_for_each_entry(change, &update->changes, list) {
2926 const typeof(int (*)(void)) *f;
2927 for (f = change->hooks[action].pre;
2928 f < change->hooks[action].pre_end; f++) {
2929 if ((*f)() != 0) {
2930 ret = CALL_FAILED;
2931 goto out;
/* Retry the atomic step up to 5 times, sleeping ~1s between attempts
 * while the to-be-patched code is busy on some task's stack. */
2936 for (i = 0; i < 5; i++) {
2937 cleanup_conflicts(update);
2938 #ifdef KSPLICE_STANDALONE
2939 bust_spinlocks(1);
2940 #endif /* KSPLICE_STANDALONE */
2941 ret = (__force abort_t)stop_machine(__patch_actions[action],
2942 update, NULL);
2943 #ifdef KSPLICE_STANDALONE
2944 bust_spinlocks(0);
2945 #endif /* KSPLICE_STANDALONE */
2946 if (ret != CODE_BUSY)
2947 break;
2948 set_current_state(TASK_INTERRUPTIBLE);
2949 schedule_timeout(msecs_to_jiffies(1000));
2951 out:
2952 unmap_trampoline_pages(update);
2954 if (ret == CODE_BUSY) {
2955 print_conflicts(update);
2956 _ksdebug(update, "Aborted %s. stack check: to-be-%s "
2957 "code is busy.\n", update->kid,
2958 action == KS_APPLY ? "replaced" : "reversed");
2959 } else if (ret == ALREADY_REVERSED) {
2960 _ksdebug(update, "Aborted %s. Ksplice update %s is already "
2961 "reversed.\n", update->kid, update->kid);
2962 } else if (ret == MODULE_BUSY) {
2963 _ksdebug(update, "Update %s is in use by another module\n",
2964 update->kid);
/* Failure path: run the fail hooks and bail out. */
2967 if (ret != OK) {
2968 list_for_each_entry(change, &update->changes, list) {
2969 const typeof(void (*)(void)) *f;
2970 for (f = change->hooks[action].fail;
2971 f < change->hooks[action].fail_end; f++)
2972 (*f)();
2975 return ret;
/* Success path: run the post hooks. */
2978 list_for_each_entry(change, &update->changes, list) {
2979 const typeof(void (*)(void)) *f;
2980 for (f = change->hooks[action].post;
2981 f < change->hooks[action].post_end; f++)
2982 (*f)();
2985 _ksdebug(update, "Atomic patch %s for %s complete\n",
2986 action == KS_APPLY ? "insertion" : "removal", update->kid);
2987 return OK;
2990 /* Atomically insert the update; run from within stop_machine */
/*
 * Runs with every other CPU halted, so the checks and the trampoline
 * writes below are atomic with respect to the rest of the system.
 * Returns an abort_t forced through int (stop_machine's return type).
 */
2991 static int __apply_patches(void *updateptr)
2993 struct update *update = updateptr;
2994 struct ksplice_mod_change *change;
2995 struct ksplice_module_list_entry *entry;
2996 struct ksplice_patch *p;
2997 abort_t ret;
2999 if (update->stage == STAGE_APPLIED)
3000 return (__force int)OK;
3002 if (update->stage != STAGE_PREPARING)
3003 return (__force int)UNEXPECTED;
/* Stack-check every task: nobody may be running (or sleeping in)
 * code that is about to be replaced. */
3005 ret = check_each_task(update);
3006 if (ret != OK)
3007 return (__force int)ret;
/* Pin every new-code module; on failure, unwind the references
 * taken so far (change1 walks the list up to the failing change). */
3009 list_for_each_entry(change, &update->changes, list) {
3010 if (try_module_get(change->new_code_mod) != 1) {
3011 struct ksplice_mod_change *change1;
3012 list_for_each_entry(change1, &update->changes, list) {
3013 if (change1 == change)
3014 break;
3015 module_put(change1->new_code_mod);
3017 module_put(THIS_MODULE);
3018 return (__force int)UNEXPECTED;
/* User-supplied check hooks get the final veto. */
3022 list_for_each_entry(change, &update->changes, list) {
3023 const typeof(int (*)(void)) *f;
3024 for (f = change->hooks[KS_APPLY].check;
3025 f < change->hooks[KS_APPLY].check_end; f++) {
3026 if ((*f)() != 0)
3027 return (__force int)CALL_FAILED;
3031 /* Commit point: the update application will succeed. */
3033 update->stage = STAGE_APPLIED;
3034 #ifdef TAINT_KSPLICE
3035 add_taint(TAINT_KSPLICE);
3036 #endif
3038 list_for_each_entry(entry, &update->ksplice_module_list, update_list)
3039 list_add(&entry->list, &ksplice_modules);
/* Write the trampolines — this is the actual code switch. */
3041 list_for_each_entry(change, &update->changes, list) {
3042 for (p = change->patches; p < change->patches_end; p++)
3043 insert_trampoline(p);
/* intra hooks run inside the stopped-machine window, after patching. */
3046 list_for_each_entry(change, &update->changes, list) {
3047 const typeof(void (*)(void)) *f;
3048 for (f = change->hooks[KS_APPLY].intra;
3049 f < change->hooks[KS_APPLY].intra_end; f++)
3050 (*f)();
3053 return (__force int)OK;
3056 /* Atomically remove the update; run from within stop_machine */
3057 static int __reverse_patches(void *updateptr)
3059 struct update *update = updateptr;
3060 struct ksplice_mod_change *change;
3061 struct ksplice_module_list_entry *entry;
3062 const struct ksplice_patch *p;
3063 abort_t ret;
3065 if (update->stage != STAGE_APPLIED)
3066 return (__force int)OK;
3068 #ifdef CONFIG_MODULE_UNLOAD
3069 list_for_each_entry(change, &update->changes, list) {
3070 if (module_refcount(change->new_code_mod) != 1)
3071 return (__force int)MODULE_BUSY;
3073 #endif /* CONFIG_MODULE_UNLOAD */
3075 list_for_each_entry(entry, &update->ksplice_module_list, update_list) {
3076 if (!entry->applied &&
3077 find_module(entry->target_mod_name) != NULL)
3078 return COLD_UPDATE_LOADED;
3081 ret = check_each_task(update);
3082 if (ret != OK)
3083 return (__force int)ret;
3085 list_for_each_entry(change, &update->changes, list) {
3086 for (p = change->patches; p < change->patches_end; p++) {
3087 ret = verify_trampoline(change, p);
3088 if (ret != OK)
3089 return (__force int)ret;
3093 list_for_each_entry(change, &update->changes, list) {
3094 const typeof(int (*)(void)) *f;
3095 for (f = change->hooks[KS_REVERSE].check;
3096 f < change->hooks[KS_REVERSE].check_end; f++) {
3097 if ((*f)() != 0)
3098 return (__force int)CALL_FAILED;
3102 /* Commit point: the update reversal will succeed. */
3104 update->stage = STAGE_REVERSED;
3106 list_for_each_entry(change, &update->changes, list)
3107 module_put(change->new_code_mod);
3109 list_for_each_entry(entry, &update->ksplice_module_list, update_list)
3110 list_del(&entry->list);
3112 list_for_each_entry(change, &update->changes, list) {
3113 const typeof(void (*)(void)) *f;
3114 for (f = change->hooks[KS_REVERSE].intra;
3115 f < change->hooks[KS_REVERSE].intra_end; f++)
3116 (*f)();
3119 list_for_each_entry(change, &update->changes, list) {
3120 for (p = change->patches; p < change->patches_end; p++)
3121 remove_trampoline(p);
3124 return (__force int)OK;
3127 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
3128 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
3129 EXTRACT_SYMBOL(tasklist_lock);
3130 #endif /* LINUX_VERSION_CODE */
/*
 * Check whether any thread's instruction pointer or any address of
 * its stack is contained in one of the safety_records associated with
 * the update.
 *
 * check_each_task must be called from inside stop_machine, because it
 * does not take tasklist_lock (which cannot be held by anyone else
 * during stop_machine).
 */
static abort_t check_each_task(struct update *update)
{
	const struct task_struct *g, *p;
	abort_t status = OK, ret;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
	/* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
	read_lock(&tasklist_lock);
#endif /* LINUX_VERSION_CODE */
	do_each_thread(g, p) {
		/* do_each_thread is a double loop! */
		/* Rerun a failing task with rerun=true to record conflicts. */
		ret = check_task(update, p, false);
		if (ret != OK) {
			check_task(update, p, true);
			status = ret;
		}
		/* CODE_BUSY keeps scanning (to report every conflict);
		 * any other failure aborts the walk immediately. */
		if (ret != OK && ret != CODE_BUSY)
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
	/* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
			goto out;
#else /* LINUX_VERSION_CODE < */
			return ret;
#endif /* LINUX_VERSION_CODE */
	} while_each_thread(g, p);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
	/* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
out:
	read_unlock(&tasklist_lock);
#endif /* LINUX_VERSION_CODE */
	return status;
}
3172 #ifdef KSPLICE_NO_KERNEL_SUPPORT
3173 EXTRACT_SYMBOL(task_curr);
3174 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
/*
 * Check one task: its instruction pointer, its syscall restart callback,
 * and (when it is not running) every word of its kernel stack, against
 * the update's safety records.  When rerun is true, also record a
 * conflict entry on update->conflicts for later reporting.
 */
static abort_t check_task(struct update *update,
			  const struct task_struct *t, bool rerun)
{
	abort_t status, ret;
	struct conflict *conf = NULL;

	if (rerun) {
		conf = kmalloc(sizeof(*conf), GFP_ATOMIC);
		if (conf == NULL)
			return OUT_OF_MEMORY;
		conf->process_name = kstrdup(t->comm, GFP_ATOMIC);
		if (conf->process_name == NULL) {
			kfree(conf);
			return OUT_OF_MEMORY;
		}
		conf->pid = t->pid;
		INIT_LIST_HEAD(&conf->stack);
		list_add(&conf->list, &update->conflicts);
	}

	/* Dead tasks cannot be executing patched code. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,19)
	if (t->state == TASK_DEAD)
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
	/* c394cc9fbb367f87faa2228ec2eabacd2d4701c6 was after 2.6.18 */
	if ((t->flags & PF_DEAD) != 0)
#endif
	/* NOTE(review): on kernels < 2.6.10 neither condition above is
	 * compiled and this return would be unconditional -- confirm the
	 * minimum supported kernel version. */
		return OK;

	status = check_address(update, conf, KSPLICE_IP(t));
	/* The syscall restart callback is another code address to vet. */
	ret = check_address(update, conf,
			    (unsigned long)task_thread_info(t)->
			    restart_block.fn);
	if (status == OK)
		status = ret;

	if (t == current) {
		/* Our own stack: walk upward from the current frame. */
		ret = check_stack(update, conf, task_thread_info(t),
				  (unsigned long *)__builtin_frame_address(0));
		if (status == OK)
			status = ret;
	} else if (!task_curr(t)) {
		/* Sleeping task: walk from its saved stack pointer. */
		ret = check_stack(update, conf, task_thread_info(t),
				  (unsigned long *)KSPLICE_SP(t));
		if (status == OK)
			status = ret;
	} else if (!is_stop_machine(t)) {
		/* Only stop_machine threads may run during stop_machine. */
		status = UNEXPECTED_RUNNING_TASK;
	}
	return status;
}
3228 static abort_t check_stack(struct update *update, struct conflict *conf,
3229 const struct thread_info *tinfo,
3230 const unsigned long *stack)
3232 abort_t status = OK, ret;
3233 unsigned long addr;
3235 while (valid_stack_ptr(tinfo, stack)) {
3236 addr = *stack++;
3237 ret = check_address(update, conf, addr);
3238 if (ret != OK)
3239 status = ret;
3241 return status;
3244 static abort_t check_address(struct update *update,
3245 struct conflict *conf, unsigned long addr)
3247 abort_t status = OK, ret;
3248 const struct safety_record *rec;
3249 struct ksplice_mod_change *change;
3250 struct conflict_addr *ca = NULL;
3252 if (conf != NULL) {
3253 ca = kmalloc(sizeof(*ca), GFP_ATOMIC);
3254 if (ca == NULL)
3255 return OUT_OF_MEMORY;
3256 ca->addr = addr;
3257 ca->has_conflict = false;
3258 ca->label = NULL;
3259 list_add(&ca->list, &conf->stack);
3262 list_for_each_entry(change, &update->changes, list) {
3263 unsigned long tramp_addr = follow_trampolines(change, addr);
3264 list_for_each_entry(rec, &change->safety_records, list) {
3265 ret = check_record(ca, rec, tramp_addr);
3266 if (ret != OK)
3267 status = ret;
3270 return status;
3273 static abort_t check_record(struct conflict_addr *ca,
3274 const struct safety_record *rec, unsigned long addr)
3276 if (addr >= rec->addr && addr < rec->addr + rec->size) {
3277 if (ca != NULL) {
3278 ca->label = rec->label;
3279 ca->has_conflict = true;
3281 return CODE_BUSY;
3283 return OK;
3286 /* Is the task one of the stop_machine tasks? */
3287 static bool is_stop_machine(const struct task_struct *t)
3289 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
3290 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
3291 const char *kstop_prefix = "kstop/";
3292 #else /* LINUX_VERSION_CODE < */
3293 /* c9583e55fa2b08a230c549bd1e3c0bde6c50d9cc was after 2.6.27 */
3294 const char *kstop_prefix = "kstop";
3295 #endif /* LINUX_VERSION_CODE */
3296 const char *num;
3297 if (!strstarts(t->comm, kstop_prefix))
3298 return false;
3299 num = t->comm + strlen(kstop_prefix);
3300 return num[strspn(num, "0123456789")] == '\0';
3301 #else /* LINUX_VERSION_CODE < */
3302 /* ffdb5976c47609c862917d4c186ecbb5706d2dda was after 2.6.26 */
3303 return strcmp(t->comm, "kstopmachine") == 0;
3304 #endif /* LINUX_VERSION_CODE */
3307 static void cleanup_conflicts(struct update *update)
3309 struct conflict *conf;
3310 list_for_each_entry(conf, &update->conflicts, list) {
3311 clear_list(&conf->stack, struct conflict_addr, list);
3312 kfree(conf->process_name);
3314 clear_list(&update->conflicts, struct conflict, list);
3317 static void print_conflicts(struct update *update)
3319 const struct conflict *conf;
3320 const struct conflict_addr *ca;
3321 list_for_each_entry(conf, &update->conflicts, list) {
3322 _ksdebug(update, "stack check: pid %d (%s):", conf->pid,
3323 conf->process_name);
3324 list_for_each_entry(ca, &conf->stack, list) {
3325 _ksdebug(update, " %lx", ca->addr);
3326 if (ca->has_conflict)
3327 _ksdebug(update, " [<-CONFLICT]");
3329 _ksdebug(update, "\n");
/*
 * Overwrite the start of the target function with the trampoline,
 * first saving the displaced bytes so the patch can be reversed.
 */
static void insert_trampoline(struct ksplice_patch *p)
{
	mm_segment_t old_fs = get_fs();
	set_fs(KERNEL_DS);	/* permit writes to kernel text */
	memcpy(p->saved, p->vaddr, p->size);
	memcpy(p->vaddr, p->contents, p->size);
	/* Keep the icache coherent with the rewritten instructions. */
	flush_icache_range(p->oldaddr, p->oldaddr + p->size);
	set_fs(old_fs);
}
3343 static abort_t verify_trampoline(struct ksplice_mod_change *change,
3344 const struct ksplice_patch *p)
3346 if (memcmp(p->vaddr, p->contents, p->size) != 0) {
3347 ksdebug(change, "Aborted. Trampoline at %lx has been "
3348 "overwritten.\n", p->oldaddr);
3349 return CODE_BUSY;
3351 return OK;
3354 static void remove_trampoline(const struct ksplice_patch *p)
3356 mm_segment_t old_fs = get_fs();
3357 set_fs(KERNEL_DS);
3358 memcpy(p->vaddr, p->saved, p->size);
3359 flush_icache_range(p->oldaddr, p->oldaddr + p->size);
3360 set_fs(old_fs);
/* Returns NO_MATCH if there's already a labelval with a different value */
static abort_t create_labelval(struct ksplice_mod_change *change,
			       struct ksplice_symbol *ksym,
			       unsigned long val, int status)
{
	val = follow_trampolines(change, val);
	/* candidate_vals == NULL means the symbol's value is already fixed. */
	if (ksym->candidate_vals == NULL)
		return ksym->value == val ? OK : NO_MATCH;

	ksym->value = val;
	if (status == TEMP) {
		/* Remember the old candidates so NOVAL can restore them. */
		struct labelval *lv = kmalloc(sizeof(*lv), GFP_KERNEL);
		if (lv == NULL)
			return OUT_OF_MEMORY;
		lv->symbol = ksym;
		lv->saved_vals = ksym->candidate_vals;
		list_add(&lv->list, &change->temp_labelvals);
	}
	ksym->candidate_vals = NULL;
	return OK;
}
/*
 * Creates a new safety_record for a old_code section based on its
 * ksplice_section and run-pre matching information.
 */
static abort_t create_safety_record(struct ksplice_mod_change *change,
				    const struct ksplice_section *sect,
				    struct list_head *record_list,
				    unsigned long run_addr,
				    unsigned long run_size)
{
	struct safety_record *rec;
	struct ksplice_patch *p;

	if (record_list == NULL)
		return OK;

	/* Only sections that are actually patched need safety records. */
	for (p = change->patches; p < change->patches_end; p++) {
		const struct ksplice_reloc *r = patch_reloc(change, p);
		if (strcmp(sect->symbol->label, r->symbol->label) == 0)
			break;
	}
	if (p >= change->patches_end)
		return OK;

	rec = kmalloc(sizeof(*rec), GFP_KERNEL);
	if (rec == NULL)
		return OUT_OF_MEMORY;
	/*
	 * The old_code might be unloaded when checking reversing
	 * patches, so we need to kstrdup the label here.
	 */
	rec->label = kstrdup(sect->symbol->label, GFP_KERNEL);
	if (rec->label == NULL) {
		kfree(rec);
		return OUT_OF_MEMORY;
	}
	rec->addr = run_addr;
	rec->size = run_size;

	list_add(&rec->list, record_list);
	return OK;
}
3428 static abort_t add_candidate_val(struct ksplice_mod_change *change,
3429 struct list_head *vals, unsigned long val)
3431 struct candidate_val *tmp, *new;
3434 * Careful: follow trampolines before comparing values so that we do
3435 * not mistake the obsolete function for another copy of the function.
3437 val = follow_trampolines(change, val);
3439 list_for_each_entry(tmp, vals, list) {
3440 if (tmp->val == val)
3441 return OK;
3443 new = kmalloc(sizeof(*new), GFP_KERNEL);
3444 if (new == NULL)
3445 return OUT_OF_MEMORY;
3446 new->val = val;
3447 list_add(&new->list, vals);
3448 return OK;
/* Free every candidate_val on the list and empty it. */
static void release_vals(struct list_head *vals)
{
	clear_list(vals, struct candidate_val, list);
}
/*
 * The temp_labelvals list is used to cache those temporary labelvals
 * that have been created to cross-check the symbol values obtained
 * from different relocations within a single section being matched.
 *
 * If status is VAL, commit the temp_labelvals as final values.
 *
 * If status is NOVAL, restore the list of possible values to the
 * ksplice_symbol, so that it no longer has a known value.
 */
static void set_temp_labelvals(struct ksplice_mod_change *change, int status)
{
	struct labelval *lv, *n;
	list_for_each_entry_safe(lv, n, &change->temp_labelvals, list) {
		if (status == NOVAL) {
			lv->symbol->candidate_vals = lv->saved_vals;
		} else {
			/* Value committed: the saved candidates are dead. */
			release_vals(lv->saved_vals);
			kfree(lv->saved_vals);
		}
		list_del(&lv->list);
		kfree(lv);
	}
}
/* Is there a Ksplice canary with given howto at blank_addr? */
/* Returns 1 on match, 0 on mismatch, and -1 on a bad relocation size. */
static int contains_canary(struct ksplice_mod_change *change,
			   unsigned long blank_addr,
			   const struct ksplice_reloc_howto *howto)
{
	switch (howto->size) {
	case 1:
		return (*(uint8_t *)blank_addr & howto->dst_mask) ==
		    (KSPLICE_CANARY & howto->dst_mask);
	case 2:
		return (*(uint16_t *)blank_addr & howto->dst_mask) ==
		    (KSPLICE_CANARY & howto->dst_mask);
	case 4:
		return (*(uint32_t *)blank_addr & howto->dst_mask) ==
		    (KSPLICE_CANARY & howto->dst_mask);
#if BITS_PER_LONG >= 64
	case 8:
		return (*(uint64_t *)blank_addr & howto->dst_mask) ==
		    (KSPLICE_CANARY & howto->dst_mask);
#endif /* BITS_PER_LONG */
	default:
		ksdebug(change, "Aborted. Invalid relocation size.\n");
		return -1;
	}
}
3507 #ifdef KSPLICE_NO_KERNEL_SUPPORT
3508 EXTRACT_SYMBOL(__kernel_text_address);
3509 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
/*
 * Compute the address of the code you would actually run if you were
 * to call the function at addr (i.e., follow the sequence of jumps
 * starting at addr)
 */
static unsigned long follow_trampolines(struct ksplice_mod_change *change,
					unsigned long addr)
{
	unsigned long new_addr;
	struct module *m;

	while (1) {
#ifdef KSPLICE_STANDALONE
		if (!bootstrapped)
			return addr;
#endif /* KSPLICE_STANDALONE */
		/* Stop when addr is not a jump we recognize... */
		if (!__kernel_text_address(addr) ||
		    trampoline_target(change, addr, &new_addr) != OK)
			return addr;
		/* ...or when the target is not inside a Ksplice module. */
		m = __module_text_address(new_addr);
		if (m == NULL || m == change->target ||
		    !strstarts(m->name, "ksplice"))
			return addr;
		addr = new_addr;
	}
}
/* Does module a patch module b? */
static bool patches_module(const struct module *a, const struct module *b)
{
#ifdef KSPLICE_NO_KERNEL_SUPPORT
	/*
	 * Without kernel support, infer the relationship from the
	 * new-code module's name: ksplice_<kid>_<target>_new.
	 */
	const char *name;
	const char *modname = b == NULL ? "vmlinux" : b->name;
	if (a == b)
		return true;
	if (a == NULL || !strstarts(a->name, "ksplice_"))
		return false;
	name = a->name + strlen("ksplice_");
	name += strcspn(name, "_");	/* skip the update kid */
	if (name[0] != '_')
		return false;
	name++;
	return strstarts(name, modname) &&
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
	    strcmp(name + strlen(modname), "_new") == 0;
#else /* LINUX_VERSION_CODE < */
	/* 0e8a2de644a93132594f66222a9d48405674eacd was after 2.6.9 */
	    (strcmp(name + strlen(modname), "_n") == 0
	     || strcmp(name + strlen(modname), "_new") == 0);
#endif /* LINUX_VERSION_CODE */
#else /* !KSPLICE_NO_KERNEL_SUPPORT */
	struct ksplice_module_list_entry *entry;
	if (a == b)
		return true;
	/*
	 * NOTE(review): this branch dereferences b->name without a NULL
	 * check, unlike the branch above which maps NULL to "vmlinux" --
	 * confirm callers never pass b == NULL here.
	 */
	list_for_each_entry(entry, &ksplice_modules, list) {
		if (strcmp(entry->target_mod_name, b->name) == 0 &&
		    strcmp(entry->new_code_mod_name, a->name) == 0)
			return true;
	}
	return false;
#endif /* KSPLICE_NO_KERNEL_SUPPORT */
}
3574 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
3575 /* 66f92cf9d415e96a5bdd6c64de8dd8418595d2fc was after 2.6.29 */
/* Does str begin with prefix? */
static bool strstarts(const char *str, const char *prefix)
{
	/* Walk the prefix; any mismatch (or early end of str) fails. */
	while (*prefix != '\0') {
		if (*str++ != *prefix++)
			return false;
	}
	return true;
}
3580 #endif /* LINUX_VERSION_CODE */
3582 static bool singular(struct list_head *list)
3584 return !list_empty(list) && list->next->next == list;
/*
 * Binary search in a sorted array of n elements of the given size.
 * Returns a pointer to the matching element, or NULL.
 *
 * Fixed: the previous version stored indices in int (truncating large
 * size_t counts) and computed (start + end) / 2, which can overflow;
 * it also relied on GNU void* arithmetic.  Use half-open size_t bounds
 * and an overflow-safe midpoint instead.
 */
static void *bsearch(const void *key, const void *base, size_t n,
		     size_t size, int (*cmp)(const void *key, const void *elt))
{
	size_t lo = 0, hi = n;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;
		const void *elt = (const char *)base + mid * size;
		int result = cmp(key, elt);

		if (result < 0)
			hi = mid;
		else if (result > 0)
			lo = mid + 1;
		else
			return (void *)elt;
	}
	return NULL;
}
3606 static int compare_relocs(const void *a, const void *b)
3608 const struct ksplice_reloc *ra = a, *rb = b;
3609 if (ra->blank_addr > rb->blank_addr)
3610 return 1;
3611 else if (ra->blank_addr < rb->blank_addr)
3612 return -1;
3613 else
3614 return ra->howto->size - rb->howto->size;
#ifdef KSPLICE_STANDALONE
/* Sort/bsearch comparator: order ksplice_system_map entries by label. */
static int compare_system_map(const void *a, const void *b)
{
	const struct ksplice_system_map *sa = a, *sb = b;
	return strcmp(sa->label, sb->label);
}
#endif /* KSPLICE_STANDALONE */
#ifdef CONFIG_DEBUG_FS
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
/* Old kernels don't have debugfs_create_blob */
/* Copy up to count bytes of the blob, starting at *ppos, to user memory. */
static ssize_t read_file_blob(struct file *file, char __user *user_buf,
			      size_t count, loff_t *ppos)
{
	struct debugfs_blob_wrapper *blob = file->private_data;
	return simple_read_from_buffer(user_buf, count, ppos, blob->data,
				       blob->size);
}
3636 static int blob_open(struct inode *inode, struct file *file)
3638 if (inode->i_private)
3639 file->private_data = inode->i_private;
3640 return 0;
/* Read-only file_operations backing the debugfs_create_blob fallback. */
static struct file_operations fops_blob = {
	.read = read_file_blob,
	.open = blob_open,
};
/*
 * Minimal backport of debugfs_create_blob for pre-2.6.17 kernels:
 * exposes the wrapper's data/size through a read-only debugfs file.
 */
static struct dentry *debugfs_create_blob(const char *name, mode_t mode,
					  struct dentry *parent,
					  struct debugfs_blob_wrapper *blob)
{
	return debugfs_create_file(name, mode, parent, blob, &fops_blob);
}
#endif /* LINUX_VERSION_CODE */
3656 static abort_t init_debug_buf(struct update *update)
3658 update->debug_blob.size = 0;
3659 update->debug_blob.data = NULL;
3660 update->debugfs_dentry =
3661 debugfs_create_blob(update->name, S_IFREG | S_IRUSR, NULL,
3662 &update->debug_blob);
3663 if (update->debugfs_dentry == NULL)
3664 return OUT_OF_MEMORY;
3665 return OK;
3668 static void clear_debug_buf(struct update *update)
3670 if (update->debugfs_dentry == NULL)
3671 return;
3672 debugfs_remove(update->debugfs_dentry);
3673 update->debugfs_dentry = NULL;
3674 update->debug_blob.size = 0;
3675 vfree(update->debug_blob.data);
3676 update->debug_blob.data = NULL;
/* Append a formatted message to the update's debugfs debug blob. */
static int _ksdebug(struct update *update, const char *fmt, ...)
{
	va_list args;
	unsigned long size, old_size, new_size;

	if (update->debug == 0)
		return 0;

	/* size includes the trailing '\0' */
	va_start(args, fmt);
	size = 1 + vsnprintf(update->debug_blob.data, 0, fmt, args);
	va_end(args);
	/*
	 * The buffer's capacity is implicit: at least PAGE_SIZE, and
	 * always the next power of two above the bytes in use.  Grow
	 * (vmalloc + copy) only when the new message would overflow it.
	 */
	old_size = update->debug_blob.size == 0 ? 0 :
	    max(PAGE_SIZE, roundup_pow_of_two(update->debug_blob.size));
	new_size = update->debug_blob.size + size == 0 ? 0 :
	    max(PAGE_SIZE, roundup_pow_of_two(update->debug_blob.size + size));
	if (new_size > old_size) {
		char *buf = vmalloc(new_size);
		if (buf == NULL)
			return -ENOMEM;
		memcpy(buf, update->debug_blob.data, update->debug_blob.size);
		vfree(update->debug_blob.data);
		update->debug_blob.data = buf;
	}
	/* Second pass actually formats into the (possibly new) buffer. */
	va_start(args, fmt);
	update->debug_blob.size += vsnprintf(update->debug_blob.data +
					     update->debug_blob.size,
					     size, fmt, args);
	va_end(args);
	return 0;
}
#else /* CONFIG_DEBUG_FS */
/* Without debugfs there is no debug blob to set up. */
static abort_t init_debug_buf(struct update *update)
{
	return OK;
}
/* Without debugfs there is nothing to tear down. */
static void clear_debug_buf(struct update *update)
{
	return;
}
/* debugfs-less fallback: route debug output to printk(KERN_DEBUG). */
static int _ksdebug(struct update *update, const char *fmt, ...)
{
	va_list args;

	if (update->debug == 0)
		return 0;

	/* Only prefix "ksplice: " at the start of a new output line. */
	if (!update->debug_continue_line)
		printk(KERN_DEBUG "ksplice: ");

	va_start(args, fmt);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
	vprintk(fmt, args);
#else /* LINUX_VERSION_CODE < */
	/* 683b229286b429244f35726b3c18caec429233bd was after 2.6.8 */
	{
		char *buf = kvasprintf(GFP_KERNEL, fmt, args);
		printk("%s", buf);
		kfree(buf);
	}
#endif /* LINUX_VERSION_CODE */
	va_end(args);

	/* Remember whether the message ended mid-line. */
	update->debug_continue_line =
	    fmt[0] == '\0' || fmt[strlen(fmt) - 1] != '\n';
	return 0;
}
#endif /* CONFIG_DEBUG_FS */
3750 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) && defined(CONFIG_KALLSYMS)
3751 /* 75a66614db21007bcc8c37f9c5d5b922981387b9 was after 2.6.29 */
3752 extern unsigned long kallsyms_addresses[];
3753 EXTRACT_SYMBOL(kallsyms_addresses);
3754 extern unsigned long kallsyms_num_syms;
3755 EXTRACT_SYMBOL(kallsyms_num_syms);
3756 extern u8 kallsyms_names[];
3757 EXTRACT_SYMBOL(kallsyms_names);
3759 static int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
3760 struct module *, unsigned long),
3761 void *data)
3763 char namebuf[KSYM_NAME_LEN];
3764 unsigned long i;
3765 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3766 unsigned int off;
3767 #endif /* LINUX_VERSION_CODE */
3768 int ret;
3770 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3771 for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
3772 off = kallsyms_expand_symbol(off, namebuf);
3773 ret = fn(data, namebuf, NULL, kallsyms_addresses[i]);
3774 if (ret != 0)
3775 return ret;
3777 #else /* LINUX_VERSION_CODE < */
3778 /* 5648d78927ca65e74aadc88a2b1d6431e55e78ec was after 2.6.9 */
3779 char *knames;
3781 for (i = 0, knames = kallsyms_names; i < kallsyms_num_syms; i++) {
3782 unsigned prefix = *knames++;
3784 strlcpy(namebuf + prefix, knames, KSYM_NAME_LEN - prefix);
3786 ret = fn(data, namebuf, NULL, kallsyms_addresses[i]);
3787 if (ret != OK)
3788 return ret;
3790 knames += strlen(knames) + 1;
3792 #endif /* LINUX_VERSION_CODE */
3793 return module_kallsyms_on_each_symbol(fn, data);
3796 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3797 extern u8 kallsyms_token_table[];
3798 EXTRACT_SYMBOL(kallsyms_token_table);
3799 extern u16 kallsyms_token_index[];
3800 EXTRACT_SYMBOL(kallsyms_token_index);
/*
 * Decompress one kallsyms name table entry at offset off into result
 * (NUL-terminated) and return the offset of the next entry.  The entry
 * starts with a token count; each token indexes kallsyms_token_table
 * through kallsyms_token_index.  The very first expanded character is
 * dropped (presumably the symbol type byte, as in the kernel's own
 * kallsyms scheme -- see kernel/kallsyms.c).
 */
static unsigned int kallsyms_expand_symbol(unsigned int off, char *result)
{
	long len, skipped_first = 0;
	const u8 *tptr, *data;

	data = &kallsyms_names[off];
	len = *data;
	data++;

	/* Advance past this entry: length byte plus len token bytes. */
	off += len + 1;

	while (len) {
		tptr = &kallsyms_token_table[kallsyms_token_index[*data]];
		data++;
		len--;

		while (*tptr) {
			if (skipped_first) {
				*result = *tptr;
				result++;
			} else
				skipped_first = 1;
			tptr++;
		}
	}

	*result = '\0';

	return off;
}
3832 #else /* LINUX_VERSION_CODE < */
3833 /* 5648d78927ca65e74aadc88a2b1d6431e55e78ec was after 2.6.9 */
3834 #endif /* LINUX_VERSION_CODE */
3836 static int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
3837 struct module *,
3838 unsigned long),
3839 void *data)
3841 struct module *mod;
3842 unsigned int i;
3843 int ret;
3845 list_for_each_entry(mod, &modules, list) {
3846 for (i = 0; i < mod->num_symtab; i++) {
3847 ret = fn(data, mod->strtab + mod->symtab[i].st_name,
3848 mod, mod->symtab[i].st_value);
3849 if (ret != 0)
3850 return ret;
3853 return 0;
3855 #endif /* LINUX_VERSION_CODE && CONFIG_KALLSYMS */
3857 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
3858 /* c6b37801911d7f4663c99cad8aa230bc934cea82 was after 2.6.29 */
3859 static struct module *find_module(const char *name)
3861 struct module *mod;
3863 list_for_each_entry(mod, &modules, list) {
3864 if (strcmp(mod->name, name) == 0)
3865 return mod;
3867 return NULL;
#ifdef CONFIG_MODULE_UNLOAD
/* One "module_which_uses depends on this module" edge, chained on
 * the used module's modules_which_use_me list. */
struct module_use {
	struct list_head list;
	struct module *module_which_uses;
};
3876 /* I'm not yet certain whether we need the strong form of this. */
3877 static inline int strong_try_module_get(struct module *mod)
3879 if (mod && mod->state != MODULE_STATE_LIVE)
3880 return -EBUSY;
3881 if (try_module_get(mod))
3882 return 0;
3883 return -ENOENT;
3886 /* Does a already use b? */
3887 static int already_uses(struct module *a, struct module *b)
3889 struct module_use *use;
3890 list_for_each_entry(use, &b->modules_which_use_me, list) {
3891 if (use->module_which_uses == a)
3892 return 1;
3894 return 0;
/* Make it so module a uses b.  Must be holding module_mutex */
/* Returns 1 on success (or nothing to do), 0 on failure. */
static int use_module(struct module *a, struct module *b)
{
	struct module_use *use;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
	/* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
	int no_warn;
#endif /* LINUX_VERSION_CODE */
	if (b == NULL || already_uses(a, b))
		return 1;

	if (strong_try_module_get(b) < 0)
		return 0;

	use = kmalloc(sizeof(*use), GFP_ATOMIC);
	if (!use) {
		module_put(b);
		return 0;
	}
	use->module_which_uses = a;
	list_add(&use->list, &b->modules_which_use_me);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
	/* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
	/* The sysfs holders link is best-effort; failure is ignored. */
	no_warn = sysfs_create_link(b->holders_dir, &a->mkobj.kobj, a->name);
#endif /* LINUX_VERSION_CODE */
	return 1;
}
#else /* CONFIG_MODULE_UNLOAD */
/* Without module unloading there is no dependency tracking to do. */
static int use_module(struct module *a, struct module *b)
{
	return 1;
}
#endif /* CONFIG_MODULE_UNLOAD */
3931 #ifndef CONFIG_MODVERSIONS
3932 #define symversion(base, idx) NULL
3933 #else
3934 #define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
3935 #endif
3937 static bool each_symbol_in_section(const struct symsearch *arr,
3938 unsigned int arrsize,
3939 struct module *owner,
3940 bool (*fn)(const struct symsearch *syms,
3941 struct module *owner,
3942 unsigned int symnum, void *data),
3943 void *data)
3945 unsigned int i, j;
3947 for (j = 0; j < arrsize; j++) {
3948 for (i = 0; i < arr[j].stop - arr[j].start; i++)
3949 if (fn(&arr[j], owner, i, data))
3950 return true;
3953 return false;
3956 /* Returns true as soon as fn returns true, otherwise false. */
3957 static bool each_symbol(bool (*fn)(const struct symsearch *arr,
3958 struct module *owner,
3959 unsigned int symnum, void *data),
3960 void *data)
3962 struct module *mod;
3963 const struct symsearch arr[] = {
3964 { __start___ksymtab, __stop___ksymtab, __start___kcrctab,
3965 NOT_GPL_ONLY, false },
3966 { __start___ksymtab_gpl, __stop___ksymtab_gpl,
3967 __start___kcrctab_gpl,
3968 GPL_ONLY, false },
3969 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
3970 { __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
3971 __start___kcrctab_gpl_future,
3972 WILL_BE_GPL_ONLY, false },
3973 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
3974 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
3975 { __start___ksymtab_unused, __stop___ksymtab_unused,
3976 __start___kcrctab_unused,
3977 NOT_GPL_ONLY, true },
3978 { __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
3979 __start___kcrctab_unused_gpl,
3980 GPL_ONLY, true },
3981 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
3984 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
3985 return 1;
3987 list_for_each_entry(mod, &modules, list) {
3988 struct symsearch module_arr[] = {
3989 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
3990 NOT_GPL_ONLY, false },
3991 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
3992 mod->gpl_crcs,
3993 GPL_ONLY, false },
3994 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
3995 { mod->gpl_future_syms,
3996 mod->gpl_future_syms + mod->num_gpl_future_syms,
3997 mod->gpl_future_crcs,
3998 WILL_BE_GPL_ONLY, false },
3999 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
4000 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
4001 { mod->unused_syms,
4002 mod->unused_syms + mod->num_unused_syms,
4003 mod->unused_crcs,
4004 NOT_GPL_ONLY, true },
4005 { mod->unused_gpl_syms,
4006 mod->unused_gpl_syms + mod->num_unused_gpl_syms,
4007 mod->unused_gpl_crcs,
4008 GPL_ONLY, true },
4009 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
4012 if (each_symbol_in_section(module_arr, ARRAY_SIZE(module_arr),
4013 mod, fn, data))
4014 return true;
4016 return false;
/* Argument bundle threaded through each_symbol by find_symbol. */
struct find_symbol_arg {
	/* Input */
	const char *name;	/* symbol name to look up */
	bool gplok;		/* may GPL-only exports be used? */
	bool warn;		/* print warnings for dubious exports? */

	/* Output */
	struct module *owner;	/* exporting module, or NULL for vmlinux */
	const unsigned long *crc;	/* modversions CRC, if available */
	const struct kernel_symbol *sym;	/* matching export entry */
};
4031 static bool find_symbol_in_section(const struct symsearch *syms,
4032 struct module *owner,
4033 unsigned int symnum, void *data)
4035 struct find_symbol_arg *fsa = data;
4037 if (strcmp(syms->start[symnum].name, fsa->name) != 0)
4038 return false;
4040 if (!fsa->gplok) {
4041 if (syms->licence == GPL_ONLY)
4042 return false;
4043 if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
4044 printk(KERN_WARNING "Symbol %s is being used "
4045 "by a non-GPL module, which will not "
4046 "be allowed in the future\n", fsa->name);
4047 printk(KERN_WARNING "Please see the file "
4048 "Documentation/feature-removal-schedule.txt "
4049 "in the kernel source tree for more details.\n");
4053 #ifdef CONFIG_UNUSED_SYMBOLS
4054 if (syms->unused && fsa->warn) {
4055 printk(KERN_WARNING "Symbol %s is marked as UNUSED, "
4056 "however this module is using it.\n", fsa->name);
4057 printk(KERN_WARNING
4058 "This symbol will go away in the future.\n");
4059 printk(KERN_WARNING
4060 "Please evalute if this is the right api to use and if "
4061 "it really is, submit a report the linux kernel "
4062 "mailinglist together with submitting your code for "
4063 "inclusion.\n");
4065 #endif
4067 fsa->owner = owner;
4068 fsa->crc = symversion(syms->crcs, symnum);
4069 fsa->sym = &syms->start[symnum];
4070 return true;
4073 /* Find a symbol and return it, along with, (optional) crc and
4074 * (optional) module which owns it */
4075 static const struct kernel_symbol *find_symbol(const char *name,
4076 struct module **owner,
4077 const unsigned long **crc,
4078 bool gplok, bool warn)
4080 struct find_symbol_arg fsa;
4082 fsa.name = name;
4083 fsa.gplok = gplok;
4084 fsa.warn = warn;
4086 if (each_symbol(find_symbol_in_section, &fsa)) {
4087 if (owner)
4088 *owner = fsa.owner;
4089 if (crc)
4090 *crc = fsa.crc;
4091 return fsa.sym;
4094 return NULL;
4097 static inline int within_module_core(unsigned long addr, struct module *mod)
4099 return (unsigned long)mod->module_core <= addr &&
4100 addr < (unsigned long)mod->module_core + mod->core_size;
4103 static inline int within_module_init(unsigned long addr, struct module *mod)
4105 return (unsigned long)mod->module_init <= addr &&
4106 addr < (unsigned long)mod->module_init + mod->init_size;
/* Return the module whose core or init region contains addr, or NULL. */
static struct module *__module_address(unsigned long addr)
{
	struct module *mod;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
	list_for_each_entry_rcu(mod, &modules, list)
#else
	/* d72b37513cdfbd3f53f3d485a8c403cc96d2c95f was after 2.6.27 */
	list_for_each_entry(mod, &modules, list)
#endif
		if (within_module_core(addr, mod) ||
		    within_module_init(addr, mod))
			return mod;
	return NULL;
}
4124 #endif /* LINUX_VERSION_CODE */
/* A sysfs attribute of a ksplice update, with typed show/store hooks. */
struct update_attribute {
	struct attribute attr;
	ssize_t (*show)(struct update *update, char *buf);
	ssize_t (*store)(struct update *update, const char *buf, size_t len);
};
4132 static ssize_t update_attr_show(struct kobject *kobj, struct attribute *attr,
4133 char *buf)
4135 struct update_attribute *attribute =
4136 container_of(attr, struct update_attribute, attr);
4137 struct update *update = container_of(kobj, struct update, kobj);
4138 if (attribute->show == NULL)
4139 return -EIO;
4140 return attribute->show(update, buf);
4143 static ssize_t update_attr_store(struct kobject *kobj, struct attribute *attr,
4144 const char *buf, size_t len)
4146 struct update_attribute *attribute =
4147 container_of(attr, struct update_attribute, attr);
4148 struct update *update = container_of(kobj, struct update, kobj);
4149 if (attribute->store == NULL)
4150 return -EIO;
4151 return attribute->store(update, buf, len);
4154 static struct sysfs_ops update_sysfs_ops = {
4155 .show = update_attr_show,
4156 .store = update_attr_store,
4159 static void update_release(struct kobject *kobj)
4161 struct update *update;
4162 update = container_of(kobj, struct update, kobj);
4163 cleanup_ksplice_update(update);
4166 static ssize_t stage_show(struct update *update, char *buf)
4168 switch (update->stage) {
4169 case STAGE_PREPARING:
4170 return snprintf(buf, PAGE_SIZE, "preparing\n");
4171 case STAGE_APPLIED:
4172 return snprintf(buf, PAGE_SIZE, "applied\n");
4173 case STAGE_REVERSED:
4174 return snprintf(buf, PAGE_SIZE, "reversed\n");
4176 return 0;
4179 static ssize_t abort_cause_show(struct update *update, char *buf)
4181 switch (update->abort_cause) {
4182 case OK:
4183 return snprintf(buf, PAGE_SIZE, "ok\n");
4184 case NO_MATCH:
4185 return snprintf(buf, PAGE_SIZE, "no_match\n");
4186 #ifdef KSPLICE_STANDALONE
4187 case BAD_SYSTEM_MAP:
4188 return snprintf(buf, PAGE_SIZE, "bad_system_map\n");
4189 #endif /* KSPLICE_STANDALONE */
4190 case CODE_BUSY:
4191 return snprintf(buf, PAGE_SIZE, "code_busy\n");
4192 case MODULE_BUSY:
4193 return snprintf(buf, PAGE_SIZE, "module_busy\n");
4194 case OUT_OF_MEMORY:
4195 return snprintf(buf, PAGE_SIZE, "out_of_memory\n");
4196 case FAILED_TO_FIND:
4197 return snprintf(buf, PAGE_SIZE, "failed_to_find\n");
4198 case ALREADY_REVERSED:
4199 return snprintf(buf, PAGE_SIZE, "already_reversed\n");
4200 case MISSING_EXPORT:
4201 return snprintf(buf, PAGE_SIZE, "missing_export\n");
4202 case UNEXPECTED_RUNNING_TASK:
4203 return snprintf(buf, PAGE_SIZE, "unexpected_running_task\n");
4204 case TARGET_NOT_LOADED:
4205 return snprintf(buf, PAGE_SIZE, "target_not_loaded\n");
4206 case CALL_FAILED:
4207 return snprintf(buf, PAGE_SIZE, "call_failed\n");
4208 case COLD_UPDATE_LOADED:
4209 return snprintf(buf, PAGE_SIZE, "cold_update_loaded\n");
4210 case UNEXPECTED:
4211 return snprintf(buf, PAGE_SIZE, "unexpected\n");
4212 default:
4213 return snprintf(buf, PAGE_SIZE, "unknown\n");
4215 return 0;
4218 static ssize_t conflict_show(struct update *update, char *buf)
4220 const struct conflict *conf;
4221 const struct conflict_addr *ca;
4222 int lastused = 0;
4223 mutex_lock(&module_mutex);
4224 list_for_each_entry(conf, &update->conflicts, list) {
4225 int used = lastused;
4226 used += snprintf(buf + used, PAGE_SIZE - used, "%s %d",
4227 conf->process_name, conf->pid);
4228 if (used >= PAGE_SIZE)
4229 goto out;
4230 list_for_each_entry(ca, &conf->stack, list) {
4231 if (!ca->has_conflict)
4232 continue;
4233 used += snprintf(buf + used, PAGE_SIZE - used, " %s",
4234 ca->label);
4235 if (used >= PAGE_SIZE)
4236 goto out;
4238 used += snprintf(buf + used, PAGE_SIZE - used, "\n");
4239 if (used >= PAGE_SIZE)
4240 goto out;
4241 lastused = used;
4243 out:
4244 mutex_unlock(&module_mutex);
4245 return lastused;
4248 /* Used to pass maybe_cleanup_ksplice_update to kthread_run */
4249 static int maybe_cleanup_ksplice_update_wrapper(void *updateptr)
4251 struct update *update = updateptr;
4252 mutex_lock(&module_mutex);
4253 maybe_cleanup_ksplice_update(update);
4254 mutex_unlock(&module_mutex);
4255 return 0;
4258 static ssize_t stage_store(struct update *update, const char *buf, size_t len)
4260 enum stage old_stage;
4261 mutex_lock(&module_mutex);
4262 old_stage = update->stage;
4263 if ((strncmp(buf, "applied", len) == 0 ||
4264 strncmp(buf, "applied\n", len) == 0) &&
4265 update->stage == STAGE_PREPARING)
4266 update->abort_cause = apply_update(update);
4267 else if ((strncmp(buf, "reversed", len) == 0 ||
4268 strncmp(buf, "reversed\n", len) == 0) &&
4269 update->stage == STAGE_APPLIED)
4270 update->abort_cause = reverse_update(update);
4271 else if ((strncmp(buf, "cleanup", len) == 0 ||
4272 strncmp(buf, "cleanup\n", len) == 0) &&
4273 update->stage == STAGE_REVERSED)
4274 kthread_run(maybe_cleanup_ksplice_update_wrapper, update,
4275 "ksplice_cleanup_%s", update->kid);
4277 mutex_unlock(&module_mutex);
4278 return len;
4281 static ssize_t debug_show(struct update *update, char *buf)
4283 return snprintf(buf, PAGE_SIZE, "%d\n", update->debug);
4286 static ssize_t debug_store(struct update *update, const char *buf, size_t len)
4288 unsigned long l;
4289 int ret = strict_strtoul(buf, 10, &l);
4290 if (ret != 0)
4291 return ret;
4292 update->debug = l;
4293 return len;
4296 static ssize_t partial_show(struct update *update, char *buf)
4298 return snprintf(buf, PAGE_SIZE, "%d\n", update->partial);
4301 static ssize_t partial_store(struct update *update, const char *buf, size_t len)
4303 unsigned long l;
4304 int ret = strict_strtoul(buf, 10, &l);
4305 if (ret != 0)
4306 return ret;
4307 update->partial = l;
4308 return len;
/* sysfs attributes exposed in each update's kobject directory.
 * 0600 entries are owner read/write; 0400 entries are read-only. */
static struct update_attribute stage_attribute =
	__ATTR(stage, 0600, stage_show, stage_store);
static struct update_attribute abort_cause_attribute =
	__ATTR(abort_cause, 0400, abort_cause_show, NULL);
static struct update_attribute debug_attribute =
	__ATTR(debug, 0600, debug_show, debug_store);
static struct update_attribute partial_attribute =
	__ATTR(partial, 0600, partial_show, partial_store);
static struct update_attribute conflict_attribute =
	__ATTR(conflicts, 0400, conflict_show, NULL);
4322 static struct attribute *update_attrs[] = {
4323 &stage_attribute.attr,
4324 &abort_cause_attribute.attr,
4325 &debug_attribute.attr,
4326 &partial_attribute.attr,
4327 &conflict_attribute.attr,
4328 NULL
4331 static struct kobj_type update_ktype = {
4332 .sysfs_ops = &update_sysfs_ops,
4333 .release = update_release,
4334 .default_attrs = update_attrs,
#ifdef KSPLICE_STANDALONE

/* Debug verbosity for the bootstrap update, settable at load time. */
static int debug;
module_param(debug, int, 0600);
MODULE_PARM_DESC(debug, "Debug level");

/* System.map data linked into this module (bounds of the array). */
extern struct ksplice_system_map ksplice_system_map[], ksplice_system_map_end[];

/* Synthetic mod_change used to bootstrap standalone Ksplice itself:
 * it has no target module and carries this module's own system map. */
static struct ksplice_mod_change bootstrap_mod_change = {
	.name = "ksplice_" __stringify(KSPLICE_KID),
	.kid = "init_" __stringify(KSPLICE_KID),
	.target_name = NULL,
	.target = NULL,
	.map_printk = MAP_PRINTK,
	.new_code_mod = THIS_MODULE,
	.new_code.system_map = ksplice_system_map,
	.new_code.system_map_end = ksplice_system_map_end,
};
#endif /* KSPLICE_STANDALONE */
4356 static int init_ksplice(void)
4358 #ifdef KSPLICE_STANDALONE
4359 struct ksplice_mod_change *change = &bootstrap_mod_change;
4360 change->update = init_ksplice_update(change->kid);
4361 sort(change->new_code.system_map,
4362 change->new_code.system_map_end - change->new_code.system_map,
4363 sizeof(struct ksplice_system_map), compare_system_map, NULL);
4364 if (change->update == NULL)
4365 return -ENOMEM;
4366 add_to_update(change, change->update);
4367 change->update->debug = debug;
4368 change->update->abort_cause =
4369 apply_relocs(change, ksplice_init_relocs, ksplice_init_relocs_end);
4370 if (change->update->abort_cause == OK)
4371 bootstrapped = true;
4372 cleanup_ksplice_update(bootstrap_mod_change.update);
4373 #else /* !KSPLICE_STANDALONE */
4374 ksplice_kobj = kobject_create_and_add("ksplice", kernel_kobj);
4375 if (ksplice_kobj == NULL)
4376 return -ENOMEM;
4377 #endif /* KSPLICE_STANDALONE */
4378 return 0;
4381 static void cleanup_ksplice(void)
4383 #ifndef KSPLICE_STANDALONE
4384 kobject_put(ksplice_kobj);
4385 #endif /* KSPLICE_STANDALONE */
module_init(init_ksplice);
module_exit(cleanup_ksplice);

MODULE_AUTHOR("Ksplice, Inc.");
MODULE_DESCRIPTION("Ksplice rebootless update system");
#ifdef KSPLICE_VERSION
/* KSPLICE_VERSION is optionally defined at build time. */
MODULE_VERSION(KSPLICE_VERSION);
#endif
MODULE_LICENSE("GPL v2");