/* Provenance: kmodsrc/ksplice.c from ksplice.git,
 * blob 870032dbfe765c71c4dd9688fcc4c8be9c05d760.
 * Note: strstarts() was merged upstream in Linux 2.6.30, hence the
 * compatibility declaration later in this file. */
1 /* Copyright (C) 2007-2009 Ksplice, Inc.
2 * Authors: Jeff Arnold, Anders Kaseorg, Tim Abbott
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License, version 2.
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
12 * You should have received a copy of the GNU General Public License
13 * along with this program; if not, write to the Free Software
14 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
15 * 02110-1301, USA.
18 #include <linux/module.h>
19 #include <linux/version.h>
20 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
21 #include <linux/bug.h>
22 #else /* LINUX_VERSION_CODE */
23 /* 7664c5a1da4711bb6383117f51b94c8dc8f3f1cd was after 2.6.19 */
24 #endif /* LINUX_VERSION_CODE */
25 #include <linux/ctype.h>
26 #if defined CONFIG_DEBUG_FS || LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
27 #include <linux/debugfs.h>
28 #else /* CONFIG_DEBUG_FS */
29 /* a7a76cefc4b12bb6508afa4c77f11c2752cc365d was after 2.6.11 */
30 #endif /* CONFIG_DEBUG_FS */
31 #include <linux/errno.h>
32 #include <linux/kallsyms.h>
33 #include <linux/kobject.h>
34 #include <linux/kthread.h>
35 #include <linux/pagemap.h>
36 #include <linux/sched.h>
37 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
38 #include <linux/sort.h>
39 #else /* LINUX_VERSION_CODE < */
40 /* 8c63b6d337534a6b5fb111dc27d0850f535118c0 was after 2.6.11 */
41 #endif /* LINUX_VERSION_CODE */
42 #include <linux/stop_machine.h>
43 #include <linux/sysfs.h>
44 #include <linux/time.h>
45 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
46 #include <linux/uaccess.h>
47 #else /* LINUX_VERSION_CODE < */
48 /* linux/uaccess.h doesn't exist in kernels before 2.6.18 */
49 #include <asm/uaccess.h>
50 #endif /* LINUX_VERSION_CODE */
51 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
52 #include <linux/utsrelease.h>
53 #else /* LINUX_VERSION_CODE < */
54 /* 63104eec234bdecb55fd9c15467ae00d0a3f42ac was after 2.6.17 */
55 #endif /* LINUX_VERSION_CODE */
56 #include <linux/vmalloc.h>
57 #ifdef KSPLICE_STANDALONE
58 #include "ksplice.h"
59 #else /* !KSPLICE_STANDALONE */
60 #include <linux/ksplice.h>
61 #endif /* KSPLICE_STANDALONE */
62 #ifdef KSPLICE_NEED_PARAINSTRUCTIONS
63 #include <asm/alternative.h>
64 #endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
66 #if defined(KSPLICE_STANDALONE) && \
67 !defined(CONFIG_KSPLICE) && !defined(CONFIG_KSPLICE_MODULE)
68 #define KSPLICE_NO_KERNEL_SUPPORT 1
69 #endif /* KSPLICE_STANDALONE && !CONFIG_KSPLICE && !CONFIG_KSPLICE_MODULE */
/* Lifecycle stage of an update, as exposed through sysfs */
enum stage {
	STAGE_PREPARING,	/* the update is not yet applied */
	STAGE_APPLIED,		/* the update is applied */
	STAGE_REVERSED,		/* the update has been applied and reversed */
};
/* parameter to modify run-pre matching */
enum run_pre_mode {
	RUN_PRE_INITIAL,	/* dry run (only change temp_labelvals) */
	RUN_PRE_DEBUG,		/* dry run with byte-by-byte debugging */
	RUN_PRE_FINAL,		/* finalizes the matching */
#ifndef CONFIG_FUNCTION_DATA_SECTIONS
	RUN_PRE_SILENT,
#endif /* !CONFIG_FUNCTION_DATA_SECTIONS */
};
/* status values for a labelval / temp_labelval entry */
enum { NOVAL, TEMP, VAL };
89 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
90 /* 5d7b32de9935c65ca8285ac6ec2382afdbb5d479 was after 2.6.8 */
91 #define __bitwise__
92 #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
93 /* af4ca457eaf2d6682059c18463eb106e2ce58198 was after 2.6.14 */
94 #define __bitwise__ __bitwise
95 #endif
97 typedef int __bitwise__ abort_t;
99 #define OK ((__force abort_t) 0)
100 #define NO_MATCH ((__force abort_t) 1)
101 #define CODE_BUSY ((__force abort_t) 2)
102 #define MODULE_BUSY ((__force abort_t) 3)
103 #define OUT_OF_MEMORY ((__force abort_t) 4)
104 #define FAILED_TO_FIND ((__force abort_t) 5)
105 #define ALREADY_REVERSED ((__force abort_t) 6)
106 #define MISSING_EXPORT ((__force abort_t) 7)
107 #define UNEXPECTED_RUNNING_TASK ((__force abort_t) 8)
108 #define UNEXPECTED ((__force abort_t) 9)
109 #define TARGET_NOT_LOADED ((__force abort_t) 10)
110 #define CALL_FAILED ((__force abort_t) 11)
111 #define COLD_UPDATE_LOADED ((__force abort_t) 12)
112 #ifdef KSPLICE_STANDALONE
113 #define BAD_SYSTEM_MAP ((__force abort_t) 13)
114 #endif /* KSPLICE_STANDALONE */
116 struct update {
117 const char *kid;
118 const char *name;
119 struct kobject kobj;
120 enum stage stage;
121 abort_t abort_cause;
122 int debug;
123 #ifdef CONFIG_DEBUG_FS
124 struct debugfs_blob_wrapper debug_blob;
125 struct dentry *debugfs_dentry;
126 #else /* !CONFIG_DEBUG_FS */
127 bool debug_continue_line;
128 #endif /* CONFIG_DEBUG_FS */
129 bool partial; /* is it OK if some target mods aren't loaded */
130 struct list_head changes, /* changes for loaded target mods */
131 unused_changes; /* changes for non-loaded target mods */
132 struct list_head conflicts;
133 struct list_head list;
134 struct list_head ksplice_module_list;
137 /* a process conflicting with an update */
138 struct conflict {
139 const char *process_name;
140 pid_t pid;
141 struct list_head stack;
142 struct list_head list;
145 /* an address on the stack of a conflict */
146 struct conflict_addr {
147 unsigned long addr; /* the address on the stack */
148 bool has_conflict; /* does this address in particular conflict? */
149 const char *label; /* the label of the conflicting safety_record */
150 struct list_head list;
153 #if defined(CONFIG_DEBUG_FS) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
154 /* Old kernels don't have debugfs_create_blob */
155 struct debugfs_blob_wrapper {
156 void *data;
157 unsigned long size;
159 #endif /* CONFIG_DEBUG_FS && LINUX_VERSION_CODE */
161 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
162 /* 930631edd4b1fe2781d9fe90edbe35d89dfc94cc was after 2.6.18 */
163 #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
164 #endif
166 struct labelval {
167 struct list_head list;
168 struct ksplice_symbol *symbol;
169 struct list_head *saved_vals;
172 /* region to be checked for conflicts in the stack check */
173 struct safety_record {
174 struct list_head list;
175 const char *label;
176 unsigned long addr; /* the address to be checked for conflicts
177 * (e.g. an obsolete function's starting addr)
179 unsigned long size; /* the size of the region to be checked */
182 /* possible value for a symbol */
183 struct candidate_val {
184 struct list_head list;
185 unsigned long val;
188 /* private struct used by init_symbol_array */
189 struct ksplice_lookup {
190 /* input */
191 struct ksplice_mod_change *change;
192 struct ksplice_symbol **arr;
193 size_t size;
194 /* output */
195 abort_t ret;
198 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
199 /* c6b37801911d7f4663c99cad8aa230bc934cea82 was after 2.6.29 */
200 struct symsearch {
201 const struct kernel_symbol *start, *stop;
202 const unsigned long *crcs;
203 enum {
204 NOT_GPL_ONLY,
205 GPL_ONLY,
206 WILL_BE_GPL_ONLY,
207 } licence;
208 bool unused;
210 #endif /* LINUX_VERSION_CODE */
212 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
213 /* c33fa9f5609e918824446ef9a75319d4a802f1f4 was after 2.6.25 */
215 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
216 /* 2fff0a48416af891dce38fd425246e337831e0bb was after 2.6.19 */
217 static bool virtual_address_mapped(unsigned long addr)
219 char retval;
220 return probe_kernel_address(addr, retval) != -EFAULT;
222 #else /* LINUX_VERSION_CODE < */
223 static bool virtual_address_mapped(unsigned long addr);
224 #endif /* LINUX_VERSION_CODE */
226 static long probe_kernel_read(void *dst, void *src, size_t size)
228 if (size == 0)
229 return 0;
230 if (!virtual_address_mapped((unsigned long)src) ||
231 !virtual_address_mapped((unsigned long)src + size - 1))
232 return -EFAULT;
234 memcpy(dst, src, size);
235 return 0;
237 #endif /* LINUX_VERSION_CODE */
239 static LIST_HEAD(updates);
240 #ifdef KSPLICE_STANDALONE
241 #if defined(CONFIG_KSPLICE) || defined(CONFIG_KSPLICE_MODULE)
242 extern struct list_head ksplice_modules;
243 #else /* !CONFIG_KSPLICE */
244 LIST_HEAD(ksplice_modules);
245 #endif /* CONFIG_KSPLICE */
246 #else /* !KSPLICE_STANDALONE */
247 LIST_HEAD(ksplice_modules);
248 EXPORT_SYMBOL_GPL(ksplice_modules);
249 static struct kobject *ksplice_kobj;
250 #endif /* KSPLICE_STANDALONE */
252 static struct kobj_type update_ktype;
254 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
255 /* Old kernels do not have kcalloc
256 * e629946abd0bb8266e9c3d0fd1bff2ef8dec5443 was after 2.6.8
258 static void *kcalloc(size_t n, size_t size, typeof(GFP_KERNEL) flags)
260 char *mem;
261 if (n != 0 && size > ULONG_MAX / n)
262 return NULL;
263 mem = kmalloc(n * size, flags);
264 if (mem)
265 memset(mem, 0, n * size);
266 return mem;
268 #endif /* LINUX_VERSION_CODE */
270 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
271 /* 8c63b6d337534a6b5fb111dc27d0850f535118c0 was after 2.6.11 */
272 static void u32_swap(void *a, void *b, int size)
274 u32 t = *(u32 *)a;
275 *(u32 *)a = *(u32 *)b;
276 *(u32 *)b = t;
/* Byte-by-byte swap of two equally sized buffers; generic fallback
 * for sort().  size must be >= 1. */
static void generic_swap(void *a, void *b, int size)
{
	char *x = a, *y = b;

	do {
		char t = *x;
		*x++ = *y;
		*y++ = t;
	} while (--size > 0);
}
291 * sort - sort an array of elements
292 * @base: pointer to data to sort
293 * @num: number of elements
294 * @size: size of each element
295 * @cmp: pointer to comparison function
296 * @swap: pointer to swap function or NULL
298 * This function does a heapsort on the given array. You may provide a
299 * swap function optimized to your element type.
301 * Sorting time is O(n log n) both on average and worst-case. While
302 * qsort is about 20% faster on average, it suffers from exploitable
303 * O(n*n) worst-case behavior and extra memory requirements that make
304 * it less suitable for kernel use.
307 void sort(void *base, size_t num, size_t size,
308 int (*cmp)(const void *, const void *),
309 void (*swap)(void *, void *, int size))
311 /* pre-scale counters for performance */
312 int i = (num / 2 - 1) * size, n = num * size, c, r;
314 if (!swap)
315 swap = (size == 4 ? u32_swap : generic_swap);
317 /* heapify */
318 for (; i >= 0; i -= size) {
319 for (r = i; r * 2 + size < n; r = c) {
320 c = r * 2 + size;
321 if (c < n - size && cmp(base + c, base + c + size) < 0)
322 c += size;
323 if (cmp(base + r, base + c) >= 0)
324 break;
325 swap(base + r, base + c, size);
329 /* sort */
330 for (i = n - size; i > 0; i -= size) {
331 swap(base, base + i, size);
332 for (r = 0; r * 2 + size < i; r = c) {
333 c = r * 2 + size;
334 if (c < i - size && cmp(base + c, base + c + size) < 0)
335 c += size;
336 if (cmp(base + r, base + c) >= 0)
337 break;
338 swap(base + r, base + c, size);
342 #endif /* LINUX_VERSION_CODE < */
344 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13)
345 /* Old kernels do not have kstrdup
346 * 543537bd922692bc978e2e356fcd8bfc9c2ee7d5 was after 2.6.12
348 #define kstrdup ksplice_kstrdup
349 static char *kstrdup(const char *s, typeof(GFP_KERNEL) gfp)
351 size_t len;
352 char *buf;
354 if (!s)
355 return NULL;
357 len = strlen(s) + 1;
358 buf = kmalloc(len, gfp);
359 if (buf)
360 memcpy(buf, s, len);
361 return buf;
363 #endif /* LINUX_VERSION_CODE */
365 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
366 /* Old kernels use semaphore instead of mutex
367 * 97d1f15b7ef52c1e9c28dc48b454024bb53a5fd2 was after 2.6.16
369 #define mutex semaphore
370 #define mutex_lock down
371 #define mutex_unlock up
372 #endif /* LINUX_VERSION_CODE */
374 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
375 /* 11443ec7d9286dd25663516436a14edfb5f43857 was after 2.6.21 */
376 static char * __attribute_used__
377 kvasprintf(typeof(GFP_KERNEL) gfp, const char *fmt, va_list ap)
379 unsigned int len;
380 char *p, dummy[1];
381 va_list aq;
383 va_copy(aq, ap);
384 len = vsnprintf(dummy, 0, fmt, aq);
385 va_end(aq);
387 p = kmalloc(len + 1, gfp);
388 if (!p)
389 return NULL;
391 vsnprintf(p, len + 1, fmt, ap);
393 return p;
395 #endif
397 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
398 /* e905914f96e11862b130dd229f73045dad9a34e8 was after 2.6.17 */
399 static char * __attribute__((format (printf, 2, 3)))
400 kasprintf(typeof(GFP_KERNEL) gfp, const char *fmt, ...)
402 va_list ap;
403 char *p;
405 va_start(ap, fmt);
406 p = kvasprintf(gfp, fmt, ap);
407 va_end(ap);
409 return p;
411 #endif
413 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
414 /* 06b2a76d25d3cfbd14680021c1d356c91be6904e was after 2.6.24 */
415 static int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
417 char *tail;
418 unsigned long val;
419 size_t len;
421 *res = 0;
422 len = strlen(cp);
423 if (len == 0)
424 return -EINVAL;
426 val = simple_strtoul(cp, &tail, base);
427 if ((*tail == '\0') ||
428 ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
429 *res = val;
430 return 0;
433 return -EINVAL;
435 #endif
437 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27)
438 /* 9b1a4d38373a5581a4e01032a3ccdd94cd93477b was after 2.6.26 */
439 /* Assume cpus == NULL. */
440 #define stop_machine(fn, data, cpus) stop_machine_run(fn, data, NR_CPUS);
441 #endif /* LINUX_VERSION_CODE */
443 #ifndef task_thread_info
444 #define task_thread_info(task) (task)->thread_info
445 #endif /* !task_thread_info */
447 #ifdef KSPLICE_STANDALONE
449 #ifdef do_each_thread_ve /* OpenVZ kernels define this */
450 #define do_each_thread do_each_thread_all
451 #define while_each_thread while_each_thread_all
452 #endif
454 static bool bootstrapped = false;
456 /* defined by ksplice-create */
457 extern const struct ksplice_reloc ksplice_init_relocs[],
458 ksplice_init_relocs_end[];
460 #endif /* KSPLICE_STANDALONE */
462 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
463 /* c6b37801911d7f4663c99cad8aa230bc934cea82 was after 2.6.29 */
464 extern struct list_head modules;
465 extern struct mutex module_mutex;
466 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) && defined(CONFIG_UNUSED_SYMBOLS)
467 /* f71d20e961474dde77e6558396efb93d6ac80a4b was after 2.6.17 */
468 #define KSPLICE_KSYMTAB_UNUSED_SUPPORT 1
469 #endif /* LINUX_VERSION_CODE */
470 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
471 /* 9f28bb7e1d0188a993403ab39b774785892805e1 was after 2.6.16 */
472 #define KSPLICE_KSYMTAB_FUTURE_SUPPORT 1
473 #endif /* LINUX_VERSION_CODE */
474 extern const struct kernel_symbol __start___ksymtab[];
475 extern const struct kernel_symbol __stop___ksymtab[];
476 extern const unsigned long __start___kcrctab[];
477 extern const struct kernel_symbol __start___ksymtab_gpl[];
478 extern const struct kernel_symbol __stop___ksymtab_gpl[];
479 extern const unsigned long __start___kcrctab_gpl[];
480 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
481 extern const struct kernel_symbol __start___ksymtab_unused[];
482 extern const struct kernel_symbol __stop___ksymtab_unused[];
483 extern const unsigned long __start___kcrctab_unused[];
484 extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
485 extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
486 extern const unsigned long __start___kcrctab_unused_gpl[];
487 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
488 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
489 extern const struct kernel_symbol __start___ksymtab_gpl_future[];
490 extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
491 extern const unsigned long __start___kcrctab_gpl_future[];
492 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
493 #endif /* LINUX_VERSION_CODE */
495 static struct update *init_ksplice_update(const char *kid);
496 static void cleanup_ksplice_update(struct update *update);
497 static void maybe_cleanup_ksplice_update(struct update *update);
498 static void add_to_update(struct ksplice_mod_change *change,
499 struct update *update);
500 static int ksplice_sysfs_init(struct update *update);
502 /* Preparing the relocations and patches for application */
503 static abort_t apply_update(struct update *update);
504 static abort_t reverse_update(struct update *update);
505 static abort_t prepare_change(struct ksplice_mod_change *change);
506 static abort_t finalize_change(struct ksplice_mod_change *change);
507 static abort_t finalize_patches(struct ksplice_mod_change *change);
508 static abort_t add_dependency_on_address(struct ksplice_mod_change *change,
509 unsigned long addr);
510 static abort_t map_trampoline_pages(struct update *update);
511 static void unmap_trampoline_pages(struct update *update);
512 static void *map_writable(void *addr, size_t len);
513 static abort_t apply_relocs(struct ksplice_mod_change *change,
514 const struct ksplice_reloc *relocs,
515 const struct ksplice_reloc *relocs_end);
516 static abort_t apply_reloc(struct ksplice_mod_change *change,
517 const struct ksplice_reloc *r);
518 static abort_t apply_howto_reloc(struct ksplice_mod_change *change,
519 const struct ksplice_reloc *r);
520 static abort_t apply_howto_date(struct ksplice_mod_change *change,
521 const struct ksplice_reloc *r);
522 static abort_t read_reloc_value(struct ksplice_mod_change *change,
523 const struct ksplice_reloc *r,
524 unsigned long addr, unsigned long *valp);
525 static abort_t write_reloc_value(struct ksplice_mod_change *change,
526 const struct ksplice_reloc *r,
527 unsigned long addr, unsigned long sym_addr);
528 static abort_t create_module_list_entry(struct ksplice_mod_change *change,
529 bool to_be_applied);
530 static void cleanup_module_list_entries(struct update *update);
531 static void __attribute__((noreturn)) ksplice_deleted(void);
533 /* run-pre matching */
534 static abort_t match_change_sections(struct ksplice_mod_change *change,
535 bool consider_data_sections);
536 static abort_t find_section(struct ksplice_mod_change *change,
537 struct ksplice_section *sect);
538 static abort_t try_addr(struct ksplice_mod_change *change,
539 struct ksplice_section *sect,
540 unsigned long run_addr,
541 struct list_head *safety_records,
542 enum run_pre_mode mode);
543 static abort_t run_pre_cmp(struct ksplice_mod_change *change,
544 const struct ksplice_section *sect,
545 unsigned long run_addr,
546 struct list_head *safety_records,
547 enum run_pre_mode mode);
548 #ifndef CONFIG_FUNCTION_DATA_SECTIONS
549 /* defined in arch/ARCH/kernel/ksplice-arch.c */
550 static abort_t arch_run_pre_cmp(struct ksplice_mod_change *change,
551 struct ksplice_section *sect,
552 unsigned long run_addr,
553 struct list_head *safety_records,
554 enum run_pre_mode mode);
555 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
556 static void print_bytes(struct ksplice_mod_change *change,
557 const unsigned char *run, int runc,
558 const unsigned char *pre, int prec);
559 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
560 static abort_t brute_search(struct ksplice_mod_change *change,
561 struct ksplice_section *sect,
562 const void *start, unsigned long len,
563 struct list_head *vals);
564 static abort_t brute_search_all(struct ksplice_mod_change *change,
565 struct ksplice_section *sect,
566 struct list_head *vals);
567 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
568 static const struct ksplice_reloc *
569 init_reloc_search(struct ksplice_mod_change *change,
570 const struct ksplice_section *sect);
571 static const struct ksplice_reloc *find_reloc(const struct ksplice_reloc *start,
572 const struct ksplice_reloc *end,
573 unsigned long address,
574 unsigned long size);
575 static abort_t lookup_reloc(struct ksplice_mod_change *change,
576 const struct ksplice_reloc **fingerp,
577 unsigned long addr,
578 const struct ksplice_reloc **relocp);
579 static abort_t handle_reloc(struct ksplice_mod_change *change,
580 const struct ksplice_section *sect,
581 const struct ksplice_reloc *r,
582 unsigned long run_addr, enum run_pre_mode mode);
583 static abort_t handle_howto_date(struct ksplice_mod_change *change,
584 const struct ksplice_section *sect,
585 const struct ksplice_reloc *r,
586 unsigned long run_addr,
587 enum run_pre_mode mode);
588 static abort_t handle_howto_reloc(struct ksplice_mod_change *change,
589 const struct ksplice_section *sect,
590 const struct ksplice_reloc *r,
591 unsigned long run_addr,
592 enum run_pre_mode mode);
593 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
594 #ifdef CONFIG_BUG
595 static abort_t handle_bug(struct ksplice_mod_change *change,
596 const struct ksplice_reloc *r,
597 unsigned long run_addr);
598 #endif /* CONFIG_BUG */
599 #else /* LINUX_VERSION_CODE < */
600 /* 7664c5a1da4711bb6383117f51b94c8dc8f3f1cd was after 2.6.19 */
601 #endif /* LINUX_VERSION_CODE */
602 static abort_t handle_extable(struct ksplice_mod_change *change,
603 const struct ksplice_reloc *r,
604 unsigned long run_addr);
605 static struct ksplice_section *symbol_section(struct ksplice_mod_change *change,
606 const struct ksplice_symbol *sym);
607 static int compare_section_labels(const void *va, const void *vb);
608 static int symbol_section_bsearch_compare(const void *a, const void *b);
609 static const struct ksplice_reloc *
610 patch_reloc(struct ksplice_mod_change *change,
611 const struct ksplice_patch *p);
613 /* Computing possible addresses for symbols */
614 static abort_t lookup_symbol(struct ksplice_mod_change *change,
615 const struct ksplice_symbol *ksym,
616 struct list_head *vals);
617 static void cleanup_symbol_arrays(struct ksplice_mod_change *change);
618 static abort_t init_symbol_arrays(struct ksplice_mod_change *change);
619 static abort_t init_symbol_array(struct ksplice_mod_change *change,
620 struct ksplice_symbol *start,
621 struct ksplice_symbol *end);
622 static abort_t uniquify_symbols(struct ksplice_mod_change *change);
623 static abort_t add_matching_values(struct ksplice_lookup *lookup,
624 const char *sym_name, unsigned long sym_val);
625 static bool add_export_values(const struct symsearch *syms,
626 struct module *owner,
627 unsigned int symnum, void *data);
628 static int symbolp_bsearch_compare(const void *key, const void *elt);
629 static int compare_symbolp_names(const void *a, const void *b);
630 static int compare_symbolp_labels(const void *a, const void *b);
631 #ifdef CONFIG_KALLSYMS
632 static int add_kallsyms_values(void *data, const char *name,
633 struct module *owner, unsigned long val);
634 #endif /* CONFIG_KALLSYMS */
635 #ifdef KSPLICE_STANDALONE
636 static abort_t
637 add_system_map_candidates(struct ksplice_mod_change *change,
638 const struct ksplice_system_map *start,
639 const struct ksplice_system_map *end,
640 const char *label, struct list_head *vals);
641 static int compare_system_map(const void *a, const void *b);
642 static int system_map_bsearch_compare(const void *key, const void *elt);
643 #endif /* KSPLICE_STANDALONE */
644 static abort_t new_export_lookup(struct ksplice_mod_change *ichange,
645 const char *name, struct list_head *vals);
647 /* Atomic update trampoline insertion and removal */
648 static abort_t patch_action(struct update *update, enum ksplice_action action);
649 static int __apply_patches(void *update);
650 static int __reverse_patches(void *update);
651 static abort_t check_each_task(struct update *update);
652 static abort_t check_task(struct update *update,
653 const struct task_struct *t, bool rerun);
654 static abort_t check_stack(struct update *update, struct conflict *conf,
655 const struct thread_info *tinfo,
656 const unsigned long *stack);
657 static abort_t check_address(struct update *update,
658 struct conflict *conf, unsigned long addr);
659 static abort_t check_record(struct conflict_addr *ca,
660 const struct safety_record *rec,
661 unsigned long addr);
662 static bool is_stop_machine(const struct task_struct *t);
663 static void cleanup_conflicts(struct update *update);
664 static void print_conflicts(struct update *update);
665 static void insert_trampoline(struct ksplice_patch *p);
666 static abort_t verify_trampoline(struct ksplice_mod_change *change,
667 const struct ksplice_patch *p);
668 static void remove_trampoline(const struct ksplice_patch *p);
670 static abort_t create_labelval(struct ksplice_mod_change *change,
671 struct ksplice_symbol *ksym,
672 unsigned long val, int status);
673 static abort_t create_safety_record(struct ksplice_mod_change *change,
674 const struct ksplice_section *sect,
675 struct list_head *record_list,
676 unsigned long run_addr,
677 unsigned long run_size);
678 static abort_t add_candidate_val(struct ksplice_mod_change *change,
679 struct list_head *vals, unsigned long val);
680 static void release_vals(struct list_head *vals);
681 static void set_temp_labelvals(struct ksplice_mod_change *change, int status);
683 static int contains_canary(struct ksplice_mod_change *change,
684 unsigned long blank_addr,
685 const struct ksplice_reloc_howto *howto);
686 static unsigned long follow_trampolines(struct ksplice_mod_change *change,
687 unsigned long addr);
688 static bool patches_module(const struct module *a, const struct module *b);
689 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
690 /* 66f92cf9d415e96a5bdd6c64de8dd8418595d2fc was after 2.6.29 */
691 static bool strstarts(const char *str, const char *prefix);
692 #endif /* LINUX_VERSION_CODE */
693 static bool singular(struct list_head *list);
694 static void *bsearch(const void *key, const void *base, size_t n,
695 size_t size, int (*cmp)(const void *key, const void *elt));
696 static int compare_relocs(const void *a, const void *b);
697 static int reloc_bsearch_compare(const void *key, const void *elt);
699 /* Debugging */
700 static abort_t init_debug_buf(struct update *update);
701 static void clear_debug_buf(struct update *update);
702 static int __attribute__((format(printf, 2, 3)))
703 _ksdebug(struct update *update, const char *fmt, ...);
704 #define ksdebug(change, fmt, ...) \
705 _ksdebug(change->update, fmt, ## __VA_ARGS__)
707 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) && defined(CONFIG_KALLSYMS)
708 /* 75a66614db21007bcc8c37f9c5d5b922981387b9 was after 2.6.29 */
709 static int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
710 struct module *, unsigned long),
711 void *data);
712 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
713 static unsigned int kallsyms_expand_symbol(unsigned int off, char *result);
714 #endif /* LINUX_VERSION_CODE */
715 static int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
716 struct module *,
717 unsigned long),
718 void *data);
719 #endif /* LINUX_VERSION_CODE && CONFIG_KALLSYMS */
721 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
722 /* c6b37801911d7f4663c99cad8aa230bc934cea82 was after 2.6.29 */
723 static struct module *find_module(const char *name);
724 static int use_module(struct module *a, struct module *b);
725 static const struct kernel_symbol *find_symbol(const char *name,
726 struct module **owner,
727 const unsigned long **crc,
728 bool gplok, bool warn);
729 static bool each_symbol(bool (*fn)(const struct symsearch *arr,
730 struct module *owner,
731 unsigned int symnum, void *data),
732 void *data);
733 static struct module *__module_address(unsigned long addr);
734 #endif /* LINUX_VERSION_CODE */
736 /* Architecture-specific functions defined in arch/ARCH/kernel/ksplice-arch.c */
738 /* Prepare a trampoline for the given patch */
739 static abort_t prepare_trampoline(struct ksplice_mod_change *change,
740 struct ksplice_patch *p);
741 /* What address does the trampoline at addr jump to? */
742 static abort_t trampoline_target(struct ksplice_mod_change *change,
743 unsigned long addr, unsigned long *new_addr);
744 /* Hook to handle pc-relative jumps inserted by parainstructions */
745 static abort_t handle_paravirt(struct ksplice_mod_change *change,
746 unsigned long pre, unsigned long run,
747 int *matched);
748 /* Is address p on the stack of the given thread? */
749 static bool valid_stack_ptr(const struct thread_info *tinfo, const void *p);
751 #ifndef KSPLICE_STANDALONE
752 #include "ksplice-arch.c"
753 #elif defined CONFIG_X86
754 #include "x86/ksplice-arch.c"
755 #elif defined CONFIG_ARM
756 #include "arm/ksplice-arch.c"
757 #endif /* KSPLICE_STANDALONE */
/* Delete and kfree every entry of a list of structs of the given type,
 * where member is the list_head field inside the struct. */
#define clear_list(head, type, member)				\
	do {							\
		struct list_head *_pos, *_n;			\
		list_for_each_safe(_pos, _n, head) {		\
			list_del(_pos);				\
			kfree(list_entry(_pos, type, member));	\
		}						\
	} while (0)
769 * init_ksplice_mod_change() - Initializes a ksplice change
770 * @change: The change to be initialized. All of the public fields of the
771 * change and its associated data structures should be populated
772 * before this function is called. The values of the private
773 * fields will be ignored.
775 int init_ksplice_mod_change(struct ksplice_mod_change *change)
777 struct update *update;
778 struct ksplice_patch *p;
779 struct ksplice_section *s;
780 int ret = 0;
782 #ifdef KSPLICE_STANDALONE
783 if (!bootstrapped)
784 return -1;
785 #endif /* KSPLICE_STANDALONE */
787 INIT_LIST_HEAD(&change->temp_labelvals);
788 INIT_LIST_HEAD(&change->safety_records);
790 sort(change->old_code.relocs,
791 change->old_code.relocs_end - change->old_code.relocs,
792 sizeof(*change->old_code.relocs), compare_relocs, NULL);
793 sort(change->new_code.relocs,
794 change->new_code.relocs_end - change->new_code.relocs,
795 sizeof(*change->new_code.relocs), compare_relocs, NULL);
796 sort(change->old_code.sections,
797 change->old_code.sections_end - change->old_code.sections,
798 sizeof(*change->old_code.sections), compare_section_labels, NULL);
799 #ifdef KSPLICE_STANDALONE
800 sort(change->new_code.system_map,
801 change->new_code.system_map_end - change->new_code.system_map,
802 sizeof(*change->new_code.system_map), compare_system_map, NULL);
803 sort(change->old_code.system_map,
804 change->old_code.system_map_end - change->old_code.system_map,
805 sizeof(*change->old_code.system_map), compare_system_map, NULL);
806 #endif /* KSPLICE_STANDALONE */
808 for (p = change->patches; p < change->patches_end; p++)
809 p->vaddr = NULL;
810 for (s = change->old_code.sections; s < change->old_code.sections_end;
811 s++)
812 s->match_map = NULL;
813 for (p = change->patches; p < change->patches_end; p++) {
814 const struct ksplice_reloc *r = patch_reloc(change, p);
815 if (r == NULL)
816 return -ENOENT;
817 if (p->type == KSPLICE_PATCH_DATA) {
818 s = symbol_section(change, r->symbol);
819 if (s == NULL)
820 return -ENOENT;
821 /* Ksplice creates KSPLICE_PATCH_DATA patches in order
822 * to modify rodata sections that have been explicitly
823 * marked for patching using the ksplice-patch.h macro
824 * ksplice_assume_rodata. Here we modify the section
825 * flags appropriately.
827 if (s->flags & KSPLICE_SECTION_DATA)
828 s->flags = (s->flags & ~KSPLICE_SECTION_DATA) |
829 KSPLICE_SECTION_RODATA;
833 mutex_lock(&module_mutex);
834 list_for_each_entry(update, &updates, list) {
835 if (strcmp(change->kid, update->kid) == 0) {
836 if (update->stage != STAGE_PREPARING) {
837 ret = -EPERM;
838 goto out;
840 add_to_update(change, update);
841 ret = 0;
842 goto out;
845 update = init_ksplice_update(change->kid);
846 if (update == NULL) {
847 ret = -ENOMEM;
848 goto out;
850 ret = ksplice_sysfs_init(update);
851 if (ret != 0) {
852 cleanup_ksplice_update(update);
853 goto out;
855 add_to_update(change, update);
856 out:
857 mutex_unlock(&module_mutex);
858 return ret;
860 EXPORT_SYMBOL_GPL(init_ksplice_mod_change);
863 * cleanup_ksplice_mod_change() - Cleans up a change if appropriate
864 * @change: The change to be cleaned up
866 * cleanup_ksplice_mod_change is currently called twice for each
867 * Ksplice update; once when the old_code module is unloaded, and once
868 * when the new_code module is unloaded. The extra call is used to
869 * avoid leaks if you unload the old_code without applying the update.
void cleanup_ksplice_mod_change(struct ksplice_mod_change *change)
{
	/* Nothing to do if the change was never attached to an update. */
	if (change->update == NULL)
		return;

	mutex_lock(&module_mutex);
	if (change->update->stage == STAGE_APPLIED) {
		/* If the change wasn't actually applied (because we
		 * only applied this update to loaded modules and this
		 * target was not loaded), then unregister the change
		 * from the list of unused changes.
		 */
		struct ksplice_mod_change *c;
		bool found = false;

		list_for_each_entry(c, &change->update->unused_changes, list) {
			if (c == change)
				found = true;
		}
		if (found)
			list_del(&change->list);
		mutex_unlock(&module_mutex);
		return;
	}
	list_del(&change->list);
	/* If this was the update's last change, drop the update too. */
	if (change->update->stage == STAGE_PREPARING)
		maybe_cleanup_ksplice_update(change->update);
	change->update = NULL;
	mutex_unlock(&module_mutex);
}
EXPORT_SYMBOL_GPL(cleanup_ksplice_mod_change);
903 static struct update *init_ksplice_update(const char *kid)
905 struct update *update;
906 update = kcalloc(1, sizeof(struct update), GFP_KERNEL);
907 if (update == NULL)
908 return NULL;
909 update->name = kasprintf(GFP_KERNEL, "ksplice_%s", kid);
910 if (update->name == NULL) {
911 kfree(update);
912 return NULL;
914 update->kid = kstrdup(kid, GFP_KERNEL);
915 if (update->kid == NULL) {
916 kfree(update->name);
917 kfree(update);
918 return NULL;
920 if (try_module_get(THIS_MODULE) != 1) {
921 kfree(update->kid);
922 kfree(update->name);
923 kfree(update);
924 return NULL;
926 INIT_LIST_HEAD(&update->changes);
927 INIT_LIST_HEAD(&update->unused_changes);
928 INIT_LIST_HEAD(&update->ksplice_module_list);
929 if (init_debug_buf(update) != OK) {
930 module_put(THIS_MODULE);
931 kfree(update->kid);
932 kfree(update->name);
933 kfree(update);
934 return NULL;
936 list_add(&update->list, &updates);
937 update->stage = STAGE_PREPARING;
938 update->abort_cause = OK;
939 update->partial = 0;
940 INIT_LIST_HEAD(&update->conflicts);
941 return update;
/* Unlink @update from the global list and free everything it owns. */
static void cleanup_ksplice_update(struct update *update)
{
	list_del(&update->list);
	cleanup_conflicts(update);
	clear_debug_buf(update);
	cleanup_module_list_entries(update);
	kfree(update->kid);
	kfree(update->name);
	kfree(update);
	/* Dropped last: pairs with try_module_get() in
	 * init_ksplice_update(). */
	module_put(THIS_MODULE);
}
/* Clean up the update if it no longer has any changes */
static void maybe_cleanup_ksplice_update(struct update *update)
{
	/* NOTE(review): dropping the kobject presumably runs the
	 * update_ktype release callback, which does the real teardown —
	 * confirm against update_ktype's release handler. */
	if (list_empty(&update->changes) && list_empty(&update->unused_changes))
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
		kobject_put(&update->kobj);
#else /* LINUX_VERSION_CODE < */
/* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
		kobject_unregister(&update->kobj);
#endif /* LINUX_VERSION_CODE */
}
/* Attach @change to @update.  It starts on the unused_changes list;
 * apply_update() moves it to the changes list once its target module
 * has been resolved. */
static void add_to_update(struct ksplice_mod_change *change,
			  struct update *update)
{
	change->update = update;
	list_add(&change->list, &update->unused_changes);
}
/* Register the update's kobject in sysfs and emit a KOBJ_ADD uevent.
 * Returns 0 on success or a negative errno from the kobject layer.
 * The nested #ifs track kobject API changes across kernel versions. */
static int ksplice_sysfs_init(struct update *update)
{
	int ret = 0;
	memset(&update->kobj, 0, sizeof(update->kobj));
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
#ifndef KSPLICE_STANDALONE
	ret = kobject_init_and_add(&update->kobj, &update_ktype,
				   ksplice_kobj, "%s", update->kid);
#else /* KSPLICE_STANDALONE */
	/* Standalone builds hang the kobject off this module's own
	 * sysfs entry instead of a shared ksplice directory. */
	ret = kobject_init_and_add(&update->kobj, &update_ktype,
				   &THIS_MODULE->mkobj.kobj, "ksplice");
#endif /* KSPLICE_STANDALONE */
#else /* LINUX_VERSION_CODE < */
/* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
	ret = kobject_set_name(&update->kobj, "%s", "ksplice");
	if (ret != 0)
		return ret;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
	update->kobj.parent = &THIS_MODULE->mkobj.kobj;
#else /* LINUX_VERSION_CODE < */
/* b86ab02803095190d6b72bcc18dcf620bf378df9 was after 2.6.10 */
	update->kobj.parent = &THIS_MODULE->mkobj->kobj;
#endif /* LINUX_VERSION_CODE */
	update->kobj.ktype = &update_ktype;
	ret = kobject_register(&update->kobj);
#endif /* LINUX_VERSION_CODE */
	if (ret != 0)
		return ret;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
	kobject_uevent(&update->kobj, KOBJ_ADD);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
/* 312c004d36ce6c739512bac83b452f4c20ab1f62 was after 2.6.14 */
/* 12025235884570ba7f02a6f427f973ac6be7ec54 was after 2.6.9 */
	kobject_uevent(&update->kobj, KOBJ_ADD, NULL);
#endif /* LINUX_VERSION_CODE */
	return 0;
}
/* Resolve each change's target module, build safety records for the
 * new code, run-pre match the old code, and apply the patches. */
static abort_t apply_update(struct update *update)
{
	struct ksplice_mod_change *change, *n;
	abort_t ret;
	int retval;

	list_for_each_entry(change, &update->changes, list) {
		ret = create_module_list_entry(change, true);
		if (ret != OK)
			goto out;
	}

	/* Move each unused change whose target is available onto the
	 * active changes list; _safe iteration because we unlink. */
	list_for_each_entry_safe(change, n, &update->unused_changes, list) {
		if (strcmp(change->target_name, "vmlinux") == 0) {
			/* NULL target means the running kernel itself. */
			change->target = NULL;
		} else if (change->target == NULL) {
			change->target = find_module(change->target_name);
			if (change->target == NULL ||
			    !module_is_live(change->target)) {
				/* Missing target is fatal unless this is
				 * a partial update. */
				if (!update->partial) {
					ret = TARGET_NOT_LOADED;
					goto out;
				}
				ret = create_module_list_entry(change, false);
				if (ret != OK)
					goto out;
				continue;
			}
			retval = use_module(change->new_code_mod,
					    change->target);
			if (retval != 1) {
				ret = UNEXPECTED;
				goto out;
			}
		}
		ret = create_module_list_entry(change, true);
		if (ret != OK)
			goto out;
		list_del(&change->list);
		list_add_tail(&change->list, &update->changes);

#ifdef KSPLICE_NEED_PARAINSTRUCTIONS
		if (change->target == NULL) {
			apply_paravirt(change->new_code.parainstructions,
				       change->new_code.parainstructions_end);
			apply_paravirt(change->old_code.parainstructions,
				       change->old_code.parainstructions_end);
		}
#endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
	}

	/* Every new_code section gets a safety record so the stack
	 * checker can refuse to patch code that is mid-execution. */
	list_for_each_entry(change, &update->changes, list) {
		const struct ksplice_section *sect;
		for (sect = change->new_code.sections;
		     sect < change->new_code.sections_end; sect++) {
			struct safety_record *rec = kmalloc(sizeof(*rec),
							    GFP_KERNEL);
			if (rec == NULL) {
				ret = OUT_OF_MEMORY;
				goto out;
			}
			rec->addr = sect->address;
			rec->size = sect->size;
			rec->label = sect->symbol->label;
			list_add(&rec->list, &change->safety_records);
		}
	}

	list_for_each_entry(change, &update->changes, list) {
		ret = init_symbol_arrays(change);
		if (ret != OK) {
			cleanup_symbol_arrays(change);
			goto out;
		}
		ret = prepare_change(change);
		cleanup_symbol_arrays(change);
		if (ret != OK)
			goto out;
	}
	ret = patch_action(update, KS_APPLY);
out:
	/* Unconditional cleanup: drop run-pre match maps, and on
	 * failure (still STAGE_PREPARING) the safety records too. */
	list_for_each_entry(change, &update->changes, list) {
		struct ksplice_section *s;
		if (update->stage == STAGE_PREPARING)
			clear_list(&change->safety_records,
				   struct safety_record, list);
		for (s = change->old_code.sections;
		     s < change->old_code.sections_end; s++) {
			if (s->match_map != NULL) {
				vfree(s->match_map);
				s->match_map = NULL;
			}
		}
	}
	if (update->stage == STAGE_PREPARING)
		cleanup_module_list_entries(update);

	if (ret == OK)
		printk(KERN_INFO "ksplice: Update %s applied successfully\n",
		       update->kid);
	return ret;
}
/* Reverse an applied update: undo its patches and discard the safety
 * records collected when it was applied. */
static abort_t reverse_update(struct update *update)
{
	abort_t ret;
	struct ksplice_mod_change *change;

	/* Start a fresh debug log for the reversal. */
	clear_debug_buf(update);
	ret = init_debug_buf(update);
	if (ret != OK)
		return ret;

	_ksdebug(update, "Preparing to reverse %s\n", update->kid);

	ret = patch_action(update, KS_REVERSE);
	if (ret != OK)
		return ret;

	list_for_each_entry(change, &update->changes, list)
		clear_list(&change->safety_records, struct safety_record, list);

	printk(KERN_INFO "ksplice: Update %s reversed successfully\n",
	       update->kid);
	return OK;
}
1140 static int compare_symbolp_names(const void *a, const void *b)
1142 const struct ksplice_symbol *const *sympa = a, *const *sympb = b;
1143 if ((*sympa)->name == NULL && (*sympb)->name == NULL)
1144 return 0;
1145 if ((*sympa)->name == NULL)
1146 return -1;
1147 if ((*sympb)->name == NULL)
1148 return 1;
1149 return strcmp((*sympa)->name, (*sympb)->name);
1152 static int compare_symbolp_labels(const void *a, const void *b)
1154 const struct ksplice_symbol *const *sympa = a, *const *sympb = b;
1155 return strcmp((*sympa)->label, (*sympb)->label);
1158 static int symbolp_bsearch_compare(const void *key, const void *elt)
1160 const char *name = key;
1161 const struct ksplice_symbol *const *symp = elt;
1162 const struct ksplice_symbol *sym = *symp;
1163 if (sym->name == NULL)
1164 return 1;
1165 return strcmp(name, sym->name);
/* Record @sym_val as a candidate value for every symbol in the sorted
 * lookup array whose name equals @sym_name. */
static abort_t add_matching_values(struct ksplice_lookup *lookup,
				   const char *sym_name, unsigned long sym_val)
{
	struct ksplice_symbol **symp;
	abort_t ret;

	symp = bsearch(sym_name, lookup->arr, lookup->size,
		       sizeof(*lookup->arr), symbolp_bsearch_compare);
	if (symp == NULL)
		return OK;

	/* bsearch may land anywhere inside a run of equal names;
	 * rewind to the first entry of the run. */
	while (symp > lookup->arr &&
	       symbolp_bsearch_compare(sym_name, symp - 1) == 0)
		symp--;

	/* Walk forward across the whole run of matching names. */
	for (; symp < lookup->arr + lookup->size; symp++) {
		struct ksplice_symbol *sym = *symp;
		if (sym->name == NULL || strcmp(sym_name, sym->name) != 0)
			break;
		ret = add_candidate_val(lookup->change,
					sym->candidate_vals, sym_val);
		if (ret != OK)
			return ret;
	}
	return OK;
}
#ifdef CONFIG_KALLSYMS
/* kallsyms_on_each_symbol() callback: feed each kallsyms entry into
 * add_matching_values().  Per the kallsyms iteration contract, a
 * nonzero return (a non-OK abort_t here) stops the walk. */
static int add_kallsyms_values(void *data, const char *name,
			       struct module *owner, unsigned long val)
{
	struct ksplice_lookup *lookup = data;
	/* Skip symbols from our own new_code module, and from modules
	 * that do not patch this change's target. */
	if (owner == lookup->change->new_code_mod ||
	    !patches_module(owner, lookup->change->target))
		return (__force int)OK;
	return (__force int)add_matching_values(lookup, name, val);
}
#endif /* CONFIG_KALLSYMS */
1207 static bool add_export_values(const struct symsearch *syms,
1208 struct module *owner,
1209 unsigned int symnum, void *data)
1211 struct ksplice_lookup *lookup = data;
1212 abort_t ret;
1214 ret = add_matching_values(lookup, syms->start[symnum].name,
1215 syms->start[symnum].value);
1216 if (ret != OK) {
1217 lookup->ret = ret;
1218 return true;
1220 return false;
1223 static void cleanup_symbol_arrays(struct ksplice_mod_change *change)
1225 struct ksplice_symbol *sym;
1226 for (sym = change->new_code.symbols; sym < change->new_code.symbols_end;
1227 sym++) {
1228 if (sym->candidate_vals != NULL) {
1229 clear_list(sym->candidate_vals, struct candidate_val,
1230 list);
1231 kfree(sym->candidate_vals);
1232 sym->candidate_vals = NULL;
1235 for (sym = change->old_code.symbols; sym < change->old_code.symbols_end;
1236 sym++) {
1237 if (sym->candidate_vals != NULL) {
1238 clear_list(sym->candidate_vals, struct candidate_val,
1239 list);
1240 kfree(sym->candidate_vals);
1241 sym->candidate_vals = NULL;
/*
 * The new_code and old_code modules each have their own independent
 * ksplice_symbol structures. uniquify_symbols unifies these separate
 * pieces of kernel symbol information by replacing all references to
 * the old_code copy of symbols with references to the new_code copy.
 */
static abort_t uniquify_symbols(struct ksplice_mod_change *change)
{
	struct ksplice_reloc *r;
	struct ksplice_section *s;
	struct ksplice_symbol *sym, **sym_arr, **symp;
	size_t size = change->new_code.symbols_end - change->new_code.symbols;

	if (size == 0)
		return OK;

	/* Build an array of pointers to the new_code symbols, sorted
	 * by label, so old_code references can be resolved by bsearch. */
	sym_arr = vmalloc(sizeof(*sym_arr) * size);
	if (sym_arr == NULL)
		return OUT_OF_MEMORY;

	for (symp = sym_arr, sym = change->new_code.symbols;
	     symp < sym_arr + size && sym < change->new_code.symbols_end;
	     sym++, symp++)
		*symp = sym;

	sort(sym_arr, size, sizeof(*sym_arr), compare_symbolp_labels, NULL);

	/* Point old_code relocations at the new_code symbol with the
	 * same label, keeping any name only the old copy knew. */
	for (r = change->old_code.relocs; r < change->old_code.relocs_end;
	     r++) {
		symp = bsearch(&r->symbol, sym_arr, size, sizeof(*sym_arr),
			       compare_symbolp_labels);
		if (symp != NULL) {
			if ((*symp)->name == NULL)
				(*symp)->name = r->symbol->name;
			r->symbol = *symp;
		}
	}

	/* Same redirection for the old_code sections' symbols. */
	for (s = change->old_code.sections; s < change->old_code.sections_end;
	     s++) {
		symp = bsearch(&s->symbol, sym_arr, size, sizeof(*sym_arr),
			       compare_symbolp_labels);
		if (symp != NULL) {
			if ((*symp)->name == NULL)
				(*symp)->name = s->symbol->name;
			s->symbol = *symp;
		}
	}

	vfree(sym_arr);
	return OK;
}
1300 * Initialize the ksplice_symbol structures in the given array using
1301 * the kallsyms and exported symbol tables.
1303 static abort_t init_symbol_array(struct ksplice_mod_change *change,
1304 struct ksplice_symbol *start,
1305 struct ksplice_symbol *end)
1307 struct ksplice_symbol *sym, **sym_arr, **symp;
1308 struct ksplice_lookup lookup;
1309 size_t size = end - start;
1310 abort_t ret;
1312 if (size == 0)
1313 return OK;
1315 for (sym = start; sym < end; sym++) {
1316 if (strstarts(sym->label, "__ksymtab")) {
1317 const struct kernel_symbol *ksym;
1318 const char *colon = strchr(sym->label, ':');
1319 const char *name = colon + 1;
1320 if (colon == NULL)
1321 continue;
1322 ksym = find_symbol(name, NULL, NULL, true, false);
1323 if (ksym == NULL) {
1324 ksdebug(change, "Could not find kernel_symbol "
1325 "structure for %s\n", name);
1326 continue;
1328 sym->value = (unsigned long)ksym;
1329 sym->candidate_vals = NULL;
1330 continue;
1333 sym->candidate_vals = kmalloc(sizeof(*sym->candidate_vals),
1334 GFP_KERNEL);
1335 if (sym->candidate_vals == NULL)
1336 return OUT_OF_MEMORY;
1337 INIT_LIST_HEAD(sym->candidate_vals);
1338 sym->value = 0;
1341 sym_arr = vmalloc(sizeof(*sym_arr) * size);
1342 if (sym_arr == NULL)
1343 return OUT_OF_MEMORY;
1345 for (symp = sym_arr, sym = start; symp < sym_arr + size && sym < end;
1346 sym++, symp++)
1347 *symp = sym;
1349 sort(sym_arr, size, sizeof(*sym_arr), compare_symbolp_names, NULL);
1351 lookup.change = change;
1352 lookup.arr = sym_arr;
1353 lookup.size = size;
1354 lookup.ret = OK;
1356 each_symbol(add_export_values, &lookup);
1357 ret = lookup.ret;
1358 #ifdef CONFIG_KALLSYMS
1359 if (ret == OK)
1360 ret = (__force abort_t)
1361 kallsyms_on_each_symbol(add_kallsyms_values, &lookup);
1362 #endif /* CONFIG_KALLSYMS */
1363 vfree(sym_arr);
1364 return ret;
1367 /* Prepare the change's ksplice_symbol structures for run-pre matching */
1368 static abort_t init_symbol_arrays(struct ksplice_mod_change *change)
1370 abort_t ret;
1372 ret = uniquify_symbols(change);
1373 if (ret != OK)
1374 return ret;
1376 ret = init_symbol_array(change, change->old_code.symbols,
1377 change->old_code.symbols_end);
1378 if (ret != OK)
1379 return ret;
1381 ret = init_symbol_array(change, change->new_code.symbols,
1382 change->new_code.symbols_end);
1383 if (ret != OK)
1384 return ret;
1386 return OK;
/* Run-pre match the change's sections and finalize the new code.
 * Returns OK only when every required section has been matched. */
static abort_t prepare_change(struct ksplice_mod_change *change)
{
	abort_t ret;

	ksdebug(change, "Preparing and checking %s\n", change->name);
	ret = match_change_sections(change, false);
	if (ret == NO_MATCH) {
		/* It is possible that by using relocations from .data sections
		 * we can successfully run-pre match the rest of the sections.
		 * To avoid using any symbols obtained from .data sections
		 * (which may be unreliable) in the post code, we first prepare
		 * the post code and then try to run-pre match the remaining
		 * sections with the help of .data sections.
		 */
		ksdebug(change, "Continuing without some sections; we might "
			"find them later.\n");
		ret = finalize_change(change);
		if (ret != OK) {
			ksdebug(change, "Aborted. Unable to continue without "
				"the unmatched sections.\n");
			return ret;
		}

		ksdebug(change, "run-pre: Considering .data sections to find "
			"the unmatched sections\n");
		ret = match_change_sections(change, true);
		if (ret != OK)
			return ret;

		ksdebug(change, "run-pre: Found all previously unmatched "
			"sections\n");
		return OK;
	} else if (ret != OK) {
		return ret;
	}

	return finalize_change(change);
}
1429 * Finish preparing the change for insertion into the kernel.
1430 * Afterwards, the replacement code should be ready to run and the
1431 * ksplice_patches should all be ready for trampoline insertion.
1433 static abort_t finalize_change(struct ksplice_mod_change *change)
1435 abort_t ret;
1436 ret = apply_relocs(change, change->new_code.relocs,
1437 change->new_code.relocs_end);
1438 if (ret != OK)
1439 return ret;
1441 ret = finalize_patches(change);
1442 if (ret != OK)
1443 return ret;
1445 return OK;
/* Validate each patch against the safety records and prepare its
 * trampoline; fill in the "deleted function" stub where needed. */
static abort_t finalize_patches(struct ksplice_mod_change *change)
{
	struct ksplice_patch *p;
	struct safety_record *rec;
	abort_t ret;

	for (p = change->patches; p < change->patches_end; p++) {
		bool found = false;
		list_for_each_entry(rec, &change->safety_records, list) {
			if (rec->addr <= p->oldaddr &&
			    p->oldaddr < rec->addr + rec->size) {
				found = true;
				break;
			}
		}
		if (!found && p->type != KSPLICE_PATCH_EXPORT) {
			const struct ksplice_reloc *r = patch_reloc(change, p);
			if (r == NULL) {
				ksdebug(change, "A patch with no reloc at its "
					"oldaddr has no safety record\n");
				return NO_MATCH;
			}
			ksdebug(change, "No safety record for patch with "
				"oldaddr %s+%lx\n", r->symbol->label,
				r->target_addend);
			return NO_MATCH;
		}
		if (p->type == KSPLICE_PATCH_TEXT) {
			ret = prepare_trampoline(change, p);
			if (ret != OK)
				return ret;
		}
		/* rec still points at the record matched by the break
		 * above whenever found is true. */
		if (found && rec->addr + rec->size < p->oldaddr + p->size) {
			ksdebug(change, "Safety record %s is too short for "
				"patch\n", rec->label);
			return UNEXPECTED;
		}
		if (p->type == KSPLICE_PATCH_TEXT) {
			/* A zero repladdr marks a deleted function;
			 * route callers to the ksplice_deleted stub. */
			if (p->repladdr == 0)
				p->repladdr = (unsigned long)ksplice_deleted;
		}
	}
	return OK;
}
/* noinline to prevent garbage on the stack from confusing check_stack */
static noinline abort_t map_trampoline_pages(struct update *update)
{
	struct ksplice_mod_change *change;
	list_for_each_entry(change, &update->changes, list) {
		struct ksplice_patch *p;
		for (p = change->patches; p < change->patches_end; p++) {
			/* Writable alias of the patch site; created
			 * before stop_machine because vmap cannot be
			 * called inside it (see map_writable). */
			p->vaddr = map_writable((void *)p->oldaddr, p->size);
			if (p->vaddr == NULL) {
				ksdebug(change,
					"Unable to map oldaddr read/write\n");
				/* Unwind any mappings already made. */
				unmap_trampoline_pages(update);
				return UNEXPECTED;
			}
		}
	}
	return OK;
}
1515 static void unmap_trampoline_pages(struct update *update)
1517 struct ksplice_mod_change *change;
1518 list_for_each_entry(change, &update->changes, list) {
1519 struct ksplice_patch *p;
1520 for (p = change->patches; p < change->patches_end; p++) {
1521 vunmap((void *)((unsigned long)p->vaddr & PAGE_MASK));
1522 p->vaddr = NULL;
1528 * map_writable creates a shadow page mapping of the range
1529 * [addr, addr + len) so that we can write to code mapped read-only.
1531 * It is similar to a generalized version of x86's text_poke. But
1532 * because one cannot use vmalloc/vfree() inside stop_machine, we use
1533 * map_writable to map the pages before stop_machine, then use the
1534 * mapping inside stop_machine, and unmap the pages afterwards.
1536 static void *map_writable(void *addr, size_t len)
1538 void *vaddr;
1539 int nr_pages = DIV_ROUND_UP(offset_in_page(addr) + len, PAGE_SIZE);
1540 struct page **pages = kmalloc(nr_pages * sizeof(*pages), GFP_KERNEL);
1541 void *page_addr = (void *)((unsigned long)addr & PAGE_MASK);
1542 int i;
1544 if (pages == NULL)
1545 return NULL;
1547 for (i = 0; i < nr_pages; i++) {
1548 if (__module_address((unsigned long)page_addr) == NULL) {
1549 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22) || !defined(CONFIG_X86_64)
1550 pages[i] = virt_to_page(page_addr);
1551 #else /* LINUX_VERSION_CODE < && CONFIG_X86_64 */
1552 /* e3ebadd95cb621e2c7436f3d3646447ac9d5c16d was after 2.6.21 */
1553 pages[i] =
1554 pfn_to_page(__pa_symbol(page_addr) >> PAGE_SHIFT);
1555 #endif /* LINUX_VERSION_CODE || !CONFIG_X86_64 */
1556 WARN_ON(!PageReserved(pages[i]));
1557 } else {
1558 pages[i] = vmalloc_to_page(addr);
1560 if (pages[i] == NULL) {
1561 kfree(pages);
1562 return NULL;
1564 page_addr += PAGE_SIZE;
1566 vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
1567 kfree(pages);
1568 if (vaddr == NULL)
1569 return NULL;
1570 return vaddr + offset_in_page(addr);
/*
 * Ksplice adds a dependency on any symbol address used to resolve
 * relocations in the new_code module.
 *
 * Be careful to follow_trampolines so that we always depend on the
 * latest version of the target function, since that's the code that
 * will run if we call addr.
 */
static abort_t add_dependency_on_address(struct ksplice_mod_change *change,
					 unsigned long addr)
{
	struct ksplice_mod_change *c;
	struct module *m =
	    __module_text_address(follow_trampolines(change, addr));
	/* Addresses in vmlinux (no owning module) need no dependency. */
	if (m == NULL)
		return OK;
	/* No dependency needed on this update's own new_code modules. */
	list_for_each_entry(c, &change->update->changes, list) {
		if (m == c->new_code_mod)
			return OK;
	}
	if (use_module(change->new_code_mod, m) != 1)
		return MODULE_BUSY;
	return OK;
}
1598 static abort_t apply_relocs(struct ksplice_mod_change *change,
1599 const struct ksplice_reloc *relocs,
1600 const struct ksplice_reloc *relocs_end)
1602 const struct ksplice_reloc *r;
1603 for (r = relocs; r < relocs_end; r++) {
1604 abort_t ret = apply_reloc(change, r);
1605 if (ret != OK)
1606 return ret;
1608 return OK;
1611 static abort_t apply_reloc(struct ksplice_mod_change *change,
1612 const struct ksplice_reloc *r)
1614 switch (r->howto->type) {
1615 case KSPLICE_HOWTO_RELOC:
1616 case KSPLICE_HOWTO_RELOC_PATCH:
1617 return apply_howto_reloc(change, r);
1618 case KSPLICE_HOWTO_DATE:
1619 case KSPLICE_HOWTO_TIME:
1620 return apply_howto_date(change, r);
1621 default:
1622 ksdebug(change, "Unexpected howto type %d\n", r->howto->type);
1623 return UNEXPECTED;
/*
 * Applies a relocation.  Aborts if the symbol referenced in it has
 * not been uniquely resolved.
 */
static abort_t apply_howto_reloc(struct ksplice_mod_change *change,
				 const struct ksplice_reloc *r)
{
	abort_t ret;
	int canary_ret;
	unsigned long sym_addr;
	LIST_HEAD(vals);

	canary_ret = contains_canary(change, r->blank_addr, r->howto);
	if (canary_ret < 0)
		return UNEXPECTED;
	if (canary_ret == 0) {
		/* No canary: the site was already filled in (e.g. by
		 * alternative-instruction patching); nothing to do. */
		ksdebug(change, "reloc: skipped %lx to %s+%lx (altinstr)\n",
			r->blank_addr, r->symbol->label, r->target_addend);
		return OK;
	}

#ifdef KSPLICE_STANDALONE
	if (!bootstrapped) {
		ret = add_system_map_candidates(change,
						change->new_code.system_map,
						change->new_code.system_map_end,
						r->symbol->label, &vals);
		if (ret != OK) {
			release_vals(&vals);
			return ret;
		}
	}
#endif /* KSPLICE_STANDALONE */
	ret = lookup_symbol(change, r->symbol, &vals);
	if (ret != OK) {
		release_vals(&vals);
		return ret;
	}
	/*
	 * Relocations for the oldaddr fields of patches must have
	 * been resolved via run-pre matching.
	 */
	if (!singular(&vals) || (r->symbol->candidate_vals != NULL &&
				 r->howto->type == KSPLICE_HOWTO_RELOC_PATCH)) {
		release_vals(&vals);
		ksdebug(change, "Failed to find %s for reloc\n",
			r->symbol->label);
		return FAILED_TO_FIND;
	}
	sym_addr = list_entry(vals.next, struct candidate_val, list)->val;
	release_vals(&vals);

	/* PC-relative howtos store the displacement from the site. */
	ret = write_reloc_value(change, r, r->blank_addr,
				r->howto->pcrel ? sym_addr - r->blank_addr :
				sym_addr);
	if (ret != OK)
		return ret;

	ksdebug(change, "reloc: %lx to %s+%lx (S=%lx ", r->blank_addr,
		r->symbol->label, r->target_addend, sym_addr);
	/* Debug-dump the bytes just written, sized per the howto. */
	switch (r->howto->size) {
	case 1:
		ksdebug(change, "aft=%02x)\n", *(uint8_t *)r->blank_addr);
		break;
	case 2:
		ksdebug(change, "aft=%04x)\n", *(uint16_t *)r->blank_addr);
		break;
	case 4:
		ksdebug(change, "aft=%08x)\n", *(uint32_t *)r->blank_addr);
		break;
#if BITS_PER_LONG >= 64
	case 8:
		ksdebug(change, "aft=%016llx)\n", *(uint64_t *)r->blank_addr);
		break;
#endif /* BITS_PER_LONG */
	default:
		ksdebug(change, "Aborted. Invalid relocation size.\n");
		return UNEXPECTED;
	}
#ifdef KSPLICE_STANDALONE
	if (!bootstrapped)
		return OK;
#endif /* KSPLICE_STANDALONE */

	/*
	 * Create labelvals so that we can verify our choices in the
	 * second round of run-pre matching that considers data sections.
	 */
	ret = create_labelval(change, r->symbol, sym_addr, VAL);
	if (ret != OK)
		return ret;

	return add_dependency_on_address(change, sym_addr);
}
/*
 * Date relocations are created wherever __DATE__ or __TIME__ is used
 * in the kernel; we resolve them by simply copying in the date/time
 * obtained from run-pre matching the relevant compilation unit.
 */
static abort_t apply_howto_date(struct ksplice_mod_change *change,
				const struct ksplice_reloc *r)
{
	/* A non-NULL candidate_vals list means run-pre matching never
	 * pinned this symbol's value, so there is no string to copy. */
	if (r->symbol->candidate_vals != NULL) {
		ksdebug(change, "Failed to find %s for date\n",
			r->symbol->label);
		return FAILED_TO_FIND;
	}
	memcpy((unsigned char *)r->blank_addr,
	       (const unsigned char *)r->symbol->value, r->howto->size);
	return OK;
}
/*
 * Given a relocation and its run address, compute the address of the
 * symbol the relocation referenced, and store it in *valp.
 */
static abort_t read_reloc_value(struct ksplice_mod_change *change,
				const struct ksplice_reloc *r,
				unsigned long addr, unsigned long *valp)
{
	unsigned char bytes[sizeof(long)];
	unsigned long val;
	const struct ksplice_reloc_howto *howto = r->howto;

	if (howto->size <= 0 || howto->size > sizeof(long)) {
		ksdebug(change, "Aborted. Invalid relocation size.\n");
		return UNEXPECTED;
	}

	/* The address may not be mapped; fail softly with NO_MATCH so
	 * run-pre matching can try other candidates. */
	if (probe_kernel_read(bytes, (void *)addr, howto->size) == -EFAULT)
		return NO_MATCH;

	switch (howto->size) {
	case 1:
		val = *(uint8_t *)bytes;
		break;
	case 2:
		val = *(uint16_t *)bytes;
		break;
	case 4:
		val = *(uint32_t *)bytes;
		break;
#if BITS_PER_LONG >= 64
	case 8:
		val = *(uint64_t *)bytes;
		break;
#endif /* BITS_PER_LONG */
	default:
		ksdebug(change, "Aborted. Invalid relocation size.\n");
		return UNEXPECTED;
	}

	/* Mask out the field; if the addend is signed, sign-extend from
	 * the field's top bit; then undo the shifts and addends that
	 * write_reloc_value applied. */
	val &= howto->dst_mask;
	if (howto->signed_addend)
		val |= -(val & (howto->dst_mask & ~(howto->dst_mask >> 1)));
	val <<= howto->rightshift;
	val -= r->insn_addend + r->target_addend;
	*valp = val;
	return OK;
}
/*
 * Given a relocation, the address of its storage unit, and the
 * address of the symbol the relocation references, write the
 * relocation's final value into the storage unit.
 */
static abort_t write_reloc_value(struct ksplice_mod_change *change,
				 const struct ksplice_reloc *r,
				 unsigned long addr, unsigned long sym_addr)
{
	unsigned long val = sym_addr + r->target_addend + r->insn_addend;
	const struct ksplice_reloc_howto *howto = r->howto;
	val >>= howto->rightshift;
	/* Merge the value into the destination field, preserving the
	 * bits outside dst_mask. */
	switch (howto->size) {
	case 1:
		*(uint8_t *)addr = (*(uint8_t *)addr & ~howto->dst_mask) |
		    (val & howto->dst_mask);
		break;
	case 2:
		*(uint16_t *)addr = (*(uint16_t *)addr & ~howto->dst_mask) |
		    (val & howto->dst_mask);
		break;
	case 4:
		*(uint32_t *)addr = (*(uint32_t *)addr & ~howto->dst_mask) |
		    (val & howto->dst_mask);
		break;
#if BITS_PER_LONG >= 64
	case 8:
		*(uint64_t *)addr = (*(uint64_t *)addr & ~howto->dst_mask) |
		    (val & howto->dst_mask);
		break;
#endif /* BITS_PER_LONG */
	default:
		ksdebug(change, "Aborted. Invalid relocation size.\n");
		return UNEXPECTED;
	}

	/* Round-trip check: re-reading must yield sym_addr, otherwise
	 * the value overflowed the relocation field. */
	if (read_reloc_value(change, r, addr, &val) != OK || val != sym_addr) {
		ksdebug(change, "Aborted. Relocation overflow.\n");
		return UNEXPECTED;
	}

	return OK;
}
1833 static abort_t create_module_list_entry(struct ksplice_mod_change *change,
1834 bool to_be_applied)
1836 struct ksplice_module_list_entry *entry =
1837 kmalloc(sizeof(*entry), GFP_KERNEL);
1838 if (entry == NULL)
1839 return OUT_OF_MEMORY;
1840 entry->new_code_mod_name =
1841 kstrdup(change->new_code_mod->name, GFP_KERNEL);
1842 if (entry->new_code_mod_name == NULL) {
1843 kfree(entry);
1844 return OUT_OF_MEMORY;
1846 entry->target_mod_name = kstrdup(change->target_name, GFP_KERNEL);
1847 if (entry->target_mod_name == NULL) {
1848 kfree(entry->new_code_mod_name);
1849 kfree(entry);
1850 return OUT_OF_MEMORY;
1852 /* The update's kid is guaranteed to outlast the module_list_entry */
1853 entry->kid = change->update->kid;
1854 entry->applied = to_be_applied;
1855 list_add(&entry->update_list, &change->update->ksplice_module_list);
1856 return OK;
/* Free the name strings owned by each module list entry, then the
 * entries themselves.  entry->kid is borrowed from the update and is
 * deliberately not freed here. */
static void cleanup_module_list_entries(struct update *update)
{
	struct ksplice_module_list_entry *entry;
	list_for_each_entry(entry, &update->ksplice_module_list, update_list) {
		kfree(entry->target_mod_name);
		kfree(entry->new_code_mod_name);
	}
	clear_list(&update->ksplice_module_list,
		   struct ksplice_module_list_entry, update_list);
}
/* Replacement address used for functions deleted by the patch */
static void __attribute__((noreturn)) ksplice_deleted(void)
{
	printk(KERN_CRIT "Called a kernel function deleted by Ksplice!\n");
	BUG();
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
/* 91768d6c2bad0d2766a166f13f2f57e197de3458 was after 2.6.19 */
	/* Older kernels' BUG() is not marked noreturn; loop forever to
	 * satisfy this function's noreturn attribute. */
	for (;;);
#endif
}
/* Floodfill to run-pre match the sections within a change. */
static abort_t match_change_sections(struct ksplice_mod_change *change,
				     bool consider_data_sections)
{
	struct ksplice_section *sect;
	abort_t ret;
	int remaining = 0;
	bool progress;

	/* Count the unmatched sections that must be found; data and
	 * string sections are excluded from the count. */
	for (sect = change->old_code.sections;
	     sect < change->old_code.sections_end; sect++) {
		if ((sect->flags & KSPLICE_SECTION_DATA) == 0 &&
		    (sect->flags & KSPLICE_SECTION_STRING) == 0 &&
		    (sect->flags & KSPLICE_SECTION_MATCHED) == 0)
			remaining++;
	}

	/* Keep sweeping as long as each full pass matches something new;
	 * matches can unlock further matches (hence "floodfill"). */
	while (remaining > 0) {
		progress = false;
		for (sect = change->old_code.sections;
		     sect < change->old_code.sections_end; sect++) {
			if ((sect->flags & KSPLICE_SECTION_MATCHED) != 0)
				continue;
			if ((!consider_data_sections &&
			     (sect->flags & KSPLICE_SECTION_DATA) != 0) ||
			    (sect->flags & KSPLICE_SECTION_STRING) != 0)
				continue;
			ret = find_section(change, sect);
			if (ret == OK) {
				sect->flags |= KSPLICE_SECTION_MATCHED;
				if ((sect->flags & KSPLICE_SECTION_DATA) == 0)
					remaining--;
				progress = true;
			} else if (ret != NO_MATCH) {
				return ret;
			}
		}

		if (progress)
			continue;

		/* A full pass made no progress: report every section
		 * still unmatched and abort. */
		for (sect = change->old_code.sections;
		     sect < change->old_code.sections_end; sect++) {
			if ((sect->flags & KSPLICE_SECTION_MATCHED) != 0 ||
			    (sect->flags & KSPLICE_SECTION_STRING) != 0)
				continue;
			ksdebug(change, "run-pre: could not match %s "
				"section %s\n",
				(sect->flags & KSPLICE_SECTION_DATA) != 0 ?
				"data" :
				(sect->flags & KSPLICE_SECTION_RODATA) != 0 ?
				"rodata" : "text", sect->symbol->label);
		}
		ksdebug(change, "Aborted. run-pre: could not match some "
			"sections.\n");
		return NO_MATCH;
	}
	return OK;
}
/*
 * Search for the section in the running kernel.  Returns OK if and
 * only if it finds precisely one address in the kernel matching the
 * section.
 */
static abort_t find_section(struct ksplice_mod_change *change,
			    struct ksplice_section *sect)
{
	int i;
	abort_t ret;
	unsigned long run_addr;
	LIST_HEAD(vals);
	struct candidate_val *v, *n;

#ifdef KSPLICE_STANDALONE
	ret = add_system_map_candidates(change, change->old_code.system_map,
					change->old_code.system_map_end,
					sect->symbol->label, &vals);
	if (ret != OK) {
		release_vals(&vals);
		return ret;
	}
#endif /* KSPLICE_STANDALONE */
	ret = lookup_symbol(change, sect->symbol, &vals);
	if (ret != OK) {
		release_vals(&vals);
		return ret;
	}

	ksdebug(change, "run-pre: starting sect search for %s\n",
		sect->symbol->label);

	/* First pass: drop candidates that fail run-pre matching.
	 * _safe iteration because non-matching entries are unlinked. */
	list_for_each_entry_safe(v, n, &vals, list) {
		run_addr = v->val;

		yield();
		ret = try_addr(change, sect, run_addr, NULL, RUN_PRE_INITIAL);
		if (ret == NO_MATCH) {
			list_del(&v->list);
			kfree(v);
		} else if (ret != OK) {
			release_vals(&vals);
			return ret;
		}
	}

#if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
	/* Without kallsyms, fall back to scanning kernel text for the
	 * section's bytes. */
	if (list_empty(&vals) && (sect->flags & KSPLICE_SECTION_DATA) == 0) {
		ret = brute_search_all(change, sect, &vals);
		if (ret != OK) {
			release_vals(&vals);
			return ret;
		}
		/*
		 * Make sure run-pre matching output is displayed if
		 * brute_search succeeds.
		 */
		if (singular(&vals)) {
			run_addr = list_entry(vals.next, struct candidate_val,
					      list)->val;
			ret = try_addr(change, sect, run_addr, NULL,
				       RUN_PRE_INITIAL);
			if (ret != OK) {
				ksdebug(change, "run-pre: Debug run failed for "
					"sect %s:\n", sect->symbol->label);
				release_vals(&vals);
				return ret;
			}
		}
	}
#endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */

	if (singular(&vals)) {
		/* Exactly one candidate survived: do the final run,
		 * keeping its safety records only on success. */
		LIST_HEAD(safety_records);
		run_addr = list_entry(vals.next, struct candidate_val,
				      list)->val;
		ret = try_addr(change, sect, run_addr, &safety_records,
			       RUN_PRE_FINAL);
		release_vals(&vals);
		if (ret != OK) {
			clear_list(&safety_records, struct safety_record, list);
			ksdebug(change, "run-pre: Final run failed for sect "
				"%s:\n", sect->symbol->label);
		} else {
			list_splice(&safety_records, &change->safety_records);
		}
		return ret;
	} else if (!list_empty(&vals)) {
		/* Ambiguous match: log up to five candidates and fail. */
		struct candidate_val *val;
		ksdebug(change, "run-pre: multiple candidates for sect %s:\n",
			sect->symbol->label);
		i = 0;
		list_for_each_entry(val, &vals, list) {
			i++;
			ksdebug(change, "%lx\n", val->val);
			if (i > 5) {
				ksdebug(change, "...\n");
				break;
			}
		}
		release_vals(&vals);
		return NO_MATCH;
	}
	release_vals(&vals);
	return NO_MATCH;
}
/*
 * try_addr is the interface to run-pre matching.  Its primary
 * purpose is to manage debugging information for run-pre matching;
 * all the hard work is in run_pre_cmp.
 */
static abort_t try_addr(struct ksplice_mod_change *change,
			struct ksplice_section *sect,
			unsigned long run_addr,
			struct list_head *safety_records,
			enum run_pre_mode mode)
{
	abort_t ret;
	const struct module *run_module = __module_address(run_addr);

	/* A candidate inside our own new-code module would be bogus. */
	if (run_module == change->new_code_mod) {
		ksdebug(change, "run-pre: unexpected address %lx in new_code "
			"module %s for sect %s\n", run_addr, run_module->name,
			sect->symbol->label);
		return UNEXPECTED;
	}
	/* Only consider addresses in the module (or vmlinux) that this
	   change actually targets. */
	if (!patches_module(run_module, change->target)) {
		ksdebug(change, "run-pre: ignoring address %lx in other module "
			"%s for sect %s\n", run_addr, run_module == NULL ?
			"vmlinux" : run_module->name, sect->symbol->label);
		return NO_MATCH;
	}

	/* Tentatively record the symbol's value; undone below via
	   set_temp_labelvals(change, NOVAL) on mismatch. */
	ret = create_labelval(change, sect->symbol, run_addr, TEMP);
	if (ret != OK)
		return ret;

#ifdef CONFIG_FUNCTION_DATA_SECTIONS
	ret = run_pre_cmp(change, sect, run_addr, safety_records, mode);
#else /* !CONFIG_FUNCTION_DATA_SECTIONS */
	/* Without per-function sections, text sections go through the
	   arch-specific comparison. */
	if ((sect->flags & KSPLICE_SECTION_TEXT) != 0)
		ret = arch_run_pre_cmp(change, sect, run_addr, safety_records,
				       mode);
	else
		ret = run_pre_cmp(change, sect, run_addr, safety_records, mode);
#endif /* CONFIG_FUNCTION_DATA_SECTIONS */
	if (ret == NO_MATCH && mode != RUN_PRE_FINAL) {
		set_temp_labelvals(change, NOVAL);
		ksdebug(change, "run-pre: %s sect %s does not match (r_a=%lx "
			"p_a=%lx s=%lx)\n",
			(sect->flags & KSPLICE_SECTION_RODATA) != 0 ? "rodata" :
			(sect->flags & KSPLICE_SECTION_DATA) != 0 ? "data" :
			"text", sect->symbol->label, run_addr, sect->address,
			sect->size);
		ksdebug(change, "run-pre: ");
		if (change->update->debug >= 1) {
			/* Rerun the comparison in debug mode to log a
			   byte-by-byte trace of the mismatch. */
#ifdef CONFIG_FUNCTION_DATA_SECTIONS
			ret = run_pre_cmp(change, sect, run_addr,
					  safety_records, RUN_PRE_DEBUG);
#else /* !CONFIG_FUNCTION_DATA_SECTIONS */
			if ((sect->flags & KSPLICE_SECTION_TEXT) != 0)
				ret = arch_run_pre_cmp(change, sect, run_addr,
						       safety_records,
						       RUN_PRE_DEBUG);
			else
				ret = run_pre_cmp(change, sect, run_addr,
						  safety_records,
						  RUN_PRE_DEBUG);
#endif /* CONFIG_FUNCTION_DATA_SECTIONS */
			set_temp_labelvals(change, NOVAL);
		}
		ksdebug(change, "\n");
		return ret;
	} else if (ret != OK) {
		set_temp_labelvals(change, NOVAL);
		return ret;
	}

	if (mode != RUN_PRE_FINAL) {
		/* Initial pass: a candidate, but do not commit yet. */
		set_temp_labelvals(change, NOVAL);
		ksdebug(change, "run-pre: candidate for sect %s=%lx\n",
			sect->symbol->label, run_addr);
		return OK;
	}

	/* Final pass succeeded: commit the temporary label values. */
	set_temp_labelvals(change, VAL);
	ksdebug(change, "run-pre: found sect %s=%lx\n", sect->symbol->label,
		run_addr);
	return OK;
}
/*
 * run_pre_cmp is the primary run-pre matching function; it determines
 * whether the given ksplice_section matches the code or data in the
 * running kernel starting at run_addr.
 *
 * If run_pre_mode is RUN_PRE_FINAL, a safety record for the matched
 * section is created.
 *
 * The run_pre_mode is also used to determine what debugging
 * information to display.
 */
static abort_t run_pre_cmp(struct ksplice_mod_change *change,
			   const struct ksplice_section *sect,
			   unsigned long run_addr,
			   struct list_head *safety_records,
			   enum run_pre_mode mode)
{
	int matched = 0;
	abort_t ret;
	const struct ksplice_reloc *r, *finger;
	const unsigned char *pre, *run, *pre_start, *run_start;
	unsigned char runval;

	pre_start = (const unsigned char *)sect->address;
	run_start = (const unsigned char *)run_addr;

	/* finger tracks the next reloc; addresses are visited in
	   strictly increasing order as lookup_reloc requires. */
	finger = init_reloc_search(change, sect);

	pre = pre_start;
	run = run_start;
	while (pre < pre_start + sect->size) {
		unsigned long offset = pre - pre_start;
		ret = lookup_reloc(change, &finger, (unsigned long)pre, &r);
		if (ret == OK) {
			/* These pre bytes are a relocation: compare via
			   the reloc handler, not byte-by-byte. */
			ret = handle_reloc(change, sect, r, (unsigned long)run,
					   mode);
			if (ret != OK) {
				if (mode == RUN_PRE_INITIAL)
					ksdebug(change, "reloc in sect does "
						"not match after %lx/%lx "
						"bytes\n", offset, sect->size);
				return ret;
			}
			if (mode == RUN_PRE_DEBUG)
				print_bytes(change, run, r->howto->size, pre,
					    r->howto->size);
			pre += r->howto->size;
			run += r->howto->size;
			finger++;
			continue;
		} else if (ret != NO_MATCH) {
			return ret;
		}

		if ((sect->flags & KSPLICE_SECTION_TEXT) != 0) {
			/* Skip paravirt instruction sequences, which may
			   legitimately differ from the pre code. */
			ret = handle_paravirt(change, (unsigned long)pre,
					      (unsigned long)run, &matched);
			if (ret != OK)
				return ret;
			if (matched != 0) {
				if (mode == RUN_PRE_DEBUG)
					print_bytes(change, run, matched, pre,
						    matched);
				pre += matched;
				run += matched;
				continue;
			}
		}

		/* Read the run byte safely; unmapped memory is a mismatch,
		   not an oops. */
		if (probe_kernel_read(&runval, (void *)run, 1) == -EFAULT) {
			if (mode == RUN_PRE_INITIAL)
				ksdebug(change, "sect unmapped after %lx/%lx "
					"bytes\n", offset, sect->size);
			return NO_MATCH;
		}

		/* Data sections are allowed to differ byte-for-byte. */
		if (runval != *pre &&
		    (sect->flags & KSPLICE_SECTION_DATA) == 0) {
			if (mode == RUN_PRE_INITIAL)
				ksdebug(change, "sect does not match after "
					"%lx/%lx bytes\n", offset, sect->size);
			if (mode == RUN_PRE_DEBUG) {
				print_bytes(change, run, 1, pre, 1);
				ksdebug(change, "[p_o=%lx] ! ", offset);
				print_bytes(change, run + 1, 2, pre + 1, 2);
			}
			return NO_MATCH;
		}
		if (mode == RUN_PRE_DEBUG)
			print_bytes(change, run, 1, pre, 1);
		pre++;
		run++;
	}
	return create_safety_record(change, sect, safety_records, run_addr,
				    run - run_start);
}
/*
 * Emit a side-by-side hex comparison of the run and pre byte ranges
 * for run-pre debugging output.  Bytes that agree are printed once;
 * differing or unpaired bytes are printed in "run/pre" form.
 */
static void print_bytes(struct ksplice_mod_change *change,
			const unsigned char *run, int runc,
			const unsigned char *pre, int prec)
{
	int i;
	int common = runc < prec ? runc : prec;

	for (i = 0; i < common; i++) {
		if (run[i] != pre[i])
			ksdebug(change, "%02x/%02x ", run[i], pre[i]);
		else
			ksdebug(change, "%02x ", run[i]);
	}
	/* Tail of whichever range is longer, with the missing side blank. */
	for (i = common; i < runc; i++)
		ksdebug(change, "%02x/ ", run[i]);
	for (i = common; i < prec; i++)
		ksdebug(change, "/%02x ", pre[i]);
}
2248 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
/*
 * Scan [start, start + len) for addresses whose bytes run-pre match
 * sect, appending each match to vals.  Used only when no symbol
 * source (kallsyms/System.map) resolved the section.
 */
static abort_t brute_search(struct ksplice_mod_change *change,
			    struct ksplice_section *sect,
			    const void *start, unsigned long len,
			    struct list_head *vals)
{
	unsigned long addr;
	char run, pre;
	abort_t ret;

	for (addr = (unsigned long)start; addr < (unsigned long)start + len;
	     addr++) {
		/* Long linear scan: yield periodically. */
		if (addr % 100000 == 0)
			yield();

		/* Ran off the end of mapped memory: stop the scan. */
		if (probe_kernel_read(&run, (void *)addr, 1) == -EFAULT)
			return OK;

		pre = *(const unsigned char *)(sect->address);

		/* Cheap first-byte filter before the full comparison. */
		if (run != pre)
			continue;

		ret = try_addr(change, sect, addr, NULL, RUN_PRE_INITIAL);
		if (ret == OK) {
			ret = add_candidate_val(change, vals, addr);
			if (ret != OK)
				return ret;
		} else if (ret != NO_MATCH) {
			return ret;
		}
	}

	return OK;
}
2284 extern struct list_head modules;
/*
 * Run brute_search over every candidate region: the core and init
 * areas of each loaded module this change could target, plus the core
 * kernel text.
 */
static abort_t brute_search_all(struct ksplice_mod_change *change,
				struct ksplice_section *sect,
				struct list_head *vals)
{
	struct module *m;
	abort_t ret = OK;
	int saved_debug;

	ksdebug(change, "brute_search: searching for %s\n",
		sect->symbol->label);
	/* Suppress the very chatty per-address debug output while
	   scanning; restored on exit. */
	saved_debug = change->update->debug;
	change->update->debug = 0;

	list_for_each_entry(m, &modules, list) {
		if (!patches_module(m, change->target) ||
		    m == change->new_code_mod)
			continue;
		ret = brute_search(change, sect, m->module_core, m->core_size,
				   vals);
		if (ret != OK)
			goto out;
		ret = brute_search(change, sect, m->module_init, m->init_size,
				   vals);
		if (ret != OK)
			goto out;
	}

	/* Also scan the core kernel's text segment. */
	ret = brute_search(change, sect, (const void *)init_mm.start_code,
			   init_mm.end_code - init_mm.start_code, vals);

out:
	change->update->debug = saved_debug;
	return ret;
}
2320 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
/* A half-open byte range [address, address + size), used as a bsearch key. */
struct range {
	unsigned long address;
	unsigned long size;
};

/* bsearch comparator: does the reloc's blank_addr fall within the range? */
static int reloc_bsearch_compare(const void *key, const void *elt)
{
	const struct range *range = key;
	const struct ksplice_reloc *r = elt;
	if (range->address + range->size <= r->blank_addr)
		return -1;
	if (range->address > r->blank_addr)
		return 1;
	return 0;
}
/*
 * Find the first relocation (in blank_addr order) whose blank_addr
 * lies within [address, address + size), or NULL if there is none.
 * Assumes [start, end) is sorted by blank_addr.
 */
static const struct ksplice_reloc *find_reloc(const struct ksplice_reloc *start,
					      const struct ksplice_reloc *end,
					      unsigned long address,
					      unsigned long size)
{
	const struct ksplice_reloc *r;
	struct range range = { address, size };
	r = bsearch((void *)&range, start, end - start, sizeof(*r),
		    reloc_bsearch_compare);
	if (r == NULL)
		return NULL;
	/* bsearch may land anywhere in a run of in-range relocs; rewind
	   to the first one. */
	while (r > start && (r - 1)->blank_addr >= address)
		r--;
	return r;
}
2354 static const struct ksplice_reloc *
2355 init_reloc_search(struct ksplice_mod_change *change,
2356 const struct ksplice_section *sect)
2358 const struct ksplice_reloc *r;
2359 r = find_reloc(change->old_code.relocs, change->old_code.relocs_end,
2360 sect->address, sect->size);
2361 if (r == NULL)
2362 return change->old_code.relocs_end;
2363 return r;
/*
 * lookup_reloc implements an amortized O(1) lookup for the next
 * old_code relocation.  It must be called with a strictly increasing
 * sequence of addresses.
 *
 * The fingerp is private data for lookup_reloc, and needs to have
 * been initialized as a pointer to the result of find_reloc (or
 * init_reloc_search).
 */
static abort_t lookup_reloc(struct ksplice_mod_change *change,
			    const struct ksplice_reloc **fingerp,
			    unsigned long addr,
			    const struct ksplice_reloc **relocp)
{
	const struct ksplice_reloc *r = *fingerp;
	int canary_ret;

	/* Advance the finger past relocs ending at or before addr; a
	   zero-size reloc exactly at addr still counts as a hit. */
	while (r < change->old_code.relocs_end &&
	       addr >= r->blank_addr + r->howto->size &&
	       !(addr == r->blank_addr && r->howto->size == 0))
		r++;
	*fingerp = r;
	if (r == change->old_code.relocs_end)
		return NO_MATCH;
	if (addr < r->blank_addr)
		return NO_MATCH;
	*relocp = r;
	if (r->howto->type != KSPLICE_HOWTO_RELOC)
		return OK;

	canary_ret = contains_canary(change, r->blank_addr, r->howto);
	if (canary_ret < 0)
		return UNEXPECTED;
	if (canary_ret == 0) {
		/* The canary is gone: the reloc site was already filled
		   in (e.g. by an alternative instruction), so there is
		   nothing to compare here. */
		ksdebug(change, "run-pre: reloc skipped at p_a=%lx to %s+%lx "
			"(altinstr)\n", r->blank_addr, r->symbol->label,
			r->target_addend);
		return NO_MATCH;
	}

	if (addr != r->blank_addr) {
		ksdebug(change, "Invalid nonzero relocation offset\n");
		return UNEXPECTED;
	}
	return OK;
}
/* Dispatch a relocation to the handler for its howto type. */
static abort_t handle_reloc(struct ksplice_mod_change *change,
			    const struct ksplice_section *sect,
			    const struct ksplice_reloc *r,
			    unsigned long run_addr, enum run_pre_mode mode)
{
	switch (r->howto->type) {
	case KSPLICE_HOWTO_RELOC:
		return handle_howto_reloc(change, sect, r, run_addr, mode);
	case KSPLICE_HOWTO_DATE:
	case KSPLICE_HOWTO_TIME:
		return handle_howto_date(change, sect, r, run_addr, mode);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
#ifdef CONFIG_BUG
	case KSPLICE_HOWTO_BUG:
		return handle_bug(change, r, run_addr);
#endif /* CONFIG_BUG */
#else /* LINUX_VERSION_CODE < */
/* 7664c5a1da4711bb6383117f51b94c8dc8f3f1cd was after 2.6.19 */
#endif /* LINUX_VERSION_CODE */
	case KSPLICE_HOWTO_EXTABLE:
		return handle_extable(change, r, run_addr);
	default:
		ksdebug(change, "Unexpected howto type %d\n", r->howto->type);
		return UNEXPECTED;
	}
}
/*
 * For date/time relocations, we check that the sequence of bytes
 * matches the format of a date or time.
 */
static abort_t handle_howto_date(struct ksplice_mod_change *change,
				 const struct ksplice_section *sect,
				 const struct ksplice_reloc *r,
				 unsigned long run_addr, enum run_pre_mode mode)
{
	abort_t ret;
	char *buf = kmalloc(r->howto->size, GFP_KERNEL);

	if (buf == NULL)
		return OUT_OF_MEMORY;
	if (probe_kernel_read(buf, (void *)run_addr, r->howto->size) == -EFAULT) {
		ret = NO_MATCH;
		goto out;
	}

	switch (r->howto->type) {
	case KSPLICE_HOWTO_TIME:
		/* __TIME__ format: "hh:mm:ss" */
		if (isdigit(buf[0]) && isdigit(buf[1]) && buf[2] == ':' &&
		    isdigit(buf[3]) && isdigit(buf[4]) && buf[5] == ':' &&
		    isdigit(buf[6]) && isdigit(buf[7]))
			ret = OK;
		else
			ret = NO_MATCH;
		break;
	case KSPLICE_HOWTO_DATE:
		/* __DATE__ format: "Mmm dd yyyy" (day may be blank-padded) */
		if (isalpha(buf[0]) && isalpha(buf[1]) && isalpha(buf[2]) &&
		    buf[3] == ' ' && (buf[4] == ' ' || isdigit(buf[4])) &&
		    isdigit(buf[5]) && buf[6] == ' ' && isdigit(buf[7]) &&
		    isdigit(buf[8]) && isdigit(buf[9]) && isdigit(buf[10]))
			ret = OK;
		else
			ret = NO_MATCH;
		break;
	default:
		ret = UNEXPECTED;
	}
	if (ret == NO_MATCH && mode == RUN_PRE_INITIAL)
		ksdebug(change, "%s string: \"%.*s\" does not match format\n",
			r->howto->type == KSPLICE_HOWTO_DATE ? "date" : "time",
			r->howto->size, buf);

	if (ret != OK)
		goto out;
	/* Format matched: record run_addr as the symbol's tentative value. */
	ret = create_labelval(change, r->symbol, run_addr, TEMP);
out:
	kfree(buf);
	return ret;
}
2493 * Extract the value of a symbol used in a relocation in the pre code
2494 * during run-pre matching, giving an error if it conflicts with a
2495 * previously found value of that symbol
2497 static abort_t handle_howto_reloc(struct ksplice_mod_change *change,
2498 const struct ksplice_section *sect,
2499 const struct ksplice_reloc *r,
2500 unsigned long run_addr,
2501 enum run_pre_mode mode)
2503 struct ksplice_section *sym_sect = symbol_section(change, r->symbol);
2504 unsigned long offset = r->target_addend;
2505 unsigned long val;
2506 abort_t ret;
2508 ret = read_reloc_value(change, r, run_addr, &val);
2509 if (ret != OK)
2510 return ret;
2511 if (r->howto->pcrel)
2512 val += run_addr;
2514 #ifndef CONFIG_FUNCTION_DATA_SECTIONS
2515 if (sym_sect == NULL || sym_sect->match_map == NULL || offset == 0) {
2517 } else if (offset < 0 || offset >= sym_sect->size) {
2518 ksdebug(change, "Out of range relocation: %s+%lx -> %s+%lx",
2519 sect->symbol->label, r->blank_addr - sect->address,
2520 r->symbol->label, offset);
2521 return NO_MATCH;
2522 } else if (sect == sym_sect && sect->match_map[offset] == NULL) {
2523 sym_sect->match_map[offset] =
2524 (const unsigned char *)r->symbol->value + offset;
2525 } else if (sect == sym_sect && (unsigned long)sect->match_map[offset] ==
2526 r->symbol->value + offset) {
2528 } else if (sect == sym_sect) {
2529 ksdebug(change, "Relocations to nonmatching locations within "
2530 "section %s: %lx does not match %lx\n",
2531 sect->symbol->label, offset,
2532 (unsigned long)sect->match_map[offset] -
2533 r->symbol->value);
2534 return NO_MATCH;
2535 } else if ((sym_sect->flags & KSPLICE_SECTION_MATCHED) == 0) {
2536 if (mode == RUN_PRE_INITIAL)
2537 ksdebug(change, "Delaying matching of %s due to reloc "
2538 "from to unmatching section: %s+%lx\n",
2539 sect->symbol->label, r->symbol->label, offset);
2540 return NO_MATCH;
2541 } else if (sym_sect->match_map[offset] == NULL) {
2542 if (mode == RUN_PRE_INITIAL)
2543 ksdebug(change, "Relocation not to instruction "
2544 "boundary: %s+%lx -> %s+%lx",
2545 sect->symbol->label, r->blank_addr -
2546 sect->address, r->symbol->label, offset);
2547 return NO_MATCH;
2548 } else if ((unsigned long)sym_sect->match_map[offset] !=
2549 r->symbol->value + offset) {
2550 if (mode == RUN_PRE_INITIAL)
2551 ksdebug(change, "Match map shift %s+%lx: %lx != %lx\n",
2552 r->symbol->label, offset,
2553 r->symbol->value + offset,
2554 (unsigned long)sym_sect->match_map[offset]);
2555 val += r->symbol->value + offset -
2556 (unsigned long)sym_sect->match_map[offset];
2558 #endif /* !CONFIG_FUNCTION_DATA_SECTIONS */
2560 if (mode == RUN_PRE_INITIAL)
2561 ksdebug(change, "run-pre: reloc at r_a=%lx p_a=%lx to %s+%lx: "
2562 "found %s = %lx\n", run_addr, r->blank_addr,
2563 r->symbol->label, offset, r->symbol->label, val);
2565 if (contains_canary(change, run_addr, r->howto) != 0) {
2566 ksdebug(change, "Aborted. Unexpected canary in run code at %lx"
2567 "\n", run_addr);
2568 return UNEXPECTED;
2571 if ((sect->flags & KSPLICE_SECTION_DATA) != 0 &&
2572 sect->symbol == r->symbol)
2573 return OK;
2574 ret = create_labelval(change, r->symbol, val, TEMP);
2575 if (ret == NO_MATCH && mode == RUN_PRE_INITIAL)
2576 ksdebug(change, "run-pre: reloc at r_a=%lx p_a=%lx: labelval "
2577 "%s = %lx does not match expected %lx\n", run_addr,
2578 r->blank_addr, r->symbol->label, r->symbol->value, val);
2580 if (ret != OK)
2581 return ret;
2582 if (sym_sect != NULL && (sym_sect->flags & KSPLICE_SECTION_MATCHED) == 0
2583 && (sym_sect->flags & KSPLICE_SECTION_STRING) != 0) {
2584 if (mode == RUN_PRE_INITIAL)
2585 ksdebug(change, "Recursively comparing string section "
2586 "%s\n", sym_sect->symbol->label);
2587 else if (mode == RUN_PRE_DEBUG)
2588 ksdebug(change, "[str start] ");
2589 ret = run_pre_cmp(change, sym_sect, val, NULL, mode);
2590 if (mode == RUN_PRE_DEBUG)
2591 ksdebug(change, "[str end] ");
2592 if (ret == OK && mode == RUN_PRE_INITIAL)
2593 ksdebug(change, "Successfully matched string section %s"
2594 "\n", sym_sect->symbol->label);
2595 else if (mode == RUN_PRE_INITIAL)
2596 ksdebug(change, "Failed to match string section %s\n",
2597 sym_sect->symbol->label);
2599 return ret;
2602 #ifdef CONFIG_GENERIC_BUG
/*
 * Match a BUG-table relocation: the run kernel must have a bug_entry
 * at run_addr, and that entry's address becomes the tentative value
 * of the corresponding __bug_table section symbol.
 *
 * NOTE(review): this definition is guarded by CONFIG_GENERIC_BUG while
 * the call site in handle_reloc is guarded by CONFIG_BUG — confirm the
 * two guards cannot diverge on any supported configuration.
 */
static abort_t handle_bug(struct ksplice_mod_change *change,
			  const struct ksplice_reloc *r, unsigned long run_addr)
{
	const struct bug_entry *run_bug = find_bug(run_addr);
	struct ksplice_section *bug_sect = symbol_section(change, r->symbol);
	if (run_bug == NULL)
		return NO_MATCH;
	if (bug_sect == NULL)
		return UNEXPECTED;
	return create_labelval(change, bug_sect->symbol, (unsigned long)run_bug,
			       TEMP);
}
2615 #endif /* CONFIG_GENERIC_BUG */
2617 static abort_t handle_extable(struct ksplice_mod_change *change,
2618 const struct ksplice_reloc *r,
2619 unsigned long run_addr)
2621 const struct exception_table_entry *run_ent =
2622 search_exception_tables(run_addr);
2623 struct ksplice_section *ex_sect = symbol_section(change, r->symbol);
2624 if (run_ent == NULL)
2625 return NO_MATCH;
2626 if (ex_sect == NULL)
2627 return UNEXPECTED;
2628 return create_labelval(change, ex_sect->symbol, (unsigned long)run_ent,
2629 TEMP);
2632 static int symbol_section_bsearch_compare(const void *a, const void *b)
2634 const struct ksplice_symbol *sym = a;
2635 const struct ksplice_section *sect = b;
2636 return strcmp(sym->label, sect->symbol->label);
2639 static int compare_section_labels(const void *va, const void *vb)
2641 const struct ksplice_section *a = va, *b = vb;
2642 return strcmp(a->symbol->label, b->symbol->label);
2645 static struct ksplice_section *symbol_section(struct ksplice_mod_change *change,
2646 const struct ksplice_symbol *sym)
2648 return bsearch(sym, change->old_code.sections,
2649 change->old_code.sections_end -
2650 change->old_code.sections,
2651 sizeof(struct ksplice_section),
2652 symbol_section_bsearch_compare);
/* Find the relocation for the oldaddr of a ksplice_patch */
static const struct ksplice_reloc *
patch_reloc(struct ksplice_mod_change *change,
	    const struct ksplice_patch *p)
{
	unsigned long addr = (unsigned long)&p->oldaddr;
	const struct ksplice_reloc *r =
	    find_reloc(change->new_code.relocs, change->new_code.relocs_end,
		       addr, sizeof(addr));
	/* Require the reloc to begin inside the oldaddr field itself. */
	if (r == NULL || r->blank_addr < addr ||
	    r->blank_addr >= addr + sizeof(addr))
		return NULL;
	return r;
}
/*
 * Populates vals with the possible values for ksym from the various
 * sources Ksplice uses to resolve symbols.
 */
static abort_t lookup_symbol(struct ksplice_mod_change *change,
			     const struct ksplice_symbol *ksym,
			     struct list_head *vals)
{
	abort_t ret;

#ifdef KSPLICE_STANDALONE
	if (!bootstrapped)
		return OK;
#endif /* KSPLICE_STANDALONE */

	if (ksym->candidate_vals == NULL) {
		/* The value was pinned down before loading; it overrides
		   every other source, so clear the list first. */
		release_vals(vals);
		ksdebug(change, "using detected sym %s=%lx\n", ksym->label,
			ksym->value);
		return add_candidate_val(change, vals, ksym->value);
	}

#ifdef CONFIG_MODULE_UNLOAD
	/* cleanup_module is special-cased: the target module's exit
	   pointer is an authoritative candidate. */
	if (strcmp(ksym->label, "cleanup_module") == 0 && change->target != NULL
	    && change->target->exit != NULL) {
		ret = add_candidate_val(change, vals,
					(unsigned long)change->target->exit);
		if (ret != OK)
			return ret;
	}
#endif

	if (ksym->name != NULL) {
		struct candidate_val *val;
		list_for_each_entry(val, ksym->candidate_vals, list) {
			ret = add_candidate_val(change, vals, val->val);
			if (ret != OK)
				return ret;
		}

		/* Also consider exports newly created by other changes
		   in the same update. */
		ret = new_export_lookup(change, ksym->name, vals);
		if (ret != OK)
			return ret;
	}

	return OK;
}
2718 #ifdef KSPLICE_STANDALONE
/*
 * Append to vals every System.map candidate address recorded for
 * label, after validating (and compensating for) any constant offset
 * between the System.map and the running kernel.
 */
static abort_t
add_system_map_candidates(struct ksplice_mod_change *change,
			  const struct ksplice_system_map *start,
			  const struct ksplice_system_map *end,
			  const char *label, struct list_head *vals)
{
	abort_t ret;
	long off;
	int i;
	const struct ksplice_system_map *smap;

	/* Some Fedora kernel releases have System.map files whose symbol
	 * addresses disagree with the running kernel by a constant address
	 * offset because of the CONFIG_PHYSICAL_START and CONFIG_PHYSICAL_ALIGN
	 * values used to compile these kernels.  This constant address offset
	 * is always a multiple of 0x100000.
	 *
	 * If we observe an offset that is NOT a multiple of 0x100000, then the
	 * user provided us with an incorrect System.map file, and we should
	 * abort.
	 * If we observe an offset that is a multiple of 0x100000, then we can
	 * adjust the System.map address values accordingly and proceed.
	 */
	off = (unsigned long)printk - change->map_printk;
	if (off & 0xfffff) {
		ksdebug(change,
			"Aborted. System.map does not match kernel.\n");
		return BAD_SYSTEM_MAP;
	}

	smap = bsearch(label, start, end - start, sizeof(*smap),
		       system_map_bsearch_compare);
	if (smap == NULL)
		return OK;

	for (i = 0; i < smap->nr_candidates; i++) {
		/* Apply the detected load offset to each candidate. */
		ret = add_candidate_val(change, vals,
					smap->candidates[i] + off);
		if (ret != OK)
			return ret;
	}
	return OK;
}
2763 static int system_map_bsearch_compare(const void *key, const void *elt)
2765 const struct ksplice_system_map *map = elt;
2766 const char *label = key;
2767 return strcmp(label, map->label);
#endif /* KSPLICE_STANDALONE */
/*
 * An update could cause one module to export a symbol and at the same
 * time change another module to use that symbol.  This violates the
 * normal situation where the changes can be handled independently.
 *
 * new_export_lookup obtains symbol values from the changes to the
 * exported symbol table made by other changes.
 */
static abort_t new_export_lookup(struct ksplice_mod_change *ichange,
				 const char *name, struct list_head *vals)
{
	struct ksplice_mod_change *change;
	struct ksplice_patch *p;
	list_for_each_entry(change, &ichange->update->changes, list) {
		for (p = change->patches; p < change->patches_end; p++) {
			const struct kernel_symbol *sym;
			const struct ksplice_reloc *r;
			/* Only export patches whose symbol name matches. */
			if (p->type != KSPLICE_PATCH_EXPORT ||
			    strcmp(name, *(const char **)p->contents) != 0)
				continue;

			/* Check that the p->oldaddr reloc has been resolved. */
			r = patch_reloc(change, p);
			if (r == NULL ||
			    contains_canary(change, r->blank_addr,
					    r->howto) != 0)
				continue;
			sym = (const struct kernel_symbol *)r->symbol->value;

			/*
			 * Check that the sym->value reloc has been resolved,
			 * if there is a Ksplice relocation there.
			 */
			r = find_reloc(change->new_code.relocs,
				       change->new_code.relocs_end,
				       (unsigned long)&sym->value,
				       sizeof(&sym->value));
			if (r != NULL &&
			    r->blank_addr == (unsigned long)&sym->value &&
			    contains_canary(change, r->blank_addr,
					    r->howto) != 0)
				continue;
			return add_candidate_val(ichange, vals, sym->value);
		}
	}
	return OK;
}
/*
 * When patch_action is called, the update should be fully prepared.
 * patch_action will try to actually insert or remove trampolines for
 * the update.
 */
static abort_t patch_action(struct update *update, enum ksplice_action action)
{
	/* Map the requested action onto its stop_machine callback. */
	static int (*const __patch_actions[KS_ACTIONS])(void *) = {
		[KS_APPLY] = __apply_patches,
		[KS_REVERSE] = __reverse_patches,
	};
	int i;
	abort_t ret;
	struct ksplice_mod_change *change;

	ret = map_trampoline_pages(update);
	if (ret != OK)
		return ret;

	/* Run the pre hooks; any nonzero return aborts the action. */
	list_for_each_entry(change, &update->changes, list) {
		const typeof(int (*)(void)) *f;
		for (f = change->hooks[action].pre;
		     f < change->hooks[action].pre_end; f++) {
			if ((*f)() != 0) {
				ret = CALL_FAILED;
				goto out;
			}
		}
	}

	/* Retry a few times: the stack check inside stop_machine may
	   transiently find the to-be-patched code in use. */
	for (i = 0; i < 5; i++) {
		cleanup_conflicts(update);
#ifdef KSPLICE_STANDALONE
		bust_spinlocks(1);
#endif /* KSPLICE_STANDALONE */
		ret = (__force abort_t)stop_machine(__patch_actions[action],
						    update, NULL);
#ifdef KSPLICE_STANDALONE
		bust_spinlocks(0);
#endif /* KSPLICE_STANDALONE */
		if (ret != CODE_BUSY)
			break;
		/* Sleep a second and try again. */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(1000));
	}
out:
	unmap_trampoline_pages(update);

	if (ret == CODE_BUSY) {
		print_conflicts(update);
		_ksdebug(update, "Aborted %s. stack check: to-be-%s "
			 "code is busy.\n", update->kid,
			 action == KS_APPLY ? "replaced" : "reversed");
	} else if (ret == ALREADY_REVERSED) {
		_ksdebug(update, "Aborted %s. Ksplice update %s is already "
			 "reversed.\n", update->kid, update->kid);
	} else if (ret == MODULE_BUSY) {
		_ksdebug(update, "Update %s is in use by another module\n",
			 update->kid);
	}

	if (ret != OK) {
		/* Run the fail hooks before propagating the error. */
		list_for_each_entry(change, &update->changes, list) {
			const typeof(void (*)(void)) *f;
			for (f = change->hooks[action].fail;
			     f < change->hooks[action].fail_end; f++)
				(*f)();
		}

		return ret;
	}

	/* Success: run the post hooks. */
	list_for_each_entry(change, &update->changes, list) {
		const typeof(void (*)(void)) *f;
		for (f = change->hooks[action].post;
		     f < change->hooks[action].post_end; f++)
			(*f)();
	}

	_ksdebug(update, "Atomic patch %s for %s complete\n",
		 action == KS_APPLY ? "insertion" : "removal", update->kid);
	return OK;
}
/* Atomically insert the update; run from within stop_machine */
static int __apply_patches(void *updateptr)
{
	struct update *update = updateptr;
	struct ksplice_mod_change *change;
	struct ksplice_module_list_entry *entry;
	struct ksplice_patch *p;
	abort_t ret;

	/* Idempotent: applying an applied update is a no-op. */
	if (update->stage == STAGE_APPLIED)
		return (__force int)OK;

	if (update->stage != STAGE_PREPARING)
		return (__force int)UNEXPECTED;

	/* No thread may be executing or sleeping in to-be-patched code. */
	ret = check_each_task(update);
	if (ret != OK)
		return (__force int)ret;

	/* Pin every new-code module; on failure, drop the references
	   taken so far before bailing out. */
	list_for_each_entry(change, &update->changes, list) {
		if (try_module_get(change->new_code_mod) != 1) {
			struct ksplice_mod_change *change1;
			list_for_each_entry(change1, &update->changes, list) {
				if (change1 == change)
					break;
				module_put(change1->new_code_mod);
			}
			module_put(THIS_MODULE);
			return (__force int)UNEXPECTED;
		}
	}

	/* Run the apply-time check hooks; any nonzero return aborts. */
	list_for_each_entry(change, &update->changes, list) {
		const typeof(int (*)(void)) *f;
		for (f = change->hooks[KS_APPLY].check;
		     f < change->hooks[KS_APPLY].check_end; f++) {
			if ((*f)() != 0)
				return (__force int)CALL_FAILED;
		}
	}

	/* Commit point: the update application will succeed. */

	update->stage = STAGE_APPLIED;
#ifdef TAINT_KSPLICE
	add_taint(TAINT_KSPLICE);
#endif

	list_for_each_entry(entry, &update->ksplice_module_list, update_list)
		list_add(&entry->list, &ksplice_modules);

	/* Redirect each patched function through its trampoline. */
	list_for_each_entry(change, &update->changes, list) {
		for (p = change->patches; p < change->patches_end; p++)
			insert_trampoline(p);
	}

	/* Run the intra-stop_machine apply hooks. */
	list_for_each_entry(change, &update->changes, list) {
		const typeof(void (*)(void)) *f;
		for (f = change->hooks[KS_APPLY].intra;
		     f < change->hooks[KS_APPLY].intra_end; f++)
			(*f)();
	}

	return (__force int)OK;
}
2969 /* Atomically remove the update; run from within stop_machine */
/*
 * Runs with all other CPUs stopped, so no code in the patched functions can
 * start executing while we check and tear down the trampolines.  Returns an
 * abort_t smuggled through stop_machine's int return, hence the
 * (__force int) casts.
 */
2970 static int __reverse_patches(void *updateptr)
2972 struct update *update = updateptr;
2973 struct ksplice_mod_change *change;
2974 struct ksplice_module_list_entry *entry;
2975 const struct ksplice_patch *p;
2976 abort_t ret;
/* Nothing to reverse unless the update is currently applied. */
2978 if (update->stage != STAGE_APPLIED)
2979 return (__force int)OK;
2981 #ifdef CONFIG_MODULE_UNLOAD
/* refcount 1 == only our own reference; anything more means a user. */
2982 list_for_each_entry(change, &update->changes, list) {
2983 if (module_refcount(change->new_code_mod) != 1)
2984 return (__force int)MODULE_BUSY;
2986 #endif /* CONFIG_MODULE_UNLOAD */
/*
 * A target module that loaded after we were applied (so the change was
 * never applied to it) blocks reversal.
 * NOTE(review): this return lacks the (__force int) cast the other
 * returns carry; numerically identical, but inconsistent for sparse.
 */
2988 list_for_each_entry(entry, &update->ksplice_module_list, update_list) {
2989 if (!entry->applied &&
2990 find_module(entry->target_mod_name) != NULL)
2991 return COLD_UPDATE_LOADED;
/* No thread may be sleeping or running inside the patched code. */
2994 ret = check_each_task(update);
2995 if (ret != OK)
2996 return (__force int)ret;
/* Make sure nobody overwrote our trampolines while we were applied. */
2998 list_for_each_entry(change, &update->changes, list) {
2999 for (p = change->patches; p < change->patches_end; p++) {
3000 ret = verify_trampoline(change, p);
3001 if (ret != OK)
3002 return (__force int)ret;
/* Give the update's own pre-reverse check hooks a veto. */
3006 list_for_each_entry(change, &update->changes, list) {
3007 const typeof(int (*)(void)) *f;
3008 for (f = change->hooks[KS_REVERSE].check;
3009 f < change->hooks[KS_REVERSE].check_end; f++) {
3010 if ((*f)() != 0)
3011 return (__force int)CALL_FAILED;
3015 /* Commit point: the update reversal will succeed. */
3017 update->stage = STAGE_REVERSED;
/* Drop the self-references taken when the update was applied. */
3019 list_for_each_entry(change, &update->changes, list)
3020 module_put(change->new_code_mod);
3022 list_for_each_entry(entry, &update->ksplice_module_list, update_list)
3023 list_del(&entry->list);
/* Run the mid-reversal hooks before the code is switched back. */
3025 list_for_each_entry(change, &update->changes, list) {
3026 const typeof(void (*)(void)) *f;
3027 for (f = change->hooks[KS_REVERSE].intra;
3028 f < change->hooks[KS_REVERSE].intra_end; f++)
3029 (*f)();
/* Finally restore the original instructions at each patch site. */
3032 list_for_each_entry(change, &update->changes, list) {
3033 for (p = change->patches; p < change->patches_end; p++)
3034 remove_trampoline(p);
3037 return (__force int)OK;
3041 * Check whether any thread's instruction pointer or any address of
3042 * its stack is contained in one of the safety_records associated with
3043 * the update.
3045 * check_each_task must be called from inside stop_machine, because it
3046 * does not take tasklist_lock (which cannot be held by anyone else
3047 * during stop_machine).
/*
 * Returns OK, CODE_BUSY (some thread is inside patched code — recorded as
 * a conflict), or a harder failure from check_task.
 */
3049 static abort_t check_each_task(struct update *update)
3051 const struct task_struct *g, *p;
3052 abort_t status = OK, ret;
3053 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
3054 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
3055 read_lock(&tasklist_lock);
3056 #endif /* LINUX_VERSION_CODE */
3057 do_each_thread(g, p) {
3058 /* do_each_thread is a double loop! */
/* First pass is cheap; on failure rerun to record the conflict trace. */
3059 ret = check_task(update, p, false);
3060 if (ret != OK) {
3061 check_task(update, p, true);
3062 status = ret;
/* CODE_BUSY is survivable (keep scanning); anything else aborts now. */
3064 if (ret != OK && ret != CODE_BUSY)
3065 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
3066 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
3067 goto out;
3068 #else /* LINUX_VERSION_CODE < */
3069 return ret;
3070 #endif /* LINUX_VERSION_CODE */
3071 } while_each_thread(g, p);
3072 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
3073 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
3074 out:
3075 read_unlock(&tasklist_lock);
3076 #endif /* LINUX_VERSION_CODE */
3077 return status;
/*
 * Check one task's IP and (when safe) its kernel stack against the update's
 * safety records.  When rerun is true, additionally allocate a struct
 * conflict describing the offending addresses for later reporting.
 * GFP_ATOMIC because we are inside stop_machine.
 */
3080 static abort_t check_task(struct update *update,
3081 const struct task_struct *t, bool rerun)
3083 abort_t status, ret;
3084 struct conflict *conf = NULL;
3086 if (rerun) {
3087 conf = kmalloc(sizeof(*conf), GFP_ATOMIC);
3088 if (conf == NULL)
3089 return OUT_OF_MEMORY;
3090 conf->process_name = kstrdup(t->comm, GFP_ATOMIC);
3091 if (conf->process_name == NULL) {
3092 kfree(conf);
3093 return OUT_OF_MEMORY;
3095 conf->pid = t->pid;
3096 INIT_LIST_HEAD(&conf->stack);
3097 list_add(&conf->list, &update->conflicts);
/* Check the saved (or current) instruction pointer first. */
3100 status = check_address(update, conf, KSPLICE_IP(t));
3101 if (t == current) {
/* Our own stack: walk upward from this very frame. */
3102 ret = check_stack(update, conf, task_thread_info(t),
3103 (unsigned long *)__builtin_frame_address(0));
3104 if (status == OK)
3105 status = ret;
3106 } else if (!task_curr(t)) {
/* Sleeping task: its saved stack pointer is stable under stop_machine. */
3107 ret = check_stack(update, conf, task_thread_info(t),
3108 (unsigned long *)KSPLICE_SP(t));
3109 if (status == OK)
3110 status = ret;
3111 } else if (!is_stop_machine(t)) {
/* Running on another CPU and not a stop_machine thread: shouldn't happen. */
3112 status = UNEXPECTED_RUNNING_TASK;
3114 return status;
/*
 * Conservatively scan every word on the stack as a potential return
 * address; report the worst (last) non-OK status but keep scanning so a
 * rerun records every conflicting address.
 */
3117 static abort_t check_stack(struct update *update, struct conflict *conf,
3118 const struct thread_info *tinfo,
3119 const unsigned long *stack)
3121 abort_t status = OK, ret;
3122 unsigned long addr;
3124 while (valid_stack_ptr(tinfo, stack)) {
3125 addr = *stack++;
3126 ret = check_address(update, conf, addr);
3127 if (ret != OK)
3128 status = ret;
3130 return status;
/*
 * Check a single address against every safety record of every change in
 * the update.  When conf is non-NULL, also log the address (and whether it
 * conflicted) to the conflict report; allocation is GFP_ATOMIC because we
 * run under stop_machine.
 */
3133 static abort_t check_address(struct update *update,
3134 struct conflict *conf, unsigned long addr)
3136 abort_t status = OK, ret;
3137 const struct safety_record *rec;
3138 struct ksplice_mod_change *change;
3139 struct conflict_addr *ca = NULL;
3141 if (conf != NULL) {
3142 ca = kmalloc(sizeof(*ca), GFP_ATOMIC);
3143 if (ca == NULL)
3144 return OUT_OF_MEMORY;
3145 ca->addr = addr;
3146 ca->has_conflict = false;
3147 ca->label = NULL;
3148 list_add(&ca->list, &conf->stack);
3151 list_for_each_entry(change, &update->changes, list) {
/* Compare the real target, in case addr points at a trampoline. */
3152 unsigned long tramp_addr = follow_trampolines(change, addr);
3153 list_for_each_entry(rec, &change->safety_records, list) {
3154 ret = check_record(ca, rec, tramp_addr);
3155 if (ret != OK)
3156 status = ret;
3159 return status;
3162 static abort_t check_record(struct conflict_addr *ca,
3163 const struct safety_record *rec, unsigned long addr)
3165 if (addr >= rec->addr && addr < rec->addr + rec->size) {
3166 if (ca != NULL) {
3167 ca->label = rec->label;
3168 ca->has_conflict = true;
3170 return CODE_BUSY;
3172 return OK;
3175 /* Is the task one of the stop_machine tasks? */
/*
 * Identified purely by comm name; the expected name changed across kernel
 * versions ("kstopmachine", then "kstopN", then "kstop/N").
 */
3176 static bool is_stop_machine(const struct task_struct *t)
3178 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
3179 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
3180 const char *kstop_prefix = "kstop/";
3181 #else /* LINUX_VERSION_CODE < */
3182 /* c9583e55fa2b08a230c549bd1e3c0bde6c50d9cc was after 2.6.27 */
3183 const char *kstop_prefix = "kstop";
3184 #endif /* LINUX_VERSION_CODE */
3185 const char *num;
3186 if (!strstarts(t->comm, kstop_prefix))
3187 return false;
/* Prefix must be followed by digits only (the CPU number). */
3188 num = t->comm + strlen(kstop_prefix);
3189 return num[strspn(num, "0123456789")] == '\0';
3190 #else /* LINUX_VERSION_CODE < */
3191 /* ffdb5976c47609c862917d4c186ecbb5706d2dda was after 2.6.26 */
3192 return strcmp(t->comm, "kstopmachine") == 0;
3193 #endif /* LINUX_VERSION_CODE */
/* Free every conflict report (and its per-address entries) on the update. */
3196 static void cleanup_conflicts(struct update *update)
3198 struct conflict *conf;
3199 list_for_each_entry(conf, &update->conflicts, list) {
3200 clear_list(&conf->stack, struct conflict_addr, list);
3201 kfree(conf->process_name);
3203 clear_list(&update->conflicts, struct conflict, list);
/* Dump the recorded conflicts to the debug log, one line per task. */
3206 static void print_conflicts(struct update *update)
3208 const struct conflict *conf;
3209 const struct conflict_addr *ca;
3210 list_for_each_entry(conf, &update->conflicts, list) {
3211 _ksdebug(update, "stack check: pid %d (%s):", conf->pid,
3212 conf->process_name);
3213 list_for_each_entry(ca, &conf->stack, list) {
3214 _ksdebug(update, " %lx", ca->addr);
3215 if (ca->has_conflict)
3216 _ksdebug(update, " [<-CONFLICT]");
3218 _ksdebug(update, "\n");
/*
 * Overwrite the first p->size bytes at the patch site with the trampoline,
 * saving the original bytes for later reversal.  set_fs(KERNEL_DS) lets the
 * memcpy write through the kernel-text alias; the icache flush makes the
 * new instructions visible to execution.
 */
3222 static void insert_trampoline(struct ksplice_patch *p)
3224 mm_segment_t old_fs = get_fs();
3225 set_fs(KERNEL_DS);
3226 memcpy(p->saved, p->vaddr, p->size);
3227 memcpy(p->vaddr, p->contents, p->size);
3228 flush_icache_range(p->oldaddr, p->oldaddr + p->size);
3229 set_fs(old_fs);
/*
 * Confirm the trampoline bytes are still intact at the patch site (e.g. no
 * other code patched over them while we were applied).
 */
3232 static abort_t verify_trampoline(struct ksplice_mod_change *change,
3233 const struct ksplice_patch *p)
3235 if (memcmp(p->vaddr, p->contents, p->size) != 0) {
3236 ksdebug(change, "Aborted. Trampoline at %lx has been "
3237 "overwritten.\n", p->oldaddr);
3238 return CODE_BUSY;
3240 return OK;
/* Restore the original instructions saved by insert_trampoline. */
3243 static void remove_trampoline(const struct ksplice_patch *p)
3245 mm_segment_t old_fs = get_fs();
3246 set_fs(KERNEL_DS);
3247 memcpy(p->vaddr, p->saved, p->size);
3248 flush_icache_range(p->oldaddr, p->oldaddr + p->size);
3249 set_fs(old_fs);
3252 /* Returns NO_MATCH if there's already a labelval with a different value */
/*
 * Bind a symbol to a concrete address.  candidate_vals == NULL means the
 * symbol is already pinned, so the new value must agree.  With status ==
 * TEMP the old candidate list is stashed on change->temp_labelvals so
 * set_temp_labelvals() can later commit or roll back the binding.
 */
3253 static abort_t create_labelval(struct ksplice_mod_change *change,
3254 struct ksplice_symbol *ksym,
3255 unsigned long val, int status)
/* Canonicalize: never bind to an obsolete (trampolined) function body. */
3257 val = follow_trampolines(change, val);
3258 if (ksym->candidate_vals == NULL)
3259 return ksym->value == val ? OK : NO_MATCH;
3261 ksym->value = val;
3262 if (status == TEMP) {
3263 struct labelval *lv = kmalloc(sizeof(*lv), GFP_KERNEL);
3264 if (lv == NULL)
3265 return OUT_OF_MEMORY;
3266 lv->symbol = ksym;
3267 lv->saved_vals = ksym->candidate_vals;
3268 list_add(&lv->list, &change->temp_labelvals);
/* NULL candidate_vals marks the symbol as resolved from now on. */
3270 ksym->candidate_vals = NULL;
3271 return OK;
3275 * Creates a new safety_record for a old_code section based on its
3276 * ksplice_section and run-pre matching information.
/*
 * Only sections that are actually being patched need a record (the stack
 * checker only cares about code we are replacing); sections with no
 * corresponding patch are skipped with OK.
 */
3278 static abort_t create_safety_record(struct ksplice_mod_change *change,
3279 const struct ksplice_section *sect,
3280 struct list_head *record_list,
3281 unsigned long run_addr,
3282 unsigned long run_size)
3284 struct safety_record *rec;
3285 struct ksplice_patch *p;
/* Caller may pass NULL when it doesn't want records collected. */
3287 if (record_list == NULL)
3288 return OK;
/* Find the patch (if any) that targets this section's symbol. */
3290 for (p = change->patches; p < change->patches_end; p++) {
3291 const struct ksplice_reloc *r = patch_reloc(change, p);
3292 if (strcmp(sect->symbol->label, r->symbol->label) == 0)
3293 break;
3295 if (p >= change->patches_end)
3296 return OK;
3298 rec = kmalloc(sizeof(*rec), GFP_KERNEL);
3299 if (rec == NULL)
3300 return OUT_OF_MEMORY;
3302 * The old_code might be unloaded when checking reversing
3303 * patches, so we need to kstrdup the label here.
3305 rec->label = kstrdup(sect->symbol->label, GFP_KERNEL);
3306 if (rec->label == NULL) {
3307 kfree(rec);
3308 return OUT_OF_MEMORY;
3310 rec->addr = run_addr;
3311 rec->size = run_size;
3313 list_add(&rec->list, record_list);
3314 return OK;
/*
 * Add val to a candidate-value list, de-duplicating.  Values are
 * canonicalized through follow_trampolines first so two aliases of the
 * same function are not recorded as distinct candidates.
 */
3317 static abort_t add_candidate_val(struct ksplice_mod_change *change,
3318 struct list_head *vals, unsigned long val)
3320 struct candidate_val *tmp, *new;
3323 * Careful: follow trampolines before comparing values so that we do
3324 * not mistake the obsolete function for another copy of the function.
3326 val = follow_trampolines(change, val);
3328 list_for_each_entry(tmp, vals, list) {
3329 if (tmp->val == val)
3330 return OK;
3332 new = kmalloc(sizeof(*new), GFP_KERNEL);
3333 if (new == NULL)
3334 return OUT_OF_MEMORY;
3335 new->val = val;
3336 list_add(&new->list, vals);
3337 return OK;
/* Free an entire candidate-value list. */
3340 static void release_vals(struct list_head *vals)
3342 clear_list(vals, struct candidate_val, list);
3346 * The temp_labelvals list is used to cache those temporary labelvals
3347 * that have been created to cross-check the symbol values obtained
3348 * from different relocations within a single section being matched.
3350 * If status is VAL, commit the temp_labelvals as final values.
3352 * If status is NOVAL, restore the list of possible values to the
3353 * ksplice_symbol, so that it no longer has a known value.
3355 static void set_temp_labelvals(struct ksplice_mod_change *change, int status)
3357 struct labelval *lv, *n;
/* _safe iteration: each entry is unlinked and freed as we go. */
3358 list_for_each_entry_safe(lv, n, &change->temp_labelvals, list) {
3359 if (status == NOVAL) {
/* Roll back: the symbol becomes unresolved again. */
3360 lv->symbol->candidate_vals = lv->saved_vals;
3361 } else {
/* Commit: the saved candidate list is no longer needed. */
3362 release_vals(lv->saved_vals);
3363 kfree(lv->saved_vals);
3365 list_del(&lv->list);
3366 kfree(lv);
3370 /* Is there a Ksplice canary with given howto at blank_addr? */
/*
 * Returns 1 if the masked field at blank_addr still holds the canary
 * (i.e. the relocation has not yet been filled in), 0 if it has been
 * overwritten, or -1 on an unsupported field size.
 */
3371 static int contains_canary(struct ksplice_mod_change *change,
3372 unsigned long blank_addr,
3373 const struct ksplice_reloc_howto *howto)
3375 switch (howto->size) {
3376 case 1:
3377 return (*(uint8_t *)blank_addr & howto->dst_mask) ==
3378 (KSPLICE_CANARY & howto->dst_mask);
3379 case 2:
3380 return (*(uint16_t *)blank_addr & howto->dst_mask) ==
3381 (KSPLICE_CANARY & howto->dst_mask);
3382 case 4:
3383 return (*(uint32_t *)blank_addr & howto->dst_mask) ==
3384 (KSPLICE_CANARY & howto->dst_mask);
3385 #if BITS_PER_LONG >= 64
3386 case 8:
3387 return (*(uint64_t *)blank_addr & howto->dst_mask) ==
3388 (KSPLICE_CANARY & howto->dst_mask);
3389 #endif /* BITS_PER_LONG */
3390 default:
3391 ksdebug(change, "Aborted. Invalid relocation size.\n");
3392 return -1;
3397 * Compute the address of the code you would actually run if you were
3398 * to call the function at addr (i.e., follow the sequence of jumps
3399 * starting at addr)
/*
 * Only jumps planted by Ksplice itself are followed: the target must live
 * in a module whose name starts with "ksplice" and differ from the change's
 * own target module.  Anything else terminates the chase.
 */
3401 static unsigned long follow_trampolines(struct ksplice_mod_change *change,
3402 unsigned long addr)
3404 unsigned long new_addr;
3405 struct module *m;
3407 while (1) {
3408 #ifdef KSPLICE_STANDALONE
/* Before bootstrap we cannot safely decode kernel text. */
3409 if (!bootstrapped)
3410 return addr;
3411 #endif /* KSPLICE_STANDALONE */
3412 if (!__kernel_text_address(addr) ||
3413 trampoline_target(change, addr, &new_addr) != OK)
3414 return addr;
3415 m = __module_text_address(new_addr);
3416 if (m == NULL || m == change->target ||
3417 !strstarts(m->name, "ksplice"))
3418 return addr;
3419 addr = new_addr;
3423 /* Does module a patch module b? */
/*
 * Without kernel support this is inferred from the new-code module's name,
 * which has the form "ksplice_<kid>_<target>_new" ("vmlinux" for the
 * kernel proper); with kernel support the ksplice_modules registry is
 * consulted directly.
 */
3424 static bool patches_module(const struct module *a, const struct module *b)
3426 #ifdef KSPLICE_NO_KERNEL_SUPPORT
3427 const char *name;
3428 const char *modname = b == NULL ? "vmlinux" : b->name;
3429 if (a == b)
3430 return true;
3431 if (a == NULL || !strstarts(a->name, "ksplice_"))
3432 return false;
/* Skip over the kid portion up to the next '_' separator. */
3433 name = a->name + strlen("ksplice_");
3434 name += strcspn(name, "_");
3435 if (name[0] != '_')
3436 return false;
3437 name++;
3438 return strstarts(name, modname) &&
3439 strcmp(name + strlen(modname), "_new") == 0;
3440 #else /* !KSPLICE_NO_KERNEL_SUPPORT */
3441 struct ksplice_module_list_entry *entry;
3442 if (a == b)
3443 return true;
/*
 * NOTE(review): this branch dereferences b->name without the b == NULL
 * (vmlinux) guard the other branch has — presumably callers never pass
 * NULL here; verify against call sites.
 */
3444 list_for_each_entry(entry, &ksplice_modules, list) {
3445 if (strcmp(entry->target_mod_name, b->name) == 0 &&
3446 strcmp(entry->new_code_mod_name, a->name) == 0)
3447 return true;
3449 return false;
3450 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
3453 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
3454 /* 66f92cf9d415e96a5bdd6c64de8dd8418595d2fc was after 2.6.29 */
/*
 * Does str begin with prefix?  Compatibility copy of the 2.6.30+ kernel
 * helper for older kernels.  An empty prefix matches everything.
 */
static bool strstarts(const char *str, const char *prefix)
{
	while (*prefix != '\0') {
		if (*str++ != *prefix++)
			return false;
	}
	return true;
}
3459 #endif /* LINUX_VERSION_CODE */
3461 static bool singular(struct list_head *list)
3463 return !list_empty(list) && list->next->next == list;
/*
 * Binary search over a sorted array of n elements of the given size.
 * cmp is called as cmp(key, element) and must return <0/0/>0.  Returns a
 * pointer to a matching element, or NULL if none matches.
 *
 * Uses size_t half-open bounds [lo, hi): the previous int-based version
 * truncated n (a size_t) to int and could overflow in (start + end) / 2,
 * both of which misbehave for very large arrays.  Pointer arithmetic is
 * done on a char * since arithmetic on void * is a GCC extension.
 */
static void *bsearch(const void *key, const void *base, size_t n,
		     size_t size, int (*cmp)(const void *key, const void *elt))
{
	size_t lo = 0, hi = n;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;	/* overflow-safe midpoint */
		const char *elt = (const char *)base + mid * size;
		int result = cmp(key, elt);
		if (result < 0)
			hi = mid;
		else if (result > 0)
			lo = mid + 1;
		else
			return (void *)elt;
	}
	return NULL;
}
3485 static int compare_relocs(const void *a, const void *b)
3487 const struct ksplice_reloc *ra = a, *rb = b;
3488 if (ra->blank_addr > rb->blank_addr)
3489 return 1;
3490 else if (ra->blank_addr < rb->blank_addr)
3491 return -1;
3492 else
3493 return ra->howto->size - rb->howto->size;
3496 #ifdef KSPLICE_STANDALONE
/* sort()/bsearch() comparator for System.map entries, keyed by label. */
3497 static int compare_system_map(const void *a, const void *b)
3499 const struct ksplice_system_map *sa = a, *sb = b;
3500 return strcmp(sa->label, sb->label);
3502 #endif /* KSPLICE_STANDALONE */
3504 #ifdef CONFIG_DEBUG_FS
3505 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
3506 /* Old kernels don't have debugfs_create_blob */
/* read() handler: expose the wrapped blob as a simple read-only buffer. */
3507 static ssize_t read_file_blob(struct file *file, char __user *user_buf,
3508 size_t count, loff_t *ppos)
3510 struct debugfs_blob_wrapper *blob = file->private_data;
3511 return simple_read_from_buffer(user_buf, count, ppos, blob->data,
3512 blob->size);
/* open() handler: stash the inode's blob wrapper on the file. */
3515 static int blob_open(struct inode *inode, struct file *file)
3517 if (inode->i_private)
3518 file->private_data = inode->i_private;
3519 return 0;
3522 static struct file_operations fops_blob = {
3523 .read = read_file_blob,
3524 .open = blob_open,
/* Minimal local reimplementation of the 2.6.17+ debugfs helper. */
3527 static struct dentry *debugfs_create_blob(const char *name, mode_t mode,
3528 struct dentry *parent,
3529 struct debugfs_blob_wrapper *blob)
3531 return debugfs_create_file(name, mode, parent, blob, &fops_blob);
3533 #endif /* LINUX_VERSION_CODE */
/*
 * Create the per-update debugfs blob file that accumulates debug output.
 * The blob buffer itself is grown lazily by _ksdebug().
 */
3535 static abort_t init_debug_buf(struct update *update)
3537 update->debug_blob.size = 0;
3538 update->debug_blob.data = NULL;
3539 update->debugfs_dentry =
3540 debugfs_create_blob(update->name, S_IFREG | S_IRUSR, NULL,
3541 &update->debug_blob);
3542 if (update->debugfs_dentry == NULL)
3543 return OUT_OF_MEMORY;
3544 return OK;
/* Tear down the debugfs file and release the accumulated debug buffer. */
3547 static void clear_debug_buf(struct update *update)
3549 if (update->debugfs_dentry == NULL)
3550 return;
3551 debugfs_remove(update->debugfs_dentry);
3552 update->debugfs_dentry = NULL;
3553 update->debug_blob.size = 0;
3554 vfree(update->debug_blob.data);
3555 update->debug_blob.data = NULL;
/*
 * printf-style append to the update's debugfs blob.  The buffer grows in
 * power-of-two steps (minimum PAGE_SIZE); the format is evaluated twice —
 * once with size 0 to measure, then again to write.
 */
3558 static int _ksdebug(struct update *update, const char *fmt, ...)
3560 va_list args;
3561 unsigned long size, old_size, new_size;
3563 if (update->debug == 0)
3564 return 0;
3566 /* size includes the trailing '\0' */
3567 va_start(args, fmt);
3568 size = 1 + vsnprintf(update->debug_blob.data, 0, fmt, args);
3569 va_end(args);
3570 old_size = update->debug_blob.size == 0 ? 0 :
3571 max(PAGE_SIZE, roundup_pow_of_two(update->debug_blob.size));
3572 new_size = update->debug_blob.size + size == 0 ? 0 :
3573 max(PAGE_SIZE, roundup_pow_of_two(update->debug_blob.size + size));
3574 if (new_size > old_size) {
3575 char *buf = vmalloc(new_size);
3576 if (buf == NULL)
3577 return -ENOMEM;
/* First grow: size is 0 here, so the NULL source is never read. */
3578 memcpy(buf, update->debug_blob.data, update->debug_blob.size);
3579 vfree(update->debug_blob.data);
3580 update->debug_blob.data = buf;
3582 va_start(args, fmt);
3583 update->debug_blob.size += vsnprintf(update->debug_blob.data +
3584 update->debug_blob.size,
3585 size, fmt, args);
3586 va_end(args);
3587 return 0;
3589 #else /* CONFIG_DEBUG_FS */
/* Without debugfs there is no blob to create; debug goes to printk. */
3590 static abort_t init_debug_buf(struct update *update)
3592 return OK;
3595 static void clear_debug_buf(struct update *update)
3597 return;
/*
 * printk-based fallback.  debug_continue_line suppresses the "ksplice: "
 * prefix when the previous message did not end in a newline.
 */
3600 static int _ksdebug(struct update *update, const char *fmt, ...)
3602 va_list args;
3604 if (update->debug == 0)
3605 return 0;
3607 if (!update->debug_continue_line)
3608 printk(KERN_DEBUG "ksplice: ");
3610 va_start(args, fmt);
3611 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
3612 vprintk(fmt, args);
3613 #else /* LINUX_VERSION_CODE < */
3614 /* 683b229286b429244f35726b3c18caec429233bd was after 2.6.8 */
/* NOTE(review): kvasprintf result is not checked for NULL before printk. */
3616 char *buf = kvasprintf(GFP_KERNEL, fmt, args);
3617 printk("%s", buf);
3618 kfree(buf);
3620 #endif /* LINUX_VERSION_CODE */
3621 va_end(args);
3623 update->debug_continue_line =
3624 fmt[0] == '\0' || fmt[strlen(fmt) - 1] != '\n';
3625 return 0;
3627 #endif /* CONFIG_DEBUG_FS */
3629 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) && defined(CONFIG_KALLSYMS)
3630 /* 75a66614db21007bcc8c37f9c5d5b922981387b9 was after 2.6.29 */
3631 extern unsigned long kallsyms_addresses[];
3632 extern unsigned long kallsyms_num_syms;
3633 extern u8 kallsyms_names[];
/*
 * Backport of the 2.6.30 helper: invoke fn on every kallsyms symbol (core
 * kernel first, then all modules).  Stops early and returns fn's value on
 * the first non-zero result.
 */
3635 static int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
3636 struct module *, unsigned long),
3637 void *data)
3639 char namebuf[KSYM_NAME_LEN];
3640 unsigned long i;
3641 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3642 unsigned int off;
3643 #endif /* LINUX_VERSION_CODE */
3644 int ret;
3646 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
/* 2.6.10+: names are token-compressed; expand each one in turn. */
3647 for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
3648 off = kallsyms_expand_symbol(off, namebuf);
3649 ret = fn(data, namebuf, NULL, kallsyms_addresses[i]);
3650 if (ret != 0)
3651 return ret;
3653 #else /* LINUX_VERSION_CODE < */
3654 /* 5648d78927ca65e74aadc88a2b1d6431e55e78ec was after 2.6.9 */
/* Pre-2.6.10: names are prefix-compressed NUL-terminated strings. */
3655 char *knames;
3657 for (i = 0, knames = kallsyms_names; i < kallsyms_num_syms; i++) {
3658 unsigned prefix = *knames++;
3660 strlcpy(namebuf + prefix, knames, KSYM_NAME_LEN - prefix);
/*
 * NOTE(review): the other branch compares against 0; OK is numerically
 * the same but this is an abort_t constant compared to fn's plain int.
 */
3662 ret = fn(data, namebuf, NULL, kallsyms_addresses[i]);
3663 if (ret != OK)
3664 return ret;
3666 knames += strlen(knames) + 1;
3668 #endif /* LINUX_VERSION_CODE */
3669 return module_kallsyms_on_each_symbol(fn, data);
3672 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
3673 extern u8 kallsyms_token_table[];
3674 extern u16 kallsyms_token_index[];
/*
 * Copy of the kernel's private kallsyms decompressor: expand the
 * token-compressed symbol at offset off in kallsyms_names into result,
 * returning the offset of the next compressed symbol.  The first character
 * of the first token (the symbol-type tag) is skipped.
 */
3676 static unsigned int kallsyms_expand_symbol(unsigned int off, char *result)
3678 long len, skipped_first = 0;
3679 const u8 *tptr, *data;
3681 data = &kallsyms_names[off];
3682 len = *data;
3683 data++;
3685 off += len + 1;
3687 while (len) {
3688 tptr = &kallsyms_token_table[kallsyms_token_index[*data]];
3689 data++;
3690 len--;
3692 while (*tptr) {
3693 if (skipped_first) {
3694 *result = *tptr;
3695 result++;
3696 } else
3697 skipped_first = 1;
3698 tptr++;
3702 *result = '\0';
3704 return off;
3706 #else /* LINUX_VERSION_CODE < */
3707 /* 5648d78927ca65e74aadc88a2b1d6431e55e78ec was after 2.6.9 */
3708 #endif /* LINUX_VERSION_CODE */
/*
 * Backport: invoke fn on every symbol in every loaded module's symtab,
 * stopping at the first non-zero return.  Caller must prevent module
 * load/unload races (module_mutex or stop_machine).
 */
3710 static int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
3711 struct module *,
3712 unsigned long),
3713 void *data)
3715 struct module *mod;
3716 unsigned int i;
3717 int ret;
3719 list_for_each_entry(mod, &modules, list) {
3720 for (i = 0; i < mod->num_symtab; i++) {
3721 ret = fn(data, mod->strtab + mod->symtab[i].st_name,
3722 mod, mod->symtab[i].st_value);
3723 if (ret != 0)
3724 return ret;
3727 return 0;
3729 #endif /* LINUX_VERSION_CODE && CONFIG_KALLSYMS */
3731 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
3732 /* c6b37801911d7f4663c99cad8aa230bc934cea82 was after 2.6.29 */
/* Backport: linear scan of the module list by exact name match. */
3733 static struct module *find_module(const char *name)
3735 struct module *mod;
3737 list_for_each_entry(mod, &modules, list) {
3738 if (strcmp(mod->name, name) == 0)
3739 return mod;
3741 return NULL;
3744 #ifdef CONFIG_MODULE_UNLOAD
/* Private copies of the module loader's dependency-tracking internals. */
3745 struct module_use {
3746 struct list_head list;
3747 struct module *module_which_uses;
3750 /* I'm not yet certain whether we need the strong form of this. */
/* Take a reference on mod, refusing modules that are not fully live. */
3751 static inline int strong_try_module_get(struct module *mod)
3753 if (mod && mod->state != MODULE_STATE_LIVE)
3754 return -EBUSY;
3755 if (try_module_get(mod))
3756 return 0;
3757 return -ENOENT;
3760 /* Does a already use b? */
3761 static int already_uses(struct module *a, struct module *b)
3763 struct module_use *use;
3764 list_for_each_entry(use, &b->modules_which_use_me, list) {
3765 if (use->module_which_uses == a)
3766 return 1;
3768 return 0;
3771 /* Make it so module a uses b. Must be holding module_mutex */
/* Returns 1 on success (or nothing to do), 0 on failure. */
3772 static int use_module(struct module *a, struct module *b)
3774 struct module_use *use;
3775 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
3776 /* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
3777 int no_warn;
3778 #endif /* LINUX_VERSION_CODE */
3779 if (b == NULL || already_uses(a, b))
3780 return 1;
3782 if (strong_try_module_get(b) < 0)
3783 return 0;
3785 use = kmalloc(sizeof(*use), GFP_ATOMIC);
3786 if (!use) {
3787 module_put(b);
3788 return 0;
3790 use->module_which_uses = a;
3791 list_add(&use->list, &b->modules_which_use_me);
3792 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
3793 /* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
/* sysfs link failure is deliberately non-fatal, as in the kernel proper. */
3794 no_warn = sysfs_create_link(b->holders_dir, &a->mkobj.kobj, a->name);
3795 #endif /* LINUX_VERSION_CODE */
3796 return 1;
3798 #else /* CONFIG_MODULE_UNLOAD */
/* Without unload support there is no dependency tracking to do. */
3799 static int use_module(struct module *a, struct module *b)
3801 return 1;
3803 #endif /* CONFIG_MODULE_UNLOAD */
3805 #ifndef CONFIG_MODVERSIONS
3806 #define symversion(base, idx) NULL
3807 #else
3808 #define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
3809 #endif
/*
 * Apply fn to each symbol of each symsearch table, stopping (and
 * returning true) as soon as fn does.  Backported module-loader helper.
 */
3811 static bool each_symbol_in_section(const struct symsearch *arr,
3812 unsigned int arrsize,
3813 struct module *owner,
3814 bool (*fn)(const struct symsearch *syms,
3815 struct module *owner,
3816 unsigned int symnum, void *data),
3817 void *data)
3819 unsigned int i, j;
3821 for (j = 0; j < arrsize; j++) {
3822 for (i = 0; i < arr[j].stop - arr[j].start; i++)
3823 if (fn(&arr[j], owner, i, data))
3824 return true;
3827 return false;
3830 /* Returns true as soon as fn returns true, otherwise false. */
/*
 * Walk every exported-symbol table: the core kernel's sections first, then
 * each loaded module's.  The optional gpl_future/unused sections are
 * compiled in only when the running kernel has them.
 */
3831 static bool each_symbol(bool (*fn)(const struct symsearch *arr,
3832 struct module *owner,
3833 unsigned int symnum, void *data),
3834 void *data)
3836 struct module *mod;
3837 const struct symsearch arr[] = {
3838 { __start___ksymtab, __stop___ksymtab, __start___kcrctab,
3839 NOT_GPL_ONLY, false },
3840 { __start___ksymtab_gpl, __stop___ksymtab_gpl,
3841 __start___kcrctab_gpl,
3842 GPL_ONLY, false },
3843 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
3844 { __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
3845 __start___kcrctab_gpl_future,
3846 WILL_BE_GPL_ONLY, false },
3847 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
3848 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
3849 { __start___ksymtab_unused, __stop___ksymtab_unused,
3850 __start___kcrctab_unused,
3851 NOT_GPL_ONLY, true },
3852 { __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
3853 __start___kcrctab_unused_gpl,
3854 GPL_ONLY, true },
3855 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
/* NOTE(review): returns 1 rather than true — same value, inconsistent style. */
3858 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
3859 return 1;
3861 list_for_each_entry(mod, &modules, list) {
3862 struct symsearch module_arr[] = {
3863 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
3864 NOT_GPL_ONLY, false },
3865 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
3866 mod->gpl_crcs,
3867 GPL_ONLY, false },
3868 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
3869 { mod->gpl_future_syms,
3870 mod->gpl_future_syms + mod->num_gpl_future_syms,
3871 mod->gpl_future_crcs,
3872 WILL_BE_GPL_ONLY, false },
3873 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
3874 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
3875 { mod->unused_syms,
3876 mod->unused_syms + mod->num_unused_syms,
3877 mod->unused_crcs,
3878 NOT_GPL_ONLY, true },
3879 { mod->unused_gpl_syms,
3880 mod->unused_gpl_syms + mod->num_unused_gpl_syms,
3881 mod->unused_gpl_crcs,
3882 GPL_ONLY, true },
3883 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
3886 if (each_symbol_in_section(module_arr, ARRAY_SIZE(module_arr),
3887 mod, fn, data))
3888 return true;
3890 return false;
/* Argument/result bundle threaded through each_symbol by find_symbol. */
3893 struct find_symbol_arg {
3894 /* Input */
3895 const char *name;
3896 bool gplok;
3897 bool warn;
3899 /* Output */
3900 struct module *owner;
3901 const unsigned long *crc;
3902 const struct kernel_symbol *sym;
/*
 * each_symbol callback: match on name, enforce GPL-only licensing, emit
 * the same deprecation warnings as the kernel's module loader, then fill
 * in the output fields of find_symbol_arg.
 */
3905 static bool find_symbol_in_section(const struct symsearch *syms,
3906 struct module *owner,
3907 unsigned int symnum, void *data)
3909 struct find_symbol_arg *fsa = data;
3911 if (strcmp(syms->start[symnum].name, fsa->name) != 0)
3912 return false;
3914 if (!fsa->gplok) {
3915 if (syms->licence == GPL_ONLY)
3916 return false;
3917 if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
3918 printk(KERN_WARNING "Symbol %s is being used "
3919 "by a non-GPL module, which will not "
3920 "be allowed in the future\n", fsa->name);
3921 printk(KERN_WARNING "Please see the file "
3922 "Documentation/feature-removal-schedule.txt "
3923 "in the kernel source tree for more details.\n");
3927 #ifdef CONFIG_UNUSED_SYMBOLS
3928 if (syms->unused && fsa->warn) {
3929 printk(KERN_WARNING "Symbol %s is marked as UNUSED, "
3930 "however this module is using it.\n", fsa->name);
3931 printk(KERN_WARNING
3932 "This symbol will go away in the future.\n");
3933 printk(KERN_WARNING
3934 "Please evalute if this is the right api to use and if "
3935 "it really is, submit a report the linux kernel "
3936 "mailinglist together with submitting your code for "
3937 "inclusion.\n");
3939 #endif
3941 fsa->owner = owner;
3942 fsa->crc = symversion(syms->crcs, symnum);
3943 fsa->sym = &syms->start[symnum];
3944 return true;
3947 /* Find a symbol and return it, along with, (optional) crc and
3948 * (optional) module which owns it */
3949 static const struct kernel_symbol *find_symbol(const char *name,
3950 struct module **owner,
3951 const unsigned long **crc,
3952 bool gplok, bool warn)
3954 struct find_symbol_arg fsa;
3956 fsa.name = name;
3957 fsa.gplok = gplok;
3958 fsa.warn = warn;
3960 if (each_symbol(find_symbol_in_section, &fsa)) {
3961 if (owner)
3962 *owner = fsa.owner;
3963 if (crc)
3964 *crc = fsa.crc;
3965 return fsa.sym;
3968 return NULL;
/* Is addr inside mod's core (post-init) text/data region? */
3971 static inline int within_module_core(unsigned long addr, struct module *mod)
3973 return (unsigned long)mod->module_core <= addr &&
3974 addr < (unsigned long)mod->module_core + mod->core_size;
/* Is addr inside mod's init region (freed after module init)? */
3977 static inline int within_module_init(unsigned long addr, struct module *mod)
3979 return (unsigned long)mod->module_init <= addr &&
3980 addr < (unsigned long)mod->module_init + mod->init_size;
/* Backport of the 2.6.30 helper: which module (if any) owns addr? */
3983 static struct module *__module_address(unsigned long addr)
3985 struct module *mod;
3987 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,28)
3988 list_for_each_entry_rcu(mod, &modules, list)
3989 #else
3990 /* d72b37513cdfbd3f53f3d485a8c403cc96d2c95f was after 2.6.27 */
3991 list_for_each_entry(mod, &modules, list)
3992 #endif
3993 if (within_module_core(addr, mod) ||
3994 within_module_init(addr, mod))
3995 return mod;
3996 return NULL;
3998 #endif /* LINUX_VERSION_CODE */
/* sysfs attribute wrapper carrying typed show/store for struct update. */
4000 struct update_attribute {
4001 struct attribute attr;
4002 ssize_t (*show)(struct update *update, char *buf);
4003 ssize_t (*store)(struct update *update, const char *buf, size_t len);
/* Generic sysfs show dispatcher: recover the update and its attribute. */
4006 static ssize_t update_attr_show(struct kobject *kobj, struct attribute *attr,
4007 char *buf)
4009 struct update_attribute *attribute =
4010 container_of(attr, struct update_attribute, attr);
4011 struct update *update = container_of(kobj, struct update, kobj);
4012 if (attribute->show == NULL)
4013 return -EIO;
4014 return attribute->show(update, buf);
/* Generic sysfs store dispatcher, mirror of update_attr_show. */
4017 static ssize_t update_attr_store(struct kobject *kobj, struct attribute *attr,
4018 const char *buf, size_t len)
4020 struct update_attribute *attribute =
4021 container_of(attr, struct update_attribute, attr);
4022 struct update *update = container_of(kobj, struct update, kobj);
4023 if (attribute->store == NULL)
4024 return -EIO;
4025 return attribute->store(update, buf, len);
4028 static struct sysfs_ops update_sysfs_ops = {
4029 .show = update_attr_show,
4030 .store = update_attr_store,
/* kobject release: last reference dropped, free the whole update. */
4033 static void update_release(struct kobject *kobj)
4035 struct update *update;
4036 update = container_of(kobj, struct update, kobj);
4037 cleanup_ksplice_update(update);
/* sysfs "stage" attribute: report the update's lifecycle stage. */
4040 static ssize_t stage_show(struct update *update, char *buf)
4042 switch (update->stage) {
4043 case STAGE_PREPARING:
4044 return snprintf(buf, PAGE_SIZE, "preparing\n");
4045 case STAGE_APPLIED:
4046 return snprintf(buf, PAGE_SIZE, "applied\n");
4047 case STAGE_REVERSED:
4048 return snprintf(buf, PAGE_SIZE, "reversed\n");
/* Unreachable with current enum values; keeps the compiler satisfied. */
4050 return 0;
/* sysfs "abort_cause" attribute: render the last abort_t as a string. */
4053 static ssize_t abort_cause_show(struct update *update, char *buf)
4055 switch (update->abort_cause) {
4056 case OK:
4057 return snprintf(buf, PAGE_SIZE, "ok\n");
4058 case NO_MATCH:
4059 return snprintf(buf, PAGE_SIZE, "no_match\n");
4060 #ifdef KSPLICE_STANDALONE
4061 case BAD_SYSTEM_MAP:
4062 return snprintf(buf, PAGE_SIZE, "bad_system_map\n");
4063 #endif /* KSPLICE_STANDALONE */
4064 case CODE_BUSY:
4065 return snprintf(buf, PAGE_SIZE, "code_busy\n");
4066 case MODULE_BUSY:
4067 return snprintf(buf, PAGE_SIZE, "module_busy\n");
4068 case OUT_OF_MEMORY:
4069 return snprintf(buf, PAGE_SIZE, "out_of_memory\n");
4070 case FAILED_TO_FIND:
4071 return snprintf(buf, PAGE_SIZE, "failed_to_find\n");
4072 case ALREADY_REVERSED:
4073 return snprintf(buf, PAGE_SIZE, "already_reversed\n");
4074 case MISSING_EXPORT:
4075 return snprintf(buf, PAGE_SIZE, "missing_export\n");
4076 case UNEXPECTED_RUNNING_TASK:
4077 return snprintf(buf, PAGE_SIZE, "unexpected_running_task\n");
4078 case TARGET_NOT_LOADED:
4079 return snprintf(buf, PAGE_SIZE, "target_not_loaded\n");
4080 case CALL_FAILED:
4081 return snprintf(buf, PAGE_SIZE, "call_failed\n");
4082 case COLD_UPDATE_LOADED:
4083 return snprintf(buf, PAGE_SIZE, "cold_update_loaded\n");
4084 case UNEXPECTED:
4085 return snprintf(buf, PAGE_SIZE, "unexpected\n");
4086 default:
4087 return snprintf(buf, PAGE_SIZE, "unknown\n");
/* Unreachable: every path above returns (default included). */
4089 return 0;
/*
 * sysfs "conflicts" attribute: one line per conflicting task, listing the
 * labels of the records each blocked address fell into.
 * NOTE(review): snprintf returns the would-be length, so if the report
 * exceeds PAGE_SIZE, `used` can pass PAGE_SIZE and PAGE_SIZE - used goes
 * negative (huge as size_t) — presumably reports stay small; verify.
 */
4092 static ssize_t conflict_show(struct update *update, char *buf)
4094 const struct conflict *conf;
4095 const struct conflict_addr *ca;
4096 int used = 0;
4097 mutex_lock(&module_mutex);
4098 list_for_each_entry(conf, &update->conflicts, list) {
4099 used += snprintf(buf + used, PAGE_SIZE - used, "%s %d",
4100 conf->process_name, conf->pid);
4101 list_for_each_entry(ca, &conf->stack, list) {
4102 if (!ca->has_conflict)
4103 continue;
4104 used += snprintf(buf + used, PAGE_SIZE - used, " %s",
4105 ca->label);
4107 used += snprintf(buf + used, PAGE_SIZE - used, "\n");
4109 mutex_unlock(&module_mutex);
4110 return used;
4113 /* Used to pass maybe_cleanup_ksplice_update to kthread_run */
/* Runs in a kthread so the final module_put can't drop us mid-call. */
4114 static int maybe_cleanup_ksplice_update_wrapper(void *updateptr)
4116 struct update *update = updateptr;
4117 mutex_lock(&module_mutex);
4118 maybe_cleanup_ksplice_update(update);
4119 mutex_unlock(&module_mutex);
4120 return 0;
/*
 * sysfs "stage" writer: drive the update through its state machine
 * ("applied", "reversed", "cleanup"), each transition valid from exactly
 * one prior stage.  NOTE(review): strncmp(buf, "applied", len) with the
 * user-supplied len accepts any prefix (e.g. "app") — presumably
 * intentional leniency for echo'ed input; verify.
 */
4123 static ssize_t stage_store(struct update *update, const char *buf, size_t len)
4125 enum stage old_stage;
4126 mutex_lock(&module_mutex);
4127 old_stage = update->stage;
4128 if ((strncmp(buf, "applied", len) == 0 ||
4129 strncmp(buf, "applied\n", len) == 0) &&
4130 update->stage == STAGE_PREPARING)
4131 update->abort_cause = apply_update(update);
4132 else if ((strncmp(buf, "reversed", len) == 0 ||
4133 strncmp(buf, "reversed\n", len) == 0) &&
4134 update->stage == STAGE_APPLIED)
4135 update->abort_cause = reverse_update(update);
4136 else if ((strncmp(buf, "cleanup", len) == 0 ||
4137 strncmp(buf, "cleanup\n", len) == 0) &&
4138 update->stage == STAGE_REVERSED)
/* Cleanup happens on a kthread; see maybe_cleanup_ksplice_update_wrapper. */
4139 kthread_run(maybe_cleanup_ksplice_update_wrapper, update,
4140 "ksplice_cleanup_%s", update->kid);
4142 mutex_unlock(&module_mutex);
4143 return len;
4146 static ssize_t debug_show(struct update *update, char *buf)
4148 return snprintf(buf, PAGE_SIZE, "%d\n", update->debug);
4151 static ssize_t debug_store(struct update *update, const char *buf, size_t len)
4153 unsigned long l;
4154 int ret = strict_strtoul(buf, 10, &l);
4155 if (ret != 0)
4156 return ret;
4157 update->debug = l;
4158 return len;
4161 static ssize_t partial_show(struct update *update, char *buf)
4163 return snprintf(buf, PAGE_SIZE, "%d\n", update->partial);
4166 static ssize_t partial_store(struct update *update, const char *buf, size_t len)
4168 unsigned long l;
4169 int ret = strict_strtoul(buf, 10, &l);
4170 if (ret != 0)
4171 return ret;
4172 update->partial = l;
4173 return len;
4176 static struct update_attribute stage_attribute =
4177 __ATTR(stage, 0600, stage_show, stage_store);
4178 static struct update_attribute abort_cause_attribute =
4179 __ATTR(abort_cause, 0400, abort_cause_show, NULL);
4180 static struct update_attribute debug_attribute =
4181 __ATTR(debug, 0600, debug_show, debug_store);
4182 static struct update_attribute partial_attribute =
4183 __ATTR(partial, 0600, partial_show, partial_store);
4184 static struct update_attribute conflict_attribute =
4185 __ATTR(conflicts, 0400, conflict_show, NULL);
4187 static struct attribute *update_attrs[] = {
4188 &stage_attribute.attr,
4189 &abort_cause_attribute.attr,
4190 &debug_attribute.attr,
4191 &partial_attribute.attr,
4192 &conflict_attribute.attr,
4193 NULL
4196 static struct kobj_type update_ktype = {
4197 .sysfs_ops = &update_sysfs_ops,
4198 .release = update_release,
4199 .default_attrs = update_attrs,
#ifdef KSPLICE_STANDALONE
/* Initial debug level, copied onto the bootstrap update at init time. */
static int debug;
module_param(debug, int, 0600);
MODULE_PARM_DESC(debug, "Debug level");

/* System-map table linked into this module; _end marks one past the last
 * entry.  Sorted at init so lookups can binary-search. */
extern struct ksplice_system_map ksplice_system_map[], ksplice_system_map_end[];

/* Mod-change describing this module itself (no external target), used by
 * init_ksplice() to apply the module's own init relocations. */
static struct ksplice_mod_change bootstrap_mod_change = {
	.name = "ksplice_" __stringify(KSPLICE_KID),
	.kid = "init_" __stringify(KSPLICE_KID),
	.target_name = NULL,
	.target = NULL,
	.map_printk = MAP_PRINTK,
	.new_code_mod = THIS_MODULE,
	.new_code.system_map = ksplice_system_map,
	.new_code.system_map_end = ksplice_system_map_end,
};
#endif /* KSPLICE_STANDALONE */
4221 static int init_ksplice(void)
4223 #ifdef KSPLICE_STANDALONE
4224 struct ksplice_mod_change *change = &bootstrap_mod_change;
4225 change->update = init_ksplice_update(change->kid);
4226 sort(change->new_code.system_map,
4227 change->new_code.system_map_end - change->new_code.system_map,
4228 sizeof(struct ksplice_system_map), compare_system_map, NULL);
4229 if (change->update == NULL)
4230 return -ENOMEM;
4231 add_to_update(change, change->update);
4232 change->update->debug = debug;
4233 change->update->abort_cause =
4234 apply_relocs(change, ksplice_init_relocs, ksplice_init_relocs_end);
4235 if (change->update->abort_cause == OK)
4236 bootstrapped = true;
4237 cleanup_ksplice_update(bootstrap_mod_change.update);
4238 #else /* !KSPLICE_STANDALONE */
4239 ksplice_kobj = kobject_create_and_add("ksplice", kernel_kobj);
4240 if (ksplice_kobj == NULL)
4241 return -ENOMEM;
4242 #endif /* KSPLICE_STANDALONE */
4243 return 0;
4246 static void cleanup_ksplice(void)
4248 #ifndef KSPLICE_STANDALONE
4249 kobject_put(ksplice_kobj);
4250 #endif /* KSPLICE_STANDALONE */
4253 module_init(init_ksplice);
4254 module_exit(cleanup_ksplice);
4256 MODULE_AUTHOR("Ksplice, Inc.");
4257 MODULE_DESCRIPTION("Ksplice rebootless update system");
4258 #ifdef KSPLICE_VERSION
4259 MODULE_VERSION(KSPLICE_VERSION);
4260 #endif
4261 MODULE_LICENSE("GPL v2");