Set ret to 0 on success path out of init_ksplice_pack.
[ksplice.git] kmodsrc/ksplice.c
blob 37da6617d58371fe4f87d8b8ba6214e54eb91c38
1 /* Copyright (C) 2007-2008 Jeffrey Brian Arnold <jbarnold@mit.edu>
2 * Copyright (C) 2008 Anders Kaseorg <andersk@mit.edu>,
3 * Tim Abbott <tabbott@mit.edu>
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License, version 2.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
16 * 02110-1301, USA.
17 */
19 #include <linux/module.h>
20 #include <linux/version.h>
21 #if defined CONFIG_DEBUG_FS || LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
22 #include <linux/debugfs.h>
23 #else /* CONFIG_DEBUG_FS */
24 /* a7a76cefc4b12bb6508afa4c77f11c2752cc365d was after 2.6.11 */
25 #endif /* CONFIG_DEBUG_FS */
26 #include <linux/errno.h>
27 #include <linux/kallsyms.h>
28 #include <linux/kobject.h>
29 #include <linux/kthread.h>
30 #include <linux/pagemap.h>
31 #include <linux/sched.h>
32 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12)
33 /* 8c63b6d337534a6b5fb111dc27d0850f535118c0 was after 2.6.11 */
34 #include <linux/sort.h>
35 #endif /* LINUX_VERSION_CODE < */
36 #include <linux/stop_machine.h>
37 #include <linux/sysfs.h>
38 #include <linux/time.h>
39 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18)
40 #include <linux/uaccess.h>
41 #else /* LINUX_VERSION_CODE < */
42 /* linux/uaccess.h doesn't exist in kernels before 2.6.18 */
43 #include <asm/uaccess.h>
44 #endif /* LINUX_VERSION_CODE */
45 #include <linux/vmalloc.h>
46 #ifdef KSPLICE_STANDALONE
47 #include "ksplice.h"
48 #else /* !KSPLICE_STANDALONE */
49 #include <linux/ksplice.h>
50 #endif /* KSPLICE_STANDALONE */
51 #ifdef KSPLICE_NEED_PARAINSTRUCTIONS
52 #include <asm/alternative.h>
53 #endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
55 #if defined(KSPLICE_STANDALONE) && \
56 !defined(CONFIG_KSPLICE) && !defined(CONFIG_KSPLICE_MODULE)
57 #define KSPLICE_NO_KERNEL_SUPPORT 1
58 #endif /* KSPLICE_STANDALONE && !CONFIG_KSPLICE && !CONFIG_KSPLICE_MODULE */
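/* In a standalone build against a kernel compiled without CONFIG_KSPLICE,
 * none of the in-kernel Ksplice support is available, so this module
 * supplies its own copies of the module/symbol helpers (find_module,
 * use_module, find_symbol, each_symbol, the kallsyms walkers) in the
 * KSPLICE_NO_KERNEL_SUPPORT sections further down. */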
60 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
61 /* 6e21828743247270d09a86756a0c11702500dbfb was after 2.6.18 */
62 #define bool _Bool
63 #define false 0
64 #define true 1
65 #endif /* LINUX_VERSION_CODE */
67 enum stage {
68 STAGE_PREPARING, STAGE_APPLIED, STAGE_REVERSED
71 enum run_pre_mode {
72 RUN_PRE_INITIAL, RUN_PRE_DEBUG, RUN_PRE_FINAL
75 enum { NOVAL, TEMP, VAL };
77 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
78 /* 5d7b32de9935c65ca8285ac6ec2382afdbb5d479 was after 2.6.8 */
79 #define __bitwise__
80 #elif LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15)
81 /* af4ca457eaf2d6682059c18463eb106e2ce58198 was after 2.6.14 */
82 #define __bitwise__ __bitwise
83 #endif
85 typedef int __bitwise__ abort_t;
87 #define OK ((__force abort_t) 0)
88 #define NO_MATCH ((__force abort_t) 1)
89 #define CODE_BUSY ((__force abort_t) 2)
90 #define MODULE_BUSY ((__force abort_t) 3)
91 #define OUT_OF_MEMORY ((__force abort_t) 4)
92 #define FAILED_TO_FIND ((__force abort_t) 5)
93 #define ALREADY_REVERSED ((__force abort_t) 6)
94 #define MISSING_EXPORT ((__force abort_t) 7)
95 #define UNEXPECTED_RUNNING_TASK ((__force abort_t) 8)
96 #define UNEXPECTED ((__force abort_t) 9)
97 #ifdef KSPLICE_STANDALONE
98 #define BAD_SYSTEM_MAP ((__force abort_t) 10)
99 #endif /* KSPLICE_STANDALONE */
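/* abort_t is a sparse __bitwise type so that these error codes cannot be
 * mixed with plain integers without a __force cast. OK (0) means success;
 * the other values identify why an operation was abandoned (see the
 * abort_cause field of struct update). */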
101 struct update {
102 const char *kid;
103 const char *name;
104 struct kobject kobj;
105 enum stage stage;
106 abort_t abort_cause;
107 int debug;
108 #ifdef CONFIG_DEBUG_FS
109 struct debugfs_blob_wrapper debug_blob;
110 struct dentry *debugfs_dentry;
111 #else /* !CONFIG_DEBUG_FS */
112 bool debug_continue_line;
113 #endif /* CONFIG_DEBUG_FS */
114 struct list_head packs;
115 struct list_head conflicts;
116 struct list_head list;
119 struct conflict {
120 const char *process_name;
121 pid_t pid;
122 struct list_head stack;
123 struct list_head list;
126 struct conflict_addr {
127 unsigned long addr;
128 bool has_conflict;
129 const char *label;
130 struct list_head list;
133 #if defined(CONFIG_DEBUG_FS) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
134 /* Old kernels don't have debugfs_create_blob */
135 struct debugfs_blob_wrapper {
136 void *data;
137 unsigned long size;
139 #endif /* CONFIG_DEBUG_FS && LINUX_VERSION_CODE */
141 struct labelval {
142 struct list_head list;
143 const char *label;
144 unsigned long val;
145 struct ksplice_symbol *symbol;
148 struct safety_record {
149 struct list_head list;
150 const char *label;
151 unsigned long addr;
152 unsigned long size;
153 bool first_byte_safe;
156 struct candidate_val {
157 struct list_head list;
158 unsigned long val;
161 struct accumulate_struct {
162 struct ksplice_pack *pack;
163 const char *desired_name;
164 struct list_head *vals;
167 struct ksplice_lookup {
168 /* input */
169 struct ksplice_pack *pack;
170 struct ksplice_symbol **arr;
171 size_t size;
172 /* output */
173 abort_t ret;
176 #ifdef KSPLICE_NO_KERNEL_SUPPORT
177 struct symsearch {
178 const struct kernel_symbol *start, *stop;
179 const unsigned long *crcs;
180 enum {
181 NOT_GPL_ONLY,
182 GPL_ONLY,
183 WILL_BE_GPL_ONLY,
184 } licence;
185 bool unused;
187 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
189 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)
190 /* c33fa9f5609e918824446ef9a75319d4a802f1f4 was after 2.6.25 */
192 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
193 /* 2fff0a48416af891dce38fd425246e337831e0bb was after 2.6.19 */
194 static bool virtual_address_mapped(unsigned long addr)
196 char retval;
197 return probe_kernel_address(addr, retval) != -EFAULT;
199 #else /* LINUX_VERSION_CODE < */
200 static bool virtual_address_mapped(unsigned long addr);
201 #endif /* LINUX_VERSION_CODE */
203 static long probe_kernel_read(void *dst, void *src, size_t size)
205 if (!virtual_address_mapped((unsigned long)src) ||
206 !virtual_address_mapped((unsigned long)src + size))
207 return -EFAULT;
209 memcpy(dst, src, size);
210 return 0;
212 #endif /* LINUX_VERSION_CODE */
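/* Fallback for kernels that lack probe_kernel_read: check that both ends
 * of the source range are mapped and then do an ordinary memcpy. This is
 * only adequate for the small reads done here (at most sizeof(long)
 * bytes), where the range spans at most two pages and both pages are
 * covered by the two endpoint checks. */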
214 static LIST_HEAD(updates);
215 #ifdef KSPLICE_STANDALONE
216 #if defined(CONFIG_KSPLICE) || defined(CONFIG_KSPLICE_MODULE)
217 extern struct list_head ksplice_module_list;
218 #else /* !CONFIG_KSPLICE */
219 LIST_HEAD(ksplice_module_list);
220 #endif /* CONFIG_KSPLICE */
221 #else /* !KSPLICE_STANDALONE */
222 LIST_HEAD(ksplice_module_list);
223 EXPORT_SYMBOL_GPL(ksplice_module_list);
224 static struct kobject *ksplice_kobj;
225 #endif /* KSPLICE_STANDALONE */
227 static struct kobj_type ksplice_ktype;
229 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)
230 /* Old kernels do not have kcalloc
231 * e629946abd0bb8266e9c3d0fd1bff2ef8dec5443 was after 2.6.8
232 */
233 static void *kcalloc(size_t n, size_t size, typeof(GFP_KERNEL) flags)
235 char *mem;
236 if (n != 0 && size > ULONG_MAX / n)
237 return NULL;
238 mem = kmalloc(n * size, flags);
239 if (mem)
240 memset(mem, 0, n * size);
241 return mem;
243 #endif /* LINUX_VERSION_CODE */
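/* The guard above refuses any (n, size) pair whose product would overflow:
 * e.g. with n == 4, any size larger than ULONG_MAX / 4 would make n * size
 * wrap around, so NULL is returned instead of a too-small allocation. */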
245 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12)
246 /* 8c63b6d337534a6b5fb111dc27d0850f535118c0 was after 2.6.11 */
247 static void u32_swap(void *a, void *b, int size)
249 u32 t = *(u32 *)a;
250 *(u32 *)a = *(u32 *)b;
251 *(u32 *)b = t;
254 static void generic_swap(void *a, void *b, int size)
256 char t;
258 do {
259 t = *(char *)a;
260 *(char *)a++ = *(char *)b;
261 *(char *)b++ = t;
262 } while (--size > 0);
265 /**
266 * sort - sort an array of elements
267 * @base: pointer to data to sort
268 * @num: number of elements
269 * @size: size of each element
270 * @cmp: pointer to comparison function
271 * @swap: pointer to swap function or NULL
273 * This function does a heapsort on the given array. You may provide a
274 * swap function optimized to your element type.
276 * Sorting time is O(n log n) both on average and worst-case. While
277 * qsort is about 20% faster on average, it suffers from exploitable
278 * O(n*n) worst-case behavior and extra memory requirements that make
279 * it less suitable for kernel use.
280 */
282 void sort(void *base, size_t num, size_t size,
283 int (*cmp)(const void *, const void *),
284 void (*swap)(void *, void *, int size))
286 /* pre-scale counters for performance */
287 int i = (num / 2 - 1) * size, n = num * size, c, r;
289 if (!swap)
290 swap = (size == 4 ? u32_swap : generic_swap);
292 /* heapify */
293 for (; i >= 0; i -= size) {
294 for (r = i; r * 2 + size < n; r = c) {
295 c = r * 2 + size;
296 if (c < n - size && cmp(base + c, base + c + size) < 0)
297 c += size;
298 if (cmp(base + r, base + c) >= 0)
299 break;
300 swap(base + r, base + c, size);
304 /* sort */
305 for (i = n - size; i > 0; i -= size) {
306 swap(base, base + i, size);
307 for (r = 0; r * 2 + size < i; r = c) {
308 c = r * 2 + size;
309 if (c < i - size && cmp(base + c, base + c + size) < 0)
310 c += size;
311 if (cmp(base + r, base + c) >= 0)
312 break;
313 swap(base + r, base + c, size);
317 #endif /* LINUX_VERSION_CODE < */
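/* This compat sort() is called exactly like the in-kernel one, e.g. the
 * call in init_ksplice_pack() below that makes the relocation table
 * binary searchable:
 *
 *	sort(pack->helper_relocs,
 *	     pack->helper_relocs_end - pack->helper_relocs,
 *	     sizeof(struct ksplice_reloc), compare_reloc_addresses, NULL);
 */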
319 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13)
320 /* Old kernels do not have kstrdup
321 * 543537bd922692bc978e2e356fcd8bfc9c2ee7d5 was 2.6.13-rc4
322 */
323 static char *kstrdup(const char *s, typeof(GFP_KERNEL) gfp)
325 size_t len;
326 char *buf;
328 if (!s)
329 return NULL;
331 len = strlen(s) + 1;
332 buf = kmalloc(len, gfp);
333 if (buf)
334 memcpy(buf, s, len);
335 return buf;
337 #endif /* LINUX_VERSION_CODE */
339 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
340 /* Old kernels use semaphore instead of mutex
341 * 97d1f15b7ef52c1e9c28dc48b454024bb53a5fd2 was after 2.6.16
342 */
343 #define mutex semaphore
344 #define mutex_lock down
345 #define mutex_unlock up
346 #endif /* LINUX_VERSION_CODE */
348 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
349 /* 11443ec7d9286dd25663516436a14edfb5f43857 was after 2.6.21 */
350 static char * __attribute_used__
351 kvasprintf(typeof(GFP_KERNEL) gfp, const char *fmt, va_list ap)
353 unsigned int len;
354 char *p, dummy[1];
355 va_list aq;
357 va_copy(aq, ap);
358 len = vsnprintf(dummy, 0, fmt, aq);
359 va_end(aq);
361 p = kmalloc(len + 1, gfp);
362 if (!p)
363 return NULL;
365 vsnprintf(p, len + 1, fmt, ap);
367 return p;
369 #endif
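/* kvasprintf() sizes the buffer with a first vsnprintf() pass (a size of 0
 * returns the length that would have been written, excluding the NUL),
 * allocates len + 1 bytes, and then formats for real. va_copy() is needed
 * because the va_list is consumed twice. */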
371 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18)
372 /* e905914f96e11862b130dd229f73045dad9a34e8 was after 2.6.17 */
373 static char * __attribute__((format (printf, 2, 3)))
374 kasprintf(typeof(GFP_KERNEL) gfp, const char *fmt, ...)
376 va_list ap;
377 char *p;
379 va_start(ap, fmt);
380 p = kvasprintf(gfp, fmt, ap);
381 va_end(ap);
383 return p;
385 #endif
387 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25)
388 /* 06b2a76d25d3cfbd14680021c1d356c91be6904e was after 2.6.24 */
389 static int strict_strtoul(const char *cp, unsigned int base, unsigned long *res)
391 char *tail;
392 unsigned long val;
393 size_t len;
395 *res = 0;
396 len = strlen(cp);
397 if (len == 0)
398 return -EINVAL;
400 val = simple_strtoul(cp, &tail, base);
401 if ((*tail == '\0') ||
402 ((len == (size_t)(tail - cp) + 1) && (*tail == '\n'))) {
403 *res = val;
404 return 0;
407 return -EINVAL;
409 #endif
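/* Unlike simple_strtoul(), this strict variant rejects trailing garbage:
 * the whole string must be a number in the given base, optionally followed
 * by a single '\n' (convenient for values written through sysfs, where
 * echo appends a newline). */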
411 #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,17)
412 /* 5e376613899076396d0c97de67ad072587267370 was after 2.6.16 */
413 static int core_kernel_text(unsigned long addr)
415 return addr >= init_mm.start_code && addr < init_mm.end_code;
417 #endif /* LINUX_VERSION_CODE */
419 #ifndef task_thread_info
420 #define task_thread_info(task) (task)->thread_info
421 #endif /* !task_thread_info */
423 #ifdef KSPLICE_STANDALONE
425 static bool bootstrapped = false;
427 #ifdef CONFIG_KALLSYMS
428 extern unsigned long kallsyms_addresses[], kallsyms_num_syms;
429 extern u8 kallsyms_names[];
430 #endif /* CONFIG_KALLSYMS */
432 /* defined by ksplice-create */
433 extern const struct ksplice_reloc ksplice_init_relocs[],
434 ksplice_init_relocs_end[];
436 /* Obtained via System.map */
437 extern struct list_head modules;
438 extern struct mutex module_mutex;
439 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,18) && defined(CONFIG_UNUSED_SYMBOLS)
440 /* f71d20e961474dde77e6558396efb93d6ac80a4b was after 2.6.17 */
441 #define KSPLICE_KSYMTAB_UNUSED_SUPPORT 1
442 #endif /* LINUX_VERSION_CODE */
443 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,17)
444 /* 9f28bb7e1d0188a993403ab39b774785892805e1 was after 2.6.16 */
445 #define KSPLICE_KSYMTAB_FUTURE_SUPPORT 1
446 #endif /* LINUX_VERSION_CODE */
447 extern const struct kernel_symbol __start___ksymtab[];
448 extern const struct kernel_symbol __stop___ksymtab[];
449 extern const unsigned long __start___kcrctab[];
450 extern const struct kernel_symbol __start___ksymtab_gpl[];
451 extern const struct kernel_symbol __stop___ksymtab_gpl[];
452 extern const unsigned long __start___kcrctab_gpl[];
453 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
454 extern const struct kernel_symbol __start___ksymtab_unused[];
455 extern const struct kernel_symbol __stop___ksymtab_unused[];
456 extern const unsigned long __start___kcrctab_unused[];
457 extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
458 extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
459 extern const unsigned long __start___kcrctab_unused_gpl[];
460 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
461 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
462 extern const struct kernel_symbol __start___ksymtab_gpl_future[];
463 extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
464 extern const unsigned long __start___kcrctab_gpl_future[];
465 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
467 #endif /* KSPLICE_STANDALONE */
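/* None of the symbols above are exported to modules, so in standalone mode
 * their addresses are taken from System.map, presumably via the
 * ksplice_init_relocs table emitted by ksplice-create; the rest of Ksplice
 * refuses to run (init_ksplice_pack() returns -1) until they have been
 * resolved and "bootstrapped" becomes true. */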
469 static struct update *init_ksplice_update(const char *kid);
470 static void cleanup_ksplice_update(struct update *update);
471 static void add_to_update(struct ksplice_pack *pack, struct update *update);
472 static int ksplice_sysfs_init(struct update *update);
474 /* Preparing the relocations and patches for application */
475 static abort_t apply_update(struct update *update);
476 static abort_t prepare_pack(struct ksplice_pack *pack);
477 static abort_t finalize_pack(struct ksplice_pack *pack);
478 static abort_t finalize_exports(struct ksplice_pack *pack);
479 static abort_t finalize_patches(struct ksplice_pack *pack);
480 static abort_t add_dependency_on_address(struct ksplice_pack *pack,
481 unsigned long addr);
482 static abort_t map_trampoline_pages(struct update *update);
483 static void unmap_trampoline_pages(struct update *update);
484 static void *map_writable(void *addr, size_t len);
485 static abort_t apply_relocs(struct ksplice_pack *pack,
486 const struct ksplice_reloc *relocs,
487 const struct ksplice_reloc *relocs_end);
488 static abort_t apply_reloc(struct ksplice_pack *pack,
489 const struct ksplice_reloc *r);
490 static abort_t read_reloc_value(struct ksplice_pack *pack,
491 const struct ksplice_reloc *r,
492 unsigned long addr, unsigned long *valp);
493 static abort_t write_reloc_value(struct ksplice_pack *pack,
494 const struct ksplice_reloc *r,
495 unsigned long addr, unsigned long sym_addr);
496 static void __attribute__((noreturn)) ksplice_deleted(void);
498 /* run-pre matching */
499 static abort_t match_pack_sections(struct ksplice_pack *pack,
500 bool consider_data_sections);
501 static abort_t find_section(struct ksplice_pack *pack,
502 const struct ksplice_section *sect);
503 static abort_t try_addr(struct ksplice_pack *pack,
504 const struct ksplice_section *sect,
505 unsigned long run_addr,
506 struct list_head *safety_records,
507 enum run_pre_mode mode);
508 static abort_t run_pre_cmp(struct ksplice_pack *pack,
509 const struct ksplice_section *sect,
510 unsigned long run_addr,
511 struct list_head *safety_records,
512 enum run_pre_mode mode);
513 #ifndef CONFIG_FUNCTION_DATA_SECTIONS
514 /* defined in arch/ARCH/kernel/ksplice-arch.c */
515 static abort_t arch_run_pre_cmp(struct ksplice_pack *pack,
516 const struct ksplice_section *sect,
517 unsigned long run_addr,
518 struct list_head *safety_records,
519 enum run_pre_mode mode);
520 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
521 static void print_bytes(struct ksplice_pack *pack,
522 const unsigned char *run, int runc,
523 const unsigned char *pre, int prec);
524 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
525 static abort_t brute_search(struct ksplice_pack *pack,
526 const struct ksplice_section *sect,
527 const void *start, unsigned long len,
528 struct list_head *vals);
529 static abort_t brute_search_all(struct ksplice_pack *pack,
530 const struct ksplice_section *sect,
531 struct list_head *vals);
532 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
533 static const struct ksplice_reloc *
534 init_reloc_search(struct ksplice_pack *pack,
535 const struct ksplice_section *sect);
536 static abort_t lookup_reloc(struct ksplice_pack *pack,
537 const struct ksplice_reloc **fingerp,
538 unsigned long addr,
539 const struct ksplice_reloc **relocp);
540 static abort_t handle_reloc(struct ksplice_pack *pack,
541 const struct ksplice_reloc *r,
542 unsigned long run_addr, enum run_pre_mode mode);
544 /* Computing possible addresses for symbols */
545 static abort_t lookup_symbol(struct ksplice_pack *pack,
546 const struct ksplice_symbol *ksym,
547 struct list_head *vals);
548 static void cleanup_symbol_arrays(struct ksplice_pack *pack);
549 static abort_t init_symbol_arrays(struct ksplice_pack *pack);
550 static abort_t init_symbol_array(struct ksplice_pack *pack,
551 struct ksplice_symbol *start,
552 struct ksplice_symbol *end);
553 static abort_t uniquify_symbols(struct ksplice_pack *pack);
554 static abort_t add_matching_values(struct ksplice_lookup *lookup,
555 const char *sym_name, unsigned long sym_val);
556 static bool add_export_values(const struct symsearch *syms,
557 struct module *owner,
558 unsigned int symnum, void *data);
559 static int symbolp_bsearch_compare(const void *key, const void *elt);
560 static int compare_symbolp_names(const void *a, const void *b);
561 static int compare_symbolp_labels(const void *a, const void *b);
562 #ifdef CONFIG_KALLSYMS
563 static int add_kallsyms_values(void *data, const char *name,
564 struct module *owner, unsigned long val);
565 #endif /* CONFIG_KALLSYMS */
566 #ifdef KSPLICE_STANDALONE
567 static abort_t
568 add_system_map_candidates(struct ksplice_pack *pack,
569 const struct ksplice_system_map *start,
570 const struct ksplice_system_map *end,
571 const char *label, struct list_head *vals);
572 static int compare_system_map(const void *a, const void *b);
573 static int system_map_bsearch_compare(const void *key, const void *elt);
574 #endif /* KSPLICE_STANDALONE */
575 static abort_t new_export_lookup(struct ksplice_pack *p, struct update *update,
576 const char *name, struct list_head *vals);
578 /* Atomic update insertion and removal */
579 static abort_t apply_patches(struct update *update);
580 static abort_t reverse_patches(struct update *update);
581 static int __apply_patches(void *update);
582 static int __reverse_patches(void *update);
583 static abort_t check_each_task(struct update *update);
584 static abort_t check_task(struct update *update,
585 const struct task_struct *t, bool rerun);
586 static abort_t check_stack(struct update *update, struct conflict *conf,
587 const struct thread_info *tinfo,
588 const unsigned long *stack);
589 static abort_t check_address(struct update *update,
590 struct conflict *conf, unsigned long addr);
591 static abort_t check_record(struct conflict_addr *ca,
592 const struct safety_record *rec,
593 unsigned long addr);
594 static bool is_stop_machine(const struct task_struct *t);
595 static void cleanup_conflicts(struct update *update);
596 static void print_conflicts(struct update *update);
597 static void insert_trampoline(struct ksplice_patch *p);
598 static abort_t verify_trampoline(struct ksplice_pack *pack,
599 const struct ksplice_patch *p);
600 static void remove_trampoline(const struct ksplice_patch *p);
602 static struct labelval *find_labelval(struct ksplice_pack *pack,
603 const char *label);
604 static abort_t create_labelval(struct ksplice_pack *pack,
605 struct ksplice_symbol *ksym,
606 unsigned long val, int status);
607 static abort_t create_safety_record(struct ksplice_pack *pack,
608 const struct ksplice_section *sect,
609 struct list_head *record_list,
610 unsigned long run_addr,
611 unsigned long run_size);
612 static abort_t add_candidate_val(struct ksplice_pack *pack,
613 struct list_head *vals, unsigned long val);
614 static void release_vals(struct list_head *vals);
615 static void set_temp_labelvals(struct ksplice_pack *pack, int status_val);
617 static int contains_canary(struct ksplice_pack *pack, unsigned long blank_addr,
618 int size, long dst_mask);
619 static unsigned long follow_trampolines(struct ksplice_pack *pack,
620 unsigned long addr);
621 static bool patches_module(const struct module *a, const struct module *b);
622 static bool starts_with(const char *str, const char *prefix);
623 static bool singular(struct list_head *list);
624 static void *bsearch(const void *key, const void *base, size_t n,
625 size_t size, int (*cmp)(const void *key, const void *elt));
626 static int compare_reloc_addresses(const void *a, const void *b);
627 static int reloc_bsearch_compare(const void *key, const void *elt);
629 /* Debugging */
630 static abort_t init_debug_buf(struct update *update);
631 static void clear_debug_buf(struct update *update);
632 static int __attribute__((format(printf, 2, 3)))
633 _ksdebug(struct update *update, const char *fmt, ...);
634 #define ksdebug(pack, fmt, ...) \
635 _ksdebug(pack->update, fmt, ## __VA_ARGS__)
637 #ifdef KSPLICE_NO_KERNEL_SUPPORT
638 /* Functions defined here that will be exported in later kernels */
639 #ifdef CONFIG_KALLSYMS
640 static int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
641 struct module *, unsigned long),
642 void *data);
643 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
644 static unsigned int kallsyms_expand_symbol(unsigned int off, char *result);
645 #endif /* LINUX_VERSION_CODE */
646 static int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
647 struct module *,
648 unsigned long),
649 void *data);
650 #endif /* CONFIG_KALLSYMS */
651 static struct module *find_module(const char *name);
652 static int use_module(struct module *a, struct module *b);
653 static const struct kernel_symbol *find_symbol(const char *name,
654 struct module **owner,
655 const unsigned long **crc,
656 bool gplok, bool warn);
657 static bool each_symbol(bool (*fn)(const struct symsearch *arr,
658 struct module *owner,
659 unsigned int symnum, void *data),
660 void *data);
661 static struct module *__module_data_address(unsigned long addr);
662 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
664 /* Architecture-specific functions defined in arch/ARCH/kernel/ksplice-arch.c */
665 static abort_t prepare_trampoline(struct ksplice_pack *pack,
666 struct ksplice_patch *p);
667 static abort_t trampoline_target(struct ksplice_pack *pack, unsigned long addr,
668 unsigned long *new_addr);
669 static abort_t handle_paravirt(struct ksplice_pack *pack, unsigned long pre,
670 unsigned long run, int *matched);
671 static bool valid_stack_ptr(const struct thread_info *tinfo, const void *p);
673 #ifndef KSPLICE_STANDALONE
674 #include "ksplice-arch.c"
675 #elif defined CONFIG_X86
676 #include "x86/ksplice-arch.c"
677 #elif defined CONFIG_ARM
678 #include "arm/ksplice-arch.c"
679 #endif /* KSPLICE_STANDALONE */
681 #define clear_list(head, type, member) \
682 do { \
683 struct list_head *_pos, *_n; \
684 list_for_each_safe(_pos, _n, head) { \
685 list_del(_pos); \
686 kfree(list_entry(_pos, type, member)); \
688 } while (0)
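/* clear_list() unlinks and kfree()s every entry on a list, e.g.:
 *
 *	clear_list(&pack->labelvals, struct labelval, list);
 *
 * init_ksplice_pack() below is the entry point called from each Ksplice
 * primary module's init function: it sorts the relocation and System.map
 * tables for binary searching, resolves the target module (or vmlinux for
 * "vmlinux") and pins it with use_module(), and attaches the pack to the
 * update with the matching kid, creating the update and its sysfs
 * representation when this is the first pack. It returns 0 on success and
 * a negative errno otherwise. */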
690 int init_ksplice_pack(struct ksplice_pack *pack)
692 struct update *update;
693 struct ksplice_patch *p;
694 int ret = 0;
696 #ifdef KSPLICE_STANDALONE
697 if (!bootstrapped)
698 return -1;
699 #endif /* KSPLICE_STANDALONE */
701 INIT_LIST_HEAD(&pack->labelvals);
702 INIT_LIST_HEAD(&pack->temp_labelvals);
703 INIT_LIST_HEAD(&pack->safety_records);
705 sort(pack->helper_relocs,
706 (pack->helper_relocs_end - pack->helper_relocs),
707 sizeof(struct ksplice_reloc), compare_reloc_addresses, NULL);
708 #ifdef KSPLICE_STANDALONE
709 sort(pack->primary_system_map,
710 (pack->primary_system_map_end - pack->primary_system_map),
711 sizeof(struct ksplice_system_map), compare_system_map, NULL);
712 sort(pack->helper_system_map,
713 (pack->helper_system_map_end - pack->helper_system_map),
714 sizeof(struct ksplice_system_map), compare_system_map, NULL);
715 #endif /* KSPLICE_STANDALONE */
717 mutex_lock(&module_mutex);
718 if (strcmp(pack->target_name, "vmlinux") == 0) {
719 pack->target = NULL;
720 } else {
721 pack->target = find_module(pack->target_name);
722 if (pack->target == NULL || !module_is_live(pack->target)) {
723 ret = -ENODEV;
724 goto out;
726 ret = use_module(pack->primary, pack->target);
727 if (ret != 1) {
728 ret = -ENODEV;
729 goto out;
733 for (p = pack->patches; p < pack->patches_end; p++)
734 p->vaddr = NULL;
736 list_for_each_entry(update, &updates, list) {
737 if (strcmp(pack->kid, update->kid) == 0) {
738 if (update->stage != STAGE_PREPARING) {
739 ret = -EPERM;
740 goto out;
742 add_to_update(pack, update);
743 ret = 0;
744 goto out;
747 update = init_ksplice_update(pack->kid);
748 if (update == NULL) {
749 ret = -ENOMEM;
750 goto out;
752 ret = ksplice_sysfs_init(update);
753 if (ret != 0) {
754 cleanup_ksplice_update(update);
755 goto out;
757 add_to_update(pack, update);
758 out:
759 mutex_unlock(&module_mutex);
760 return ret;
762 EXPORT_SYMBOL_GPL(init_ksplice_pack);
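/* cleanup_ksplice_pack() is the matching unload-time hook: once an update
 * has been applied there is nothing to undo; otherwise the pack is
 * unlinked and, if it was the update's last pack, the update's kobject
 * reference is dropped (kobject_put() or kobject_unregister() depending on
 * kernel version). */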
764 void cleanup_ksplice_pack(struct ksplice_pack *pack)
766 if (pack->update == NULL || pack->update->stage == STAGE_APPLIED)
767 return;
768 mutex_lock(&module_mutex);
769 list_del(&pack->list);
770 mutex_unlock(&module_mutex);
771 if (list_empty(&pack->update->packs))
772 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
773 kobject_put(&pack->update->kobj);
774 #else /* LINUX_VERSION_CODE < */
775 /* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
776 kobject_unregister(&pack->update->kobj);
777 #endif /* LINUX_VERSION_CODE */
778 pack->update = NULL;
780 EXPORT_SYMBOL_GPL(cleanup_ksplice_pack);
782 static struct update *init_ksplice_update(const char *kid)
784 struct update *update;
785 update = kcalloc(1, sizeof(struct update), GFP_KERNEL);
786 if (update == NULL)
787 return NULL;
788 update->name = kasprintf(GFP_KERNEL, "ksplice_%s", kid);
789 if (update->name == NULL) {
790 kfree(update);
791 return NULL;
793 update->kid = kstrdup(kid, GFP_KERNEL);
794 if (update->kid == NULL) {
795 kfree(update->name);
796 kfree(update);
797 return NULL;
799 INIT_LIST_HEAD(&update->packs);
800 if (init_debug_buf(update) != OK) {
801 kfree(update->kid);
802 kfree(update->name);
803 kfree(update);
804 return NULL;
806 list_add(&update->list, &updates);
807 update->stage = STAGE_PREPARING;
808 update->abort_cause = OK;
809 INIT_LIST_HEAD(&update->conflicts);
810 return update;
813 static void cleanup_ksplice_update(struct update *update)
815 #ifdef KSPLICE_STANDALONE
816 if (bootstrapped)
817 mutex_lock(&module_mutex);
818 list_del(&update->list);
819 if (bootstrapped)
820 mutex_unlock(&module_mutex);
821 #else /* !KSPLICE_STANDALONE */
822 mutex_lock(&module_mutex);
823 list_del(&update->list);
824 mutex_unlock(&module_mutex);
825 #endif /* KSPLICE_STANDALONE */
826 cleanup_conflicts(update);
827 clear_debug_buf(update);
828 kfree(update->kid);
829 kfree(update->name);
830 kfree(update);
833 static void add_to_update(struct ksplice_pack *pack, struct update *update)
835 pack->update = update;
836 list_add(&pack->list, &update->packs);
837 pack->module_list_entry.target = pack->target;
838 pack->module_list_entry.primary = pack->primary;
841 static int ksplice_sysfs_init(struct update *update)
843 int ret = 0;
844 memset(&update->kobj, 0, sizeof(update->kobj));
845 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,25)
846 #ifndef KSPLICE_STANDALONE
847 ret = kobject_init_and_add(&update->kobj, &ksplice_ktype,
848 ksplice_kobj, "%s", update->kid);
849 #else /* KSPLICE_STANDALONE */
850 /* 6d06adfaf82d154023141ddc0c9de18b6a49090b was after 2.6.24 */
851 ret = kobject_init_and_add(&update->kobj, &ksplice_ktype,
852 &THIS_MODULE->mkobj.kobj, "ksplice");
853 #endif /* KSPLICE_STANDALONE */
854 #else /* LINUX_VERSION_CODE < */
855 ret = kobject_set_name(&update->kobj, "%s", "ksplice");
856 if (ret != 0)
857 return ret;
858 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,11)
859 update->kobj.parent = &THIS_MODULE->mkobj.kobj;
860 #else /* LINUX_VERSION_CODE < */
861 /* b86ab02803095190d6b72bcc18dcf620bf378df9 was after 2.6.10 */
862 update->kobj.parent = &THIS_MODULE->mkobj->kobj;
863 #endif /* LINUX_VERSION_CODE */
864 update->kobj.ktype = &ksplice_ktype;
865 ret = kobject_register(&update->kobj);
866 #endif /* LINUX_VERSION_CODE */
867 if (ret != 0)
868 return ret;
869 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15)
870 kobject_uevent(&update->kobj, KOBJ_ADD);
871 #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
872 /* 312c004d36ce6c739512bac83b452f4c20ab1f62 was after 2.6.14 */
873 /* 12025235884570ba7f02a6f427f973ac6be7ec54 was after 2.6.9 */
874 kobject_uevent(&update->kobj, KOBJ_ADD, NULL);
875 #endif /* LINUX_VERSION_CODE */
876 return 0;
879 static abort_t apply_update(struct update *update)
881 struct ksplice_pack *pack;
882 abort_t ret;
884 mutex_lock(&module_mutex);
885 #ifdef KSPLICE_NEED_PARAINSTRUCTIONS
886 list_for_each_entry(pack, &update->packs, list) {
887 if (pack->target == NULL) {
888 apply_paravirt(pack->primary_parainstructions,
889 pack->primary_parainstructions_end);
890 apply_paravirt(pack->helper_parainstructions,
891 pack->helper_parainstructions_end);
894 #endif /* KSPLICE_NEED_PARAINSTRUCTIONS */
896 list_for_each_entry(pack, &update->packs, list) {
897 ret = init_symbol_arrays(pack);
898 if (ret != OK) {
899 cleanup_symbol_arrays(pack);
900 return ret;
902 ret = prepare_pack(pack);
903 cleanup_symbol_arrays(pack);
904 if (ret != OK)
905 goto out;
907 ret = apply_patches(update);
908 out:
909 list_for_each_entry(pack, &update->packs, list) {
910 clear_list(&pack->labelvals, struct labelval, list);
911 if (update->stage == STAGE_PREPARING)
912 clear_list(&pack->safety_records, struct safety_record,
913 list);
915 mutex_unlock(&module_mutex);
916 return ret;
919 static int compare_symbolp_names(const void *a, const void *b)
921 const struct ksplice_symbol *const *sympa = a, *const *sympb = b;
922 if ((*sympa)->name == NULL && (*sympb)->name == NULL)
923 return 0;
924 if ((*sympa)->name == NULL)
925 return -1;
926 if ((*sympb)->name == NULL)
927 return 1;
928 return strcmp((*sympa)->name, (*sympb)->name);
931 static int compare_symbolp_labels(const void *a, const void *b)
933 const struct ksplice_symbol *const *sympa = a, *const *sympb = b;
934 return strcmp((*sympa)->label, (*sympb)->label);
937 static int symbolp_bsearch_compare(const void *key, const void *elt)
939 const char *name = key;
940 const struct ksplice_symbol *const *symp = elt;
941 const struct ksplice_symbol *sym = *symp;
942 if (sym->name == NULL)
943 return 1;
944 return strcmp(name, sym->name);
947 static abort_t add_matching_values(struct ksplice_lookup *lookup,
948 const char *sym_name, unsigned long sym_val)
950 struct ksplice_symbol **symp;
951 abort_t ret;
953 symp = bsearch(sym_name, lookup->arr, lookup->size,
954 sizeof(*lookup->arr), symbolp_bsearch_compare);
955 if (symp == NULL)
956 return OK;
958 while (symp > lookup->arr &&
959 symbolp_bsearch_compare(sym_name, symp - 1) == 0)
960 symp--;
962 for (; symp < lookup->arr + lookup->size; symp++) {
963 struct ksplice_symbol *sym = *symp;
964 if (sym->name == NULL || strcmp(sym_name, sym->name) != 0)
965 break;
966 ret = add_candidate_val(lookup->pack, sym->vals, sym_val);
967 if (ret != OK)
968 return ret;
970 return OK;
973 #ifdef CONFIG_KALLSYMS
974 static int add_kallsyms_values(void *data, const char *name,
975 struct module *owner, unsigned long val)
977 struct ksplice_lookup *lookup = data;
978 if (owner == lookup->pack->primary ||
979 !patches_module(owner, lookup->pack->target))
980 return (__force int)OK;
981 return (__force int)add_matching_values(lookup, name, val);
983 #endif /* CONFIG_KALLSYMS */
985 static bool add_export_values(const struct symsearch *syms,
986 struct module *owner,
987 unsigned int symnum, void *data)
989 struct ksplice_lookup *lookup = data;
990 abort_t ret;
992 ret = add_matching_values(lookup, syms->start[symnum].name,
993 syms->start[symnum].value);
994 if (ret != OK) {
995 lookup->ret = ret;
996 return true;
998 return false;
1001 static void cleanup_symbol_arrays(struct ksplice_pack *pack)
1003 struct ksplice_symbol *sym;
1004 for (sym = pack->primary_symbols; sym < pack->primary_symbols_end;
1005 sym++) {
1006 clear_list(sym->vals, struct candidate_val, list);
1007 kfree(sym->vals);
1008 sym->vals = NULL;
1010 for (sym = pack->helper_symbols; sym < pack->helper_symbols_end; sym++) {
1011 clear_list(sym->vals, struct candidate_val, list);
1012 kfree(sym->vals);
1013 sym->vals = NULL;
1017 static abort_t uniquify_symbols(struct ksplice_pack *pack)
1019 struct ksplice_reloc *r;
1020 struct ksplice_section *s;
1021 struct ksplice_symbol *sym, **sym_arr, **symp;
1022 size_t size = pack->primary_symbols_end - pack->primary_symbols;
1024 if (size == 0)
1025 return OK;
1027 sym_arr = vmalloc(sizeof(*sym_arr) * size);
1028 if (sym_arr == NULL)
1029 return OUT_OF_MEMORY;
1031 for (symp = sym_arr, sym = pack->primary_symbols;
1032 symp < sym_arr + size && sym < pack->primary_symbols_end;
1033 sym++, symp++)
1034 *symp = sym;
1036 sort(sym_arr, size, sizeof(*sym_arr), compare_symbolp_labels, NULL);
1038 for (r = pack->helper_relocs; r < pack->helper_relocs_end; r++) {
1039 symp = bsearch(&r->symbol, sym_arr, size, sizeof(*sym_arr),
1040 compare_symbolp_labels);
1041 if (symp != NULL)
1042 r->symbol = *symp;
1045 for (s = pack->helper_sections; s < pack->helper_sections_end; s++) {
1046 symp = bsearch(&s->symbol, sym_arr, size, sizeof(*sym_arr),
1047 compare_symbolp_labels);
1048 if (symp != NULL)
1049 s->symbol = *symp;
1052 vfree(sym_arr);
1053 return OK;
1056 static abort_t init_symbol_array(struct ksplice_pack *pack,
1057 struct ksplice_symbol *start,
1058 struct ksplice_symbol *end)
1060 struct ksplice_symbol *sym, **sym_arr, **symp;
1061 struct ksplice_lookup lookup;
1062 size_t size = end - start;
1063 abort_t ret;
1065 if (size == 0)
1066 return OK;
1068 for (sym = start; sym < end; sym++) {
1069 sym->vals = kmalloc(sizeof(*sym->vals), GFP_KERNEL);
1070 if (sym->vals == NULL)
1071 return OUT_OF_MEMORY;
1072 INIT_LIST_HEAD(sym->vals);
1073 sym->lv = NULL;
1076 sym_arr = vmalloc(sizeof(*sym_arr) * size);
1077 if (sym_arr == NULL)
1078 return OUT_OF_MEMORY;
1080 for (symp = sym_arr, sym = start; symp < sym_arr + size && sym < end;
1081 sym++, symp++)
1082 *symp = sym;
1084 sort(sym_arr, size, sizeof(*sym_arr), compare_symbolp_names, NULL);
1086 lookup.pack = pack;
1087 lookup.arr = sym_arr;
1088 lookup.size = size;
1089 lookup.ret = OK;
1091 each_symbol(add_export_values, &lookup);
1092 ret = lookup.ret;
1093 #ifdef CONFIG_KALLSYMS
1094 if (ret == OK)
1095 ret = (__force abort_t)
1096 kallsyms_on_each_symbol(add_kallsyms_values, &lookup);
1097 #endif /* CONFIG_KALLSYMS */
1098 vfree(sym_arr);
1099 return ret;
1102 static abort_t init_symbol_arrays(struct ksplice_pack *pack)
1104 abort_t ret;
1106 ret = init_symbol_array(pack, pack->helper_symbols,
1107 pack->helper_symbols_end);
1108 if (ret != OK)
1109 return ret;
1111 ret = init_symbol_array(pack, pack->primary_symbols,
1112 pack->primary_symbols_end);
1113 if (ret != OK)
1114 return ret;
1116 ret = uniquify_symbols(pack);
1117 if (ret != OK)
1118 return ret;
1120 return OK;
1123 static abort_t prepare_pack(struct ksplice_pack *pack)
1125 abort_t ret;
1127 ksdebug(pack, "Preparing and checking %s\n", pack->name);
1128 ret = match_pack_sections(pack, false);
1129 if (ret == NO_MATCH) {
1130 /* It is possible that by using relocations from .data sections
1131 we can successfully run-pre match the rest of the sections.
1132 To avoid using any symbols obtained from .data sections
1133 (which may be unreliable) in the post code, we first prepare
1134 the post code and then try to run-pre match the remaining
1135 sections with the help of .data sections.
1136 */
1137 ksdebug(pack, "Continuing without some sections; we might "
1138 "find them later.\n");
1139 ret = finalize_pack(pack);
1140 if (ret != OK) {
1141 ksdebug(pack, "Aborted. Unable to continue without "
1142 "the unmatched sections.\n");
1143 return ret;
1146 ksdebug(pack, "run-pre: Considering .data sections to find the "
1147 "unmatched sections\n");
1148 ret = match_pack_sections(pack, true);
1149 if (ret != OK)
1150 return ret;
1152 ksdebug(pack, "run-pre: Found all previously unmatched "
1153 "sections\n");
1154 return OK;
1155 } else if (ret != OK) {
1156 return ret;
1159 return finalize_pack(pack);
1162 static abort_t finalize_pack(struct ksplice_pack *pack)
1164 abort_t ret;
1165 ret = apply_relocs(pack, pack->primary_relocs,
1166 pack->primary_relocs_end);
1167 if (ret != OK)
1168 return ret;
1170 ret = finalize_patches(pack);
1171 if (ret != OK)
1172 return ret;
1174 ret = finalize_exports(pack);
1175 if (ret != OK)
1176 return ret;
1178 return OK;
1181 static abort_t finalize_exports(struct ksplice_pack *pack)
1183 struct ksplice_export *exp;
1184 struct module *m;
1185 const struct kernel_symbol *sym;
1187 for (exp = pack->exports; exp < pack->exports_end; exp++) {
1188 sym = find_symbol(exp->name, &m, NULL, true, false);
1189 if (sym == NULL) {
1190 ksdebug(pack, "Could not find kernel_symbol struct for "
1191 "%s\n", exp->name);
1192 return MISSING_EXPORT;
1195 /* Cast away const since we are planning to mutate the
1196 * kernel_symbol structure. */
1197 exp->sym = (struct kernel_symbol *)sym;
1198 exp->saved_name = exp->sym->name;
1199 if (m != pack->primary && use_module(pack->primary, m) != 1) {
1200 ksdebug(pack, "Aborted. Could not add dependency on "
1201 "symbol %s from module %s.\n", sym->name,
1202 m->name);
1203 return UNEXPECTED;
1206 return OK;
1209 static abort_t finalize_patches(struct ksplice_pack *pack)
1211 struct ksplice_patch *p;
1212 struct safety_record *rec;
1213 abort_t ret;
1215 for (p = pack->patches; p < pack->patches_end; p++) {
1216 struct labelval *lv = find_labelval(pack, p->label);
1217 bool found = false;
1218 if (lv == NULL) {
1219 ksdebug(pack, "Failed to find %s for oldaddr\n",
1220 p->label);
1221 return FAILED_TO_FIND;
1223 p->oldaddr = lv->val;
1225 list_for_each_entry(rec, &pack->safety_records, list) {
1226 if (strcmp(rec->label, p->label) == 0 &&
1227 follow_trampolines(pack, p->oldaddr)
1228 == rec->addr) {
1229 found = true;
1230 break;
1233 if (!found) {
1234 ksdebug(pack, "No safety record for patch %s\n",
1235 p->label);
1236 return NO_MATCH;
1238 if (rec->size < p->size) {
1239 ksdebug(pack, "Symbol %s is too short for trampoline\n",
1240 p->label);
1241 return UNEXPECTED;
1243 /* Make sure the record's label field won't get freed
1244 when the helper module is unloaded */
1245 rec->label = p->label;
1247 if (p->repladdr == 0)
1248 p->repladdr = (unsigned long)ksplice_deleted;
1249 else
1250 rec->first_byte_safe = true;
1252 ret = prepare_trampoline(pack, p);
1253 if (ret != OK)
1254 return ret;
1256 ret = add_dependency_on_address(pack, p->oldaddr);
1257 if (ret != OK)
1258 return ret;
1260 return OK;
1263 static abort_t map_trampoline_pages(struct update *update)
1265 struct ksplice_pack *pack;
1266 list_for_each_entry(pack, &update->packs, list) {
1267 struct ksplice_patch *p;
1268 for (p = pack->patches; p < pack->patches_end; p++) {
1269 p->vaddr = map_writable((void *)p->oldaddr, p->size);
1270 if (p->vaddr == NULL) {
1271 ksdebug(pack, "Unable to map oldaddr read/write"
1272 "\n");
1273 unmap_trampoline_pages(update);
1274 return UNEXPECTED;
1278 return OK;
1281 static void unmap_trampoline_pages(struct update *update)
1283 struct ksplice_pack *pack;
1284 list_for_each_entry(pack, &update->packs, list) {
1285 struct ksplice_patch *p;
1286 for (p = pack->patches; p < pack->patches_end; p++) {
1287 vunmap((void *)((unsigned long)p->vaddr & PAGE_MASK));
1288 p->vaddr = NULL;
1293 /* Based off of linux's text_poke. */
1294 static void *map_writable(void *addr, size_t len)
1296 void *vaddr;
1297 int nr_pages = 2;
1298 struct page *pages[2];
1300 if (!core_kernel_text((unsigned long)addr)) {
1301 pages[0] = vmalloc_to_page(addr);
1302 pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
1303 } else {
1304 #if defined(CONFIG_X86_64) && LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22)
1305 /* e3ebadd95cb621e2c7436f3d3646447ac9d5c16d was after 2.6.21 */
1306 pages[0] = pfn_to_page(__pa_symbol(addr) >> PAGE_SHIFT);
1307 WARN_ON(!PageReserved(pages[0]));
1308 pages[1] = pfn_to_page(__pa_symbol(addr + PAGE_SIZE) >>
1309 PAGE_SHIFT);
1310 #else /* !CONFIG_X86_64 || LINUX_VERSION_CODE >= */
1311 pages[0] = virt_to_page(addr);
1312 WARN_ON(!PageReserved(pages[0]));
1313 pages[1] = virt_to_page(addr + PAGE_SIZE);
1314 #endif /* CONFIG_X86_64 && LINUX_VERSION_CODE */
1316 if (!pages[0])
1317 return NULL;
1318 if (!pages[1])
1319 nr_pages = 1;
1320 vaddr = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
1321 if (vaddr == NULL)
1322 return NULL;
1323 return vaddr + offset_in_page(addr);
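/* The pointer returned above is a temporary writable alias of up to two
 * pages created with vmap(); map_trampoline_pages() stores it in p->vaddr
 * so that trampolines can be written even where the running text is mapped
 * read-only, and unmap_trampoline_pages() tears the alias down again with
 * vunmap() on the page-aligned base. */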
1326 static abort_t add_dependency_on_address(struct ksplice_pack *pack,
1327 unsigned long addr)
1329 struct module *m =
1330 __module_text_address(follow_trampolines(pack, addr));
1331 if (m == NULL || m == pack->primary)
1332 return OK;
1333 if (use_module(pack->primary, m) != 1)
1334 return MODULE_BUSY;
1335 return OK;
1338 static abort_t apply_relocs(struct ksplice_pack *pack,
1339 const struct ksplice_reloc *relocs,
1340 const struct ksplice_reloc *relocs_end)
1342 const struct ksplice_reloc *r;
1343 for (r = relocs; r < relocs_end; r++) {
1344 abort_t ret = apply_reloc(pack, r);
1345 if (ret != OK)
1346 return ret;
1348 return OK;
1351 static abort_t apply_reloc(struct ksplice_pack *pack,
1352 const struct ksplice_reloc *r)
1354 abort_t ret;
1355 int canary_ret;
1356 unsigned long sym_addr;
1357 LIST_HEAD(vals);
1359 canary_ret = contains_canary(pack, r->blank_addr, r->size, r->dst_mask);
1360 if (canary_ret < 0)
1361 return UNEXPECTED;
1362 if (canary_ret == 0) {
1363 ksdebug(pack, "reloc: skipped %lx to %s+%lx (altinstr)\n",
1364 r->blank_addr, r->symbol->label, r->addend);
1365 return OK;
1368 #ifdef KSPLICE_STANDALONE
1369 if (!bootstrapped) {
1370 ret = add_system_map_candidates(pack,
1371 pack->primary_system_map,
1372 pack->primary_system_map_end,
1373 r->symbol->label, &vals);
1374 if (ret != OK) {
1375 release_vals(&vals);
1376 return ret;
1379 #endif /* KSPLICE_STANDALONE */
1380 ret = lookup_symbol(pack, r->symbol, &vals);
1381 if (ret != OK) {
1382 release_vals(&vals);
1383 return ret;
1385 if (!singular(&vals)) {
1386 release_vals(&vals);
1387 ksdebug(pack, "Failed to find %s for reloc\n",
1388 r->symbol->label);
1389 return FAILED_TO_FIND;
1391 sym_addr = list_entry(vals.next, struct candidate_val, list)->val;
1392 release_vals(&vals);
1394 ret = write_reloc_value(pack, r, r->blank_addr,
1395 r->pcrel ? sym_addr - r->blank_addr : sym_addr);
1396 if (ret != OK)
1397 return ret;
1399 ksdebug(pack, "reloc: %lx to %s+%lx (S=%lx ", r->blank_addr,
1400 r->symbol->label, r->addend, sym_addr);
1401 switch (r->size) {
1402 case 1:
1403 ksdebug(pack, "aft=%02x)\n", *(uint8_t *)r->blank_addr);
1404 break;
1405 case 2:
1406 ksdebug(pack, "aft=%04x)\n", *(uint16_t *)r->blank_addr);
1407 break;
1408 case 4:
1409 ksdebug(pack, "aft=%08x)\n", *(uint32_t *)r->blank_addr);
1410 break;
1411 #if BITS_PER_LONG >= 64
1412 case 8:
1413 ksdebug(pack, "aft=%016llx)\n", *(uint64_t *)r->blank_addr);
1414 break;
1415 #endif /* BITS_PER_LONG */
1416 default:
1417 ksdebug(pack, "Aborted. Invalid relocation size.\n");
1418 return UNEXPECTED;
1420 #ifdef KSPLICE_STANDALONE
1421 if (!bootstrapped)
1422 return OK;
1423 #endif /* KSPLICE_STANDALONE */
1425 /* Create labelvals so that we can verify our choices in the second
1426 round of run-pre matching that considers data sections. */
1427 ret = create_labelval(pack, r->symbol, sym_addr, VAL);
1428 if (ret != OK)
1429 return ret;
1430 return add_dependency_on_address(pack, sym_addr);
1433 static abort_t read_reloc_value(struct ksplice_pack *pack,
1434 const struct ksplice_reloc *r,
1435 unsigned long addr, unsigned long *valp)
1437 unsigned char bytes[sizeof(long)];
1438 unsigned long val;
1440 if (probe_kernel_read(bytes, (void *)addr, r->size) == -EFAULT)
1441 return NO_MATCH;
1443 switch (r->size) {
1444 case 1:
1445 val = *(uint8_t *)bytes;
1446 break;
1447 case 2:
1448 val = *(uint16_t *)bytes;
1449 break;
1450 case 4:
1451 val = *(uint32_t *)bytes;
1452 break;
1453 #if BITS_PER_LONG >= 64
1454 case 8:
1455 val = *(uint64_t *)bytes;
1456 break;
1457 #endif /* BITS_PER_LONG */
1458 default:
1459 ksdebug(pack, "Aborted. Invalid relocation size.\n");
1460 return UNEXPECTED;
1463 val &= r->dst_mask;
1464 if (r->signed_addend)
1465 val |= -(val & (r->dst_mask & ~(r->dst_mask >> 1)));
1466 val <<= r->rightshift;
1467 val -= r->addend;
1468 *valp = val;
1469 return OK;
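/* write_reloc_value() below is the inverse of read_reloc_value(): it
 * stores (sym_addr + addend) >> rightshift into the bits selected by
 * dst_mask, leaves the rest of the instruction word untouched, and then
 * reads the value back to detect overflow. Illustrative example (not
 * taken from any particular architecture): a signed 16-bit field in a
 * 32-bit word would use dst_mask 0x0000ffff; its sign bit is
 * dst_mask & ~(dst_mask >> 1) == 0x8000, which is exactly the bit the
 * sign-extension step above relies on. */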
1472 static abort_t write_reloc_value(struct ksplice_pack *pack,
1473 const struct ksplice_reloc *r,
1474 unsigned long addr, unsigned long sym_addr)
1476 unsigned long val = sym_addr + r->addend;
1477 val >>= r->rightshift;
1478 switch (r->size) {
1479 case 1:
1480 *(uint8_t *)addr =
1481 (*(uint8_t *)addr & ~r->dst_mask) | (val & r->dst_mask);
1482 break;
1483 case 2:
1484 *(uint16_t *)addr =
1485 (*(uint16_t *)addr & ~r->dst_mask) | (val & r->dst_mask);
1486 break;
1487 case 4:
1488 *(uint32_t *)addr =
1489 (*(uint32_t *)addr & ~r->dst_mask) | (val & r->dst_mask);
1490 break;
1491 #if BITS_PER_LONG >= 64
1492 case 8:
1493 *(uint64_t *)addr =
1494 (*(uint64_t *)addr & ~r->dst_mask) | (val & r->dst_mask);
1495 break;
1496 #endif /* BITS_PER_LONG */
1497 default:
1498 ksdebug(pack, "Aborted. Invalid relocation size.\n");
1499 return UNEXPECTED;
1502 if (read_reloc_value(pack, r, addr, &val) != OK || val != sym_addr) {
1503 ksdebug(pack, "Aborted. Relocation overflow.\n");
1504 return UNEXPECTED;
1507 return OK;
1510 static void __attribute__((noreturn)) ksplice_deleted(void)
1512 printk(KERN_CRIT "Called a kernel function deleted by Ksplice!\n");
1513 BUG();
1514 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
1515 /* 91768d6c2bad0d2766a166f13f2f57e197de3458 was after 2.6.19 */
1516 for (;;);
1517 #endif
1520 static abort_t match_pack_sections(struct ksplice_pack *pack,
1521 bool consider_data_sections)
1523 const struct ksplice_section *sect;
1524 abort_t ret;
1525 char *finished;
1526 int i, remaining = 0;
1527 bool progress;
1529 finished = kcalloc(pack->helper_sections_end - pack->helper_sections,
1530 sizeof(*finished), GFP_KERNEL);
1531 if (finished == NULL)
1532 return OUT_OF_MEMORY;
1533 for (sect = pack->helper_sections; sect < pack->helper_sections_end;
1534 sect++) {
1535 if ((sect->flags & KSPLICE_SECTION_DATA) == 0)
1536 remaining++;
1539 while (remaining > 0) {
1540 progress = false;
1541 for (sect = pack->helper_sections;
1542 sect < pack->helper_sections_end; sect++) {
1543 i = sect - pack->helper_sections;
1544 if (finished[i])
1545 continue;
1546 if (!consider_data_sections &&
1547 (sect->flags & KSPLICE_SECTION_DATA) != 0)
1548 continue;
1549 ret = find_section(pack, sect);
1550 if (ret == OK) {
1551 finished[i] = 1;
1552 if ((sect->flags & KSPLICE_SECTION_DATA) == 0)
1553 remaining--;
1554 progress = true;
1555 } else if (ret != NO_MATCH) {
1556 kfree(finished);
1557 return ret;
1561 if (progress)
1562 continue;
1564 for (sect = pack->helper_sections;
1565 sect < pack->helper_sections_end; sect++) {
1566 i = sect - pack->helper_sections;
1567 if (finished[i] != 0)
1568 continue;
1569 ksdebug(pack, "run-pre: could not match %s "
1570 "section %s\n",
1571 (sect->flags & KSPLICE_SECTION_DATA) != 0 ?
1572 "data" :
1573 (sect->flags & KSPLICE_SECTION_RODATA) != 0 ?
1574 "rodata" : "text", sect->symbol->label);
1576 ksdebug(pack, "Aborted. run-pre: could not match some "
1577 "sections.\n");
1578 kfree(finished);
1579 return NO_MATCH;
1581 kfree(finished);
1582 return OK;
1585 static abort_t find_section(struct ksplice_pack *pack,
1586 const struct ksplice_section *sect)
1588 int i;
1589 abort_t ret;
1590 unsigned long run_addr;
1591 LIST_HEAD(vals);
1592 struct candidate_val *v, *n;
1594 #ifdef KSPLICE_STANDALONE
1595 ret = add_system_map_candidates(pack, pack->helper_system_map,
1596 pack->helper_system_map_end,
1597 sect->symbol->label, &vals);
1598 if (ret != OK) {
1599 release_vals(&vals);
1600 return ret;
1602 #endif /* KSPLICE_STANDALONE */
1603 ret = lookup_symbol(pack, sect->symbol, &vals);
1604 if (ret != OK) {
1605 release_vals(&vals);
1606 return ret;
1609 ksdebug(pack, "run-pre: starting sect search for %s\n",
1610 sect->symbol->label);
1612 list_for_each_entry_safe(v, n, &vals, list) {
1613 run_addr = v->val;
1615 yield();
1616 ret = try_addr(pack, sect, run_addr, NULL, RUN_PRE_INITIAL);
1617 if (ret == NO_MATCH) {
1618 list_del(&v->list);
1619 kfree(v);
1620 } else if (ret != OK) {
1621 release_vals(&vals);
1622 return ret;
1626 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
1627 if (list_empty(&vals) && (sect->flags & KSPLICE_SECTION_DATA) == 0) {
1628 ret = brute_search_all(pack, sect, &vals);
1629 if (ret != OK) {
1630 release_vals(&vals);
1631 return ret;
1633 /* Make sure run-pre matching output is displayed if
1634 brute_search succeeds */
1635 if (singular(&vals)) {
1636 run_addr = list_entry(vals.next, struct candidate_val,
1637 list)->val;
1638 ret = try_addr(pack, sect, run_addr, NULL,
1639 RUN_PRE_INITIAL);
1640 if (ret != OK) {
1641 ksdebug(pack, "run-pre: Debug run failed for "
1642 "sect %s:\n", sect->symbol->label);
1643 release_vals(&vals);
1644 return ret;
1648 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
1650 if (singular(&vals)) {
1651 LIST_HEAD(safety_records);
1652 run_addr = list_entry(vals.next, struct candidate_val,
1653 list)->val;
1654 ret = try_addr(pack, sect, run_addr, &safety_records,
1655 RUN_PRE_FINAL);
1656 release_vals(&vals);
1657 if (ret != OK) {
1658 clear_list(&safety_records, struct safety_record, list);
1659 ksdebug(pack, "run-pre: Final run failed for sect "
1660 "%s:\n", sect->symbol->label);
1661 } else {
1662 list_splice(&safety_records, &pack->safety_records);
1664 return ret;
1665 } else if (!list_empty(&vals)) {
1666 struct candidate_val *val;
1667 ksdebug(pack, "run-pre: multiple candidates for sect %s:\n",
1668 sect->symbol->label);
1669 i = 0;
1670 list_for_each_entry(val, &vals, list) {
1671 i++;
1672 ksdebug(pack, "%lx\n", val->val);
1673 if (i > 5) {
1674 ksdebug(pack, "...\n");
1675 break;
1678 release_vals(&vals);
1679 return NO_MATCH;
1681 release_vals(&vals);
1682 return NO_MATCH;
1685 static abort_t try_addr(struct ksplice_pack *pack,
1686 const struct ksplice_section *sect,
1687 unsigned long run_addr,
1688 struct list_head *safety_records,
1689 enum run_pre_mode mode)
1691 abort_t ret;
1692 const struct module *run_module;
1694 if ((sect->flags & KSPLICE_SECTION_RODATA) != 0 ||
1695 (sect->flags & KSPLICE_SECTION_DATA) != 0)
1696 run_module = __module_data_address(run_addr);
1697 else
1698 run_module = __module_text_address(run_addr);
1699 if (run_module == pack->primary) {
1700 ksdebug(pack, "run-pre: unexpected address %lx in primary "
1701 "module %s for sect %s\n", run_addr, run_module->name,
1702 sect->symbol->label);
1703 return UNEXPECTED;
1705 if (!patches_module(run_module, pack->target)) {
1706 ksdebug(pack, "run-pre: ignoring address %lx in other module "
1707 "%s for sect %s\n", run_addr, run_module == NULL ?
1708 "vmlinux" : run_module->name, sect->symbol->label);
1709 return NO_MATCH;
1712 ret = create_labelval(pack, sect->symbol, run_addr, TEMP);
1713 if (ret != OK)
1714 return ret;
1716 #ifdef CONFIG_FUNCTION_DATA_SECTIONS
1717 ret = run_pre_cmp(pack, sect, run_addr, safety_records, mode);
1718 #else /* !CONFIG_FUNCTION_DATA_SECTIONS */
1719 if ((sect->flags & KSPLICE_SECTION_TEXT) != 0)
1720 ret = arch_run_pre_cmp(pack, sect, run_addr, safety_records,
1721 mode);
1722 else
1723 ret = run_pre_cmp(pack, sect, run_addr, safety_records, mode);
1724 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
1725 if (ret == NO_MATCH && mode != RUN_PRE_FINAL) {
1726 set_temp_labelvals(pack, NOVAL);
1727 ksdebug(pack, "run-pre: %s sect %s does not match (r_a=%lx "
1728 "p_a=%lx s=%lx)\n",
1729 (sect->flags & KSPLICE_SECTION_RODATA) != 0 ? "data" :
1730 "text", sect->symbol->label, run_addr, sect->address,
1731 sect->size);
1732 ksdebug(pack, "run-pre: ");
1733 if (pack->update->debug >= 1) {
1734 #ifdef CONFIG_FUNCTION_DATA_SECTIONS
1735 ret = run_pre_cmp(pack, sect, run_addr, safety_records,
1736 RUN_PRE_DEBUG);
1737 #else /* !CONFIG_FUNCTION_DATA_SECTIONS */
1738 if ((sect->flags & KSPLICE_SECTION_TEXT) != 0)
1739 ret = arch_run_pre_cmp(pack, sect, run_addr,
1740 safety_records,
1741 RUN_PRE_DEBUG);
1742 else
1743 ret = run_pre_cmp(pack, sect, run_addr,
1744 safety_records,
1745 RUN_PRE_DEBUG);
1746 #endif /* CONFIG_FUNCTION_DATA_SECTIONS */
1747 set_temp_labelvals(pack, NOVAL);
1749 ksdebug(pack, "\n");
1750 return ret;
1751 } else if (ret != OK) {
1752 set_temp_labelvals(pack, NOVAL);
1753 return ret;
1756 if (mode != RUN_PRE_FINAL) {
1757 set_temp_labelvals(pack, NOVAL);
1758 ksdebug(pack, "run-pre: candidate for sect %s=%lx\n",
1759 sect->symbol->label, run_addr);
1760 return OK;
1763 set_temp_labelvals(pack, VAL);
1764 ksdebug(pack, "run-pre: found sect %s=%lx\n", sect->symbol->label,
1765 run_addr);
1766 return OK;
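/* run_pre_cmp() compares the helper ("pre") copy of a section byte by byte
 * against a candidate address in the running kernel. Relocation sites are
 * resolved through lookup_reloc()/handle_reloc() instead of being compared
 * literally, paravirt alternatives are matched by handle_paravirt(), and
 * byte differences are only fatal for non-data sections; on success a
 * safety record covering the matched run bytes is created. */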
1769 static abort_t run_pre_cmp(struct ksplice_pack *pack,
1770 const struct ksplice_section *sect,
1771 unsigned long run_addr,
1772 struct list_head *safety_records,
1773 enum run_pre_mode mode)
1775 int matched = 0;
1776 abort_t ret;
1777 const struct ksplice_reloc *r, *finger;
1778 const unsigned char *pre, *run, *pre_start, *run_start;
1779 unsigned char runval;
1781 pre_start = (const unsigned char *)sect->address;
1782 run_start = (const unsigned char *)run_addr;
1784 finger = init_reloc_search(pack, sect);
1786 pre = pre_start;
1787 run = run_start;
1788 while (pre < pre_start + sect->size) {
1789 unsigned long offset = pre - pre_start;
1790 ret = lookup_reloc(pack, &finger, (unsigned long)pre, &r);
1791 if (ret == OK) {
1792 ret = handle_reloc(pack, r, (unsigned long)run, mode);
1793 if (ret != OK) {
1794 if (mode == RUN_PRE_INITIAL)
1795 ksdebug(pack, "reloc in sect does not "
1796 "match after %lx/%lx bytes\n",
1797 offset, sect->size);
1798 return ret;
1800 if (mode == RUN_PRE_DEBUG)
1801 print_bytes(pack, run, r->size, pre, r->size);
1802 pre += r->size;
1803 run += r->size;
1804 continue;
1805 } else if (ret != NO_MATCH) {
1806 return ret;
1809 if ((sect->flags & KSPLICE_SECTION_TEXT) != 0) {
1810 ret = handle_paravirt(pack, (unsigned long)pre,
1811 (unsigned long)run, &matched);
1812 if (ret != OK)
1813 return ret;
1814 if (matched != 0) {
1815 if (mode == RUN_PRE_DEBUG)
1816 print_bytes(pack, run, matched, pre,
1817 matched);
1818 pre += matched;
1819 run += matched;
1820 continue;
1824 if (probe_kernel_read(&runval, (void *)run, 1) == -EFAULT) {
1825 if (mode == RUN_PRE_INITIAL)
1826 ksdebug(pack, "sect unmapped after %lx/%lx "
1827 "bytes\n", offset, sect->size);
1828 return NO_MATCH;
1831 if (runval != *pre &&
1832 (sect->flags & KSPLICE_SECTION_DATA) == 0) {
1833 if (mode == RUN_PRE_INITIAL)
1834 ksdebug(pack, "sect does not match after "
1835 "%lx/%lx bytes\n", offset, sect->size);
1836 if (mode == RUN_PRE_DEBUG) {
1837 print_bytes(pack, run, 1, pre, 1);
1838 ksdebug(pack, "[p_o=%lx] ! ", offset);
1839 print_bytes(pack, run + 1, 2, pre + 1, 2);
1841 return NO_MATCH;
1843 if (mode == RUN_PRE_DEBUG)
1844 print_bytes(pack, run, 1, pre, 1);
1845 pre++;
1846 run++;
1848 return create_safety_record(pack, sect, safety_records, run_addr,
1849 run - run_start);
1852 static void print_bytes(struct ksplice_pack *pack,
1853 const unsigned char *run, int runc,
1854 const unsigned char *pre, int prec)
1856 int o;
1857 int matched = min(runc, prec);
1858 for (o = 0; o < matched; o++) {
1859 if (run[o] == pre[o])
1860 ksdebug(pack, "%02x ", run[o]);
1861 else
1862 ksdebug(pack, "%02x/%02x ", run[o], pre[o]);
1864 for (o = matched; o < runc; o++)
1865 ksdebug(pack, "%02x/ ", run[o]);
1866 for (o = matched; o < prec; o++)
1867 ksdebug(pack, "/%02x ", pre[o]);
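/* Brute-force fallback for standalone builds without CONFIG_KALLSYMS:
 * brute_search_all() scans every candidate module's core and init regions
 * and the kernel text one byte at a time, calling try_addr() wherever the
 * first byte matches the start of the pre section. Debug output is
 * suppressed for the duration of the scan, and the inner loop yields every
 * 100000 addresses to avoid hogging the CPU. */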
1870 #if defined(KSPLICE_STANDALONE) && !defined(CONFIG_KALLSYMS)
1871 static abort_t brute_search(struct ksplice_pack *pack,
1872 const struct ksplice_section *sect,
1873 const void *start, unsigned long len,
1874 struct list_head *vals)
1876 unsigned long addr;
1877 char run, pre;
1878 abort_t ret;
1880 for (addr = (unsigned long)start; addr < (unsigned long)start + len;
1881 addr++) {
1882 if (addr % 100000 == 0)
1883 yield();
1885 if (probe_kernel_read(&run, (void *)addr, 1) == -EFAULT)
1886 return OK;
1888 pre = *(const unsigned char *)(sect->address);
1890 if (run != pre)
1891 continue;
1893 ret = try_addr(pack, sect, addr, NULL, RUN_PRE_INITIAL);
1894 if (ret == OK) {
1895 ret = add_candidate_val(pack, vals, addr);
1896 if (ret != OK)
1897 return ret;
1898 } else if (ret != NO_MATCH) {
1899 return ret;
1903 return OK;
1906 static abort_t brute_search_all(struct ksplice_pack *pack,
1907 const struct ksplice_section *sect,
1908 struct list_head *vals)
1910 struct module *m;
1911 abort_t ret = OK;
1912 int saved_debug;
1914 ksdebug(pack, "brute_search: searching for %s\n", sect->symbol->label);
1915 saved_debug = pack->update->debug;
1916 pack->update->debug = 0;
1918 list_for_each_entry(m, &modules, list) {
1919 if (!patches_module(m, pack->target) || m == pack->primary)
1920 continue;
1921 ret = brute_search(pack, sect, m->module_core, m->core_size,
1922 vals);
1923 if (ret != OK)
1924 goto out;
1925 ret = brute_search(pack, sect, m->module_init, m->init_size,
1926 vals);
1927 if (ret != OK)
1928 goto out;
1931 ret = brute_search(pack, sect, (const void *)init_mm.start_code,
1932 init_mm.end_code - init_mm.start_code, vals);
1934 out:
1935 pack->update->debug = saved_debug;
1936 return ret;
1938 #endif /* KSPLICE_STANDALONE && !CONFIG_KALLSYMS */
1940 static int reloc_bsearch_compare(const void *key, const void *elt)
1942 const struct ksplice_section *sect = key;
1943 const struct ksplice_reloc *r = elt;
1944 if (sect->address + sect->size < r->blank_addr)
1945 return -1;
1946 if (sect->address > r->blank_addr)
1947 return 1;
1948 return 0;
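/* Binary-search the helper relocation table for the first relocation whose
 * blank_addr lies inside sect, giving run_pre_cmp a "finger" it can advance
 * instead of re-searching the table for every byte. */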
1951 static const struct ksplice_reloc *
1952 init_reloc_search(struct ksplice_pack *pack, const struct ksplice_section *sect)
1954 const struct ksplice_reloc *r;
1955 r = bsearch((void *)sect, pack->helper_relocs, pack->helper_relocs_end -
1956 pack->helper_relocs, sizeof(*r), reloc_bsearch_compare);
1957 if (r != NULL) {
1958 while (r > pack->helper_relocs &&
1959 (r - 1)->blank_addr >= sect->address)
1960 r--;
1961 return r;
1963 return pack->helper_relocs_end;
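/* Advance the finger to the relocation covering addr, if any.  Returns
 * NO_MATCH when addr is not covered or when the blank value no longer holds
 * the canary (e.g. an altinstruction overwrote it), and UNEXPECTED for
 * malformed relocations. */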
1966 static abort_t lookup_reloc(struct ksplice_pack *pack,
1967 const struct ksplice_reloc **fingerp,
1968 unsigned long addr,
1969 const struct ksplice_reloc **relocp)
1971 const struct ksplice_reloc *r = *fingerp;
1972 int canary_ret;
1974 while (r < pack->helper_relocs_end && addr >= r->blank_addr + r->size)
1975 r++;
1976 *fingerp = r;
1977 if (r == pack->helper_relocs_end)
1978 return NO_MATCH;
1979 if (addr < r->blank_addr)
1980 return NO_MATCH;
1982 canary_ret = contains_canary(pack, r->blank_addr, r->size, r->dst_mask);
1983 if (canary_ret < 0)
1984 return UNEXPECTED;
1985 if (canary_ret == 0) {
1986 ksdebug(pack, "run-pre: reloc skipped at p_a=%lx to %s+%lx "
1987 "(altinstr)\n", r->blank_addr, r->symbol->label,
1988 r->addend);
1989 return NO_MATCH;
1991 if (addr != r->blank_addr) {
1992 ksdebug(pack, "Invalid nonzero relocation offset\n");
1993 return UNEXPECTED;
1995 *relocp = r;
1996 return OK;
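/* Read the relocation's value out of the running code, convert PC-relative
 * values to absolute addresses, and record the implied symbol address as a
 * temporary labelval; NO_MATCH means the value contradicts a labelval
 * established earlier for the same symbol. */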
1999 static abort_t handle_reloc(struct ksplice_pack *pack,
2000 const struct ksplice_reloc *r,
2001 unsigned long run_addr, enum run_pre_mode mode)
2003 unsigned long val;
2004 abort_t ret;
2006 ret = read_reloc_value(pack, r, run_addr, &val);
2007 if (ret != OK)
2008 return ret;
2009 if (r->pcrel)
2010 val += run_addr;
2012 if (mode == RUN_PRE_INITIAL)
2013 ksdebug(pack, "run-pre: reloc at r_a=%lx p_a=%lx to %s+%lx: "
2014 "found %s = %lx\n", run_addr, r->blank_addr,
2015 r->symbol->label, r->addend, r->symbol->label, val);
2017 if (contains_canary(pack, run_addr, r->size, r->dst_mask) != 0) {
2018 ksdebug(pack, "Aborted. Unexpected canary in run code at %lx"
2019 "\n", run_addr);
2020 return UNEXPECTED;
2023 ret = create_labelval(pack, r->symbol, val, TEMP);
2024 if (ret == NO_MATCH && mode == RUN_PRE_INITIAL) {
2025 struct labelval *lv = r->symbol->lv;
2026 ksdebug(pack, "run-pre: reloc at r_a=%lx p_a=%lx: labelval %s "
2027 "= %lx does not match expected %lx\n", run_addr,
2028 r->blank_addr, r->symbol->label, lv->val, val);
2030 return ret;
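/* Collect candidate addresses for ksym.  A previously established labelval
 * wins outright; otherwise candidates come from the target module's exit
 * handler (for "cleanup_module"), from the candidate values already attached
 * to the symbol, and from exports created by other packs in this update. */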
2033 static abort_t lookup_symbol(struct ksplice_pack *pack,
2034 const struct ksplice_symbol *ksym,
2035 struct list_head *vals)
2037 abort_t ret;
2038 struct labelval *lv = ksym->lv;
2040 #ifdef KSPLICE_STANDALONE
2041 if (!bootstrapped)
2042 return OK;
2043 #endif /* KSPLICE_STANDALONE */
2045 if (lv != NULL) {
2046 release_vals(vals);
2047 ksdebug(pack, "using detected sym %s=%lx\n", ksym->label,
2048 lv->val);
2049 return add_candidate_val(pack, vals, lv->val);
2052 #ifdef CONFIG_MODULE_UNLOAD
2053 if (strcmp(ksym->label, "cleanup_module") == 0 && pack->target != NULL
2054 && pack->target->exit != NULL) {
2055 ret = add_candidate_val(pack, vals,
2056 (unsigned long)pack->target->exit);
2057 if (ret != OK)
2058 return ret;
2060 #endif
2062 if (ksym->name != NULL) {
2063 struct candidate_val *val;
2064 list_for_each_entry(val, ksym->vals, list) {
2065 ret = add_candidate_val(pack, vals, val->val);
2066 if (ret != OK)
2067 return ret;
2070 ret = new_export_lookup(pack, pack->update, ksym->name, vals);
2071 if (ret != OK)
2072 return ret;
2075 return OK;
2078 #ifdef KSPLICE_STANDALONE
2079 static abort_t
2080 add_system_map_candidates(struct ksplice_pack *pack,
2081 const struct ksplice_system_map *start,
2082 const struct ksplice_system_map *end,
2083 const char *label, struct list_head *vals)
2085 abort_t ret;
2086 long off;
2087 int i;
2088 const struct ksplice_system_map *smap;
2090 /* Some Fedora kernel releases have System.map files whose symbol
2091 * addresses disagree with the running kernel by a constant address
2092 * offset because of the CONFIG_PHYSICAL_START and CONFIG_PHYSICAL_ALIGN
2093 * values used to compile these kernels. This constant address offset
2094 * is always a multiple of 0x100000.
2096 * If we observe an offset that is NOT a multiple of 0x100000, then the
2097 * user provided us with an incorrect System.map file, and we should
2098 * abort.
2099 * If we observe an offset that is a multiple of 0x100000, then we can
2100 * adjust the System.map address values accordingly and proceed.
2102 off = (unsigned long)printk - pack->map_printk;
2103 if (off & 0xfffff) {
2104 ksdebug(pack, "Aborted. System.map does not match kernel.\n");
2105 return BAD_SYSTEM_MAP;
2108 smap = bsearch(label, start, end - start, sizeof(*smap),
2109 system_map_bsearch_compare);
2110 if (smap == NULL)
2111 return OK;
2113 for (i = 0; i < smap->nr_candidates; i++) {
2114 ret = add_candidate_val(pack, vals, smap->candidates[i] + off);
2115 if (ret != OK)
2116 return ret;
2118 return OK;
2121 static int system_map_bsearch_compare(const void *key, const void *elt)
2123 const struct ksplice_system_map *map = elt;
2124 const char *label = key;
2125 return strcmp(label, map->label);
2127 #endif /* KSPLICE_STANDALONE */
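/* Search every pack in the update for a ksplice_export renaming a symbol to
 * name and add its value as a candidate, skipping entries whose
 * kernel_symbol still holds the unresolved canary. */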
2129 static abort_t new_export_lookup(struct ksplice_pack *p, struct update *update,
2130 const char *name, struct list_head *vals)
2132 struct ksplice_pack *pack;
2133 struct ksplice_export *exp;
2134 list_for_each_entry(pack, &update->packs, list) {
2135 for (exp = pack->exports; exp < pack->exports_end; exp++) {
2136 if (strcmp(exp->new_name, name) == 0 &&
2137 exp->sym != NULL &&
2138 contains_canary(pack,
2139 (unsigned long)&exp->sym->value,
2140 sizeof(unsigned long), -1) == 0)
2141 return add_candidate_val(p, vals,
2142 exp->sym->value);
2145 return OK;
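/* Create a safety record for every primary section, map the trampoline pages
 * writable, and retry __apply_patches under stop_machine (up to five times,
 * sleeping a second between attempts) for as long as the stack check reports
 * CODE_BUSY. */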
2148 static abort_t apply_patches(struct update *update)
2150 int i;
2151 abort_t ret;
2152 struct ksplice_pack *pack;
2153 const struct ksplice_section *sect;
2155 list_for_each_entry(pack, &update->packs, list) {
2156 for (sect = pack->primary_sections;
2157 sect < pack->primary_sections_end; sect++) {
2158 struct safety_record *rec = kmalloc(sizeof(*rec),
2159 GFP_KERNEL);
2160 if (rec == NULL)
2161 return OUT_OF_MEMORY;
2162 rec->addr = sect->address;
2163 rec->size = sect->size;
2164 rec->label = sect->symbol->label;
2165 rec->first_byte_safe = false;
2166 list_add(&rec->list, &pack->safety_records);
2170 ret = map_trampoline_pages(update);
2171 if (ret != OK)
2172 return ret;
2173 for (i = 0; i < 5; i++) {
2174 cleanup_conflicts(update);
2175 #ifdef KSPLICE_STANDALONE
2176 bust_spinlocks(1);
2177 #endif /* KSPLICE_STANDALONE */
2178 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
2179 ret = (__force abort_t)stop_machine(__apply_patches, update,
2180 NULL);
2181 #else /* LINUX_VERSION_CODE < */
2182 /* 9b1a4d38373a5581a4e01032a3ccdd94cd93477b was after 2.6.26 */
2183 ret = (__force abort_t)stop_machine_run(__apply_patches, update,
2184 NR_CPUS);
2185 #endif /* LINUX_VERSION_CODE */
2186 #ifdef KSPLICE_STANDALONE
2187 bust_spinlocks(0);
2188 #endif /* KSPLICE_STANDALONE */
2189 if (ret != CODE_BUSY)
2190 break;
2191 set_current_state(TASK_INTERRUPTIBLE);
2192 schedule_timeout(msecs_to_jiffies(1000));
2194 unmap_trampoline_pages(update);
2196 if (ret == CODE_BUSY) {
2197 print_conflicts(update);
2198 _ksdebug(update, "Aborted %s. stack check: to-be-replaced "
2199 "code is busy.\n", update->kid);
2200 } else if (ret == ALREADY_REVERSED) {
2201 _ksdebug(update, "Aborted %s. Ksplice update %s is already "
2202 "reversed.\n", update->kid, update->kid);
2205 if (ret != OK)
2206 return ret;
2208 _ksdebug(update, "Atomic patch insertion for %s complete\n",
2209 update->kid);
2210 return OK;
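/* Mirror image of apply_patches: retry __reverse_patches under stop_machine
 * while the stack check reports CODE_BUSY, then drop each pack's safety
 * records once the update has been reversed. */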
2213 static abort_t reverse_patches(struct update *update)
2215 int i;
2216 abort_t ret;
2217 struct ksplice_pack *pack;
2219 clear_debug_buf(update);
2220 ret = init_debug_buf(update);
2221 if (ret != OK)
2222 return ret;
2224 _ksdebug(update, "Preparing to reverse %s\n", update->kid);
2226 ret = map_trampoline_pages(update);
2227 if (ret != OK)
2228 return ret;
2229 for (i = 0; i < 5; i++) {
2230 cleanup_conflicts(update);
2231 clear_list(&update->conflicts, struct conflict, list);
2232 #ifdef KSPLICE_STANDALONE
2233 bust_spinlocks(1);
2234 #endif /* KSPLICE_STANDALONE */
2235 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
2236 ret = (__force abort_t)stop_machine(__reverse_patches, update,
2237 NULL);
2238 #else /* LINUX_VERSION_CODE < */
2239 /* 9b1a4d38373a5581a4e01032a3ccdd94cd93477b was after 2.6.26 */
2240 ret = (__force abort_t)stop_machine_run(__reverse_patches,
2241 update, NR_CPUS);
2242 #endif /* LINUX_VERSION_CODE */
2243 #ifdef KSPLICE_STANDALONE
2244 bust_spinlocks(0);
2245 #endif /* KSPLICE_STANDALONE */
2246 if (ret != CODE_BUSY)
2247 break;
2248 set_current_state(TASK_INTERRUPTIBLE);
2249 schedule_timeout(msecs_to_jiffies(1000));
2251 unmap_trampoline_pages(update);
2253 if (ret == CODE_BUSY) {
2254 print_conflicts(update);
2255 _ksdebug(update, "Aborted %s. stack check: to-be-reversed "
2256 "code is busy.\n", update->kid);
2257 } else if (ret == MODULE_BUSY) {
2258 _ksdebug(update, "Update %s is in use by another module\n",
2259 update->kid);
2262 if (ret != OK)
2263 return ret;
2265 list_for_each_entry(pack, &update->packs, list)
2266 clear_list(&pack->safety_records, struct safety_record, list);
2268 _ksdebug(update, "Atomic patch removal for %s complete\n", update->kid);
2269 return OK;
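/* Runs with the machine stopped: verify that no task is executing inside
 * to-be-replaced code, pin every primary module, switch exported symbols to
 * their new names, and write the trampolines into place. */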
2272 static int __apply_patches(void *updateptr)
2274 struct update *update = updateptr;
2275 struct ksplice_pack *pack;
2276 struct ksplice_patch *p;
2277 struct ksplice_export *exp;
2278 abort_t ret;
2280 if (update->stage == STAGE_APPLIED)
2281 return (__force int)OK;
2283 if (update->stage != STAGE_PREPARING)
2284 return (__force int)UNEXPECTED;
2286 ret = check_each_task(update);
2287 if (ret != OK)
2288 return (__force int)ret;
2290 list_for_each_entry(pack, &update->packs, list) {
2291 if (try_module_get(pack->primary) != 1) {
2292 struct ksplice_pack *pack1;
2293 list_for_each_entry(pack1, &update->packs, list) {
2294 if (pack1 == pack)
2295 break;
2296 module_put(pack1->primary);
2298 return (__force int)UNEXPECTED;
2302 update->stage = STAGE_APPLIED;
2303 #ifdef TAINT_KSPLICE
2304 add_taint(TAINT_KSPLICE);
2305 #endif
2307 list_for_each_entry(pack, &update->packs, list)
2308 list_add(&pack->module_list_entry.list, &ksplice_module_list);
2310 list_for_each_entry(pack, &update->packs, list) {
2311 for (exp = pack->exports; exp < pack->exports_end; exp++)
2312 exp->sym->name = exp->new_name;
2315 list_for_each_entry(pack, &update->packs, list) {
2316 for (p = pack->patches; p < pack->patches_end; p++)
2317 insert_trampoline(p);
2319 return (__force int)OK;
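/* Also runs under stop_machine: refuse if any primary module still has
 * users, verify that the trampolines are intact, then restore the saved
 * export names and the original code. */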
2322 static int __reverse_patches(void *updateptr)
2324 struct update *update = updateptr;
2325 struct ksplice_pack *pack;
2326 const struct ksplice_patch *p;
2327 struct ksplice_export *exp;
2328 abort_t ret;
2330 if (update->stage != STAGE_APPLIED)
2331 return (__force int)OK;
2333 #ifdef CONFIG_MODULE_UNLOAD
2334 list_for_each_entry(pack, &update->packs, list) {
2335 if (module_refcount(pack->primary) != 1)
2336 return (__force int)MODULE_BUSY;
2338 #endif /* CONFIG_MODULE_UNLOAD */
2340 ret = check_each_task(update);
2341 if (ret != OK)
2342 return (__force int)ret;
2344 list_for_each_entry(pack, &update->packs, list) {
2345 for (p = pack->patches; p < pack->patches_end; p++) {
2346 ret = verify_trampoline(pack, p);
2347 if (ret != OK)
2348 return (__force int)ret;
2352 update->stage = STAGE_REVERSED;
2354 list_for_each_entry(pack, &update->packs, list)
2355 module_put(pack->primary);
2357 list_for_each_entry(pack, &update->packs, list)
2358 list_del(&pack->module_list_entry.list);
2360 list_for_each_entry(pack, &update->packs, list) {
2361 for (exp = pack->exports; exp < pack->exports_end; exp++)
2362 exp->sym->name = exp->saved_name;
2365 list_for_each_entry(pack, &update->packs, list) {
2366 for (p = pack->patches; p < pack->patches_end; p++)
2367 remove_trampoline(p);
2369 return (__force int)OK;
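/* Walk every thread in the system and make sure none of them is running or
 * sleeping inside code affected by this update; offending tasks are
 * re-checked with conflict recording enabled so the failure can be reported
 * afterwards. */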
2372 static abort_t check_each_task(struct update *update)
2374 const struct task_struct *g, *p;
2375 abort_t status = OK, ret;
2376 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
2377 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
2378 read_lock(&tasklist_lock);
2379 #endif /* LINUX_VERSION_CODE */
2380 do_each_thread(g, p) {
2381 /* do_each_thread is a double loop! */
2382 ret = check_task(update, p, false);
2383 if (ret != OK) {
2384 check_task(update, p, true);
2385 status = ret;
2387 if (ret != OK && ret != CODE_BUSY)
2388 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
2389 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
2390 goto out;
2391 #else /* LINUX_VERSION_CODE < */
2392 return ret;
2393 #endif /* LINUX_VERSION_CODE */
2394 } while_each_thread(g, p);
2395 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11)
2396 /* 5d4564e68210e4b1edb3f013bc3e59982bb35737 was after 2.6.10 */
2397 out:
2398 read_unlock(&tasklist_lock);
2399 #endif /* LINUX_VERSION_CODE */
2400 return status;
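/* Check a single task's instruction pointer and kernel stack for addresses
 * inside unsafe regions.  With rerun set, the addresses examined are also
 * recorded in a struct conflict for later reporting. */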
2403 static abort_t check_task(struct update *update,
2404 const struct task_struct *t, bool rerun)
2406 abort_t status, ret;
2407 struct conflict *conf = NULL;
2409 if (rerun) {
2410 conf = kmalloc(sizeof(*conf), GFP_ATOMIC);
2411 if (conf == NULL)
2412 return OUT_OF_MEMORY;
2413 conf->process_name = kstrdup(t->comm, GFP_ATOMIC);
2414 if (conf->process_name == NULL) {
2415 kfree(conf);
2416 return OUT_OF_MEMORY;
2418 conf->pid = t->pid;
2419 INIT_LIST_HEAD(&conf->stack);
2420 list_add(&conf->list, &update->conflicts);
2423 status = check_address(update, conf, KSPLICE_IP(t));
2424 if (t == current) {
2425 ret = check_stack(update, conf, task_thread_info(t),
2426 (unsigned long *)__builtin_frame_address(0));
2427 if (status == OK)
2428 status = ret;
2429 } else if (!task_curr(t)) {
2430 ret = check_stack(update, conf, task_thread_info(t),
2431 (unsigned long *)KSPLICE_SP(t));
2432 if (status == OK)
2433 status = ret;
2434 } else if (!is_stop_machine(t)) {
2435 status = UNEXPECTED_RUNNING_TASK;
2437 return status;
2440 static abort_t check_stack(struct update *update, struct conflict *conf,
2441 const struct thread_info *tinfo,
2442 const unsigned long *stack)
2444 abort_t status = OK, ret;
2445 unsigned long addr;
2447 while (valid_stack_ptr(tinfo, stack)) {
2448 addr = *stack++;
2449 ret = check_address(update, conf, addr);
2450 if (ret != OK)
2451 status = ret;
2453 return status;
2456 static abort_t check_address(struct update *update,
2457 struct conflict *conf, unsigned long addr)
2459 abort_t status = OK, ret;
2460 const struct safety_record *rec;
2461 struct ksplice_pack *pack;
2462 struct conflict_addr *ca = NULL;
2464 if (conf != NULL) {
2465 ca = kmalloc(sizeof(*ca), GFP_ATOMIC);
2466 if (ca == NULL)
2467 return OUT_OF_MEMORY;
2468 ca->addr = addr;
2469 ca->has_conflict = false;
2470 ca->label = NULL;
2471 list_add(&ca->list, &conf->stack);
2474 list_for_each_entry(pack, &update->packs, list) {
2475 list_for_each_entry(rec, &pack->safety_records, list) {
2476 ret = check_record(ca, rec, addr);
2477 if (ret != OK)
2478 status = ret;
2481 return status;
2484 static abort_t check_record(struct conflict_addr *ca,
2485 const struct safety_record *rec, unsigned long addr)
2487 if ((addr > rec->addr && addr < rec->addr + rec->size) ||
2488 (addr == rec->addr && !rec->first_byte_safe)) {
2489 if (ca != NULL) {
2490 ca->label = rec->label;
2491 ca->has_conflict = true;
2493 return CODE_BUSY;
2495 return OK;
2498 static bool is_stop_machine(const struct task_struct *t)
2500 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27)
2501 const char *num;
2502 if (!starts_with(t->comm, "kstop"))
2503 return false;
2504 num = t->comm + strlen("kstop");
2505 return num[strspn(num, "0123456789")] == '\0';
2506 #else /* LINUX_VERSION_CODE < */
2507 return strcmp(t->comm, "kstopmachine") == 0;
2508 #endif /* LINUX_VERSION_CODE */
2511 static void cleanup_conflicts(struct update *update)
2513 struct conflict *conf;
2514 list_for_each_entry(conf, &update->conflicts, list) {
2515 clear_list(&conf->stack, struct conflict_addr, list);
2516 kfree(conf->process_name);
2518 clear_list(&update->conflicts, struct conflict, list);
2521 static void print_conflicts(struct update *update)
2523 const struct conflict *conf;
2524 const struct conflict_addr *ca;
2525 list_for_each_entry(conf, &update->conflicts, list) {
2526 _ksdebug(update, "stack check: pid %d (%s):", conf->pid,
2527 conf->process_name);
2528 list_for_each_entry(ca, &conf->stack, list) {
2529 _ksdebug(update, " %lx", ca->addr);
2530 if (ca->has_conflict)
2531 _ksdebug(update, " [<-CONFLICT]");
2533 _ksdebug(update, "\n");
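/* Save the original bytes at the patch site into p->saved and copy the
 * trampoline into place through p->vaddr, then flush the icache over the
 * patched range at p->oldaddr. */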
2537 static void insert_trampoline(struct ksplice_patch *p)
2539 mm_segment_t old_fs = get_fs();
2540 set_fs(KERNEL_DS);
2541 memcpy((void *)p->saved, p->vaddr, p->size);
2542 memcpy(p->vaddr, (void *)p->trampoline, p->size);
2543 flush_icache_range(p->oldaddr, p->oldaddr + p->size);
2544 set_fs(old_fs);
2547 static abort_t verify_trampoline(struct ksplice_pack *pack,
2548 const struct ksplice_patch *p)
2550 if (memcmp(p->vaddr, (void *)p->trampoline, p->size) != 0) {
2551 ksdebug(pack, "Aborted. Trampoline at %lx has been "
2552 "overwritten.\n", p->oldaddr);
2553 return CODE_BUSY;
2555 return OK;
2558 static void remove_trampoline(const struct ksplice_patch *p)
2560 mm_segment_t old_fs = get_fs();
2561 set_fs(KERNEL_DS);
2562 memcpy(p->vaddr, (void *)p->saved, p->size);
2563 flush_icache_range(p->oldaddr, p->oldaddr + p->size);
2564 set_fs(old_fs);
2567 static struct labelval *find_labelval(struct ksplice_pack *pack,
2568 const char *label)
2570 struct labelval *lv;
2571 list_for_each_entry(lv, &pack->labelvals, list) {
2572 if (strcmp(lv->label, label) == 0)
2573 return lv;
2575 return NULL;
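/* Bind ksym to val (after following any trampolines).  If the symbol already
 * has a labelval, the new value must agree with it; otherwise the binding is
 * added to the permanent or temporary list depending on status. */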
2578 static abort_t create_labelval(struct ksplice_pack *pack,
2579 struct ksplice_symbol *ksym,
2580 unsigned long val, int status)
2582 struct labelval *lv = ksym->lv;
2583 val = follow_trampolines(pack, val);
2584 if (lv != NULL)
2585 return lv->val == val ? OK : NO_MATCH;
2587 lv = kmalloc(sizeof(*lv), GFP_KERNEL);
2588 if (lv == NULL)
2589 return OUT_OF_MEMORY;
2590 lv->label = ksym->label;
2591 lv->val = val;
2592 lv->symbol = ksym;
2593 if (status == VAL)
2594 list_add(&lv->list, &pack->labelvals);
2595 else
2596 list_add(&lv->list, &pack->temp_labelvals);
2597 ksym->lv = lv;
2598 return OK;
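/* Record the run-time address range matched for sect, but only when the
 * section corresponds to one of this pack's patches; matching a patch that
 * has replacement code to a non-text section is treated as an error. */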
2601 static abort_t create_safety_record(struct ksplice_pack *pack,
2602 const struct ksplice_section *sect,
2603 struct list_head *record_list,
2604 unsigned long run_addr,
2605 unsigned long run_size)
2607 struct safety_record *rec;
2608 struct ksplice_patch *p;
2610 if (record_list == NULL)
2611 return OK;
2613 for (p = pack->patches; p < pack->patches_end; p++) {
2614 if (strcmp(sect->symbol->label, p->label) == 0)
2615 break;
2617 if (p >= pack->patches_end)
2618 return OK;
2620 if ((sect->flags & KSPLICE_SECTION_TEXT) == 0 && p->repladdr != 0) {
2621 ksdebug(pack, "Error: ksplice_patch %s is matched to a "
2622 "non-deleted non-text section!\n", sect->symbol->label);
2623 return UNEXPECTED;
2626 rec = kmalloc(sizeof(*rec), GFP_KERNEL);
2627 if (rec == NULL)
2628 return OUT_OF_MEMORY;
2629 rec->addr = run_addr;
2630 rec->size = run_size;
2631 rec->label = sect->symbol->label;
2632 rec->first_byte_safe = false;
2634 list_add(&rec->list, record_list);
2635 return OK;
2638 static abort_t add_candidate_val(struct ksplice_pack *pack,
2639 struct list_head *vals, unsigned long val)
2641 struct candidate_val *tmp, *new;
2642 val = follow_trampolines(pack, val);
2644 list_for_each_entry(tmp, vals, list) {
2645 if (tmp->val == val)
2646 return OK;
2648 new = kmalloc(sizeof(*new), GFP_KERNEL);
2649 if (new == NULL)
2650 return OUT_OF_MEMORY;
2651 new->val = val;
2652 list_add(&new->list, vals);
2653 return OK;
2656 static void release_vals(struct list_head *vals)
2658 clear_list(vals, struct candidate_val, list);
2661 static void set_temp_labelvals(struct ksplice_pack *pack, int status)
2663 struct labelval *lv, *n;
2664 list_for_each_entry_safe(lv, n, &pack->temp_labelvals, list) {
2665 list_del(&lv->list);
2666 if (status == NOVAL) {
2667 lv->symbol->lv = NULL;
2668 kfree(lv);
2669 } else {
2670 list_add(&lv->list, &pack->labelvals);
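/* Return 1 if the bytes at blank_addr still hold the Ksplice canary (masked
 * by dst_mask) for the given relocation size, 0 if they do not, and -1 for
 * an unsupported size. */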
2675 static int contains_canary(struct ksplice_pack *pack, unsigned long blank_addr,
2676 int size, long dst_mask)
2678 switch (size) {
2679 case 1:
2680 return (*(uint8_t *)blank_addr & dst_mask) ==
2681 (KSPLICE_CANARY & dst_mask);
2682 case 2:
2683 return (*(uint16_t *)blank_addr & dst_mask) ==
2684 (KSPLICE_CANARY & dst_mask);
2685 case 4:
2686 return (*(uint32_t *)blank_addr & dst_mask) ==
2687 (KSPLICE_CANARY & dst_mask);
2688 #if BITS_PER_LONG >= 64
2689 case 8:
2690 return (*(uint64_t *)blank_addr & dst_mask) ==
2691 (KSPLICE_CANARY & dst_mask);
2692 #endif /* BITS_PER_LONG */
2693 default:
2694 ksdebug(pack, "Aborted. Invalid relocation size.\n");
2695 return -1;
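/* Follow a chain of Ksplice trampolines: while addr is a trampoline whose
 * destination lies in another ksplice_* module, advance to that destination,
 * and return the final address reached. */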
2699 static unsigned long follow_trampolines(struct ksplice_pack *pack,
2700 unsigned long addr)
2702 unsigned long new_addr;
2703 struct module *m;
2705 while (1) {
2706 if (trampoline_target(pack, addr, &new_addr) != OK)
2707 return addr;
2708 m = __module_text_address(new_addr);
2709 if (m == NULL || m == pack->target ||
2710 !starts_with(m->name, "ksplice"))
2711 return addr;
2712 ksdebug(pack, "Following trampoline %lx %lx(%s)\n", addr,
2713 new_addr, m->name);
2714 addr = new_addr;
2718 /* Does module a patch module b? */
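/* (For example, under KSPLICE_NO_KERNEL_SUPPORT a primary module named
 * "ksplice_abc123_isdn" would be taken to patch the module "isdn", and
 * "ksplice_abc123_vmlinux" the kernel itself; the "abc123" kid here is
 * purely hypothetical.) */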
2719 static bool patches_module(const struct module *a, const struct module *b)
2721 #ifdef KSPLICE_NO_KERNEL_SUPPORT
2722 const char *name;
2723 if (a == b)
2724 return true;
2725 if (a == NULL || !starts_with(a->name, "ksplice_"))
2726 return false;
2727 name = a->name + strlen("ksplice_");
2728 name += strcspn(name, "_");
2729 if (name[0] != '_')
2730 return false;
2731 name++;
2732 return strcmp(name, b == NULL ? "vmlinux" : b->name) == 0;
2733 #else /* !KSPLICE_NO_KERNEL_SUPPORT */
2734 struct ksplice_module_list_entry *entry;
2735 if (a == b)
2736 return true;
2737 list_for_each_entry(entry, &ksplice_module_list, list) {
2738 if (entry->target == b && entry->primary == a)
2739 return true;
2741 return false;
2742 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
2745 static bool starts_with(const char *str, const char *prefix)
2747 return strncmp(str, prefix, strlen(prefix)) == 0;
2750 static bool singular(struct list_head *list)
2752 return !list_empty(list) && list->next->next == list;
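/* Minimal binary search over a sorted array; cmp receives the search key and
 * the current element and must return <0, 0 or >0.  Provided locally because
 * the kernels targeted here lack a generic bsearch helper. */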
2755 static void *bsearch(const void *key, const void *base, size_t n,
2756 size_t size, int (*cmp)(const void *key, const void *elt))
2758 int start = 0, end = n - 1, mid, result;
2759 if (n == 0)
2760 return NULL;
2761 while (start <= end) {
2762 mid = (start + end) / 2;
2763 result = cmp(key, base + mid * size);
2764 if (result < 0)
2765 end = mid - 1;
2766 else if (result > 0)
2767 start = mid + 1;
2768 else
2769 return (void *)base + mid * size;
2771 return NULL;
2774 static int compare_reloc_addresses(const void *a, const void *b)
2776 const struct ksplice_reloc *ra = a, *rb = b;
2777 if (ra->blank_addr > rb->blank_addr)
2778 return 1;
2779 else if (ra->blank_addr < rb->blank_addr)
2780 return -1;
2781 else
2782 return 0;
2785 #ifdef KSPLICE_STANDALONE
2786 static int compare_system_map(const void *a, const void *b)
2788 const struct ksplice_system_map *sa = a, *sb = b;
2789 return strcmp(sa->label, sb->label);
2791 #endif /* KSPLICE_STANDALONE */
2793 #ifdef CONFIG_DEBUG_FS
2794 #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17)
2795 /* Old kernels don't have debugfs_create_blob */
2796 static ssize_t read_file_blob(struct file *file, char __user *user_buf,
2797 size_t count, loff_t *ppos)
2799 struct debugfs_blob_wrapper *blob = file->private_data;
2800 return simple_read_from_buffer(user_buf, count, ppos, blob->data,
2801 blob->size);
2804 static int blob_open(struct inode *inode, struct file *file)
2806 if (inode->i_private)
2807 file->private_data = inode->i_private;
2808 return 0;
2811 static struct file_operations fops_blob = {
2812 .read = read_file_blob,
2813 .open = blob_open,
2816 static struct dentry *debugfs_create_blob(const char *name, mode_t mode,
2817 struct dentry *parent,
2818 struct debugfs_blob_wrapper *blob)
2820 return debugfs_create_file(name, mode, parent, blob, &fops_blob);
2822 #endif /* LINUX_VERSION_CODE */
2824 static abort_t init_debug_buf(struct update *update)
2826 update->debug_blob.size = 0;
2827 update->debug_blob.data = NULL;
2828 update->debugfs_dentry =
2829 debugfs_create_blob(update->name, S_IFREG | S_IRUSR, NULL,
2830 &update->debug_blob);
2831 if (update->debugfs_dentry == NULL)
2832 return OUT_OF_MEMORY;
2833 return OK;
2836 static void clear_debug_buf(struct update *update)
2838 if (update->debugfs_dentry == NULL)
2839 return;
2840 debugfs_remove(update->debugfs_dentry);
2841 update->debugfs_dentry = NULL;
2842 update->debug_blob.size = 0;
2843 vfree(update->debug_blob.data);
2844 update->debug_blob.data = NULL;
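/* Append a formatted message to the update's debugfs blob, growing the
 * vmalloc'd buffer to the next power of two (at least PAGE_SIZE) whenever
 * the message would not fit. */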
2847 static int _ksdebug(struct update *update, const char *fmt, ...)
2849 va_list args;
2850 unsigned long size, old_size, new_size;
2852 if (update->debug == 0)
2853 return 0;
2855 /* size includes the trailing '\0' */
2856 va_start(args, fmt);
2857 size = 1 + vsnprintf(update->debug_blob.data, 0, fmt, args);
2858 va_end(args);
2859 old_size = update->debug_blob.size == 0 ? 0 :
2860 max(PAGE_SIZE, roundup_pow_of_two(update->debug_blob.size));
2861 new_size = update->debug_blob.size + size == 0 ? 0 :
2862 max(PAGE_SIZE, roundup_pow_of_two(update->debug_blob.size + size));
2863 if (new_size > old_size) {
2864 char *buf = vmalloc(new_size);
2865 if (buf == NULL)
2866 return -ENOMEM;
2867 memcpy(buf, update->debug_blob.data, update->debug_blob.size);
2868 vfree(update->debug_blob.data);
2869 update->debug_blob.data = buf;
2871 va_start(args, fmt);
2872 update->debug_blob.size += vsnprintf(update->debug_blob.data +
2873 update->debug_blob.size,
2874 size, fmt, args);
2875 va_end(args);
2876 return 0;
2878 #else /* CONFIG_DEBUG_FS */
2879 static abort_t init_debug_buf(struct update *update)
2881 return OK;
2884 static void clear_debug_buf(struct update *update)
2886 return;
2889 static int _ksdebug(struct update *update, const char *fmt, ...)
2891 va_list args;
2893 if (update->debug == 0)
2894 return 0;
2896 if (!update->debug_continue_line)
2897 printk(KERN_DEBUG "ksplice: ");
2899 va_start(args, fmt);
2900 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9)
2901 vprintk(fmt, args);
2902 #else /* LINUX_VERSION_CODE < */
2903 /* 683b229286b429244f35726b3c18caec429233bd was after 2.6.8 */
2905 char *buf = kvasprintf(GFP_KERNEL, fmt, args);
2906 printk("%s", buf);
2907 kfree(buf);
2909 #endif /* LINUX_VERSION_CODE */
2910 va_end(args);
2912 update->debug_continue_line =
2913 fmt[0] == '\0' || fmt[strlen(fmt) - 1] != '\n';
2914 return 0;
2916 #endif /* CONFIG_DEBUG_FS */
2918 #ifdef KSPLICE_NO_KERNEL_SUPPORT
2919 #ifdef CONFIG_KALLSYMS
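/* Local stand-in for kallsyms_on_each_symbol on kernels that do not export
 * it: walk the kallsyms name table (token-compressed from 2.6.10 on,
 * prefix-compressed before that) and then every loaded module's symtab. */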
2920 static int kallsyms_on_each_symbol(int (*fn)(void *, const char *,
2921 struct module *, unsigned long),
2922 void *data)
2924 char namebuf[KSYM_NAME_LEN];
2925 unsigned long i;
2926 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
2927 unsigned int off;
2928 #endif /* LINUX_VERSION_CODE */
2929 int ret;
2931 /* kallsyms compression was added by 5648d78927ca65e74aadc88a2b1d6431e55e78ec
2932 * 2.6.10 was the first release after this commit
2934 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
2935 for (i = 0, off = 0; i < kallsyms_num_syms; i++) {
2936 off = kallsyms_expand_symbol(off, namebuf);
2937 ret = fn(data, namebuf, NULL, kallsyms_addresses[i]);
2938 if (ret != 0)
2939 return ret;
2941 #else /* LINUX_VERSION_CODE < */
2942 char *knames;
2944 for (i = 0, knames = kallsyms_names; i < kallsyms_num_syms; i++) {
2945 unsigned prefix = *knames++;
2947 strlcpy(namebuf + prefix, knames, KSYM_NAME_LEN - prefix);
2949 ret = fn(data, namebuf, NULL, kallsyms_addresses[i]);
2950 if (ret != OK)
2951 return ret;
2953 knames += strlen(knames) + 1;
2955 #endif /* LINUX_VERSION_CODE */
2956 return module_kallsyms_on_each_symbol(fn, data);
2959 /* kallsyms compression was added by 5648d78927ca65e74aadc88a2b1d6431e55e78ec
2960 * 2.6.10 was the first release after this commit
2962 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,10)
2963 extern u8 kallsyms_token_table[];
2964 extern u16 kallsyms_token_index[];
2966 static unsigned int kallsyms_expand_symbol(unsigned int off, char *result)
2968 long len, skipped_first = 0;
2969 const u8 *tptr, *data;
2971 data = &kallsyms_names[off];
2972 len = *data;
2973 data++;
2975 off += len + 1;
2977 while (len) {
2978 tptr = &kallsyms_token_table[kallsyms_token_index[*data]];
2979 data++;
2980 len--;
2982 while (*tptr) {
2983 if (skipped_first) {
2984 *result = *tptr;
2985 result++;
2986 } else
2987 skipped_first = 1;
2988 tptr++;
2992 *result = '\0';
2994 return off;
2996 #endif /* LINUX_VERSION_CODE */
2998 static int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
2999 struct module *,
3000 unsigned long),
3001 void *data)
3003 struct module *mod;
3004 unsigned int i;
3005 int ret;
3007 list_for_each_entry(mod, &modules, list) {
3008 for (i = 0; i < mod->num_symtab; i++) {
3009 ret = fn(data, mod->strtab + mod->symtab[i].st_name,
3010 mod, mod->symtab[i].st_value);
3011 if (ret != 0)
3012 return ret;
3015 return 0;
3017 #endif /* CONFIG_KALLSYMS */
3019 static struct module *find_module(const char *name)
3021 struct module *mod;
3023 list_for_each_entry(mod, &modules, list) {
3024 if (strcmp(mod->name, name) == 0)
3025 return mod;
3027 return NULL;
3030 #ifdef CONFIG_MODULE_UNLOAD
3031 struct module_use {
3032 struct list_head list;
3033 struct module *module_which_uses;
3036 /* I'm not yet certain whether we need the strong form of this. */
3037 static inline int strong_try_module_get(struct module *mod)
3039 if (mod && mod->state != MODULE_STATE_LIVE)
3040 return -EBUSY;
3041 if (try_module_get(mod))
3042 return 0;
3043 return -ENOENT;
3046 /* Does a already use b? */
3047 static int already_uses(struct module *a, struct module *b)
3049 struct module_use *use;
3050 list_for_each_entry(use, &b->modules_which_use_me, list) {
3051 if (use->module_which_uses == a)
3052 return 1;
3054 return 0;
3057 /* Make it so module a uses b. Must be holding module_mutex */
3058 static int use_module(struct module *a, struct module *b)
3060 struct module_use *use;
3061 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
3062 /* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
3063 int no_warn;
3064 #endif /* LINUX_VERSION_CODE */
3065 if (b == NULL || already_uses(a, b))
3066 return 1;
3068 if (strong_try_module_get(b) < 0)
3069 return 0;
3071 use = kmalloc(sizeof(*use), GFP_ATOMIC);
3072 if (!use) {
3073 module_put(b);
3074 return 0;
3076 use->module_which_uses = a;
3077 list_add(&use->list, &b->modules_which_use_me);
3078 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)
3079 /* 270a6c4cad809e92d7b81adde92d0b3d94eeb8ee was after 2.6.20 */
3080 no_warn = sysfs_create_link(b->holders_dir, &a->mkobj.kobj, a->name);
3081 #endif /* LINUX_VERSION_CODE */
3082 return 1;
3084 #else /* CONFIG_MODULE_UNLOAD */
3085 static int use_module(struct module *a, struct module *b)
3087 return 1;
3089 #endif /* CONFIG_MODULE_UNLOAD */
3091 #ifndef CONFIG_MODVERSIONS
3092 #define symversion(base, idx) NULL
3093 #else
3094 #define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
3095 #endif
3097 static bool each_symbol_in_section(const struct symsearch *arr,
3098 unsigned int arrsize,
3099 struct module *owner,
3100 bool (*fn)(const struct symsearch *syms,
3101 struct module *owner,
3102 unsigned int symnum, void *data),
3103 void *data)
3105 unsigned int i, j;
3107 for (j = 0; j < arrsize; j++) {
3108 for (i = 0; i < arr[j].stop - arr[j].start; i++)
3109 if (fn(&arr[j], owner, i, data))
3110 return true;
3113 return false;
3116 /* Returns true as soon as fn returns true, otherwise false. */
3117 static bool each_symbol(bool (*fn)(const struct symsearch *arr,
3118 struct module *owner,
3119 unsigned int symnum, void *data),
3120 void *data)
3122 struct module *mod;
3123 const struct symsearch arr[] = {
3124 { __start___ksymtab, __stop___ksymtab, __start___kcrctab,
3125 NOT_GPL_ONLY, false },
3126 { __start___ksymtab_gpl, __stop___ksymtab_gpl,
3127 __start___kcrctab_gpl,
3128 GPL_ONLY, false },
3129 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
3130 { __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
3131 __start___kcrctab_gpl_future,
3132 WILL_BE_GPL_ONLY, false },
3133 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
3134 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
3135 { __start___ksymtab_unused, __stop___ksymtab_unused,
3136 __start___kcrctab_unused,
3137 NOT_GPL_ONLY, true },
3138 { __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
3139 __start___kcrctab_unused_gpl,
3140 GPL_ONLY, true },
3141 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
3144 if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
3145 return 1;
3147 list_for_each_entry(mod, &modules, list) {
3148 struct symsearch module_arr[] = {
3149 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
3150 NOT_GPL_ONLY, false },
3151 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
3152 mod->gpl_crcs,
3153 GPL_ONLY, false },
3154 #ifdef KSPLICE_KSYMTAB_FUTURE_SUPPORT
3155 { mod->gpl_future_syms,
3156 mod->gpl_future_syms + mod->num_gpl_future_syms,
3157 mod->gpl_future_crcs,
3158 WILL_BE_GPL_ONLY, false },
3159 #endif /* KSPLICE_KSYMTAB_FUTURE_SUPPORT */
3160 #ifdef KSPLICE_KSYMTAB_UNUSED_SUPPORT
3161 { mod->unused_syms,
3162 mod->unused_syms + mod->num_unused_syms,
3163 mod->unused_crcs,
3164 NOT_GPL_ONLY, true },
3165 { mod->unused_gpl_syms,
3166 mod->unused_gpl_syms + mod->num_unused_gpl_syms,
3167 mod->unused_gpl_crcs,
3168 GPL_ONLY, true },
3169 #endif /* KSPLICE_KSYMTAB_UNUSED_SUPPORT */
3172 if (each_symbol_in_section(module_arr, ARRAY_SIZE(module_arr),
3173 mod, fn, data))
3174 return true;
3176 return false;
3179 struct find_symbol_arg {
3180 /* Input */
3181 const char *name;
3182 bool gplok;
3183 bool warn;
3185 /* Output */
3186 struct module *owner;
3187 const unsigned long *crc;
3188 const struct kernel_symbol *sym;
3191 static bool find_symbol_in_section(const struct symsearch *syms,
3192 struct module *owner,
3193 unsigned int symnum, void *data)
3195 struct find_symbol_arg *fsa = data;
3197 if (strcmp(syms->start[symnum].name, fsa->name) != 0)
3198 return false;
3200 if (!fsa->gplok) {
3201 if (syms->licence == GPL_ONLY)
3202 return false;
3203 if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
3204 printk(KERN_WARNING "Symbol %s is being used "
3205 "by a non-GPL module, which will not "
3206 "be allowed in the future\n", fsa->name);
3207 printk(KERN_WARNING "Please see the file "
3208 "Documentation/feature-removal-schedule.txt "
3209 "in the kernel source tree for more details.\n");
3213 #ifdef CONFIG_UNUSED_SYMBOLS
3214 if (syms->unused && fsa->warn) {
3215 printk(KERN_WARNING "Symbol %s is marked as UNUSED, "
3216 "however this module is using it.\n", fsa->name);
3217 printk(KERN_WARNING
3218 "This symbol will go away in the future.\n");
3219 printk(KERN_WARNING
3220 "Please evalute if this is the right api to use and if "
3221 "it really is, submit a report the linux kernel "
3222 "mailinglist together with submitting your code for "
3223 "inclusion.\n");
3225 #endif
3227 fsa->owner = owner;
3228 fsa->crc = symversion(syms->crcs, symnum);
3229 fsa->sym = &syms->start[symnum];
3230 return true;
3233 /* Find a symbol and return it, along with (optional) crc and
3234 * (optional) module which owns it */
3235 static const struct kernel_symbol *find_symbol(const char *name,
3236 struct module **owner,
3237 const unsigned long **crc,
3238 bool gplok, bool warn)
3240 struct find_symbol_arg fsa;
3242 fsa.name = name;
3243 fsa.gplok = gplok;
3244 fsa.warn = warn;
3246 if (each_symbol(find_symbol_in_section, &fsa)) {
3247 if (owner)
3248 *owner = fsa.owner;
3249 if (crc)
3250 *crc = fsa.crc;
3251 return fsa.sym;
3254 return NULL;
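/* Return the module whose core data region (the part of module_core beyond
 * core_text_size) contains addr, or NULL if there is none. */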
3257 static struct module *__module_data_address(unsigned long addr)
3259 struct module *mod;
3261 list_for_each_entry(mod, &modules, list) {
3262 if (addr >= (unsigned long)mod->module_core +
3263 mod->core_text_size &&
3264 addr < (unsigned long)mod->module_core + mod->core_size)
3265 return mod;
3267 return NULL;
3269 #endif /* KSPLICE_NO_KERNEL_SUPPORT */
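/* sysfs interface: each update exposes stage, abort_cause, conflicts and
 * debug attributes on its kobject; writing "applied" or "reversed" to the
 * stage file drives the corresponding transition. */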
3271 struct ksplice_attribute {
3272 struct attribute attr;
3273 ssize_t (*show)(struct update *update, char *buf);
3274 ssize_t (*store)(struct update *update, const char *buf, size_t len);
3277 static ssize_t ksplice_attr_show(struct kobject *kobj, struct attribute *attr,
3278 char *buf)
3280 struct ksplice_attribute *attribute =
3281 container_of(attr, struct ksplice_attribute, attr);
3282 struct update *update = container_of(kobj, struct update, kobj);
3283 if (attribute->show == NULL)
3284 return -EIO;
3285 return attribute->show(update, buf);
3288 static ssize_t ksplice_attr_store(struct kobject *kobj, struct attribute *attr,
3289 const char *buf, size_t len)
3291 struct ksplice_attribute *attribute =
3292 container_of(attr, struct ksplice_attribute, attr);
3293 struct update *update = container_of(kobj, struct update, kobj);
3294 if (attribute->store == NULL)
3295 return -EIO;
3296 return attribute->store(update, buf, len);
3299 static struct sysfs_ops ksplice_sysfs_ops = {
3300 .show = ksplice_attr_show,
3301 .store = ksplice_attr_store,
3304 static void ksplice_release(struct kobject *kobj)
3306 struct update *update;
3307 update = container_of(kobj, struct update, kobj);
3308 cleanup_ksplice_update(update);
3311 static ssize_t stage_show(struct update *update, char *buf)
3313 switch (update->stage) {
3314 case STAGE_PREPARING:
3315 return snprintf(buf, PAGE_SIZE, "preparing\n");
3316 case STAGE_APPLIED:
3317 return snprintf(buf, PAGE_SIZE, "applied\n");
3318 case STAGE_REVERSED:
3319 return snprintf(buf, PAGE_SIZE, "reversed\n");
3321 return 0;
3324 static ssize_t abort_cause_show(struct update *update, char *buf)
3326 switch (update->abort_cause) {
3327 case OK:
3328 return snprintf(buf, PAGE_SIZE, "ok\n");
3329 case NO_MATCH:
3330 return snprintf(buf, PAGE_SIZE, "no_match\n");
3331 #ifdef KSPLICE_STANDALONE
3332 case BAD_SYSTEM_MAP:
3333 return snprintf(buf, PAGE_SIZE, "bad_system_map\n");
3334 #endif /* KSPLICE_STANDALONE */
3335 case CODE_BUSY:
3336 return snprintf(buf, PAGE_SIZE, "code_busy\n");
3337 case MODULE_BUSY:
3338 return snprintf(buf, PAGE_SIZE, "module_busy\n");
3339 case OUT_OF_MEMORY:
3340 return snprintf(buf, PAGE_SIZE, "out_of_memory\n");
3341 case FAILED_TO_FIND:
3342 return snprintf(buf, PAGE_SIZE, "failed_to_find\n");
3343 case ALREADY_REVERSED:
3344 return snprintf(buf, PAGE_SIZE, "already_reversed\n");
3345 case MISSING_EXPORT:
3346 return snprintf(buf, PAGE_SIZE, "missing_export\n");
3347 case UNEXPECTED_RUNNING_TASK:
3348 return snprintf(buf, PAGE_SIZE, "unexpected_running_task\n");
3349 case UNEXPECTED:
3350 return snprintf(buf, PAGE_SIZE, "unexpected\n");
3352 return 0;
3355 static ssize_t conflict_show(struct update *update, char *buf)
3357 const struct conflict *conf;
3358 const struct conflict_addr *ca;
3359 int used = 0;
3360 list_for_each_entry(conf, &update->conflicts, list) {
3361 used += snprintf(buf + used, PAGE_SIZE - used, "%s %d",
3362 conf->process_name, conf->pid);
3363 list_for_each_entry(ca, &conf->stack, list) {
3364 if (!ca->has_conflict)
3365 continue;
3366 used += snprintf(buf + used, PAGE_SIZE - used, " %s",
3367 ca->label);
3369 used += snprintf(buf + used, PAGE_SIZE - used, "\n");
3371 return used;
3374 static ssize_t stage_store(struct update *update, const char *buf, size_t len)
3376 if ((strncmp(buf, "applied", len) == 0 ||
3377 strncmp(buf, "applied\n", len) == 0) &&
3378 update->stage == STAGE_PREPARING)
3379 update->abort_cause = apply_update(update);
3380 else if ((strncmp(buf, "reversed", len) == 0 ||
3381 strncmp(buf, "reversed\n", len) == 0) &&
3382 update->stage == STAGE_APPLIED)
3383 update->abort_cause = reverse_patches(update);
3384 if (update->abort_cause == OK)
3385 printk(KERN_INFO "ksplice: Update %s %s successfully\n",
3386 update->kid,
3387 update->stage == STAGE_APPLIED ? "applied" : "reversed");
3388 return len;
3391 static ssize_t debug_show(struct update *update, char *buf)
3393 return snprintf(buf, PAGE_SIZE, "%d\n", update->debug);
3396 static ssize_t debug_store(struct update *update, const char *buf, size_t len)
3398 unsigned long l;
3399 int ret = strict_strtoul(buf, 10, &l);
3400 if (ret != 0)
3401 return ret;
3402 update->debug = l;
3403 return len;
3406 static struct ksplice_attribute stage_attribute =
3407 __ATTR(stage, 0600, stage_show, stage_store);
3408 static struct ksplice_attribute abort_cause_attribute =
3409 __ATTR(abort_cause, 0400, abort_cause_show, NULL);
3410 static struct ksplice_attribute debug_attribute =
3411 __ATTR(debug, 0600, debug_show, debug_store);
3412 static struct ksplice_attribute conflict_attribute =
3413 __ATTR(conflicts, 0400, conflict_show, NULL);
3415 static struct attribute *ksplice_attrs[] = {
3416 &stage_attribute.attr,
3417 &abort_cause_attribute.attr,
3418 &debug_attribute.attr,
3419 &conflict_attribute.attr,
3420 NULL
3423 static struct kobj_type ksplice_ktype = {
3424 .sysfs_ops = &ksplice_sysfs_ops,
3425 .release = ksplice_release,
3426 .default_attrs = ksplice_attrs,
3429 #ifdef KSPLICE_STANDALONE
3430 static int debug;
3431 module_param(debug, int, 0600);
3432 MODULE_PARM_DESC(debug, "Debug level");
3434 extern struct ksplice_system_map ksplice_system_map[], ksplice_system_map_end[];
3436 static struct ksplice_pack bootstrap_pack = {
3437 .name = "ksplice_" __stringify(KSPLICE_KID),
3438 .kid = "init_" __stringify(KSPLICE_KID),
3439 .target_name = NULL,
3440 .target = NULL,
3441 .map_printk = MAP_PRINTK,
3442 .primary = THIS_MODULE,
3443 .labelvals = LIST_HEAD_INIT(bootstrap_pack.labelvals),
3444 .primary_system_map = ksplice_system_map,
3445 .primary_system_map_end = ksplice_system_map_end,
3447 #endif /* KSPLICE_STANDALONE */
3449 static int init_ksplice(void)
3451 #ifdef KSPLICE_STANDALONE
3452 struct ksplice_pack *pack = &bootstrap_pack;
3453 pack->update = init_ksplice_update(pack->kid);
3454 #ifdef KSPLICE_STANDALONE
3455 sort(pack->primary_system_map,
3456 (pack->primary_system_map_end - pack->primary_system_map),
3457 sizeof(struct ksplice_system_map), compare_system_map, NULL);
3458 #endif /* KSPLICE_STANDALONE */
3459 if (pack->update == NULL)
3460 return -ENOMEM;
3461 add_to_update(pack, pack->update);
3462 pack->update->debug = debug;
3463 pack->update->abort_cause =
3464 apply_relocs(pack, ksplice_init_relocs, ksplice_init_relocs_end);
3465 if (pack->update->abort_cause == OK)
3466 bootstrapped = true;
3467 #else /* !KSPLICE_STANDALONE */
3468 ksplice_kobj = kobject_create_and_add("ksplice", kernel_kobj);
3469 if (ksplice_kobj == NULL)
3470 return -ENOMEM;
3471 #endif /* KSPLICE_STANDALONE */
3472 return 0;
3475 static void cleanup_ksplice(void)
3477 #ifdef KSPLICE_STANDALONE
3478 cleanup_ksplice_update(bootstrap_pack.update);
3479 #else /* !KSPLICE_STANDALONE */
3480 kobject_put(ksplice_kobj);
3481 #endif /* KSPLICE_STANDALONE */
3484 module_init(init_ksplice);
3485 module_exit(cleanup_ksplice);
3487 MODULE_AUTHOR("Jeffrey Brian Arnold <jbarnold@mit.edu>");
3488 MODULE_DESCRIPTION("Ksplice rebootless update system");
3489 #ifdef KSPLICE_VERSION
3490 MODULE_VERSION(KSPLICE_VERSION);
3491 #endif
3492 MODULE_LICENSE("GPL v2");