/*
   Copyright (C) 2002 Richard Henderson
   Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
#include <linux/export.h>
#include <linux/moduleloader.h>
#include <linux/ftrace_event.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/rcupdate.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vermagic.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <linux/license.h>
#include <asm/sections.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/async.h>
#include <linux/percpu.h>
#include <linux/kmemleak.h>
#include <linux/jump_label.h>
#include <linux/pfn.h>
#include <linux/bsearch.h>
#include <linux/fips.h>
#include <uapi/linux/module.h>
#include "module-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/module.h>
#ifndef ARCH_SHF_SMALL
#define ARCH_SHF_SMALL 0
#endif

/*
 * Modules' sections will be aligned on page boundaries
 * to ensure complete separation of code and data, but
 * only when CONFIG_DEBUG_SET_MODULE_RONX=y
 */
#ifdef CONFIG_DEBUG_SET_MODULE_RONX
# define debug_align(X) ALIGN(X, PAGE_SIZE)
#else
# define debug_align(X) (X)
#endif
/*
 * Given BASE and SIZE this macro calculates the number of pages the
 * memory region occupies
 */
#define MOD_NUMBER_OF_PAGES(BASE, SIZE) (((SIZE) > 0) ?		\
		(PFN_DOWN((unsigned long)(BASE) + (SIZE) - 1) -	\
			 PFN_DOWN((unsigned long)BASE) + 1)	\
		: (0UL))
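/*
 * Illustrative example (assuming 4K pages): BASE = 0x1ff8, SIZE = 16
 * straddles the page boundary at 0x2000, so PFN_DOWN(0x2007) = 2,
 * PFN_DOWN(0x1ff8) = 1, and the macro yields 2 - 1 + 1 = 2 pages.
 */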
/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
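/*
 * layout_sections() below ORs this bit into sh_entsize for every ".init*"
 * section; later stages of loading test the bit to decide whether a section
 * is copied into the init region (freed after init) or the core region.
 */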
/*
 * Mutex protects:
 * 1) List of modules (also safely readable with preempt_disable),
 * 2) module_use links,
 * 3) module_addr_min/module_addr_max.
 * (delete uses stop_machine/add uses RCU list operations.) */
DEFINE_MUTEX(module_mutex);
EXPORT_SYMBOL_GPL(module_mutex);
static LIST_HEAD(modules);
#ifdef CONFIG_KGDB_KDB
struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
#endif /* CONFIG_KGDB_KDB */
#ifdef CONFIG_MODULE_SIG
#ifdef CONFIG_MODULE_SIG_FORCE
static bool sig_enforce = true;
#else
static bool sig_enforce = false;

static int param_set_bool_enable_only(const char *val,
				      const struct kernel_param *kp)
{
	int err;
	bool test;
	struct kernel_param dummy_kp = *kp;

	dummy_kp.arg = &test;

	err = param_set_bool(val, &dummy_kp);
	if (err)
		return err;

	/* Don't let them unset it once it's set! */
	if (!test && sig_enforce)
		return -EROFS;

	if (test)
		sig_enforce = true;
	return 0;
}

static const struct kernel_param_ops param_ops_bool_enable_only = {
	.set = param_set_bool_enable_only,
	.get = param_get_bool,
};
#define param_check_bool_enable_only param_check_bool

module_param(sig_enforce, bool_enable_only, 0644);
#endif /* !CONFIG_MODULE_SIG_FORCE */
#endif /* CONFIG_MODULE_SIG */
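/*
 * The bool_enable_only ops above make signature enforcement one-way:
 * booting with "module.sig_enforce=1" (or writing 1 to the sysfs file)
 * turns it on, but a subsequent write of 0 fails with -EROFS, so it can
 * never be switched off again at runtime.
 */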
/* Block module loading/unloading? */
int modules_disabled = 0;
core_param(nomodule, modules_disabled, bint, 0);

/* Waiting for a module to finish initializing? */
static DECLARE_WAIT_QUEUE_HEAD(module_wq);

static BLOCKING_NOTIFIER_HEAD(module_notify_list);

/* Bounds of module allocation, for speeding __module_address.
 * Protected by module_mutex. */
static unsigned long module_addr_min = -1UL, module_addr_max = 0;

int register_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&module_notify_list, nb);
}
EXPORT_SYMBOL(register_module_notifier);

int unregister_module_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&module_notify_list, nb);
}
EXPORT_SYMBOL(unregister_module_notifier);
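/*
 * Bookkeeping for one module load in progress: pointers into the temporary
 * ELF image, plus the indices of the special sections found within it.
 */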
struct load_info {
	Elf_Ehdr *hdr;
	unsigned long len;
	Elf_Shdr *sechdrs;
	char *secstrings, *strtab;
	unsigned long symoffs, stroffs;
	struct _ddebug *debug;
	unsigned int num_debug;
	bool sig_ok;
	struct {
		unsigned int sym, str, mod, vers, info, pcpu;
	} index;
};
/* We require a truly strong try_module_get(): 0 means failure due to
   ongoing or failed initialization etc. */
static inline int strong_try_module_get(struct module *mod)
{
	BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
	if (mod && mod->state == MODULE_STATE_COMING)
		return -EBUSY;
	if (try_module_get(mod))
		return 0;
	else
		return -ENOENT;
}

static inline void add_taint_module(struct module *mod, unsigned flag)
{
	add_taint(flag);
	mod->taints |= (1U << flag);
}

/*
 * A thread that wants to hold a reference to a module only while it
 * is running can call this to safely exit.  nfsd and lockd use this.
 */
void __module_put_and_exit(struct module *mod, long code)
{
	module_put(mod);
	do_exit(code);
}
EXPORT_SYMBOL(__module_put_and_exit);
/* Find a module section: 0 means not found. */
static unsigned int find_sec(const struct load_info *info, const char *name)
{
	unsigned int i;

	for (i = 1; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *shdr = &info->sechdrs[i];
		/* Alloc bit cleared means "ignore it." */
		if ((shdr->sh_flags & SHF_ALLOC)
		    && strcmp(info->secstrings + shdr->sh_name, name) == 0)
			return i;
	}
	return 0;
}

/* Find a module section, or NULL. */
static void *section_addr(const struct load_info *info, const char *name)
{
	/* Section 0 has sh_addr 0. */
	return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
}

/* Find a module section, or NULL.  Fill in number of "objects" in section. */
static void *section_objs(const struct load_info *info,
			  const char *name,
			  size_t object_size,
			  unsigned int *num)
{
	unsigned int sec = find_sec(info, name);

	/* Section 0 has sh_addr 0 and sh_size 0. */
	*num = info->sechdrs[sec].sh_size / object_size;
	return (void *)info->sechdrs[sec].sh_addr;
}
/* Provided by the linker */
extern const struct kernel_symbol __start___ksymtab[];
extern const struct kernel_symbol __stop___ksymtab[];
extern const struct kernel_symbol __start___ksymtab_gpl[];
extern const struct kernel_symbol __stop___ksymtab_gpl[];
extern const struct kernel_symbol __start___ksymtab_gpl_future[];
extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
extern const unsigned long __start___kcrctab[];
extern const unsigned long __start___kcrctab_gpl[];
extern const unsigned long __start___kcrctab_gpl_future[];
#ifdef CONFIG_UNUSED_SYMBOLS
extern const struct kernel_symbol __start___ksymtab_unused[];
extern const struct kernel_symbol __stop___ksymtab_unused[];
extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
extern const unsigned long __start___kcrctab_unused[];
extern const unsigned long __start___kcrctab_unused_gpl[];
#endif

#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
#else
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif
static bool each_symbol_in_section(const struct symsearch *arr,
				   unsigned int arrsize,
				   struct module *owner,
				   bool (*fn)(const struct symsearch *syms,
					      struct module *owner,
					      void *data),
				   void *data)
{
	unsigned int j;

	for (j = 0; j < arrsize; j++) {
		if (fn(&arr[j], owner, data))
			return true;
	}

	return false;
}

/* Returns true as soon as fn returns true, otherwise false. */
bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
				    struct module *owner,
				    void *data),
			 void *data)
{
	struct module *mod;
	static const struct symsearch arr[] = {
		{ __start___ksymtab, __stop___ksymtab, __start___kcrctab,
		  NOT_GPL_ONLY, false },
		{ __start___ksymtab_gpl, __stop___ksymtab_gpl,
		  __start___kcrctab_gpl,
		  GPL_ONLY, false },
		{ __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
		  __start___kcrctab_gpl_future,
		  WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
		{ __start___ksymtab_unused, __stop___ksymtab_unused,
		  __start___kcrctab_unused,
		  NOT_GPL_ONLY, true },
		{ __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
		  __start___kcrctab_unused_gpl,
		  GPL_ONLY, true },
#endif
	};

	if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
		return true;

	list_for_each_entry_rcu(mod, &modules, list) {
		struct symsearch arr[] = {
			{ mod->syms, mod->syms + mod->num_syms, mod->crcs,
			  NOT_GPL_ONLY, false },
			{ mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
			  mod->gpl_crcs,
			  GPL_ONLY, false },
			{ mod->gpl_future_syms,
			  mod->gpl_future_syms + mod->num_gpl_future_syms,
			  mod->gpl_future_crcs,
			  WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
			{ mod->unused_syms,
			  mod->unused_syms + mod->num_unused_syms,
			  mod->unused_crcs,
			  NOT_GPL_ONLY, true },
			{ mod->unused_gpl_syms,
			  mod->unused_gpl_syms + mod->num_unused_gpl_syms,
			  mod->unused_gpl_crcs,
			  GPL_ONLY, true },
#endif
		};

		if (mod->state == MODULE_STATE_UNFORMED)
			continue;

		if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
			return true;
	}
	return false;
}
EXPORT_SYMBOL_GPL(each_symbol_section);
struct find_symbol_arg {
	/* Input */
	const char *name;
	bool gplok;
	bool warn;

	/* Output */
	struct module *owner;
	const unsigned long *crc;
	const struct kernel_symbol *sym;
};

static bool check_symbol(const struct symsearch *syms,
			 struct module *owner,
			 unsigned int symnum, void *data)
{
	struct find_symbol_arg *fsa = data;

	if (!fsa->gplok) {
		if (syms->licence == GPL_ONLY)
			return false;
		if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
			printk(KERN_WARNING "Symbol %s is being used "
			       "by a non-GPL module, which will not "
			       "be allowed in the future\n", fsa->name);
		}
	}

#ifdef CONFIG_UNUSED_SYMBOLS
	if (syms->unused && fsa->warn) {
		printk(KERN_WARNING "Symbol %s is marked as UNUSED, "
		       "however this module is using it.\n", fsa->name);
		printk(KERN_WARNING
		       "This symbol will go away in the future.\n");
		printk(KERN_WARNING
		       "Please evaluate if this is the right api to use and "
		       "if it really is, submit a report to the linux kernel "
		       "mailing list together with submitting your code for "
		       "inclusion.\n");
	}
#endif

	fsa->owner = owner;
	fsa->crc = symversion(syms->crcs, symnum);
	fsa->sym = &syms->start[symnum];
	return true;
}
static int cmp_name(const void *va, const void *vb)
{
	const char *a;
	const struct kernel_symbol *b;
	a = va; b = vb;
	return strcmp(a, b->name);
}

static bool find_symbol_in_section(const struct symsearch *syms,
				   struct module *owner,
				   void *data)
{
	struct find_symbol_arg *fsa = data;
	struct kernel_symbol *sym;

	sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
			sizeof(struct kernel_symbol), cmp_name);

	if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data))
		return true;

	return false;
}
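/*
 * Note: bsearch() is valid here because each exported-symbol table is
 * sorted by name at build time, matching cmp_name()'s strcmp() ordering.
 */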
/* Find a symbol and return it, along with (optional) crc and
 * (optional) module which owns it.  Needs preempt disabled or module_mutex. */
const struct kernel_symbol *find_symbol(const char *name,
					struct module **owner,
					const unsigned long **crc,
					bool gplok,
					bool warn)
{
	struct find_symbol_arg fsa;

	fsa.name = name;
	fsa.gplok = gplok;
	fsa.warn = warn;

	if (each_symbol_section(find_symbol_in_section, &fsa)) {
		if (owner)
			*owner = fsa.owner;
		if (crc)
			*crc = fsa.crc;
		return fsa.sym;
	}

	pr_debug("Failed to find symbol %s\n", name);
	return NULL;
}
EXPORT_SYMBOL_GPL(find_symbol);
/* Search for module by name: must hold module_mutex. */
static struct module *find_module_all(const char *name,
				      bool even_unformed)
{
	struct module *mod;

	list_for_each_entry(mod, &modules, list) {
		if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (strcmp(mod->name, name) == 0)
			return mod;
	}
	return NULL;
}

struct module *find_module(const char *name)
{
	return find_module_all(name, false);
}
EXPORT_SYMBOL_GPL(find_module);
#ifdef CONFIG_SMP

static inline void __percpu *mod_percpu(struct module *mod)
{
	return mod->percpu;
}

static int percpu_modalloc(struct module *mod,
			   unsigned long size, unsigned long align)
{
	if (align > PAGE_SIZE) {
		printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
		       mod->name, align, PAGE_SIZE);
		align = PAGE_SIZE;
	}

	mod->percpu = __alloc_reserved_percpu(size, align);
	if (!mod->percpu) {
		printk(KERN_WARNING
		       "%s: Could not allocate %lu bytes percpu data\n",
		       mod->name, size);
		return -ENOMEM;
	}
	mod->percpu_size = size;
	return 0;
}

static void percpu_modfree(struct module *mod)
{
	free_percpu(mod->percpu);
}

static unsigned int find_pcpusec(struct load_info *info)
{
	return find_sec(info, ".data..percpu");
}

static void percpu_modcopy(struct module *mod,
			   const void *from, unsigned long size)
{
	int cpu;

	for_each_possible_cpu(cpu)
		memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
}

/**
 * is_module_percpu_address - test whether address is from module static percpu
 * @addr: address to test
 *
 * Test whether @addr belongs to module static percpu area.
 *
 * RETURNS:
 * %true if @addr is from module static percpu area
 */
bool is_module_percpu_address(unsigned long addr)
{
	struct module *mod;
	unsigned int cpu;

	preempt_disable();

	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if (!mod->percpu_size)
			continue;
		for_each_possible_cpu(cpu) {
			void *start = per_cpu_ptr(mod->percpu, cpu);

			if ((void *)addr >= start &&
			    (void *)addr < start + mod->percpu_size) {
				preempt_enable();
				return true;
			}
		}
	}

	preempt_enable();
	return false;
}

#else /* ... !CONFIG_SMP */

static inline void __percpu *mod_percpu(struct module *mod)
{
	return NULL;
}
static inline int percpu_modalloc(struct module *mod,
				  unsigned long size, unsigned long align)
{
	return -ENOMEM;
}
static inline void percpu_modfree(struct module *mod)
{
}
static unsigned int find_pcpusec(struct load_info *info)
{
	return 0;
}
static inline void percpu_modcopy(struct module *mod,
				  const void *from, unsigned long size)
{
	/* pcpusec should be 0, and size of that section should be 0. */
	BUG_ON(size != 0);
}
bool is_module_percpu_address(unsigned long addr)
{
	return false;
}

#endif /* CONFIG_SMP */
#define MODINFO_ATTR(field)	\
static void setup_modinfo_##field(struct module *mod, const char *s)  \
{                                                                     \
	mod->field = kstrdup(s, GFP_KERNEL);                          \
}                                                                     \
static ssize_t show_modinfo_##field(struct module_attribute *mattr,   \
			struct module_kobject *mk, char *buffer)      \
{                                                                     \
	return sprintf(buffer, "%s\n", mk->mod->field);               \
}                                                                     \
static int modinfo_##field##_exists(struct module *mod)               \
{                                                                     \
	return mod->field != NULL;                                    \
}                                                                     \
static void free_modinfo_##field(struct module *mod)                  \
{                                                                     \
	kfree(mod->field);                                            \
	mod->field = NULL;                                            \
}                                                                     \
static struct module_attribute modinfo_##field = {                    \
	.attr = { .name = __stringify(field), .mode = 0444 },         \
	.show = show_modinfo_##field,                                 \
	.setup = setup_modinfo_##field,                               \
	.test = modinfo_##field##_exists,                             \
	.free = free_modinfo_##field,                                 \
};

MODINFO_ATTR(version);
MODINFO_ATTR(srcversion);
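/*
 * Each MODINFO_ATTR(field) expansion generates the setup/show/test/free
 * hooks plus a module_attribute, so the two lines above are what provide
 * the /sys/module/<name>/version and /sys/module/<name>/srcversion files,
 * backed by the "version="/"srcversion=" tags parsed out of .modinfo.
 */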
static char last_unloaded_module[MODULE_NAME_LEN+1];

#ifdef CONFIG_MODULE_UNLOAD

EXPORT_TRACEPOINT_SYMBOL(module_get);

/* Init the unload section of the module. */
static int module_unload_init(struct module *mod)
{
	mod->refptr = alloc_percpu(struct module_ref);
	if (!mod->refptr)
		return -ENOMEM;

	INIT_LIST_HEAD(&mod->source_list);
	INIT_LIST_HEAD(&mod->target_list);

	/* Hold reference count during initialization. */
	__this_cpu_write(mod->refptr->incs, 1);
	/* Backwards compatibility macros put refcount during init. */
	mod->waiter = current;

	return 0;
}
/* Does a already use b? */
static int already_uses(struct module *a, struct module *b)
{
	struct module_use *use;

	list_for_each_entry(use, &b->source_list, source_list) {
		if (use->source == a) {
			pr_debug("%s uses %s!\n", a->name, b->name);
			return 1;
		}
	}
	pr_debug("%s does not use %s!\n", a->name, b->name);
	return 0;
}

/*
 * Module a uses b
 *  - we add 'a' as a "source", 'b' as a "target" of module use
 *  - the module_use is added to the list of 'b' sources (so
 *    'b' can walk the list to see who sourced them), and of 'a'
 *    targets (so 'a' can see what modules it targets).
 */
static int add_module_usage(struct module *a, struct module *b)
{
	struct module_use *use;

	pr_debug("Allocating new usage for %s.\n", a->name);
	use = kmalloc(sizeof(*use), GFP_ATOMIC);
	if (!use) {
		printk(KERN_WARNING "%s: out of memory loading\n", a->name);
		return -ENOMEM;
	}

	use->source = a;
	use->target = b;
	list_add(&use->source_list, &b->source_list);
	list_add(&use->target_list, &a->target_list);
	return 0;
}
/* Module a uses b: caller needs module_mutex() */
int ref_module(struct module *a, struct module *b)
{
	int err;

	if (b == NULL || already_uses(a, b))
		return 0;

	/* If module isn't available, we fail. */
	err = strong_try_module_get(b);
	if (err)
		return err;

	err = add_module_usage(a, b);
	if (err) {
		module_put(b);
		return err;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(ref_module);

/* Clear the unload stuff of the module. */
static void module_unload_free(struct module *mod)
{
	struct module_use *use, *tmp;

	mutex_lock(&module_mutex);
	list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
		struct module *i = use->target;
		pr_debug("%s unusing %s\n", mod->name, i->name);
		module_put(i);
		list_del(&use->source_list);
		list_del(&use->target_list);
		kfree(use);
	}
	mutex_unlock(&module_mutex);

	free_percpu(mod->refptr);
}
#ifdef CONFIG_MODULE_FORCE_UNLOAD
static inline int try_force_unload(unsigned int flags)
{
	int ret = (flags & O_TRUNC);
	if (ret)
		add_taint(TAINT_FORCED_RMMOD);
	return ret;
}
#else
static inline int try_force_unload(unsigned int flags)
{
	return 0;
}
#endif /* CONFIG_MODULE_FORCE_UNLOAD */

struct stopref
{
	struct module *mod;
	int flags;
	int *forced;
};
/* Whole machine is stopped with interrupts off when this runs. */
static int __try_stop_module(void *_sref)
{
	struct stopref *sref = _sref;

	/* If it's not unused, quit unless we're forcing. */
	if (module_refcount(sref->mod) != 0) {
		if (!(*sref->forced = try_force_unload(sref->flags)))
			return -EWOULDBLOCK;
	}

	/* Mark it as dying. */
	sref->mod->state = MODULE_STATE_GOING;
	return 0;
}

static int try_stop_module(struct module *mod, int flags, int *forced)
{
	if (flags & O_NONBLOCK) {
		struct stopref sref = { mod, flags, forced };

		return stop_machine(__try_stop_module, &sref, NULL);
	} else {
		/* We don't need to stop the machine for this. */
		mod->state = MODULE_STATE_GOING;
		synchronize_sched();
		return 0;
	}
}
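/*
 * The O_NONBLOCK case has to check the refcount and flip the state
 * atomically with respect to every CPU, hence stop_machine(); the
 * blocking case can simply mark the module GOING and let the caller
 * sleep until the refcount drains to zero.
 */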
unsigned long module_refcount(struct module *mod)
{
	unsigned long incs = 0, decs = 0;
	int cpu;

	for_each_possible_cpu(cpu)
		decs += per_cpu_ptr(mod->refptr, cpu)->decs;
	/*
	 * ensure the incs are added up after the decs.
	 * module_put ensures incs are visible before decs with smp_wmb.
	 *
	 * This 2-count scheme avoids the situation where the refcount
	 * for CPU0 is read, then CPU0 increments the module refcount,
	 * then CPU1 drops that refcount, then the refcount for CPU1 is
	 * read. We would record a decrement but not its corresponding
	 * increment so we would see a low count (disaster).
	 *
	 * Rare situation? But module_refcount can be preempted, and we
	 * might be tallying up 4096+ CPUs. So it is not impossible.
	 */
	smp_rmb();
	for_each_possible_cpu(cpu)
		incs += per_cpu_ptr(mod->refptr, cpu)->incs;
	return incs - decs;
}
EXPORT_SYMBOL(module_refcount);
/* This exists whether we can unload or not */
static void free_module(struct module *mod);

static void wait_for_zero_refcount(struct module *mod)
{
	/* Since we might sleep for some time, release the mutex first */
	mutex_unlock(&module_mutex);
	for (;;) {
		pr_debug("Looking at refcount...\n");
		set_current_state(TASK_UNINTERRUPTIBLE);
		if (module_refcount(mod) == 0)
			break;
		schedule();
	}
	current->state = TASK_RUNNING;
	mutex_lock(&module_mutex);
}
SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
		unsigned int, flags)
{
	struct module *mod;
	char name[MODULE_NAME_LEN];
	int ret, forced = 0;

	if (!capable(CAP_SYS_MODULE) || modules_disabled)
		return -EPERM;

	if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
		return -EFAULT;
	name[MODULE_NAME_LEN-1] = '\0';

	if (mutex_lock_interruptible(&module_mutex) != 0)
		return -EINTR;

	mod = find_module(name);
	if (!mod) {
		ret = -ENOENT;
		goto out;
	}

	if (!list_empty(&mod->source_list)) {
		/* Other modules depend on us: get rid of them first. */
		ret = -EWOULDBLOCK;
		goto out;
	}

	/* Doing init or already dying? */
	if (mod->state != MODULE_STATE_LIVE) {
		/* FIXME: if (force), slam module count and wake up
		   waiter --RR */
		pr_debug("%s already dying\n", mod->name);
		ret = -EBUSY;
		goto out;
	}

	/* If it has an init func, it must have an exit func to unload */
	if (mod->init && !mod->exit) {
		forced = try_force_unload(flags);
		if (!forced) {
			/* This module can't be removed */
			ret = -EBUSY;
			goto out;
		}
	}

	/* Set this up before setting mod->state */
	mod->waiter = current;

	/* Stop the machine so refcounts can't move and disable module. */
	ret = try_stop_module(mod, flags, &forced);
	if (ret != 0)
		goto out;

	/* Never wait if forced. */
	if (!forced && module_refcount(mod) != 0)
		wait_for_zero_refcount(mod);

	mutex_unlock(&module_mutex);
	/* Final destruction now no one is using it. */
	if (mod->exit != NULL)
		mod->exit();
	blocking_notifier_call_chain(&module_notify_list,
				     MODULE_STATE_GOING, mod);
	async_synchronize_full();

	/* Store the name of the last unloaded module for diagnostic purposes */
	strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));

	free_module(mod);
	return 0;
out:
	mutex_unlock(&module_mutex);
	return ret;
}
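/*
 * The flags are reused open(2) bits: in practice rmmod passes O_NONBLOCK,
 * and "rmmod -f" adds O_TRUNC, which try_force_unload() above treats as
 * permission to remove a busy module (tainting the kernel).
 */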
static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
	struct module_use *use;
	int printed_something = 0;

	seq_printf(m, " %lu ", module_refcount(mod));

	/* Always include a trailing , so userspace can differentiate
	   between this and the old multi-field proc format. */
	list_for_each_entry(use, &mod->source_list, source_list) {
		printed_something = 1;
		seq_printf(m, "%s,", use->source->name);
	}

	if (mod->init != NULL && mod->exit == NULL) {
		printed_something = 1;
		seq_printf(m, "[permanent],");
	}

	if (!printed_something)
		seq_printf(m, "-");
}
void __symbol_put(const char *symbol)
{
	struct module *owner;

	preempt_disable();
	if (!find_symbol(symbol, &owner, NULL, true, false))
		BUG();
	module_put(owner);
	preempt_enable();
}
EXPORT_SYMBOL(__symbol_put);

/* Note this assumes addr is a function, which it currently always is. */
void symbol_put_addr(void *addr)
{
	struct module *modaddr;
	unsigned long a = (unsigned long)dereference_function_descriptor(addr);

	if (core_kernel_text(a))
		return;

	/* module_text_address is safe here: we're supposed to have reference
	 * to module from symbol_get, so it can't go away. */
	modaddr = __module_text_address(a);
	BUG_ON(!modaddr);
	module_put(modaddr);
}
EXPORT_SYMBOL_GPL(symbol_put_addr);
static ssize_t show_refcnt(struct module_attribute *mattr,
			   struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%lu\n", module_refcount(mk->mod));
}

static struct module_attribute modinfo_refcnt =
	__ATTR(refcnt, 0444, show_refcnt, NULL);

void __module_get(struct module *module)
{
	if (module) {
		preempt_disable();
		__this_cpu_inc(module->refptr->incs);
		trace_module_get(module, _RET_IP_);
		preempt_enable();
	}
}
EXPORT_SYMBOL(__module_get);
bool try_module_get(struct module *module)
{
	bool ret = true;

	if (module) {
		preempt_disable();

		if (likely(module_is_live(module))) {
			__this_cpu_inc(module->refptr->incs);
			trace_module_get(module, _RET_IP_);
		} else
			ret = false;

		preempt_enable();
	}
	return ret;
}
EXPORT_SYMBOL(try_module_get);
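/*
 * Typical caller pattern (illustrative only):
 *
 *	if (!try_module_get(owner))
 *		return -ENODEV;
 *	...use code or data belonging to "owner"...
 *	module_put(owner);
 */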
void module_put(struct module *module)
{
	if (module) {
		preempt_disable();
		smp_wmb(); /* see comment in module_refcount */
		__this_cpu_inc(module->refptr->decs);

		trace_module_put(module, _RET_IP_);
		/* Maybe they're waiting for us to drop reference? */
		if (unlikely(!module_is_live(module)))
			wake_up_process(module->waiter);
		preempt_enable();
	}
}
EXPORT_SYMBOL(module_put);

#else /* !CONFIG_MODULE_UNLOAD */
static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
	/* We don't know the usage count, or what modules are using. */
	seq_printf(m, " - -");
}

static inline void module_unload_free(struct module *mod)
{
}

int ref_module(struct module *a, struct module *b)
{
	return strong_try_module_get(b);
}
EXPORT_SYMBOL_GPL(ref_module);

static inline int module_unload_init(struct module *mod)
{
	return 0;
}
#endif /* CONFIG_MODULE_UNLOAD */
static size_t module_flags_taint(struct module *mod, char *buf)
{
	size_t l = 0;

	if (mod->taints & (1 << TAINT_PROPRIETARY_MODULE))
		buf[l++] = 'P';
	if (mod->taints & (1 << TAINT_OOT_MODULE))
		buf[l++] = 'O';
	if (mod->taints & (1 << TAINT_FORCED_MODULE))
		buf[l++] = 'F';
	if (mod->taints & (1 << TAINT_CRAP))
		buf[l++] = 'C';
	/*
	 * TAINT_FORCED_RMMOD: could be added.
	 * TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
	 * apply to modules.
	 */
	return l;
}
static ssize_t show_initstate(struct module_attribute *mattr,
			      struct module_kobject *mk, char *buffer)
{
	const char *state = "unknown";

	switch (mk->mod->state) {
	case MODULE_STATE_LIVE:
		state = "live";
		break;
	case MODULE_STATE_COMING:
		state = "coming";
		break;
	case MODULE_STATE_GOING:
		state = "going";
		break;
	default:
		BUG();
	}
	return sprintf(buffer, "%s\n", state);
}

static struct module_attribute modinfo_initstate =
	__ATTR(initstate, 0444, show_initstate, NULL);

static ssize_t store_uevent(struct module_attribute *mattr,
			    struct module_kobject *mk,
			    const char *buffer, size_t count)
{
	enum kobject_action action;

	if (kobject_action_type(buffer, count, &action) == 0)
		kobject_uevent(&mk->kobj, action);
	return count;
}

struct module_attribute module_uevent =
	__ATTR(uevent, 0200, NULL, store_uevent);
static ssize_t show_coresize(struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%u\n", mk->mod->core_size);
}

static struct module_attribute modinfo_coresize =
	__ATTR(coresize, 0444, show_coresize, NULL);

static ssize_t show_initsize(struct module_attribute *mattr,
			     struct module_kobject *mk, char *buffer)
{
	return sprintf(buffer, "%u\n", mk->mod->init_size);
}

static struct module_attribute modinfo_initsize =
	__ATTR(initsize, 0444, show_initsize, NULL);

static ssize_t show_taint(struct module_attribute *mattr,
			  struct module_kobject *mk, char *buffer)
{
	size_t l;

	l = module_flags_taint(mk->mod, buffer);
	buffer[l++] = '\n';
	return l;
}

static struct module_attribute modinfo_taint =
	__ATTR(taint, 0444, show_taint, NULL);

static struct module_attribute *modinfo_attrs[] = {
	&module_uevent,
	&modinfo_version,
	&modinfo_srcversion,
	&modinfo_initstate,
	&modinfo_coresize,
	&modinfo_initsize,
	&modinfo_taint,
#ifdef CONFIG_MODULE_UNLOAD
	&modinfo_refcnt,
#endif
	NULL,
};
static const char vermagic[] = VERMAGIC_STRING;

static int try_to_force_load(struct module *mod, const char *reason)
{
#ifdef CONFIG_MODULE_FORCE_LOAD
	if (!test_taint(TAINT_FORCED_MODULE))
		printk(KERN_WARNING "%s: %s: kernel tainted.\n",
		       mod->name, reason);
	add_taint_module(mod, TAINT_FORCED_MODULE);
	return 0;
#else
	return -ENOEXEC;
#endif
}
#ifdef CONFIG_MODVERSIONS
/* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */
static unsigned long maybe_relocated(unsigned long crc,
				     const struct module *crc_owner)
{
#ifdef ARCH_RELOCATES_KCRCTAB
	if (crc_owner == NULL)
		return crc - (unsigned long)reloc_start;
#endif
	return crc;
}

static int check_version(Elf_Shdr *sechdrs,
			 unsigned int versindex,
			 const char *symname,
			 struct module *mod,
			 const unsigned long *crc,
			 const struct module *crc_owner)
{
	unsigned int i, num_versions;
	struct modversion_info *versions;

	/* Exporting module didn't supply crcs?  OK, we're already tainted. */
	if (!crc)
		return 1;

	/* No versions at all?  modprobe --force does this. */
	if (versindex == 0)
		return try_to_force_load(mod, symname) == 0;

	versions = (void *) sechdrs[versindex].sh_addr;
	num_versions = sechdrs[versindex].sh_size
		/ sizeof(struct modversion_info);

	for (i = 0; i < num_versions; i++) {
		if (strcmp(versions[i].name, symname) != 0)
			continue;

		if (versions[i].crc == maybe_relocated(*crc, crc_owner))
			return 1;
		pr_debug("Found checksum %lX vs module %lX\n",
			 maybe_relocated(*crc, crc_owner), versions[i].crc);
		goto bad_version;
	}

	printk(KERN_WARNING "%s: no symbol version for %s\n",
	       mod->name, symname);
	return 0;

bad_version:
	printk("%s: disagrees about version of symbol %s\n",
	       mod->name, symname);
	return 0;
}
static inline int check_modstruct_version(Elf_Shdr *sechdrs,
					  unsigned int versindex,
					  struct module *mod)
{
	const unsigned long *crc;

	/* Since this should be found in kernel (which can't be removed),
	 * no locking is necessary. */
	if (!find_symbol(MODULE_SYMBOL_PREFIX "module_layout", NULL,
			 &crc, true, false))
		BUG();
	return check_version(sechdrs, versindex, "module_layout", mod, crc,
			     NULL);
}

/* First part is kernel version, which we ignore if module has crcs. */
static inline int same_magic(const char *amagic, const char *bmagic,
			     bool has_crcs)
{
	if (has_crcs) {
		amagic += strcspn(amagic, " ");
		bmagic += strcspn(bmagic, " ");
	}
	return strcmp(amagic, bmagic) == 0;
}
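/*
 * A vermagic string looks like, for example, "3.8.0 SMP mod_unload ".
 * The leading token is the kernel release; when CRCs are available the
 * strcspn() calls above skip past it so only the feature flags must match.
 */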
#else
static inline int check_version(Elf_Shdr *sechdrs,
				unsigned int versindex,
				const char *symname,
				struct module *mod,
				const unsigned long *crc,
				const struct module *crc_owner)
{
	return 1;
}

static inline int check_modstruct_version(Elf_Shdr *sechdrs,
					  unsigned int versindex,
					  struct module *mod)
{
	return 1;
}

static inline int same_magic(const char *amagic, const char *bmagic,
			     bool has_crcs)
{
	return strcmp(amagic, bmagic) == 0;
}
#endif /* CONFIG_MODVERSIONS */
/* Resolve a symbol for this module.  I.e. if we find one, record usage. */
static const struct kernel_symbol *resolve_symbol(struct module *mod,
						  const struct load_info *info,
						  const char *name,
						  char ownername[])
{
	struct module *owner;
	const struct kernel_symbol *sym;
	const unsigned long *crc;
	int err;

	mutex_lock(&module_mutex);
	sym = find_symbol(name, &owner, &crc,
			  !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
	if (!sym)
		goto unlock;

	if (!check_version(info->sechdrs, info->index.vers, name, mod, crc,
			   owner)) {
		sym = ERR_PTR(-EINVAL);
		goto getname;
	}

	err = ref_module(mod, owner);
	if (err) {
		sym = ERR_PTR(err);
		goto getname;
	}

getname:
	/* We must make copy under the lock if we failed to get ref. */
	strncpy(ownername, module_name(owner), MODULE_NAME_LEN);
unlock:
	mutex_unlock(&module_mutex);
	return sym;
}
static const struct kernel_symbol *
resolve_symbol_wait(struct module *mod,
		    const struct load_info *info,
		    const char *name)
{
	const struct kernel_symbol *ksym;
	char owner[MODULE_NAME_LEN];

	if (wait_event_interruptible_timeout(module_wq,
			!IS_ERR(ksym = resolve_symbol(mod, info, name, owner))
			|| PTR_ERR(ksym) != -EBUSY,
					     30 * HZ) <= 0) {
		printk(KERN_WARNING "%s: gave up waiting for init of module %s.\n",
		       mod->name, owner);
	}
	return ksym;
}
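/*
 * -EBUSY from resolve_symbol() means the exporting module is still
 * MODULE_STATE_COMING (see strong_try_module_get()), so we give its init
 * up to 30 seconds to finish; module_wq is woken when a module completes
 * (or fails) initialization.
 */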
/*
 * /sys/module/foo/sections stuff
 * J. Corbet <corbet@lwn.net>
 */
#ifdef CONFIG_SYSFS

#ifdef CONFIG_KALLSYMS
static inline bool sect_empty(const Elf_Shdr *sect)
{
	return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
}

struct module_sect_attr
{
	struct module_attribute mattr;
	char *name;
	unsigned long address;
};

struct module_sect_attrs
{
	struct attribute_group grp;
	unsigned int nsections;
	struct module_sect_attr attrs[0];
};

static ssize_t module_sect_show(struct module_attribute *mattr,
				struct module_kobject *mk, char *buf)
{
	struct module_sect_attr *sattr =
		container_of(mattr, struct module_sect_attr, mattr);
	return sprintf(buf, "0x%pK\n", (void *)sattr->address);
}

static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
{
	unsigned int section;

	for (section = 0; section < sect_attrs->nsections; section++)
		kfree(sect_attrs->attrs[section].name);
	kfree(sect_attrs);
}
static void add_sect_attrs(struct module *mod, const struct load_info *info)
{
	unsigned int nloaded = 0, i, size[2];
	struct module_sect_attrs *sect_attrs;
	struct module_sect_attr *sattr;
	struct attribute **gattr;

	/* Count loaded sections and allocate structures */
	for (i = 0; i < info->hdr->e_shnum; i++)
		if (!sect_empty(&info->sechdrs[i]))
			nloaded++;
	size[0] = ALIGN(sizeof(*sect_attrs)
			+ nloaded * sizeof(sect_attrs->attrs[0]),
			sizeof(sect_attrs->grp.attrs[0]));
	size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]);
	sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
	if (sect_attrs == NULL)
		return;

	/* Setup section attributes. */
	sect_attrs->grp.name = "sections";
	sect_attrs->grp.attrs = (void *)sect_attrs + size[0];

	sect_attrs->nsections = 0;
	sattr = &sect_attrs->attrs[0];
	gattr = &sect_attrs->grp.attrs[0];
	for (i = 0; i < info->hdr->e_shnum; i++) {
		Elf_Shdr *sec = &info->sechdrs[i];
		if (sect_empty(sec))
			continue;
		sattr->address = sec->sh_addr;
		sattr->name = kstrdup(info->secstrings + sec->sh_name,
					GFP_KERNEL);
		if (sattr->name == NULL)
			goto out;
		sect_attrs->nsections++;
		sysfs_attr_init(&sattr->mattr.attr);
		sattr->mattr.show = module_sect_show;
		sattr->mattr.store = NULL;
		sattr->mattr.attr.name = sattr->name;
		sattr->mattr.attr.mode = S_IRUGO;
		*(gattr++) = &(sattr++)->mattr.attr;
	}
	*gattr = NULL;

	if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp))
		goto out;

	mod->sect_attrs = sect_attrs;
	return;
out:
	free_sect_attrs(sect_attrs);
}
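/*
 * Note the single-allocation layout used above: one kzalloc() holds the
 * module_sect_attrs header plus its flexible attrs[] array (size[0],
 * suitably aligned), followed by the NULL-terminated attribute-pointer
 * array that grp.attrs points at (size[1]).  free_sect_attrs() can
 * therefore release everything with one kfree() once the kstrdup()ed
 * names are gone.
 */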
static void remove_sect_attrs(struct module *mod)
{
	if (mod->sect_attrs) {
		sysfs_remove_group(&mod->mkobj.kobj,
				   &mod->sect_attrs->grp);
		/* We are positive that no one is using any sect attrs
		 * at this point.  Deallocate immediately. */
		free_sect_attrs(mod->sect_attrs);
		mod->sect_attrs = NULL;
	}
}

/*
 * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections.
 */

struct module_notes_attrs {
	struct kobject *dir;
	unsigned int notes;
	struct bin_attribute attrs[0];
};

static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
				 struct bin_attribute *bin_attr,
				 char *buf, loff_t pos, size_t count)
{
	/*
	 * The caller checked the pos and count against our size.
	 */
	memcpy(buf, bin_attr->private + pos, count);
	return count;
}

static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
			     unsigned int i)
{
	if (notes_attrs->dir) {
		while (i-- > 0)
			sysfs_remove_bin_file(notes_attrs->dir,
					      &notes_attrs->attrs[i]);
		kobject_put(notes_attrs->dir);
	}
	kfree(notes_attrs);
}
static void add_notes_attrs(struct module *mod, const struct load_info *info)
{
	unsigned int notes, loaded, i;
	struct module_notes_attrs *notes_attrs;
	struct bin_attribute *nattr;

	/* failed to create section attributes, so can't create notes */
	if (!mod->sect_attrs)
		return;

	/* Count notes sections and allocate structures.  */
	notes = 0;
	for (i = 0; i < info->hdr->e_shnum; i++)
		if (!sect_empty(&info->sechdrs[i]) &&
		    (info->sechdrs[i].sh_type == SHT_NOTE))
			++notes;

	if (notes == 0)
		return;

	notes_attrs = kzalloc(sizeof(*notes_attrs)
			      + notes * sizeof(notes_attrs->attrs[0]),
			      GFP_KERNEL);
	if (notes_attrs == NULL)
		return;

	notes_attrs->notes = notes;
	nattr = &notes_attrs->attrs[0];
	for (loaded = i = 0; i < info->hdr->e_shnum; ++i) {
		if (sect_empty(&info->sechdrs[i]))
			continue;
		if (info->sechdrs[i].sh_type == SHT_NOTE) {
			sysfs_bin_attr_init(nattr);
			nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
			nattr->attr.mode = S_IRUGO;
			nattr->size = info->sechdrs[i].sh_size;
			nattr->private = (void *) info->sechdrs[i].sh_addr;
			nattr->read = module_notes_read;
			++nattr;
		}
		++loaded;
	}

	notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj);
	if (!notes_attrs->dir)
		goto out;

	for (i = 0; i < notes; ++i)
		if (sysfs_create_bin_file(notes_attrs->dir,
					  &notes_attrs->attrs[i]))
			goto out;

	mod->notes_attrs = notes_attrs;
	return;

out:
	free_notes_attrs(notes_attrs, i);
}

static void remove_notes_attrs(struct module *mod)
{
	if (mod->notes_attrs)
		free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes);
}
#else

static inline void add_sect_attrs(struct module *mod,
				  const struct load_info *info)
{
}

static inline void remove_sect_attrs(struct module *mod)
{
}

static inline void add_notes_attrs(struct module *mod,
				   const struct load_info *info)
{
}

static inline void remove_notes_attrs(struct module *mod)
{
}
#endif /* CONFIG_KALLSYMS */
static void add_usage_links(struct module *mod)
{
#ifdef CONFIG_MODULE_UNLOAD
	struct module_use *use;
	int nowarn;

	mutex_lock(&module_mutex);
	list_for_each_entry(use, &mod->target_list, target_list) {
		nowarn = sysfs_create_link(use->target->holders_dir,
					   &mod->mkobj.kobj, mod->name);
	}
	mutex_unlock(&module_mutex);
#endif
}

static void del_usage_links(struct module *mod)
{
#ifdef CONFIG_MODULE_UNLOAD
	struct module_use *use;

	mutex_lock(&module_mutex);
	list_for_each_entry(use, &mod->target_list, target_list)
		sysfs_remove_link(use->target->holders_dir, mod->name);
	mutex_unlock(&module_mutex);
#endif
}
static int module_add_modinfo_attrs(struct module *mod)
{
	struct module_attribute *attr;
	struct module_attribute *temp_attr;
	int error = 0;
	int i;

	mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) *
					(ARRAY_SIZE(modinfo_attrs) + 1)),
					GFP_KERNEL);
	if (!mod->modinfo_attrs)
		return -ENOMEM;

	temp_attr = mod->modinfo_attrs;
	for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) {
		if (!attr->test ||
		    (attr->test && attr->test(mod))) {
			memcpy(temp_attr, attr, sizeof(*temp_attr));
			sysfs_attr_init(&temp_attr->attr);
			error = sysfs_create_file(&mod->mkobj.kobj,
						  &temp_attr->attr);
			++temp_attr;
		}
	}
	return error;
}

static void module_remove_modinfo_attrs(struct module *mod)
{
	struct module_attribute *attr;
	int i;

	for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
		/* pick a field to test for end of list */
		if (!attr->attr.name)
			break;
		sysfs_remove_file(&mod->mkobj.kobj, &attr->attr);
		if (attr->free)
			attr->free(mod);
	}
	kfree(mod->modinfo_attrs);
}
static int mod_sysfs_init(struct module *mod)
{
	int err;
	struct kobject *kobj;

	if (!module_sysfs_initialized) {
		printk(KERN_ERR "%s: module sysfs not initialized\n",
		       mod->name);
		err = -EINVAL;
		goto out;
	}

	kobj = kset_find_obj(module_kset, mod->name);
	if (kobj) {
		printk(KERN_ERR "%s: module is already loaded\n", mod->name);
		kobject_put(kobj);
		err = -EINVAL;
		goto out;
	}

	mod->mkobj.mod = mod;

	memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj));
	mod->mkobj.kobj.kset = module_kset;
	err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL,
				   "%s", mod->name);
	if (err)
		kobject_put(&mod->mkobj.kobj);

	/* delay uevent until full sysfs population */
out:
	return err;
}
static int mod_sysfs_setup(struct module *mod,
			   const struct load_info *info,
			   struct kernel_param *kparam,
			   unsigned int num_params)
{
	int err;

	err = mod_sysfs_init(mod);
	if (err)
		goto out;

	mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj);
	if (!mod->holders_dir) {
		err = -ENOMEM;
		goto out_unreg;
	}

	err = module_param_sysfs_setup(mod, kparam, num_params);
	if (err)
		goto out_unreg_holders;

	err = module_add_modinfo_attrs(mod);
	if (err)
		goto out_unreg_param;

	add_usage_links(mod);
	add_sect_attrs(mod, info);
	add_notes_attrs(mod, info);

	kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
	return 0;

out_unreg_param:
	module_param_sysfs_remove(mod);
out_unreg_holders:
	kobject_put(mod->holders_dir);
out_unreg:
	kobject_put(&mod->mkobj.kobj);
out:
	return err;
}
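/*
 * After a successful mod_sysfs_setup() the module's sysfs subtree looks
 * roughly like:
 *
 *	/sys/module/<name>/holders/	(links to dependent modules)
 *	/sys/module/<name>/parameters/	(one file per module_param)
 *	/sys/module/<name>/sections/	(section load addresses)
 *	/sys/module/<name>/notes/	(SHT_NOTE section contents)
 *	/sys/module/<name>/{version,srcversion,initstate,taint,...}
 *
 * and the KOBJ_ADD uevent is deliberately sent only once all of it exists.
 */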
static void mod_sysfs_fini(struct module *mod)
{
	remove_notes_attrs(mod);
	remove_sect_attrs(mod);
	kobject_put(&mod->mkobj.kobj);
}

#else /* !CONFIG_SYSFS */

static int mod_sysfs_setup(struct module *mod,
			   const struct load_info *info,
			   struct kernel_param *kparam,
			   unsigned int num_params)
{
	return 0;
}

static void mod_sysfs_fini(struct module *mod)
{
}

static void module_remove_modinfo_attrs(struct module *mod)
{
}

static void del_usage_links(struct module *mod)
{
}

#endif /* CONFIG_SYSFS */

static void mod_sysfs_teardown(struct module *mod)
{
	del_usage_links(mod);
	module_remove_modinfo_attrs(mod);
	module_param_sysfs_remove(mod);
	kobject_put(mod->mkobj.drivers_dir);
	kobject_put(mod->holders_dir);
	mod_sysfs_fini(mod);
}
/*
 * unlink the module while the whole machine is stopped with interrupts off
 * - this defends against kallsyms not taking locks
 */
static int __unlink_module(void *_mod)
{
	struct module *mod = _mod;
	list_del(&mod->list);
	module_bug_cleanup(mod);
	return 0;
}
#ifdef CONFIG_DEBUG_SET_MODULE_RONX
/*
 * LKM RO/NX protection: protect module's text/ro-data
 * from modification and any data from execution.
 */
void set_page_attributes(void *start, void *end,
			 int (*set)(unsigned long start, int num_pages))
{
	unsigned long begin_pfn = PFN_DOWN((unsigned long)start);
	unsigned long end_pfn = PFN_DOWN((unsigned long)end);

	if (end_pfn > begin_pfn)
		set(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn);
}

static void set_section_ro_nx(void *base,
			unsigned long text_size,
			unsigned long ro_size,
			unsigned long total_size)
{
	/* begin and end PFNs of the current subsection */
	unsigned long begin_pfn;
	unsigned long end_pfn;

	/*
	 * Set RO for module text and RO-data:
	 * - Always protect first page.
	 * - Do not protect last partial page.
	 */
	if (ro_size > 0)
		set_page_attributes(base, base + ro_size, set_memory_ro);

	/*
	 * Set NX permissions for module data:
	 * - Do not protect first partial page.
	 * - Always protect last page.
	 */
	if (total_size > text_size) {
		begin_pfn = PFN_UP((unsigned long)base + text_size);
		end_pfn = PFN_UP((unsigned long)base + total_size);
		if (end_pfn > begin_pfn)
			set_memory_nx(begin_pfn << PAGE_SHIFT,
				      end_pfn - begin_pfn);
	}
}
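/*
 * The three sizes describe nested prefixes of one allocation laid out by
 * layout_sections():
 *
 *	base                                       base + total_size
 *	[  text    |  ro-data   |     rw data      ]
 *	           ^text_size   ^ro_size
 *
 * so [base, ro_size) becomes read-only and [text_size, total_size)
 * becomes non-executable, each rounded to whole pages as the comments
 * above describe.
 */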
static void unset_module_core_ro_nx(struct module *mod)
{
	set_page_attributes(mod->module_core + mod->core_text_size,
		mod->module_core + mod->core_size,
		set_memory_x);
	set_page_attributes(mod->module_core,
		mod->module_core + mod->core_ro_size,
		set_memory_rw);
}

static void unset_module_init_ro_nx(struct module *mod)
{
	set_page_attributes(mod->module_init + mod->init_text_size,
		mod->module_init + mod->init_size,
		set_memory_x);
	set_page_attributes(mod->module_init,
		mod->module_init + mod->init_ro_size,
		set_memory_rw);
}

/* Iterate through all modules and set each module's text as RW */
void set_all_modules_text_rw(void)
{
	struct module *mod;

	mutex_lock(&module_mutex);
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if ((mod->module_core) && (mod->core_text_size)) {
			set_page_attributes(mod->module_core,
						mod->module_core + mod->core_text_size,
						set_memory_rw);
		}
		if ((mod->module_init) && (mod->init_text_size)) {
			set_page_attributes(mod->module_init,
						mod->module_init + mod->init_text_size,
						set_memory_rw);
		}
	}
	mutex_unlock(&module_mutex);
}

/* Iterate through all modules and set each module's text as RO */
void set_all_modules_text_ro(void)
{
	struct module *mod;

	mutex_lock(&module_mutex);
	list_for_each_entry_rcu(mod, &modules, list) {
		if (mod->state == MODULE_STATE_UNFORMED)
			continue;
		if ((mod->module_core) && (mod->core_text_size)) {
			set_page_attributes(mod->module_core,
						mod->module_core + mod->core_text_size,
						set_memory_ro);
		}
		if ((mod->module_init) && (mod->init_text_size)) {
			set_page_attributes(mod->module_init,
						mod->module_init + mod->init_text_size,
						set_memory_ro);
		}
	}
	mutex_unlock(&module_mutex);
}
#else
static inline void set_section_ro_nx(void *base,
				     unsigned long text_size,
				     unsigned long ro_size,
				     unsigned long total_size) { }
static void unset_module_core_ro_nx(struct module *mod) { }
static void unset_module_init_ro_nx(struct module *mod) { }
#endif
void __weak module_free(struct module *mod, void *module_region)
{
	vfree(module_region);
}

void __weak module_arch_cleanup(struct module *mod)
{
}

/* Free a module, remove from lists, etc. */
static void free_module(struct module *mod)
{
	trace_module_free(mod);

	/* Delete from various lists */
	mutex_lock(&module_mutex);
	stop_machine(__unlink_module, mod, NULL);
	mutex_unlock(&module_mutex);
	mod_sysfs_teardown(mod);

	/* Remove dynamic debug info */
	ddebug_remove_module(mod->name);

	/* Arch-specific cleanup. */
	module_arch_cleanup(mod);

	/* Module unload stuff */
	module_unload_free(mod);

	/* Free any allocated parameters. */
	destroy_params(mod->kp, mod->num_kp);

	/* This may be NULL, but that's OK */
	unset_module_init_ro_nx(mod);
	module_free(mod, mod->module_init);
	kfree(mod->args);
	percpu_modfree(mod);

	/* Free lock-classes: */
	lockdep_free_key_range(mod->module_core, mod->core_size);

	/* Finally, free the core (containing the module structure) */
	unset_module_core_ro_nx(mod);
	module_free(mod, mod->module_core);

#ifdef CONFIG_MPU
	update_protections(current->mm);
#endif
}
void *__symbol_get(const char *symbol)
{
	struct module *owner;
	const struct kernel_symbol *sym;

	preempt_disable();
	sym = find_symbol(symbol, &owner, NULL, true, true);
	if (sym && strong_try_module_get(owner))
		sym = NULL;
	preempt_enable();

	return sym ? (void *)sym->value : NULL;
}
EXPORT_SYMBOL_GPL(__symbol_get);
/*
 * Ensure that an exported symbol [global namespace] does not already exist
 * in the kernel or in some other module's exported symbol table.
 *
 * You must hold the module_mutex.
 */
static int verify_export_symbols(struct module *mod)
{
	unsigned int i;
	struct module *owner;
	const struct kernel_symbol *s;
	struct {
		const struct kernel_symbol *sym;
		unsigned int num;
	} arr[] = {
		{ mod->syms, mod->num_syms },
		{ mod->gpl_syms, mod->num_gpl_syms },
		{ mod->gpl_future_syms, mod->num_gpl_future_syms },
#ifdef CONFIG_UNUSED_SYMBOLS
		{ mod->unused_syms, mod->num_unused_syms },
		{ mod->unused_gpl_syms, mod->num_unused_gpl_syms },
#endif
	};

	for (i = 0; i < ARRAY_SIZE(arr); i++) {
		for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
			if (find_symbol(s->name, &owner, NULL, true, false)) {
				printk(KERN_ERR
				       "%s: exports duplicate symbol %s"
				       " (owned by %s)\n",
				       mod->name, s->name, module_name(owner));
				return -ENOEXEC;
			}
		}
	}
	return 0;
}
/* Change all symbols so that st_value encodes the pointer directly. */
static int simplify_symbols(struct module *mod, const struct load_info *info)
{
	Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
	Elf_Sym *sym = (void *)symsec->sh_addr;
	unsigned long secbase;
	unsigned int i;
	int ret = 0;
	const struct kernel_symbol *ksym;

	for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
		const char *name = info->strtab + sym[i].st_name;

		switch (sym[i].st_shndx) {
		case SHN_COMMON:
			/* We compiled with -fno-common.  These are not
			   supposed to happen.  */
			pr_debug("Common symbol: %s\n", name);
			printk("%s: please compile with -fno-common\n",
			       mod->name);
			ret = -ENOEXEC;
			break;

		case SHN_ABS:
			/* Don't need to do anything */
			pr_debug("Absolute symbol: 0x%08lx\n",
				 (long)sym[i].st_value);
			break;

		case SHN_UNDEF:
			ksym = resolve_symbol_wait(mod, info, name);
			/* Ok if resolved.  */
			if (ksym && !IS_ERR(ksym)) {
				sym[i].st_value = ksym->value;
				break;
			}

			/* Ok if weak.  */
			if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
				break;

			printk(KERN_WARNING "%s: Unknown symbol %s (err %li)\n",
			       mod->name, name, PTR_ERR(ksym));
			ret = PTR_ERR(ksym) ?: -ENOENT;
			break;

		default:
			/* Divert to percpu allocation if a percpu var. */
			if (sym[i].st_shndx == info->index.pcpu)
				secbase = (unsigned long)mod_percpu(mod);
			else
				secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
			sym[i].st_value += secbase;
			break;
		}
	}

	return ret;
}
static int apply_relocations(struct module *mod, const struct load_info *info)
{
	unsigned int i;
	int err = 0;

	/* Now do relocations. */
	for (i = 1; i < info->hdr->e_shnum; i++) {
		unsigned int infosec = info->sechdrs[i].sh_info;

		/* Not a valid relocation section? */
		if (infosec >= info->hdr->e_shnum)
			continue;

		/* Don't bother with non-allocated sections */
		if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
			continue;

		if (info->sechdrs[i].sh_type == SHT_REL)
			err = apply_relocate(info->sechdrs, info->strtab,
					     info->index.sym, i, mod);
		else if (info->sechdrs[i].sh_type == SHT_RELA)
			err = apply_relocate_add(info->sechdrs, info->strtab,
						 info->index.sym, i, mod);
		if (err < 0)
			break;
	}
	return err;
}
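/*
 * apply_relocate() and apply_relocate_add() are supplied by each
 * architecture (see arch/<arch>/kernel/module.c): SHT_REL sections carry
 * implicit addends, SHT_RELA sections explicit ones, and most
 * architectures implement only whichever flavour their ABI uses.
 */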
/* Additional bytes needed by arch in front of individual sections */
unsigned int __weak arch_mod_section_prepend(struct module *mod,
					     unsigned int section)
{
	/* default implementation just returns zero */
	return 0;
}

/* Update size with this section: return offset. */
static long get_offset(struct module *mod, unsigned int *size,
		       Elf_Shdr *sechdr, unsigned int section)
{
	long ret;

	*size += arch_mod_section_prepend(mod, section);
	ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
	*size = ret + sechdr->sh_size;
	return ret;
}
/* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
   might -- code, read-only data, read-write data, small data.  Tally
   sizes, and place the offsets into sh_entsize fields: high bit means it
   belongs in init. */
static void layout_sections(struct module *mod, struct load_info *info)
{
	static unsigned long const masks[][2] = {
		/* NOTE: all executable code must be the first section
		 * in this array; otherwise modify the text_size
		 * finder in the two loops below */
		{ SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
		{ SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
		{ SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
		{ ARCH_SHF_SMALL | SHF_ALLOC, 0 }
	};
	unsigned int m, i;

	for (i = 0; i < info->hdr->e_shnum; i++)
		info->sechdrs[i].sh_entsize = ~0UL;

	pr_debug("Core section allocation order:\n");
	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
		for (i = 0; i < info->hdr->e_shnum; ++i) {
			Elf_Shdr *s = &info->sechdrs[i];
			const char *sname = info->secstrings + s->sh_name;

			if ((s->sh_flags & masks[m][0]) != masks[m][0]
			    || (s->sh_flags & masks[m][1])
			    || s->sh_entsize != ~0UL
			    || strstarts(sname, ".init"))
				continue;
			s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
			pr_debug("\t%s\n", sname);
		}
		switch (m) {
		case 0: /* executable */
			mod->core_size = debug_align(mod->core_size);
			mod->core_text_size = mod->core_size;
			break;
		case 1: /* RO: text and ro-data */
			mod->core_size = debug_align(mod->core_size);
			mod->core_ro_size = mod->core_size;
			break;
		case 3: /* whole core */
			mod->core_size = debug_align(mod->core_size);
			break;
		}
	}

	pr_debug("Init section allocation order:\n");
	for (m = 0; m < ARRAY_SIZE(masks); ++m) {
		for (i = 0; i < info->hdr->e_shnum; ++i) {
			Elf_Shdr *s = &info->sechdrs[i];
			const char *sname = info->secstrings + s->sh_name;

			if ((s->sh_flags & masks[m][0]) != masks[m][0]
			    || (s->sh_flags & masks[m][1])
			    || s->sh_entsize != ~0UL
			    || !strstarts(sname, ".init"))
				continue;
			s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
					 | INIT_OFFSET_MASK);
			pr_debug("\t%s\n", sname);
		}
		switch (m) {
		case 0: /* executable */
			mod->init_size = debug_align(mod->init_size);
			mod->init_text_size = mod->init_size;
			break;
		case 1: /* RO: text and ro-data */
			mod->init_size = debug_align(mod->init_size);
			mod->init_ro_size = mod->init_size;
			break;
		case 3: /* whole init */
			mod->init_size = debug_align(mod->init_size);
			break;
		}
	}
}
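/*
 * In the masks[][] table above, masks[m][0] are the flags a section must
 * have and masks[m][1] the flags it must not have, so the four passes
 * pick up, in order: executable code (e.g. .text), read-only data
 * (e.g. .rodata), writable data (e.g. .data), then arch "small" sections.
 * A section is placed only once, because being placed in pass m sets its
 * sh_entsize to something other than ~0UL.
 */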
2141 static void set_license(struct module *mod, const char *license)
2143 if (!license)
2144 license = "unspecified";
2146 if (!license_is_gpl_compatible(license)) {
2147 if (!test_taint(TAINT_PROPRIETARY_MODULE))
2148 printk(KERN_WARNING "%s: module license '%s' taints "
2149 "kernel.\n", mod->name, license);
2150 add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
2154 /* Parse tag=value strings from .modinfo section */
2155 static char *next_string(char *string, unsigned long *secsize)
2157 /* Skip non-zero chars */
2158 while (string[0]) {
2159 string++;
2160 if ((*secsize)-- <= 1)
2161 return NULL;
2164 /* Skip any zero padding. */
2165 while (!string[0]) {
2166 string++;
2167 if ((*secsize)-- <= 1)
2168 return NULL;
2170 return string;
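/*
 * The .modinfo section walked here is a sequence of NUL-terminated
 * "tag=value" strings; an illustrative layout (values made up):
 *
 *	license=GPL\0author=A. Hacker\0vermagic=3.8.0 SMP mod_unload \0
 *
 * next_string() steps over the current string and any zero padding.
 */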
2173 static char *get_modinfo(struct load_info *info, const char *tag)
2175 char *p;
2176 unsigned int taglen = strlen(tag);
2177 Elf_Shdr *infosec = &info->sechdrs[info->index.info];
2178 unsigned long size = infosec->sh_size;
2180 for (p = (char *)infosec->sh_addr; p; p = next_string(p, &size)) {
2181 if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
2182 return p + taglen + 1;
2184 return NULL;
2187 static void setup_modinfo(struct module *mod, struct load_info *info)
2189 struct module_attribute *attr;
2190 int i;
2192 for (i = 0; (attr = modinfo_attrs[i]); i++) {
2193 if (attr->setup)
2194 attr->setup(mod, get_modinfo(info, attr->attr.name));
2198 static void free_modinfo(struct module *mod)
2200 struct module_attribute *attr;
2201 int i;
2203 for (i = 0; (attr = modinfo_attrs[i]); i++) {
2204 if (attr->free)
2205 attr->free(mod);
2209 #ifdef CONFIG_KALLSYMS
2211 /* lookup symbol in given range of kernel_symbols */
2212 static const struct kernel_symbol *lookup_symbol(const char *name,
2213 const struct kernel_symbol *start,
2214 const struct kernel_symbol *stop)
2216 return bsearch(name, start, stop - start,
2217 sizeof(struct kernel_symbol), cmp_name);
2220 static int is_exported(const char *name, unsigned long value,
2221 const struct module *mod)
2223 const struct kernel_symbol *ks;
2224 if (!mod)
2225 ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab);
2226 else
2227 ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms);
2228 return ks != NULL && ks->value == value;
2231 /* As per nm */
2232 static char elf_type(const Elf_Sym *sym, const struct load_info *info)
2234 const Elf_Shdr *sechdrs = info->sechdrs;
2236 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
2237 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
2238 return 'v';
2239 else
2240 return 'w';
2242 if (sym->st_shndx == SHN_UNDEF)
2243 return 'U';
2244 if (sym->st_shndx == SHN_ABS)
2245 return 'a';
2246 if (sym->st_shndx >= SHN_LORESERVE)
2247 return '?';
2248 if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR)
2249 return 't';
2250 if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC
2251 && sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) {
2252 if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE))
2253 return 'r';
2254 else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2255 return 'g';
2256 else
2257 return 'd';
2259 if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
2260 if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2261 return 's';
2262 else
2263 return 'b';
2265 if (strstarts(info->secstrings + sechdrs[sym->st_shndx].sh_name,
2266 ".debug")) {
2267 return 'n';
2269 return '?';
2272 static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
2273 unsigned int shnum)
2275 const Elf_Shdr *sec;
2277 if (src->st_shndx == SHN_UNDEF
2278 || src->st_shndx >= shnum
2279 || !src->st_name)
2280 return false;
2282 sec = sechdrs + src->st_shndx;
2283 if (!(sec->sh_flags & SHF_ALLOC)
2284 #ifndef CONFIG_KALLSYMS_ALL
2285 || !(sec->sh_flags & SHF_EXECINSTR)
2286 #endif
2287 || (sec->sh_entsize & INIT_OFFSET_MASK))
2288 return false;
2290 return true;
2293 /*
2294 * We only allocate and copy the strings needed by the parts of symtab
2295 * we keep. This is simple, but has the effect of making multiple
2296 * copies of duplicates. We could be more sophisticated, see
2297 * linux-kernel thread starting with
2298 * <73defb5e4bca04a6431392cc341112b1@localhost>.
2299 */
2300 static void layout_symtab(struct module *mod, struct load_info *info)
2302 Elf_Shdr *symsect = info->sechdrs + info->index.sym;
2303 Elf_Shdr *strsect = info->sechdrs + info->index.str;
2304 const Elf_Sym *src;
2305 unsigned int i, nsrc, ndst, strtab_size = 0;
2307 /* Put symbol section at end of init part of module. */
2308 symsect->sh_flags |= SHF_ALLOC;
2309 symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
2310 info->index.sym) | INIT_OFFSET_MASK;
2311 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
2313 src = (void *)info->hdr + symsect->sh_offset;
2314 nsrc = symsect->sh_size / sizeof(*src);
2316 /* Compute total space required for the core symbols' strtab. */
2317 for (ndst = i = 0; i < nsrc; i++) {
2318 if (i == 0 ||
2319 is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
2320 strtab_size += strlen(&info->strtab[src[i].st_name])+1;
2321 ndst++;
2325 /* Append room for core symbols at end of core part. */
2326 info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
2327 info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
2328 mod->core_size += strtab_size;
2330 /* Put string table section at end of init part of module. */
2331 strsect->sh_flags |= SHF_ALLOC;
2332 strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
2333 info->index.str) | INIT_OFFSET_MASK;
2334 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
2337 static void add_kallsyms(struct module *mod, const struct load_info *info)
2339 unsigned int i, ndst;
2340 const Elf_Sym *src;
2341 Elf_Sym *dst;
2342 char *s;
2343 Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
2345 mod->symtab = (void *)symsec->sh_addr;
2346 mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
2347 /* Make sure we get permanent strtab: don't use info->strtab. */
2348 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
2350 /* Set types up while we still have access to sections. */
2351 for (i = 0; i < mod->num_symtab; i++)
2352 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
2354 mod->core_symtab = dst = mod->module_core + info->symoffs;
2355 mod->core_strtab = s = mod->module_core + info->stroffs;
2356 src = mod->symtab;
2357 for (ndst = i = 0; i < mod->num_symtab; i++) {
2358 if (i == 0 ||
2359 is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
2360 dst[ndst] = src[i];
2361 dst[ndst++].st_name = s - mod->core_strtab;
2362 s += strlcpy(s, &mod->strtab[src[i].st_name],
2363 KSYM_NAME_LEN) + 1;
2366 mod->core_num_syms = ndst;
2368 #else
2369 static inline void layout_symtab(struct module *mod, struct load_info *info)
2373 static void add_kallsyms(struct module *mod, const struct load_info *info)
2376 #endif /* CONFIG_KALLSYMS */
2378 static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
2380 if (!debug)
2381 return;
2382 #ifdef CONFIG_DYNAMIC_DEBUG
2383 if (ddebug_add_module(debug, num, debug->modname))
2384 printk(KERN_ERR "dynamic debug error adding module: %s\n",
2385 debug->modname);
2386 #endif
2389 static void dynamic_debug_remove(struct _ddebug *debug)
2391 if (debug)
2392 ddebug_remove_module(debug->modname);
2395 void * __weak module_alloc(unsigned long size)
2397 return vmalloc_exec(size);
2400 static void *module_alloc_update_bounds(unsigned long size)
2402 void *ret = module_alloc(size);
2404 if (ret) {
2405 mutex_lock(&module_mutex);
2406 /* Update module bounds. */
2407 if ((unsigned long)ret < module_addr_min)
2408 module_addr_min = (unsigned long)ret;
2409 if ((unsigned long)ret + size > module_addr_max)
2410 module_addr_max = (unsigned long)ret + size;
2411 mutex_unlock(&module_mutex);
2413 return ret;
2416 #ifdef CONFIG_DEBUG_KMEMLEAK
2417 static void kmemleak_load_module(const struct module *mod,
2418 const struct load_info *info)
2420 unsigned int i;
2422 /* only scan the sections containing data */
2423 kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
2425 for (i = 1; i < info->hdr->e_shnum; i++) {
2426 const char *name = info->secstrings + info->sechdrs[i].sh_name;
2427 if (!(info->sechdrs[i].sh_flags & SHF_ALLOC))
2428 continue;
2429 if (!strstarts(name, ".data") && !strstarts(name, ".bss"))
2430 continue;
2432 kmemleak_scan_area((void *)info->sechdrs[i].sh_addr,
2433 info->sechdrs[i].sh_size, GFP_KERNEL);
2436 #else
2437 static inline void kmemleak_load_module(const struct module *mod,
2438 const struct load_info *info)
2441 #endif
2443 #ifdef CONFIG_MODULE_SIG
2444 static int module_sig_check(struct load_info *info)
2446 int err = -ENOKEY;
2447 const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
2448 const void *mod = info->hdr;
2450 if (info->len > markerlen &&
2451 memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {
2452 /* We truncate the module to discard the signature */
2453 info->len -= markerlen;
2454 err = mod_verify_sig(mod, &info->len);
2457 if (!err) {
2458 info->sig_ok = true;
2459 return 0;
2462 /* Not having a signature is only an error if we're strict. */
2463 if (err < 0 && fips_enabled)
2464 panic("Module verification failed with error %d in FIPS mode\n",
2465 err);
2466 if (err == -ENOKEY && !sig_enforce)
2467 err = 0;
2469 return err;
2471 #else /* !CONFIG_MODULE_SIG */
2472 static int module_sig_check(struct load_info *info)
2474 return 0;
2476 #endif /* !CONFIG_MODULE_SIG */
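/*
 * Sketch of a signed module image as consumed above:
 *
 *	[ ELF module | signature | struct module_signature | MODULE_SIG_STRING ]
 *
 * module_sig_check() strips the trailing magic string, then
 * mod_verify_sig() strips and verifies the rest, shrinking info->len
 * back down to the bare ELF module.
 */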
2478 /* Sanity checks against invalid binaries, wrong arch, weird elf version. */
2479 static int elf_header_check(struct load_info *info)
2481 if (info->len < sizeof(*(info->hdr)))
2482 return -ENOEXEC;
2484 if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0
2485 || info->hdr->e_type != ET_REL
2486 || !elf_check_arch(info->hdr)
2487 || info->hdr->e_shentsize != sizeof(Elf_Shdr))
2488 return -ENOEXEC;
2490 if (info->hdr->e_shoff >= info->len
2491 || (info->hdr->e_shnum * sizeof(Elf_Shdr) >
2492 info->len - info->hdr->e_shoff))
2493 return -ENOEXEC;
2495 return 0;
2498 /* Sets info->hdr and info->len. */
2499 static int copy_module_from_user(const void __user *umod, unsigned long len,
2500 struct load_info *info)
2502 int err;
2504 info->len = len;
2505 if (info->len < sizeof(*(info->hdr)))
2506 return -ENOEXEC;
2508 err = security_kernel_module_from_file(NULL);
2509 if (err)
2510 return err;
2512 /* Suck in entire file: we'll want most of it. */
2513 info->hdr = vmalloc(info->len);
2514 if (!info->hdr)
2515 return -ENOMEM;
2517 if (copy_from_user(info->hdr, umod, info->len) != 0) {
2518 vfree(info->hdr);
2519 return -EFAULT;
2522 return 0;
2525 /* Sets info->hdr and info->len. */
2526 static int copy_module_from_fd(int fd, struct load_info *info)
2528 struct file *file;
2529 int err;
2530 struct kstat stat;
2531 loff_t pos;
2532 ssize_t bytes = 0;
2534 file = fget(fd);
2535 if (!file)
2536 return -ENOEXEC;
2538 err = security_kernel_module_from_file(file);
2539 if (err)
2540 goto out;
2542 err = vfs_getattr(file->f_vfsmnt, file->f_dentry, &stat);
2543 if (err)
2544 goto out;
2546 if (stat.size > INT_MAX) {
2547 err = -EFBIG;
2548 goto out;
2551 /* Don't hand 0 to vmalloc, it whines. */
2552 if (stat.size == 0) {
2553 err = -EINVAL;
2554 goto out;
2557 info->hdr = vmalloc(stat.size);
2558 if (!info->hdr) {
2559 err = -ENOMEM;
2560 goto out;
2563 pos = 0;
2564 while (pos < stat.size) {
2565 bytes = kernel_read(file, pos, (char *)(info->hdr) + pos,
2566 stat.size - pos);
2567 if (bytes < 0) {
2568 vfree(info->hdr);
2569 err = bytes;
2570 goto out;
2572 if (bytes == 0)
2573 break;
2574 pos += bytes;
2576 info->len = pos;
2578 out:
2579 fput(file);
2580 return err;
2583 static void free_copy(struct load_info *info)
2585 vfree(info->hdr);
2588 static int rewrite_section_headers(struct load_info *info, int flags)
2590 unsigned int i;
2592 /* This should always be true, but let's be sure. */
2593 info->sechdrs[0].sh_addr = 0;
2595 for (i = 1; i < info->hdr->e_shnum; i++) {
2596 Elf_Shdr *shdr = &info->sechdrs[i];
2597 if (shdr->sh_type != SHT_NOBITS
2598 && info->len < shdr->sh_offset + shdr->sh_size) {
2599 printk(KERN_ERR "Module len %lu truncated\n",
2600 info->len);
2601 return -ENOEXEC;
2604 /* Mark each section's sh_addr with its address in the
2605 temporary image. */
2606 shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;
2608 #ifndef CONFIG_MODULE_UNLOAD
2609 /* Don't load .exit sections */
2610 if (strstarts(info->secstrings+shdr->sh_name, ".exit"))
2611 shdr->sh_flags &= ~(unsigned long)SHF_ALLOC;
2612 #endif
2615 /* Track but don't keep modinfo and version sections. */
2616 if (flags & MODULE_INIT_IGNORE_MODVERSIONS)
2617 info->index.vers = 0; /* Pretend no __versions section! */
2618 else
2619 info->index.vers = find_sec(info, "__versions");
2620 info->index.info = find_sec(info, ".modinfo");
2621 info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;
2622 info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
2623 return 0;
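/*
 * From this point every sh_addr points into the vmalloc'd temporary
 * copy of the ELF image; move_module() later rewrites the SHF_ALLOC
 * ones to their final module_core/module_init addresses.
 */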
2626 /*
2627 * Set up our basic convenience variables (pointers to section headers,
2628 * search for module section index etc), and do some basic section
2629 * verification.
2630 *
2631 * Return the temporary module pointer (we'll replace it with the final
2632 * one when we move the module sections around).
2633 */
2634 static struct module *setup_load_info(struct load_info *info, int flags)
2636 unsigned int i;
2637 int err;
2638 struct module *mod;
2640 /* Set up the convenience variables */
2641 info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
2642 info->secstrings = (void *)info->hdr
2643 + info->sechdrs[info->hdr->e_shstrndx].sh_offset;
2645 err = rewrite_section_headers(info, flags);
2646 if (err)
2647 return ERR_PTR(err);
2649 /* Find internal symbols and strings. */
2650 for (i = 1; i < info->hdr->e_shnum; i++) {
2651 if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
2652 info->index.sym = i;
2653 info->index.str = info->sechdrs[i].sh_link;
2654 info->strtab = (char *)info->hdr
2655 + info->sechdrs[info->index.str].sh_offset;
2656 break;
2660 info->index.mod = find_sec(info, ".gnu.linkonce.this_module");
2661 if (!info->index.mod) {
2662 printk(KERN_WARNING "No module found in object\n");
2663 return ERR_PTR(-ENOEXEC);
2665 /* This is temporary: point mod into copy of data. */
2666 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
2668 if (info->index.sym == 0) {
2669 printk(KERN_WARNING "%s: module has no symbols (stripped?)\n",
2670 mod->name);
2671 return ERR_PTR(-ENOEXEC);
2674 info->index.pcpu = find_pcpusec(info);
2676 /* Check module struct version now, before we try to use module. */
2677 if (!check_modstruct_version(info->sechdrs, info->index.vers, mod))
2678 return ERR_PTR(-ENOEXEC);
2680 return mod;
2683 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
2685 const char *modmagic = get_modinfo(info, "vermagic");
2686 int err;
2688 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
2689 modmagic = NULL;
2691 /* This is allowed: modprobe --force will invalidate it. */
2692 if (!modmagic) {
2693 err = try_to_force_load(mod, "bad vermagic");
2694 if (err)
2695 return err;
2696 } else if (!same_magic(modmagic, vermagic, info->index.vers)) {
2697 printk(KERN_ERR "%s: version magic '%s' should be '%s'\n",
2698 mod->name, modmagic, vermagic);
2699 return -ENOEXEC;
2702 if (!get_modinfo(info, "intree"))
2703 add_taint_module(mod, TAINT_OOT_MODULE);
2705 if (get_modinfo(info, "staging")) {
2706 add_taint_module(mod, TAINT_CRAP);
2707 printk(KERN_WARNING "%s: module is from the staging directory,"
2708 " the quality is unknown, you have been warned.\n",
2709 mod->name);
2712 /* Set up license info based on the info section */
2713 set_license(mod, get_modinfo(info, "license"));
2715 return 0;
2718 static void find_module_sections(struct module *mod, struct load_info *info)
2720 mod->kp = section_objs(info, "__param",
2721 sizeof(*mod->kp), &mod->num_kp);
2722 mod->syms = section_objs(info, "__ksymtab",
2723 sizeof(*mod->syms), &mod->num_syms);
2724 mod->crcs = section_addr(info, "__kcrctab");
2725 mod->gpl_syms = section_objs(info, "__ksymtab_gpl",
2726 sizeof(*mod->gpl_syms),
2727 &mod->num_gpl_syms);
2728 mod->gpl_crcs = section_addr(info, "__kcrctab_gpl");
2729 mod->gpl_future_syms = section_objs(info,
2730 "__ksymtab_gpl_future",
2731 sizeof(*mod->gpl_future_syms),
2732 &mod->num_gpl_future_syms);
2733 mod->gpl_future_crcs = section_addr(info, "__kcrctab_gpl_future");
2735 #ifdef CONFIG_UNUSED_SYMBOLS
2736 mod->unused_syms = section_objs(info, "__ksymtab_unused",
2737 sizeof(*mod->unused_syms),
2738 &mod->num_unused_syms);
2739 mod->unused_crcs = section_addr(info, "__kcrctab_unused");
2740 mod->unused_gpl_syms = section_objs(info, "__ksymtab_unused_gpl",
2741 sizeof(*mod->unused_gpl_syms),
2742 &mod->num_unused_gpl_syms);
2743 mod->unused_gpl_crcs = section_addr(info, "__kcrctab_unused_gpl");
2744 #endif
2745 #ifdef CONFIG_CONSTRUCTORS
2746 mod->ctors = section_objs(info, ".ctors",
2747 sizeof(*mod->ctors), &mod->num_ctors);
2748 #endif
2750 #ifdef CONFIG_TRACEPOINTS
2751 mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
2752 sizeof(*mod->tracepoints_ptrs),
2753 &mod->num_tracepoints);
2754 #endif
2755 #ifdef HAVE_JUMP_LABEL
2756 mod->jump_entries = section_objs(info, "__jump_table",
2757 sizeof(*mod->jump_entries),
2758 &mod->num_jump_entries);
2759 #endif
2760 #ifdef CONFIG_EVENT_TRACING
2761 mod->trace_events = section_objs(info, "_ftrace_events",
2762 sizeof(*mod->trace_events),
2763 &mod->num_trace_events);
2764 /*
2765 * This section contains pointers to allocated objects in the trace
2766 * code and not scanning it leads to false positives.
2767 */
2768 kmemleak_scan_area(mod->trace_events, sizeof(*mod->trace_events) *
2769 mod->num_trace_events, GFP_KERNEL);
2770 #endif
2771 #ifdef CONFIG_TRACING
2772 mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
2773 sizeof(*mod->trace_bprintk_fmt_start),
2774 &mod->num_trace_bprintk_fmt);
2775 /*
2776 * This section contains pointers to allocated objects in the trace
2777 * code and not scanning it leads to false positives.
2778 */
2779 kmemleak_scan_area(mod->trace_bprintk_fmt_start,
2780 sizeof(*mod->trace_bprintk_fmt_start) *
2781 mod->num_trace_bprintk_fmt, GFP_KERNEL);
2782 #endif
2783 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
2784 /* sechdrs[0].sh_size is always zero */
2785 mod->ftrace_callsites = section_objs(info, "__mcount_loc",
2786 sizeof(*mod->ftrace_callsites),
2787 &mod->num_ftrace_callsites);
2788 #endif
2790 mod->extable = section_objs(info, "__ex_table",
2791 sizeof(*mod->extable), &mod->num_exentries);
2793 if (section_addr(info, "__obsparm"))
2794 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
2795 mod->name);
2797 info->debug = section_objs(info, "__verbose",
2798 sizeof(*info->debug), &info->num_debug);
2801 static int move_module(struct module *mod, struct load_info *info)
2803 int i;
2804 void *ptr;
2806 /* Do the allocs. */
2807 ptr = module_alloc_update_bounds(mod->core_size);
2808 /*
2809 * The pointer to this block is stored in the module structure
2810 * which is inside the block. Just mark it as not being a
2811 * leak.
2812 */
2813 kmemleak_not_leak(ptr);
2814 if (!ptr)
2815 return -ENOMEM;
2817 memset(ptr, 0, mod->core_size);
2818 mod->module_core = ptr;
2820 if (mod->init_size) {
2821 ptr = module_alloc_update_bounds(mod->init_size);
2822 /*
2823 * The pointer to this block is stored in the module structure
2824 * which is inside the block. This block doesn't need to be
2825 * scanned as it contains data and code that will be freed
2826 * after the module is initialized.
2827 */
2828 kmemleak_ignore(ptr);
2829 if (!ptr) {
2830 module_free(mod, mod->module_core);
2831 return -ENOMEM;
2833 memset(ptr, 0, mod->init_size);
2834 mod->module_init = ptr;
2835 } else
2836 mod->module_init = NULL;
2838 /* Transfer each section which specifies SHF_ALLOC */
2839 pr_debug("final section addresses:\n");
2840 for (i = 0; i < info->hdr->e_shnum; i++) {
2841 void *dest;
2842 Elf_Shdr *shdr = &info->sechdrs[i];
2844 if (!(shdr->sh_flags & SHF_ALLOC))
2845 continue;
2847 if (shdr->sh_entsize & INIT_OFFSET_MASK)
2848 dest = mod->module_init
2849 + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
2850 else
2851 dest = mod->module_core + shdr->sh_entsize;
2853 if (shdr->sh_type != SHT_NOBITS)
2854 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
2855 /* Update sh_addr to point to copy in image. */
2856 shdr->sh_addr = (unsigned long)dest;
2857 pr_debug("\t0x%lx %s\n",
2858 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
2861 return 0;
2864 static int check_module_license_and_versions(struct module *mod)
2866 /*
2867 * ndiswrapper is under GPL by itself, but loads proprietary modules.
2868 * Don't use add_taint_module(), as it would prevent ndiswrapper from
2869 * using GPL-only symbols it needs.
2870 */
2871 if (strcmp(mod->name, "ndiswrapper") == 0)
2872 add_taint(TAINT_PROPRIETARY_MODULE);
2874 /* driverloader was caught wrongly pretending to be under GPL */
2875 if (strcmp(mod->name, "driverloader") == 0)
2876 add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
2878 /* lve claims to be GPL but upstream won't provide source */
2879 if (strcmp(mod->name, "lve") == 0)
2880 add_taint_module(mod, TAINT_PROPRIETARY_MODULE);
2882 #ifdef CONFIG_MODVERSIONS
2883 if ((mod->num_syms && !mod->crcs)
2884 || (mod->num_gpl_syms && !mod->gpl_crcs)
2885 || (mod->num_gpl_future_syms && !mod->gpl_future_crcs)
2886 #ifdef CONFIG_UNUSED_SYMBOLS
2887 || (mod->num_unused_syms && !mod->unused_crcs)
2888 || (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs)
2889 #endif
2891 return try_to_force_load(mod,
2892 "no versions for exported symbols");
2894 #endif
2895 return 0;
2898 static void flush_module_icache(const struct module *mod)
2900 mm_segment_t old_fs;
2902 /* flush the icache in the correct context */
2903 old_fs = get_fs();
2904 set_fs(KERNEL_DS);
2906 /*
2907 * Flush the instruction cache, since we've played with text.
2908 * Do it before processing of module parameters, so the module
2909 * can provide parameter accessor functions of its own.
2910 */
2911 if (mod->module_init)
2912 flush_icache_range((unsigned long)mod->module_init,
2913 (unsigned long)mod->module_init
2914 + mod->init_size);
2915 flush_icache_range((unsigned long)mod->module_core,
2916 (unsigned long)mod->module_core + mod->core_size);
2918 set_fs(old_fs);
2921 int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
2922 Elf_Shdr *sechdrs,
2923 char *secstrings,
2924 struct module *mod)
2926 return 0;
2929 static struct module *layout_and_allocate(struct load_info *info, int flags)
2931 /* Module within temporary copy. */
2932 struct module *mod;
2933 Elf_Shdr *pcpusec;
2934 int err;
2936 mod = setup_load_info(info, flags);
2937 if (IS_ERR(mod))
2938 return mod;
2940 err = check_modinfo(mod, info, flags);
2941 if (err)
2942 return ERR_PTR(err);
2944 /* Allow arches to frob section contents and sizes. */
2945 err = module_frob_arch_sections(info->hdr, info->sechdrs,
2946 info->secstrings, mod);
2947 if (err < 0)
2948 goto out;
2950 pcpusec = &info->sechdrs[info->index.pcpu];
2951 if (pcpusec->sh_size) {
2952 /* We have a special allocation for this section. */
2953 err = percpu_modalloc(mod,
2954 pcpusec->sh_size, pcpusec->sh_addralign);
2955 if (err)
2956 goto out;
2957 pcpusec->sh_flags &= ~(unsigned long)SHF_ALLOC;
2960 /* Determine total sizes, and put offsets in sh_entsize. For now
2961 this is done generically; there don't appear to be any
2962 special cases for the architectures. */
2963 layout_sections(mod, info);
2964 layout_symtab(mod, info);
2966 /* Allocate and move to the final place */
2967 err = move_module(mod, info);
2968 if (err)
2969 goto free_percpu;
2971 /* Module has been copied to its final place now: return it. */
2972 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
2973 kmemleak_load_module(mod, info);
2974 return mod;
2976 free_percpu:
2977 percpu_modfree(mod);
2978 out:
2979 return ERR_PTR(err);
2982 /* mod is no longer valid after this! */
2983 static void module_deallocate(struct module *mod, struct load_info *info)
2985 percpu_modfree(mod);
2986 module_free(mod, mod->module_init);
2987 module_free(mod, mod->module_core);
2990 int __weak module_finalize(const Elf_Ehdr *hdr,
2991 const Elf_Shdr *sechdrs,
2992 struct module *me)
2994 return 0;
2997 static int post_relocation(struct module *mod, const struct load_info *info)
2999 /* Sort exception table now that relocations are done. */
3000 sort_extable(mod->extable, mod->extable + mod->num_exentries);
3002 /* Copy relocated percpu area over. */
3003 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
3004 info->sechdrs[info->index.pcpu].sh_size);
3006 /* Setup kallsyms-specific fields. */
3007 add_kallsyms(mod, info);
3009 /* Arch-specific module finalizing. */
3010 return module_finalize(info->hdr, info->sechdrs, mod);
3013 /* Is this module of this name done loading? No locks held. */
3014 static bool finished_loading(const char *name)
3016 struct module *mod;
3017 bool ret;
3019 mutex_lock(&module_mutex);
3020 mod = find_module_all(name, true);
3021 ret = !mod || mod->state == MODULE_STATE_LIVE
3022 || mod->state == MODULE_STATE_GOING;
3023 mutex_unlock(&module_mutex);
3025 return ret;
3028 /* Call module constructors. */
3029 static void do_mod_ctors(struct module *mod)
3031 #ifdef CONFIG_CONSTRUCTORS
3032 unsigned long i;
3034 for (i = 0; i < mod->num_ctors; i++)
3035 mod->ctors[i]();
3036 #endif
3039 /* This is where the real work happens */
3040 static int do_init_module(struct module *mod)
3042 int ret = 0;
3044 /*
3045 * We want to find out whether @mod uses async during init. Clear
3046 * PF_USED_ASYNC. async_schedule*() will set it.
3047 */
3048 current->flags &= ~PF_USED_ASYNC;
3050 blocking_notifier_call_chain(&module_notify_list,
3051 MODULE_STATE_COMING, mod);
3053 /* Set RO and NX regions for core */
3054 set_section_ro_nx(mod->module_core,
3055 mod->core_text_size,
3056 mod->core_ro_size,
3057 mod->core_size);
3059 /* Set RO and NX regions for init */
3060 set_section_ro_nx(mod->module_init,
3061 mod->init_text_size,
3062 mod->init_ro_size,
3063 mod->init_size);
3065 do_mod_ctors(mod);
3066 /* Start the module */
3067 if (mod->init != NULL)
3068 ret = do_one_initcall(mod->init);
3069 if (ret < 0) {
3070 /* Init routine failed: abort. Try to protect us from
3071 buggy refcounters. */
3072 mod->state = MODULE_STATE_GOING;
3073 synchronize_sched();
3074 module_put(mod);
3075 blocking_notifier_call_chain(&module_notify_list,
3076 MODULE_STATE_GOING, mod);
3077 free_module(mod);
3078 wake_up_all(&module_wq);
3079 return ret;
3081 if (ret > 0) {
3082 printk(KERN_WARNING
3083 "%s: '%s'->init suspiciously returned %d, it should follow 0/-E convention\n"
3084 "%s: loading module anyway...\n",
3085 __func__, mod->name, ret,
3086 __func__);
3087 dump_stack();
3090 /* Now it's a first class citizen! */
3091 mod->state = MODULE_STATE_LIVE;
3092 blocking_notifier_call_chain(&module_notify_list,
3093 MODULE_STATE_LIVE, mod);
3095 /*
3096 * We need to finish all async code before the module init sequence
3097 * is done. This has the potential to deadlock. For example, a newly
3098 * detected block device can trigger request_module() of the
3099 * default iosched from an async probing task. Once the userland helper
3100 * reaches here, async_synchronize_full() will wait on the async
3101 * task waiting on request_module() and deadlock.
3102 *
3103 * This deadlock is avoided by performing async_synchronize_full()
3104 * iff module init queued any async jobs. This isn't a full
3105 * solution as it will deadlock the same way if module loading from
3106 * async jobs nests more than once; however, due to the various
3107 * constraints, this hack seems to be the best option for now.
3108 * Please refer to the following thread for details.
3109 *
3110 * http://thread.gmane.org/gmane.linux.kernel/1420814
3111 */
3112 if (current->flags & PF_USED_ASYNC)
3113 async_synchronize_full();
3115 mutex_lock(&module_mutex);
3116 /* Drop initial reference. */
3117 module_put(mod);
3118 trim_init_extable(mod);
3119 #ifdef CONFIG_KALLSYMS
3120 mod->num_symtab = mod->core_num_syms;
3121 mod->symtab = mod->core_symtab;
3122 mod->strtab = mod->core_strtab;
3123 #endif
3124 unset_module_init_ro_nx(mod);
3125 module_free(mod, mod->module_init);
3126 mod->module_init = NULL;
3127 mod->init_size = 0;
3128 mod->init_ro_size = 0;
3129 mod->init_text_size = 0;
3130 mutex_unlock(&module_mutex);
3131 wake_up_all(&module_wq);
3133 return 0;
3136 static int may_init_module(void)
3138 if (!capable(CAP_SYS_MODULE) || modules_disabled)
3139 return -EPERM;
3141 return 0;
3144 /* Allocate and load the module: note that size of section 0 is always
3145 zero, and we rely on this for optional sections. */
3146 static int load_module(struct load_info *info, const char __user *uargs,
3147 int flags)
3149 struct module *mod, *old;
3150 long err;
3152 err = module_sig_check(info);
3153 if (err)
3154 goto free_copy;
3156 err = elf_header_check(info);
3157 if (err)
3158 goto free_copy;
3160 /* Figure out module layout, and allocate all the memory. */
3161 mod = layout_and_allocate(info, flags);
3162 if (IS_ERR(mod)) {
3163 err = PTR_ERR(mod);
3164 goto free_copy;
3167 /*
3168 * We try to place it in the list now to make sure it's unique
3169 * before we dedicate too many resources; in particular, this guards
3170 * against temporary percpu memory exhaustion.
3171 */
3172 mod->state = MODULE_STATE_UNFORMED;
3173 again:
3174 mutex_lock(&module_mutex);
3175 if ((old = find_module_all(mod->name, true)) != NULL) {
3176 if (old->state == MODULE_STATE_COMING
3177 || old->state == MODULE_STATE_UNFORMED) {
3178 /* Wait in case it fails to load. */
3179 mutex_unlock(&module_mutex);
3180 err = wait_event_interruptible(module_wq,
3181 finished_loading(mod->name));
3182 if (err)
3183 goto free_module;
3184 goto again;
3186 err = -EEXIST;
3187 mutex_unlock(&module_mutex);
3188 goto free_module;
3190 list_add_rcu(&mod->list, &modules);
3191 mutex_unlock(&module_mutex);
3193 #ifdef CONFIG_MODULE_SIG
3194 mod->sig_ok = info->sig_ok;
3195 if (!mod->sig_ok)
3196 add_taint_module(mod, TAINT_FORCED_MODULE);
3197 #endif
3199 /* Now module is in final location, initialize linked lists, etc. */
3200 err = module_unload_init(mod);
3201 if (err)
3202 goto unlink_mod;
3204 /* Now we've got everything in the final locations, we can
3205 * find optional sections. */
3206 find_module_sections(mod, info);
3208 err = check_module_license_and_versions(mod);
3209 if (err)
3210 goto free_unload;
3212 /* Set up MODINFO_ATTR fields */
3213 setup_modinfo(mod, info);
3215 /* Fix up syms, so that st_value is a pointer to location. */
3216 err = simplify_symbols(mod, info);
3217 if (err < 0)
3218 goto free_modinfo;
3220 err = apply_relocations(mod, info);
3221 if (err < 0)
3222 goto free_modinfo;
3224 err = post_relocation(mod, info);
3225 if (err < 0)
3226 goto free_modinfo;
3228 flush_module_icache(mod);
3230 /* Now copy in args */
3231 mod->args = strndup_user(uargs, ~0UL >> 1);
3232 if (IS_ERR(mod->args)) {
3233 err = PTR_ERR(mod->args);
3234 goto free_arch_cleanup;
3237 dynamic_debug_setup(info->debug, info->num_debug);
3239 mutex_lock(&module_mutex);
3240 /* Find duplicate symbols (must be called under lock). */
3241 err = verify_export_symbols(mod);
3242 if (err < 0)
3243 goto ddebug_cleanup;
3245 /* This relies on module_mutex for list integrity. */
3246 module_bug_finalize(info->hdr, info->sechdrs, mod);
3248 /* Mark state as coming so strong_try_module_get() ignores us,
3249 * but kallsyms etc. can see us. */
3250 mod->state = MODULE_STATE_COMING;
3252 mutex_unlock(&module_mutex);
3254 /* Module is ready to execute: parsing args may do that. */
3255 err = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
3256 -32768, 32767, &ddebug_dyndbg_module_param_cb);
3257 if (err < 0)
3258 goto bug_cleanup;
3260 /* Link in to sysfs. */
3261 err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
3262 if (err < 0)
3263 goto bug_cleanup;
3265 /* Get rid of temporary copy. */
3266 free_copy(info);
3268 /* Done! */
3269 trace_module_load(mod);
3271 return do_init_module(mod);
3273 bug_cleanup:
3274 /* module_bug_cleanup needs module_mutex protection */
3275 mutex_lock(&module_mutex);
3276 module_bug_cleanup(mod);
3277 ddebug_cleanup:
3278 mutex_unlock(&module_mutex);
3279 dynamic_debug_remove(info->debug);
3280 synchronize_sched();
3281 kfree(mod->args);
3282 free_arch_cleanup:
3283 module_arch_cleanup(mod);
3284 free_modinfo:
3285 free_modinfo(mod);
3286 free_unload:
3287 module_unload_free(mod);
3288 unlink_mod:
3289 mutex_lock(&module_mutex);
3290 /* Unlink carefully: kallsyms could be walking list. */
3291 list_del_rcu(&mod->list);
3292 wake_up_all(&module_wq);
3293 mutex_unlock(&module_mutex);
3294 free_module:
3295 module_deallocate(mod, info);
3296 free_copy:
3297 free_copy(info);
3298 return err;
3301 SYSCALL_DEFINE3(init_module, void __user *, umod,
3302 unsigned long, len, const char __user *, uargs)
3304 int err;
3305 struct load_info info = { };
3307 err = may_init_module();
3308 if (err)
3309 return err;
3311 pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n",
3312 umod, len, uargs);
3314 err = copy_module_from_user(umod, len, &info);
3315 if (err)
3316 return err;
3318 return load_module(&info, uargs, 0);
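/*
 * Userspace sketch (illustrative, error handling omitted): the classic
 * loader reads the whole .ko into memory and passes the raw image;
 * image and image_len below are placeholders:
 *
 *	syscall(__NR_init_module, image, image_len, "");
 */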
3321 SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
3323 int err;
3324 struct load_info info = { };
3326 err = may_init_module();
3327 if (err)
3328 return err;
3330 pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags);
3332 if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS
3333 |MODULE_INIT_IGNORE_VERMAGIC))
3334 return -EINVAL;
3336 err = copy_module_from_fd(fd, &info);
3337 if (err)
3338 return err;
3340 return load_module(&info, uargs, flags);
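/*
 * Userspace sketch (illustrative): with finit_module() the kernel
 * reads the file itself, so a loader only needs a descriptor; the
 * path and parameter string below are placeholders:
 *
 *	int fd = open("/lib/modules/extra/foo.ko", O_RDONLY);
 *	syscall(__NR_finit_module, fd, "param=1", 0);
 */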
3343 static inline int within(unsigned long addr, void *start, unsigned long size)
3345 return ((void *)addr >= start && (void *)addr < start + size);
3348 #ifdef CONFIG_KALLSYMS
3349 /*
3350 * This ignores the intensely annoying "mapping symbols" found
3351 * in ARM ELF files: $a, $t and $d.
3352 */
3353 static inline int is_arm_mapping_symbol(const char *str)
3355 return str[0] == '$' && strchr("atd", str[1])
3356 && (str[2] == '\0' || str[2] == '.');
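/* Matches "$a", "$d" and "$t", plus dotted forms such as "$a.1". */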
3359 static const char *get_ksymbol(struct module *mod,
3360 unsigned long addr,
3361 unsigned long *size,
3362 unsigned long *offset)
3364 unsigned int i, best = 0;
3365 unsigned long nextval;
3367 /* At worst, the next value is at the end of the module */
3368 if (within_module_init(addr, mod))
3369 nextval = (unsigned long)mod->module_init+mod->init_text_size;
3370 else
3371 nextval = (unsigned long)mod->module_core+mod->core_text_size;
3373 /* Scan for closest preceding symbol, and next symbol. (ELF
3374 starts real symbols at 1). */
3375 for (i = 1; i < mod->num_symtab; i++) {
3376 if (mod->symtab[i].st_shndx == SHN_UNDEF)
3377 continue;
3379 /* We ignore unnamed symbols: they're uninformative
3380 * and inserted at a whim. */
3381 if (mod->symtab[i].st_value <= addr
3382 && mod->symtab[i].st_value > mod->symtab[best].st_value
3383 && *(mod->strtab + mod->symtab[i].st_name) != '\0'
3384 && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
3385 best = i;
3386 if (mod->symtab[i].st_value > addr
3387 && mod->symtab[i].st_value < nextval
3388 && *(mod->strtab + mod->symtab[i].st_name) != '\0'
3389 && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
3390 nextval = mod->symtab[i].st_value;
3393 if (!best)
3394 return NULL;
3396 if (size)
3397 *size = nextval - mod->symtab[best].st_value;
3398 if (offset)
3399 *offset = addr - mod->symtab[best].st_value;
3400 return mod->strtab + mod->symtab[best].st_name;
3403 /* For kallsyms to ask for address resolution. NULL means not found. Careful
3404 * not to lock, to avoid deadlock on oopses; simply disable preemption. */
3405 const char *module_address_lookup(unsigned long addr,
3406 unsigned long *size,
3407 unsigned long *offset,
3408 char **modname,
3409 char *namebuf)
3411 struct module *mod;
3412 const char *ret = NULL;
3414 preempt_disable();
3415 list_for_each_entry_rcu(mod, &modules, list) {
3416 if (mod->state == MODULE_STATE_UNFORMED)
3417 continue;
3418 if (within_module_init(addr, mod) ||
3419 within_module_core(addr, mod)) {
3420 if (modname)
3421 *modname = mod->name;
3422 ret = get_ksymbol(mod, addr, size, offset);
3423 break;
3426 /* Make a copy in here where it's safe */
3427 if (ret) {
3428 strncpy(namebuf, ret, KSYM_NAME_LEN - 1);
3429 ret = namebuf;
3431 preempt_enable();
3432 return ret;
3435 int lookup_module_symbol_name(unsigned long addr, char *symname)
3437 struct module *mod;
3439 preempt_disable();
3440 list_for_each_entry_rcu(mod, &modules, list) {
3441 if (mod->state == MODULE_STATE_UNFORMED)
3442 continue;
3443 if (within_module_init(addr, mod) ||
3444 within_module_core(addr, mod)) {
3445 const char *sym;
3447 sym = get_ksymbol(mod, addr, NULL, NULL);
3448 if (!sym)
3449 goto out;
3450 strlcpy(symname, sym, KSYM_NAME_LEN);
3451 preempt_enable();
3452 return 0;
3455 out:
3456 preempt_enable();
3457 return -ERANGE;
3460 int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
3461 unsigned long *offset, char *modname, char *name)
3463 struct module *mod;
3465 preempt_disable();
3466 list_for_each_entry_rcu(mod, &modules, list) {
3467 if (mod->state == MODULE_STATE_UNFORMED)
3468 continue;
3469 if (within_module_init(addr, mod) ||
3470 within_module_core(addr, mod)) {
3471 const char *sym;
3473 sym = get_ksymbol(mod, addr, size, offset);
3474 if (!sym)
3475 goto out;
3476 if (modname)
3477 strlcpy(modname, mod->name, MODULE_NAME_LEN);
3478 if (name)
3479 strlcpy(name, sym, KSYM_NAME_LEN);
3480 preempt_enable();
3481 return 0;
3484 out:
3485 preempt_enable();
3486 return -ERANGE;
3489 int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
3490 char *name, char *module_name, int *exported)
3492 struct module *mod;
3494 preempt_disable();
3495 list_for_each_entry_rcu(mod, &modules, list) {
3496 if (mod->state == MODULE_STATE_UNFORMED)
3497 continue;
3498 if (symnum < mod->num_symtab) {
3499 *value = mod->symtab[symnum].st_value;
3500 *type = mod->symtab[symnum].st_info;
3501 strlcpy(name, mod->strtab + mod->symtab[symnum].st_name,
3502 KSYM_NAME_LEN);
3503 strlcpy(module_name, mod->name, MODULE_NAME_LEN);
3504 *exported = is_exported(name, *value, mod);
3505 preempt_enable();
3506 return 0;
3508 symnum -= mod->num_symtab;
3510 preempt_enable();
3511 return -ERANGE;
3514 static unsigned long mod_find_symname(struct module *mod, const char *name)
3516 unsigned int i;
3518 for (i = 0; i < mod->num_symtab; i++)
3519 if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 &&
3520 mod->symtab[i].st_info != 'U')
3521 return mod->symtab[i].st_value;
3522 return 0;
3525 /* Look for this name: can be of form module:name. */
3526 unsigned long module_kallsyms_lookup_name(const char *name)
3528 struct module *mod;
3529 char *colon;
3530 unsigned long ret = 0;
3532 /* Don't lock: we're in enough trouble already. */
3533 preempt_disable();
3534 if ((colon = strchr(name, ':')) != NULL) {
3535 *colon = '\0';
3536 if ((mod = find_module(name)) != NULL)
3537 ret = mod_find_symname(mod, colon+1);
3538 *colon = ':';
3539 } else {
3540 list_for_each_entry_rcu(mod, &modules, list) {
3541 if (mod->state == MODULE_STATE_UNFORMED)
3542 continue;
3543 if ((ret = mod_find_symname(mod, name)) != 0)
3544 break;
3547 preempt_enable();
3548 return ret;
3551 int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
3552 struct module *, unsigned long),
3553 void *data)
3555 struct module *mod;
3556 unsigned int i;
3557 int ret;
3559 list_for_each_entry(mod, &modules, list) {
3560 if (mod->state == MODULE_STATE_UNFORMED)
3561 continue;
3562 for (i = 0; i < mod->num_symtab; i++) {
3563 ret = fn(data, mod->strtab + mod->symtab[i].st_name,
3564 mod, mod->symtab[i].st_value);
3565 if (ret != 0)
3566 return ret;
3569 return 0;
3571 #endif /* CONFIG_KALLSYMS */
3573 static char *module_flags(struct module *mod, char *buf)
3575 int bx = 0;
3577 BUG_ON(mod->state == MODULE_STATE_UNFORMED);
3578 if (mod->taints ||
3579 mod->state == MODULE_STATE_GOING ||
3580 mod->state == MODULE_STATE_COMING) {
3581 buf[bx++] = '(';
3582 bx += module_flags_taint(mod, buf + bx);
3583 /* Show a - for module-is-being-unloaded */
3584 if (mod->state == MODULE_STATE_GOING)
3585 buf[bx++] = '-';
3586 /* Show a + for module-is-being-loaded */
3587 if (mod->state == MODULE_STATE_COMING)
3588 buf[bx++] = '+';
3589 buf[bx++] = ')';
3591 buf[bx] = '\0';
3593 return buf;
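/*
 * Illustrative output: a proprietary (P), out-of-tree (O) module that
 * is currently being unloaded comes out as "(PO-)".
 */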
3596 #ifdef CONFIG_PROC_FS
3597 /* Called by the /proc file system to return a list of modules. */
3598 static void *m_start(struct seq_file *m, loff_t *pos)
3600 mutex_lock(&module_mutex);
3601 return seq_list_start(&modules, *pos);
3604 static void *m_next(struct seq_file *m, void *p, loff_t *pos)
3606 return seq_list_next(p, &modules, pos);
3609 static void m_stop(struct seq_file *m, void *p)
3611 mutex_unlock(&module_mutex);
3614 static int m_show(struct seq_file *m, void *p)
3616 struct module *mod = list_entry(p, struct module, list);
3617 char buf[8];
3619 /* We always ignore unformed modules. */
3620 if (mod->state == MODULE_STATE_UNFORMED)
3621 return 0;
3623 seq_printf(m, "%s %u",
3624 mod->name, mod->init_size + mod->core_size);
3625 print_unload_info(m, mod);
3627 /* Informative for users. */
3628 seq_printf(m, " %s",
3629 mod->state == MODULE_STATE_GOING ? "Unloading":
3630 mod->state == MODULE_STATE_COMING ? "Loading":
3631 "Live");
3632 /* Used by oprofile and other similar tools. */
3633 seq_printf(m, " 0x%pK", mod->module_core);
3635 /* Taints info */
3636 if (mod->taints)
3637 seq_printf(m, " %s", module_flags(mod, buf));
3639 seq_printf(m, "\n");
3640 return 0;
3643 /* Format: modulename size refcount deps address
3644
3645 Where refcount is a number or -, and deps is a comma-separated list
3646 of depends or -.
3647 */
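/*
 * Illustrative /proc/modules line:
 *
 *	usbcore 147456 4 uhci_hcd,ehci_hcd, Live 0xffffffffa0008000
 */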
3648 static const struct seq_operations modules_op = {
3649 .start = m_start,
3650 .next = m_next,
3651 .stop = m_stop,
3652 .show = m_show
3655 static int modules_open(struct inode *inode, struct file *file)
3657 return seq_open(file, &modules_op);
3660 static const struct file_operations proc_modules_operations = {
3661 .open = modules_open,
3662 .read = seq_read,
3663 .llseek = seq_lseek,
3664 .release = seq_release,
3667 static int __init proc_modules_init(void)
3669 proc_create("modules", 0, NULL, &proc_modules_operations);
3670 return 0;
3672 module_init(proc_modules_init);
3673 #endif
3675 /* Given an address, look for it in the module exception tables. */
3676 const struct exception_table_entry *search_module_extables(unsigned long addr)
3678 const struct exception_table_entry *e = NULL;
3679 struct module *mod;
3681 preempt_disable();
3682 list_for_each_entry_rcu(mod, &modules, list) {
3683 if (mod->state == MODULE_STATE_UNFORMED)
3684 continue;
3685 if (mod->num_exentries == 0)
3686 continue;
3688 e = search_extable(mod->extable,
3689 mod->extable + mod->num_exentries - 1,
3690 addr);
3691 if (e)
3692 break;
3694 preempt_enable();
3696 /* If we found one, we are running inside it right now, hence
3697 we cannot unload the module and need no refcount. */
3698 return e;
3701 /**
3702 * is_module_address - is this address inside a module?
3703 * @addr: the address to check.
3704 *
3705 * See is_module_text_address() if you simply want to see if the address
3706 * is code (not data).
3707 */
3708 bool is_module_address(unsigned long addr)
3710 bool ret;
3712 preempt_disable();
3713 ret = __module_address(addr) != NULL;
3714 preempt_enable();
3716 return ret;
3719 /**
3720 * __module_address - get the module which contains an address.
3721 * @addr: the address.
3722 *
3723 * Must be called with preempt disabled or module mutex held so that
3724 * module doesn't get freed during this.
3725 */
3726 struct module *__module_address(unsigned long addr)
3728 struct module *mod;
3730 if (addr < module_addr_min || addr > module_addr_max)
3731 return NULL;
3733 list_for_each_entry_rcu(mod, &modules, list) {
3734 if (mod->state == MODULE_STATE_UNFORMED)
3735 continue;
3736 if (within_module_core(addr, mod)
3737 || within_module_init(addr, mod))
3738 return mod;
3740 return NULL;
3742 EXPORT_SYMBOL_GPL(__module_address);
3744 /**
3745 * is_module_text_address - is this address inside module code?
3746 * @addr: the address to check.
3747 *
3748 * See is_module_address() if you simply want to see if the address is
3749 * anywhere in a module. See kernel_text_address() for testing if an
3750 * address corresponds to kernel or module code.
3751 */
3752 bool is_module_text_address(unsigned long addr)
3754 bool ret;
3756 preempt_disable();
3757 ret = __module_text_address(addr) != NULL;
3758 preempt_enable();
3760 return ret;
3763 /**
3764 * __module_text_address - get the module whose code contains an address.
3765 * @addr: the address.
3766 *
3767 * Must be called with preempt disabled or module mutex held so that
3768 * module doesn't get freed during this.
3769 */
3770 struct module *__module_text_address(unsigned long addr)
3772 struct module *mod = __module_address(addr);
3773 if (mod) {
3774 /* Make sure it's within the text section. */
3775 if (!within(addr, mod->module_init, mod->init_text_size)
3776 && !within(addr, mod->module_core, mod->core_text_size))
3777 mod = NULL;
3779 return mod;
3781 EXPORT_SYMBOL_GPL(__module_text_address);
3783 /* Don't grab lock, we're oopsing. */
3784 void print_modules(void)
3786 struct module *mod;
3787 char buf[8];
3789 printk(KERN_DEFAULT "Modules linked in:");
3790 /* Most callers should already have preempt disabled, but make sure */
3791 preempt_disable();
3792 list_for_each_entry_rcu(mod, &modules, list) {
3793 if (mod->state == MODULE_STATE_UNFORMED)
3794 continue;
3795 printk(" %s%s", mod->name, module_flags(mod, buf));
3797 preempt_enable();
3798 if (last_unloaded_module[0])
3799 printk(" [last unloaded: %s]", last_unloaded_module);
3800 printk("\n");
3803 #ifdef CONFIG_MODVERSIONS
3804 /* Generate the signature for all relevant module structures here.
3805 * If these change, we don't want to try to parse the module. */
3806 void module_layout(struct module *mod,
3807 struct modversion_info *ver,
3808 struct kernel_param *kp,
3809 struct kernel_symbol *ks,
3810 struct tracepoint * const *tp)
3813 EXPORT_SYMBOL(module_layout);
3814 #endif