kernel/module.c
/*
   Copyright (C) 2002 Richard Henderson
   Copyright (C) 2001 Rusty Russell, 2002, 2010 Rusty Russell IBM.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.

    You should have received a copy of the GNU General Public License
    along with this program; if not, write to the Free Software
    Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
*/
#include <linux/export.h>
#include <linux/moduleloader.h>
#include <linux/ftrace_event.h>
#include <linux/init.h>
#include <linux/kallsyms.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/sysfs.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/elf.h>
#include <linux/proc_fs.h>
#include <linux/security.h>
#include <linux/seq_file.h>
#include <linux/syscalls.h>
#include <linux/fcntl.h>
#include <linux/rcupdate.h>
#include <linux/capability.h>
#include <linux/cpu.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/vermagic.h>
#include <linux/notifier.h>
#include <linux/sched.h>
#include <linux/stop_machine.h>
#include <linux/device.h>
#include <linux/string.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <linux/license.h>
#include <asm/sections.h>
#include <linux/tracepoint.h>
#include <linux/ftrace.h>
#include <linux/async.h>
#include <linux/percpu.h>
#include <linux/kmemleak.h>
#include <linux/jump_label.h>
#include <linux/pfn.h>
#include <linux/bsearch.h>
#include <linux/fips.h>
#include <uapi/linux/module.h>
#include "module-internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/module.h>

#ifndef ARCH_SHF_SMALL
#define ARCH_SHF_SMALL 0
#endif

/*
 * Modules' sections will be aligned on page boundaries
 * to ensure complete separation of code and data, but
 * only when CONFIG_DEBUG_SET_MODULE_RONX=y
 */
#ifdef CONFIG_DEBUG_SET_MODULE_RONX
# define debug_align(X) ALIGN(X, PAGE_SIZE)
#else
# define debug_align(X) (X)
#endif

/*
 * Given BASE and SIZE this macro calculates the number of pages the
 * memory region occupies
 */
#define MOD_NUMBER_OF_PAGES(BASE, SIZE) (((SIZE) > 0) ?                \
                (PFN_DOWN((unsigned long)(BASE) + (SIZE) - 1) -        \
                         PFN_DOWN((unsigned long)BASE) + 1)            \
                : (0UL))
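
/*
 * Example: with 4K pages, BASE = 0x1f00 and SIZE = 0x200 straddle a page
 * boundary: PFN_DOWN(0x20ff) - PFN_DOWN(0x1f00) + 1 = 2 - 1 + 1 = 2 pages,
 * even though SIZE is much smaller than a single page.
 */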

/* If this is set, the section belongs in the init part of the module */
#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))

/*
 * Mutex protects:
 * 1) List of modules (also safely readable with preempt_disable),
 * 2) module_use links,
 * 3) module_addr_min/module_addr_max.
 * (delete uses stop_machine/add uses RCU list operations). */
DEFINE_MUTEX(module_mutex);
EXPORT_SYMBOL_GPL(module_mutex);
static LIST_HEAD(modules);
#ifdef CONFIG_KGDB_KDB
struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
#endif /* CONFIG_KGDB_KDB */

#ifdef CONFIG_MODULE_SIG
#ifdef CONFIG_MODULE_SIG_FORCE
static bool sig_enforce = true;
#else
static bool sig_enforce = false;

static int param_set_bool_enable_only(const char *val,
                                      const struct kernel_param *kp)
{
        int err;
        bool test;
        struct kernel_param dummy_kp = *kp;

        dummy_kp.arg = &test;

        err = param_set_bool(val, &dummy_kp);
        if (err)
                return err;

        /* Don't let them unset it once it's set! */
        if (!test && sig_enforce)
                return -EROFS;

        if (test)
                sig_enforce = true;
        return 0;
}

static const struct kernel_param_ops param_ops_bool_enable_only = {
        .set = param_set_bool_enable_only,
        .get = param_get_bool,
};
#define param_check_bool_enable_only param_check_bool

module_param(sig_enforce, bool_enable_only, 0644);
#endif /* !CONFIG_MODULE_SIG_FORCE */
#endif /* CONFIG_MODULE_SIG */
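
/*
 * With the bool_enable_only ops above, sig_enforce appears as
 * /sys/module/module/parameters/sig_enforce: writing "1" (or "Y") turns
 * enforcement on, while any attempt to write it back to "0" fails with
 * -EROFS.  Under CONFIG_MODULE_SIG_FORCE the parameter is not registered
 * at all and enforcement is unconditional.
 */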

/* Block module loading/unloading? */
int modules_disabled = 0;
core_param(nomodule, modules_disabled, bint, 0);

/* Waiting for a module to finish initializing? */
static DECLARE_WAIT_QUEUE_HEAD(module_wq);

static BLOCKING_NOTIFIER_HEAD(module_notify_list);

/* Bounds of module allocation, for speeding up __module_address.
 * Protected by module_mutex. */
static unsigned long module_addr_min = -1UL, module_addr_max = 0;

int register_module_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&module_notify_list, nb);
}
EXPORT_SYMBOL(register_module_notifier);

int unregister_module_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&module_notify_list, nb);
}
EXPORT_SYMBOL(unregister_module_notifier);

struct load_info {
        Elf_Ehdr *hdr;
        unsigned long len;
        Elf_Shdr *sechdrs;
        char *secstrings, *strtab;
        unsigned long symoffs, stroffs;
        struct _ddebug *debug;
        unsigned int num_debug;
        bool sig_ok;
        struct {
                unsigned int sym, str, mod, vers, info, pcpu;
        } index;
};

/* We require a truly strong try_module_get(): 0 means failure due to
   ongoing or failed initialization etc. */
static inline int strong_try_module_get(struct module *mod)
{
        BUG_ON(mod && mod->state == MODULE_STATE_UNFORMED);
        if (mod && mod->state == MODULE_STATE_COMING)
                return -EBUSY;
        if (try_module_get(mod))
                return 0;
        else
                return -ENOENT;
}

static inline void add_taint_module(struct module *mod, unsigned flag,
                                    enum lockdep_ok lockdep_ok)
{
        add_taint(flag, lockdep_ok);
        mod->taints |= (1U << flag);
}

/*
 * A thread that wants to hold a reference to a module only while it
 * is running can call this to safely exit.  nfsd and lockd use this.
 */
void __module_put_and_exit(struct module *mod, long code)
{
        module_put(mod);
        do_exit(code);
}
EXPORT_SYMBOL(__module_put_and_exit);

/* Find a module section: 0 means not found. */
static unsigned int find_sec(const struct load_info *info, const char *name)
{
        unsigned int i;

        for (i = 1; i < info->hdr->e_shnum; i++) {
                Elf_Shdr *shdr = &info->sechdrs[i];
                /* Alloc bit cleared means "ignore it." */
                if ((shdr->sh_flags & SHF_ALLOC)
                    && strcmp(info->secstrings + shdr->sh_name, name) == 0)
                        return i;
        }
        return 0;
}

/* Find a module section, or NULL. */
static void *section_addr(const struct load_info *info, const char *name)
{
        /* Section 0 has sh_addr 0. */
        return (void *)info->sechdrs[find_sec(info, name)].sh_addr;
}

/* Find a module section, or NULL.  Fill in number of "objects" in section. */
static void *section_objs(const struct load_info *info,
                          const char *name,
                          size_t object_size,
                          unsigned int *num)
{
        unsigned int sec = find_sec(info, name);

        /* Section 0 has sh_addr 0 and sh_size 0. */
        *num = info->sechdrs[sec].sh_size / object_size;
        return (void *)info->sechdrs[sec].sh_addr;
}

/* Provided by the linker */
extern const struct kernel_symbol __start___ksymtab[];
extern const struct kernel_symbol __stop___ksymtab[];
extern const struct kernel_symbol __start___ksymtab_gpl[];
extern const struct kernel_symbol __stop___ksymtab_gpl[];
extern const struct kernel_symbol __start___ksymtab_gpl_future[];
extern const struct kernel_symbol __stop___ksymtab_gpl_future[];
extern const unsigned long __start___kcrctab[];
extern const unsigned long __start___kcrctab_gpl[];
extern const unsigned long __start___kcrctab_gpl_future[];
#ifdef CONFIG_UNUSED_SYMBOLS
extern const struct kernel_symbol __start___ksymtab_unused[];
extern const struct kernel_symbol __stop___ksymtab_unused[];
extern const struct kernel_symbol __start___ksymtab_unused_gpl[];
extern const struct kernel_symbol __stop___ksymtab_unused_gpl[];
extern const unsigned long __start___kcrctab_unused[];
extern const unsigned long __start___kcrctab_unused_gpl[];
#endif

#ifndef CONFIG_MODVERSIONS
#define symversion(base, idx) NULL
#else
#define symversion(base, idx) ((base != NULL) ? ((base) + (idx)) : NULL)
#endif

static bool each_symbol_in_section(const struct symsearch *arr,
                                   unsigned int arrsize,
                                   struct module *owner,
                                   bool (*fn)(const struct symsearch *syms,
                                              struct module *owner,
                                              void *data),
                                   void *data)
{
        unsigned int j;

        for (j = 0; j < arrsize; j++) {
                if (fn(&arr[j], owner, data))
                        return true;
        }

        return false;
}

/* Returns true as soon as fn returns true, otherwise false. */
bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
                                    struct module *owner,
                                    void *data),
                         void *data)
{
        struct module *mod;
        static const struct symsearch arr[] = {
                { __start___ksymtab, __stop___ksymtab, __start___kcrctab,
                  NOT_GPL_ONLY, false },
                { __start___ksymtab_gpl, __stop___ksymtab_gpl,
                  __start___kcrctab_gpl,
                  GPL_ONLY, false },
                { __start___ksymtab_gpl_future, __stop___ksymtab_gpl_future,
                  __start___kcrctab_gpl_future,
                  WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
                { __start___ksymtab_unused, __stop___ksymtab_unused,
                  __start___kcrctab_unused,
                  NOT_GPL_ONLY, true },
                { __start___ksymtab_unused_gpl, __stop___ksymtab_unused_gpl,
                  __start___kcrctab_unused_gpl,
                  GPL_ONLY, true },
#endif
        };

        if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
                return true;

        list_for_each_entry_rcu(mod, &modules, list) {
                struct symsearch arr[] = {
                        { mod->syms, mod->syms + mod->num_syms, mod->crcs,
                          NOT_GPL_ONLY, false },
                        { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
                          mod->gpl_crcs,
                          GPL_ONLY, false },
                        { mod->gpl_future_syms,
                          mod->gpl_future_syms + mod->num_gpl_future_syms,
                          mod->gpl_future_crcs,
                          WILL_BE_GPL_ONLY, false },
#ifdef CONFIG_UNUSED_SYMBOLS
                        { mod->unused_syms,
                          mod->unused_syms + mod->num_unused_syms,
                          mod->unused_crcs,
                          NOT_GPL_ONLY, true },
                        { mod->unused_gpl_syms,
                          mod->unused_gpl_syms + mod->num_unused_gpl_syms,
                          mod->unused_gpl_crcs,
                          GPL_ONLY, true },
#endif
                };

                if (mod->state == MODULE_STATE_UNFORMED)
                        continue;

                if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
                        return true;
        }
        return false;
}
EXPORT_SYMBOL_GPL(each_symbol_section);

struct find_symbol_arg {
        /* Input */
        const char *name;
        bool gplok;
        bool warn;

        /* Output */
        struct module *owner;
        const unsigned long *crc;
        const struct kernel_symbol *sym;
};

static bool check_symbol(const struct symsearch *syms,
                         struct module *owner,
                         unsigned int symnum, void *data)
{
        struct find_symbol_arg *fsa = data;

        if (!fsa->gplok) {
                if (syms->licence == GPL_ONLY)
                        return false;
                if (syms->licence == WILL_BE_GPL_ONLY && fsa->warn) {
                        printk(KERN_WARNING "Symbol %s is being used "
                               "by a non-GPL module, which will not "
                               "be allowed in the future\n", fsa->name);
                }
        }

#ifdef CONFIG_UNUSED_SYMBOLS
        if (syms->unused && fsa->warn) {
                printk(KERN_WARNING "Symbol %s is marked as UNUSED, "
                       "however this module is using it.\n", fsa->name);
                printk(KERN_WARNING
                       "This symbol will go away in the future.\n");
                printk(KERN_WARNING
                       "Please evaluate if this is the right api to use and "
                       "if it really is, submit a report to the linux kernel "
                       "mailing list together with submitting your code for "
                       "inclusion.\n");
        }
#endif

        fsa->owner = owner;
        fsa->crc = symversion(syms->crcs, symnum);
        fsa->sym = &syms->start[symnum];
        return true;
}

static int cmp_name(const void *va, const void *vb)
{
        const char *a;
        const struct kernel_symbol *b;
        a = va; b = vb;
        return strcmp(a, b->name);
}

static bool find_symbol_in_section(const struct symsearch *syms,
                                   struct module *owner,
                                   void *data)
{
        struct find_symbol_arg *fsa = data;
        struct kernel_symbol *sym;

        sym = bsearch(fsa->name, syms->start, syms->stop - syms->start,
                      sizeof(struct kernel_symbol), cmp_name);

        if (sym != NULL && check_symbol(syms, owner, sym - syms->start, data))
                return true;

        return false;
}

/* Find a symbol and return it, along with (optional) crc and
 * (optional) module which owns it.  Needs preempt disabled or module_mutex. */
const struct kernel_symbol *find_symbol(const char *name,
                                        struct module **owner,
                                        const unsigned long **crc,
                                        bool gplok,
                                        bool warn)
{
        struct find_symbol_arg fsa;

        fsa.name = name;
        fsa.gplok = gplok;
        fsa.warn = warn;

        if (each_symbol_section(find_symbol_in_section, &fsa)) {
                if (owner)
                        *owner = fsa.owner;
                if (crc)
                        *crc = fsa.crc;
                return fsa.sym;
        }

        pr_debug("Failed to find symbol %s\n", name);
        return NULL;
}
EXPORT_SYMBOL_GPL(find_symbol);

/* Search for module by name: must hold module_mutex. */
static struct module *find_module_all(const char *name, size_t len,
                                      bool even_unformed)
{
        struct module *mod;

        list_for_each_entry(mod, &modules, list) {
                if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
                        continue;
                if (strlen(mod->name) == len && !memcmp(mod->name, name, len))
                        return mod;
        }
        return NULL;
}

struct module *find_module(const char *name)
{
        return find_module_all(name, strlen(name), false);
}
EXPORT_SYMBOL_GPL(find_module);

#ifdef CONFIG_SMP

static inline void __percpu *mod_percpu(struct module *mod)
{
        return mod->percpu;
}

static int percpu_modalloc(struct module *mod, struct load_info *info)
{
        Elf_Shdr *pcpusec = &info->sechdrs[info->index.pcpu];
        unsigned long align = pcpusec->sh_addralign;

        if (!pcpusec->sh_size)
                return 0;

        if (align > PAGE_SIZE) {
                printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
                       mod->name, align, PAGE_SIZE);
                align = PAGE_SIZE;
        }

        mod->percpu = __alloc_reserved_percpu(pcpusec->sh_size, align);
        if (!mod->percpu) {
                printk(KERN_WARNING
                       "%s: Could not allocate %lu bytes percpu data\n",
                       mod->name, (unsigned long)pcpusec->sh_size);
                return -ENOMEM;
        }
        mod->percpu_size = pcpusec->sh_size;
        return 0;
}

static void percpu_modfree(struct module *mod)
{
        free_percpu(mod->percpu);
}

static unsigned int find_pcpusec(struct load_info *info)
{
        return find_sec(info, ".data..percpu");
}

static void percpu_modcopy(struct module *mod,
                           const void *from, unsigned long size)
{
        int cpu;

        for_each_possible_cpu(cpu)
                memcpy(per_cpu_ptr(mod->percpu, cpu), from, size);
}

/**
 * is_module_percpu_address - test whether address is from module static percpu
 * @addr: address to test
 *
 * Test whether @addr belongs to module static percpu area.
 *
 * RETURNS:
 * %true if @addr is from module static percpu area
 */
bool is_module_percpu_address(unsigned long addr)
{
        struct module *mod;
        unsigned int cpu;

        preempt_disable();

        list_for_each_entry_rcu(mod, &modules, list) {
                if (mod->state == MODULE_STATE_UNFORMED)
                        continue;
                if (!mod->percpu_size)
                        continue;
                for_each_possible_cpu(cpu) {
                        void *start = per_cpu_ptr(mod->percpu, cpu);

                        if ((void *)addr >= start &&
                            (void *)addr < start + mod->percpu_size) {
                                preempt_enable();
                                return true;
                        }
                }
        }

        preempt_enable();
        return false;
}

#else /* ... !CONFIG_SMP */

static inline void __percpu *mod_percpu(struct module *mod)
{
        return NULL;
}
static int percpu_modalloc(struct module *mod, struct load_info *info)
{
        /* UP modules shouldn't have this section: ENOMEM isn't quite right */
        if (info->sechdrs[info->index.pcpu].sh_size != 0)
                return -ENOMEM;
        return 0;
}
static inline void percpu_modfree(struct module *mod)
{
}
static unsigned int find_pcpusec(struct load_info *info)
{
        return 0;
}
static inline void percpu_modcopy(struct module *mod,
                                  const void *from, unsigned long size)
{
        /* pcpusec should be 0, and size of that section should be 0. */
        BUG_ON(size != 0);
}
bool is_module_percpu_address(unsigned long addr)
{
        return false;
}

#endif /* CONFIG_SMP */

#define MODINFO_ATTR(field)     \
static void setup_modinfo_##field(struct module *mod, const char *s)  \
{                                                                     \
        mod->field = kstrdup(s, GFP_KERNEL);                          \
}                                                                     \
static ssize_t show_modinfo_##field(struct module_attribute *mattr,   \
                        struct module_kobject *mk, char *buffer)      \
{                                                                     \
        return sprintf(buffer, "%s\n", mk->mod->field);               \
}                                                                     \
static int modinfo_##field##_exists(struct module *mod)               \
{                                                                     \
        return mod->field != NULL;                                    \
}                                                                     \
static void free_modinfo_##field(struct module *mod)                  \
{                                                                     \
        kfree(mod->field);                                            \
        mod->field = NULL;                                            \
}                                                                     \
static struct module_attribute modinfo_##field = {                   \
        .attr = { .name = __stringify(field), .mode = 0444 },        \
        .show = show_modinfo_##field,                                 \
        .setup = setup_modinfo_##field,                               \
        .test = modinfo_##field##_exists,                             \
        .free = free_modinfo_##field,                                 \
};

MODINFO_ATTR(version);
MODINFO_ATTR(srcversion);
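
/*
 * Each MODINFO_ATTR(field) expansion above generates setup/show/test/free
 * handlers plus a module_attribute, so the "version" and "srcversion" tags
 * from a module's .modinfo section surface as /sys/module/<name>/version
 * and /sys/module/<name>/srcversion when present.
 */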

static char last_unloaded_module[MODULE_NAME_LEN+1];

#ifdef CONFIG_MODULE_UNLOAD

EXPORT_TRACEPOINT_SYMBOL(module_get);

/* Init the unload section of the module. */
static int module_unload_init(struct module *mod)
{
        mod->refptr = alloc_percpu(struct module_ref);
        if (!mod->refptr)
                return -ENOMEM;

        INIT_LIST_HEAD(&mod->source_list);
        INIT_LIST_HEAD(&mod->target_list);

        /* Hold reference count during initialization. */
        __this_cpu_write(mod->refptr->incs, 1);
        /* Backwards compatibility macros put refcount during init. */
        mod->waiter = current;

        return 0;
}

/* Does a already use b? */
static int already_uses(struct module *a, struct module *b)
{
        struct module_use *use;

        list_for_each_entry(use, &b->source_list, source_list) {
                if (use->source == a) {
                        pr_debug("%s uses %s!\n", a->name, b->name);
                        return 1;
                }
        }
        pr_debug("%s does not use %s!\n", a->name, b->name);
        return 0;
}

/*
 * Module a uses b
 *  - we add 'a' as a "source", 'b' as a "target" of module use
 *  - the module_use is added to the list of 'b' sources (so
 *    'b' can walk the list to see who sourced them), and of 'a'
 *    targets (so 'a' can see what modules it targets).
 */
static int add_module_usage(struct module *a, struct module *b)
{
        struct module_use *use;

        pr_debug("Allocating new usage for %s.\n", a->name);
        use = kmalloc(sizeof(*use), GFP_ATOMIC);
        if (!use) {
                printk(KERN_WARNING "%s: out of memory loading\n", a->name);
                return -ENOMEM;
        }

        use->source = a;
        use->target = b;
        list_add(&use->source_list, &b->source_list);
        list_add(&use->target_list, &a->target_list);
        return 0;
}

/* Module a uses b: caller must hold module_mutex. */
int ref_module(struct module *a, struct module *b)
{
        int err;

        if (b == NULL || already_uses(a, b))
                return 0;

        /* If module isn't available, we fail. */
        err = strong_try_module_get(b);
        if (err)
                return err;

        err = add_module_usage(a, b);
        if (err) {
                module_put(b);
                return err;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(ref_module);

/* Clear the unload stuff of the module. */
static void module_unload_free(struct module *mod)
{
        struct module_use *use, *tmp;

        mutex_lock(&module_mutex);
        list_for_each_entry_safe(use, tmp, &mod->target_list, target_list) {
                struct module *i = use->target;
                pr_debug("%s unusing %s\n", mod->name, i->name);
                module_put(i);
                list_del(&use->source_list);
                list_del(&use->target_list);
                kfree(use);
        }
        mutex_unlock(&module_mutex);

        free_percpu(mod->refptr);
}

#ifdef CONFIG_MODULE_FORCE_UNLOAD
static inline int try_force_unload(unsigned int flags)
{
        int ret = (flags & O_TRUNC);
        if (ret)
                add_taint(TAINT_FORCED_RMMOD, LOCKDEP_NOW_UNRELIABLE);
        return ret;
}
#else
static inline int try_force_unload(unsigned int flags)
{
        return 0;
}
#endif /* CONFIG_MODULE_FORCE_UNLOAD */

struct stopref
{
        struct module *mod;
        int flags;
        int *forced;
};

/* Whole machine is stopped with interrupts off when this runs. */
static int __try_stop_module(void *_sref)
{
        struct stopref *sref = _sref;

        /* If it's not unused, quit unless we're forcing. */
        if (module_refcount(sref->mod) != 0) {
                if (!(*sref->forced = try_force_unload(sref->flags)))
                        return -EWOULDBLOCK;
        }

        /* Mark it as dying. */
        sref->mod->state = MODULE_STATE_GOING;
        return 0;
}

static int try_stop_module(struct module *mod, int flags, int *forced)
{
        if (flags & O_NONBLOCK) {
                struct stopref sref = { mod, flags, forced };

                return stop_machine(__try_stop_module, &sref, NULL);
        } else {
                /* We don't need to stop the machine for this. */
                mod->state = MODULE_STATE_GOING;
                synchronize_sched();
                return 0;
        }
}

unsigned long module_refcount(struct module *mod)
{
        unsigned long incs = 0, decs = 0;
        int cpu;

        for_each_possible_cpu(cpu)
                decs += per_cpu_ptr(mod->refptr, cpu)->decs;
        /*
         * ensure the incs are added up after the decs.
         * module_put ensures incs are visible before decs with smp_wmb.
         *
         * This 2-count scheme avoids the situation where the refcount
         * for CPU0 is read, then CPU0 increments the module refcount,
         * then CPU1 drops that refcount, then the refcount for CPU1 is
         * read. We would record a decrement but not its corresponding
         * increment so we would see a low count (disaster).
         *
         * Rare situation? But module_refcount can be preempted, and we
         * might be tallying up 4096+ CPUs. So it is not impossible.
         */
        smp_rmb();
        for_each_possible_cpu(cpu)
                incs += per_cpu_ptr(mod->refptr, cpu)->incs;
        return incs - decs;
}
EXPORT_SYMBOL(module_refcount);

/* This exists whether we can unload or not */
static void free_module(struct module *mod);

static void wait_for_zero_refcount(struct module *mod)
{
        /* Since we might sleep for some time, release the mutex first */
        mutex_unlock(&module_mutex);
        for (;;) {
                pr_debug("Looking at refcount...\n");
                set_current_state(TASK_UNINTERRUPTIBLE);
                if (module_refcount(mod) == 0)
                        break;
                schedule();
        }
        current->state = TASK_RUNNING;
        mutex_lock(&module_mutex);
}

SYSCALL_DEFINE2(delete_module, const char __user *, name_user,
                unsigned int, flags)
{
        struct module *mod;
        char name[MODULE_NAME_LEN];
        int ret, forced = 0;

        if (!capable(CAP_SYS_MODULE) || modules_disabled)
                return -EPERM;

        if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
                return -EFAULT;
        name[MODULE_NAME_LEN-1] = '\0';

        if (mutex_lock_interruptible(&module_mutex) != 0)
                return -EINTR;

        mod = find_module(name);
        if (!mod) {
                ret = -ENOENT;
                goto out;
        }

        if (!list_empty(&mod->source_list)) {
                /* Other modules depend on us: get rid of them first. */
                ret = -EWOULDBLOCK;
                goto out;
        }

        /* Doing init or already dying? */
        if (mod->state != MODULE_STATE_LIVE) {
                /* FIXME: if (force), slam module count and wake up
                   waiter --RR */
                pr_debug("%s already dying\n", mod->name);
                ret = -EBUSY;
                goto out;
        }

        /* If it has an init func, it must have an exit func to unload */
        if (mod->init && !mod->exit) {
                forced = try_force_unload(flags);
                if (!forced) {
                        /* This module can't be removed */
                        ret = -EBUSY;
                        goto out;
                }
        }

        /* Set this up before setting mod->state */
        mod->waiter = current;

        /* Stop the machine so refcounts can't move and disable module. */
        ret = try_stop_module(mod, flags, &forced);
        if (ret != 0)
                goto out;

        /* Never wait if forced. */
        if (!forced && module_refcount(mod) != 0)
                wait_for_zero_refcount(mod);

        mutex_unlock(&module_mutex);
        /* Final destruction now that no one is using it. */
        if (mod->exit != NULL)
                mod->exit();
        blocking_notifier_call_chain(&module_notify_list,
                                     MODULE_STATE_GOING, mod);
        async_synchronize_full();

        /* Store the name of the last unloaded module for diagnostic purposes */
        strlcpy(last_unloaded_module, mod->name, sizeof(last_unloaded_module));

        free_module(mod);
        return 0;
out:
        mutex_unlock(&module_mutex);
        return ret;
}
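
/*
 * This is the syscall behind "rmmod"/"modprobe -r".  Userspace tools
 * typically pass O_NONBLOCK (taking the stop_machine() path inside
 * try_stop_module() above), and a forced unload ("rmmod -f") is requested
 * by additionally passing O_TRUNC, which only has effect on
 * CONFIG_MODULE_FORCE_UNLOAD kernels.
 */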

static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
        struct module_use *use;
        int printed_something = 0;

        seq_printf(m, " %lu ", module_refcount(mod));

        /* Always include a trailing , so userspace can differentiate
           between this and the old multi-field proc format. */
        list_for_each_entry(use, &mod->source_list, source_list) {
                printed_something = 1;
                seq_printf(m, "%s,", use->source->name);
        }

        if (mod->init != NULL && mod->exit == NULL) {
                printed_something = 1;
                seq_printf(m, "[permanent],");
        }

        if (!printed_something)
                seq_printf(m, "-");
}

void __symbol_put(const char *symbol)
{
        struct module *owner;

        preempt_disable();
        if (!find_symbol(symbol, &owner, NULL, true, false))
                BUG();
        module_put(owner);
        preempt_enable();
}
EXPORT_SYMBOL(__symbol_put);

/* Note this assumes addr is a function, which it currently always is. */
void symbol_put_addr(void *addr)
{
        struct module *modaddr;
        unsigned long a = (unsigned long)dereference_function_descriptor(addr);

        if (core_kernel_text(a))
                return;

        /* module_text_address is safe here: we're supposed to have reference
         * to module from symbol_get, so it can't go away. */
        modaddr = __module_text_address(a);
        BUG_ON(!modaddr);
        module_put(modaddr);
}
EXPORT_SYMBOL_GPL(symbol_put_addr);

static ssize_t show_refcnt(struct module_attribute *mattr,
                           struct module_kobject *mk, char *buffer)
{
        return sprintf(buffer, "%lu\n", module_refcount(mk->mod));
}

static struct module_attribute modinfo_refcnt =
        __ATTR(refcnt, 0444, show_refcnt, NULL);

void __module_get(struct module *module)
{
        if (module) {
                preempt_disable();
                __this_cpu_inc(module->refptr->incs);
                trace_module_get(module, _RET_IP_);
                preempt_enable();
        }
}
EXPORT_SYMBOL(__module_get);

bool try_module_get(struct module *module)
{
        bool ret = true;

        if (module) {
                preempt_disable();

                if (likely(module_is_live(module))) {
                        __this_cpu_inc(module->refptr->incs);
                        trace_module_get(module, _RET_IP_);
                } else
                        ret = false;

                preempt_enable();
        }
        return ret;
}
EXPORT_SYMBOL(try_module_get);

void module_put(struct module *module)
{
        if (module) {
                preempt_disable();
                smp_wmb(); /* see comment in module_refcount */
                __this_cpu_inc(module->refptr->decs);

                trace_module_put(module, _RET_IP_);
                /* Maybe they're waiting for us to drop reference? */
                if (unlikely(!module_is_live(module)))
                        wake_up_process(module->waiter);
                preempt_enable();
        }
}
EXPORT_SYMBOL(module_put);
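
/*
 * Canonical caller pattern for the pair above (illustrative sketch):
 *
 *      if (!try_module_get(owner))
 *              return -ENODEV;         (owner is dying, refuse new users)
 *      ... call into owner's code ...
 *      module_put(owner);
 *
 * __module_get() skips the liveness check and is only safe while the
 * module is already known to be live, e.g. from code the module itself
 * is currently executing.
 */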

#else /* !CONFIG_MODULE_UNLOAD */
static inline void print_unload_info(struct seq_file *m, struct module *mod)
{
        /* We don't know the usage count, or what modules are using. */
        seq_printf(m, " - -");
}

static inline void module_unload_free(struct module *mod)
{
}

int ref_module(struct module *a, struct module *b)
{
        return strong_try_module_get(b);
}
EXPORT_SYMBOL_GPL(ref_module);

static inline int module_unload_init(struct module *mod)
{
        return 0;
}
#endif /* CONFIG_MODULE_UNLOAD */

static size_t module_flags_taint(struct module *mod, char *buf)
{
        size_t l = 0;

        if (mod->taints & (1 << TAINT_PROPRIETARY_MODULE))
                buf[l++] = 'P';
        if (mod->taints & (1 << TAINT_OOT_MODULE))
                buf[l++] = 'O';
        if (mod->taints & (1 << TAINT_FORCED_MODULE))
                buf[l++] = 'F';
        if (mod->taints & (1 << TAINT_CRAP))
                buf[l++] = 'C';
        /*
         * TAINT_FORCED_RMMOD: could be added.
         * TAINT_UNSAFE_SMP, TAINT_MACHINE_CHECK, TAINT_BAD_PAGE don't
         * apply to modules.
         */
        return l;
}

static ssize_t show_initstate(struct module_attribute *mattr,
                              struct module_kobject *mk, char *buffer)
{
        const char *state = "unknown";

        switch (mk->mod->state) {
        case MODULE_STATE_LIVE:
                state = "live";
                break;
        case MODULE_STATE_COMING:
                state = "coming";
                break;
        case MODULE_STATE_GOING:
                state = "going";
                break;
        default:
                BUG();
        }
        return sprintf(buffer, "%s\n", state);
}

static struct module_attribute modinfo_initstate =
        __ATTR(initstate, 0444, show_initstate, NULL);

static ssize_t store_uevent(struct module_attribute *mattr,
                            struct module_kobject *mk,
                            const char *buffer, size_t count)
{
        enum kobject_action action;

        if (kobject_action_type(buffer, count, &action) == 0)
                kobject_uevent(&mk->kobj, action);
        return count;
}

struct module_attribute module_uevent =
        __ATTR(uevent, 0200, NULL, store_uevent);

static ssize_t show_coresize(struct module_attribute *mattr,
                             struct module_kobject *mk, char *buffer)
{
        return sprintf(buffer, "%u\n", mk->mod->core_size);
}

static struct module_attribute modinfo_coresize =
        __ATTR(coresize, 0444, show_coresize, NULL);

static ssize_t show_initsize(struct module_attribute *mattr,
                             struct module_kobject *mk, char *buffer)
{
        return sprintf(buffer, "%u\n", mk->mod->init_size);
}

static struct module_attribute modinfo_initsize =
        __ATTR(initsize, 0444, show_initsize, NULL);

static ssize_t show_taint(struct module_attribute *mattr,
                          struct module_kobject *mk, char *buffer)
{
        size_t l;

        l = module_flags_taint(mk->mod, buffer);
        buffer[l++] = '\n';
        return l;
}

static struct module_attribute modinfo_taint =
        __ATTR(taint, 0444, show_taint, NULL);

static struct module_attribute *modinfo_attrs[] = {
        &module_uevent,
        &modinfo_version,
        &modinfo_srcversion,
        &modinfo_initstate,
        &modinfo_coresize,
        &modinfo_initsize,
        &modinfo_taint,
#ifdef CONFIG_MODULE_UNLOAD
        &modinfo_refcnt,
#endif
        NULL,
};
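
/*
 * Everything in this NULL-terminated table becomes a file under
 * /sys/module/<name>/ (uevent, version, srcversion, initstate, coresize,
 * initsize, taint, and refcnt when unloading is configured).
 */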

static const char vermagic[] = VERMAGIC_STRING;

static int try_to_force_load(struct module *mod, const char *reason)
{
#ifdef CONFIG_MODULE_FORCE_LOAD
        if (!test_taint(TAINT_FORCED_MODULE))
                printk(KERN_WARNING "%s: %s: kernel tainted.\n",
                       mod->name, reason);
        add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_NOW_UNRELIABLE);
        return 0;
#else
        return -ENOEXEC;
#endif
}

#ifdef CONFIG_MODVERSIONS
/* If the arch applies (non-zero) relocations to kernel kcrctab, unapply it. */
static unsigned long maybe_relocated(unsigned long crc,
                                     const struct module *crc_owner)
{
#ifdef ARCH_RELOCATES_KCRCTAB
        if (crc_owner == NULL)
                return crc - (unsigned long)reloc_start;
#endif
        return crc;
}

static int check_version(Elf_Shdr *sechdrs,
                         unsigned int versindex,
                         const char *symname,
                         struct module *mod,
                         const unsigned long *crc,
                         const struct module *crc_owner)
{
        unsigned int i, num_versions;
        struct modversion_info *versions;

        /* Exporting module didn't supply crcs?  OK, we're already tainted. */
        if (!crc)
                return 1;

        /* No versions at all?  modprobe --force does this. */
        if (versindex == 0)
                return try_to_force_load(mod, symname) == 0;

        versions = (void *) sechdrs[versindex].sh_addr;
        num_versions = sechdrs[versindex].sh_size
                / sizeof(struct modversion_info);

        for (i = 0; i < num_versions; i++) {
                if (strcmp(versions[i].name, symname) != 0)
                        continue;

                if (versions[i].crc == maybe_relocated(*crc, crc_owner))
                        return 1;
                pr_debug("Found checksum %lX vs module %lX\n",
                         maybe_relocated(*crc, crc_owner), versions[i].crc);
                goto bad_version;
        }

        printk(KERN_WARNING "%s: no symbol version for %s\n",
               mod->name, symname);
        return 0;

bad_version:
        printk("%s: disagrees about version of symbol %s\n",
               mod->name, symname);
        return 0;
}
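
/*
 * The __versions table scanned above is emitted into each module by
 * modpost; every modversion_info entry pairs a symbol name with the CRC
 * of its prototype, roughly:
 *
 *      { 0x12345678, "module_layout" },
 *      { 0x9abcdef0, "printk" },
 *      ...
 *
 * (CRC values here are made up for illustration.)
 */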

static inline int check_modstruct_version(Elf_Shdr *sechdrs,
                                          unsigned int versindex,
                                          struct module *mod)
{
        const unsigned long *crc;

        /* Since this should be found in kernel (which can't be removed),
         * no locking is necessary. */
        if (!find_symbol(VMLINUX_SYMBOL_STR(module_layout), NULL,
                         &crc, true, false))
                BUG();
        return check_version(sechdrs, versindex,
                             VMLINUX_SYMBOL_STR(module_layout), mod, crc,
                             NULL);
}

/* First part is kernel version, which we ignore if module has crcs. */
static inline int same_magic(const char *amagic, const char *bmagic,
                             bool has_crcs)
{
        if (has_crcs) {
                amagic += strcspn(amagic, " ");
                bmagic += strcspn(bmagic, " ");
        }
        return strcmp(amagic, bmagic) == 0;
}
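
/*
 * A vermagic string looks roughly like "3.11.0 SMP mod_unload modversions ";
 * everything up to the first space is the kernel release, which is skipped
 * above when CRCs are available, so only the feature flags must match.
 */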

#else
static inline int check_version(Elf_Shdr *sechdrs,
                                unsigned int versindex,
                                const char *symname,
                                struct module *mod,
                                const unsigned long *crc,
                                const struct module *crc_owner)
{
        return 1;
}

static inline int check_modstruct_version(Elf_Shdr *sechdrs,
                                          unsigned int versindex,
                                          struct module *mod)
{
        return 1;
}

static inline int same_magic(const char *amagic, const char *bmagic,
                             bool has_crcs)
{
        return strcmp(amagic, bmagic) == 0;
}
#endif /* CONFIG_MODVERSIONS */

/* Resolve a symbol for this module.  I.e. if we find one, record usage. */
static const struct kernel_symbol *resolve_symbol(struct module *mod,
                                                  const struct load_info *info,
                                                  const char *name,
                                                  char ownername[])
{
        struct module *owner;
        const struct kernel_symbol *sym;
        const unsigned long *crc;
        int err;

        mutex_lock(&module_mutex);
        sym = find_symbol(name, &owner, &crc,
                          !(mod->taints & (1 << TAINT_PROPRIETARY_MODULE)), true);
        if (!sym)
                goto unlock;

        if (!check_version(info->sechdrs, info->index.vers, name, mod, crc,
                           owner)) {
                sym = ERR_PTR(-EINVAL);
                goto getname;
        }

        err = ref_module(mod, owner);
        if (err) {
                sym = ERR_PTR(err);
                goto getname;
        }

getname:
        /* We must make copy under the lock if we failed to get ref. */
        strncpy(ownername, module_name(owner), MODULE_NAME_LEN);
unlock:
        mutex_unlock(&module_mutex);
        return sym;
}

static const struct kernel_symbol *
resolve_symbol_wait(struct module *mod,
                    const struct load_info *info,
                    const char *name)
{
        const struct kernel_symbol *ksym;
        char owner[MODULE_NAME_LEN];

        if (wait_event_interruptible_timeout(module_wq,
                        !IS_ERR(ksym = resolve_symbol(mod, info, name, owner))
                        || PTR_ERR(ksym) != -EBUSY,
                        30 * HZ) <= 0) {
                printk(KERN_WARNING "%s: gave up waiting for init of module %s.\n",
                       mod->name, owner);
        }
        return ksym;
}
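
/*
 * resolve_symbol() returns -EBUSY while the exporting module is still
 * MODULE_STATE_COMING (see strong_try_module_get()), so this wrapper
 * retries on module_wq for up to 30 seconds before giving up, covering
 * the window where a dependency is loaded but not yet fully initialized.
 */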

/*
 * /sys/module/foo/sections stuff
 * J. Corbet <corbet@lwn.net>
 */
#ifdef CONFIG_SYSFS

#ifdef CONFIG_KALLSYMS
static inline bool sect_empty(const Elf_Shdr *sect)
{
        return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
}

struct module_sect_attr
{
        struct module_attribute mattr;
        char *name;
        unsigned long address;
};

struct module_sect_attrs
{
        struct attribute_group grp;
        unsigned int nsections;
        struct module_sect_attr attrs[0];
};

static ssize_t module_sect_show(struct module_attribute *mattr,
                                struct module_kobject *mk, char *buf)
{
        struct module_sect_attr *sattr =
                container_of(mattr, struct module_sect_attr, mattr);
        return sprintf(buf, "0x%pK\n", (void *)sattr->address);
}

static void free_sect_attrs(struct module_sect_attrs *sect_attrs)
{
        unsigned int section;

        for (section = 0; section < sect_attrs->nsections; section++)
                kfree(sect_attrs->attrs[section].name);
        kfree(sect_attrs);
}

static void add_sect_attrs(struct module *mod, const struct load_info *info)
{
        unsigned int nloaded = 0, i, size[2];
        struct module_sect_attrs *sect_attrs;
        struct module_sect_attr *sattr;
        struct attribute **gattr;

        /* Count loaded sections and allocate structures */
        for (i = 0; i < info->hdr->e_shnum; i++)
                if (!sect_empty(&info->sechdrs[i]))
                        nloaded++;
        size[0] = ALIGN(sizeof(*sect_attrs)
                        + nloaded * sizeof(sect_attrs->attrs[0]),
                        sizeof(sect_attrs->grp.attrs[0]));
        size[1] = (nloaded + 1) * sizeof(sect_attrs->grp.attrs[0]);
        sect_attrs = kzalloc(size[0] + size[1], GFP_KERNEL);
        if (sect_attrs == NULL)
                return;

        /* Setup section attributes. */
        sect_attrs->grp.name = "sections";
        sect_attrs->grp.attrs = (void *)sect_attrs + size[0];

        sect_attrs->nsections = 0;
        sattr = &sect_attrs->attrs[0];
        gattr = &sect_attrs->grp.attrs[0];
        for (i = 0; i < info->hdr->e_shnum; i++) {
                Elf_Shdr *sec = &info->sechdrs[i];
                if (sect_empty(sec))
                        continue;
                sattr->address = sec->sh_addr;
                sattr->name = kstrdup(info->secstrings + sec->sh_name,
                                      GFP_KERNEL);
                if (sattr->name == NULL)
                        goto out;
                sect_attrs->nsections++;
                sysfs_attr_init(&sattr->mattr.attr);
                sattr->mattr.show = module_sect_show;
                sattr->mattr.store = NULL;
                sattr->mattr.attr.name = sattr->name;
                sattr->mattr.attr.mode = S_IRUGO;
                *(gattr++) = &(sattr++)->mattr.attr;
        }
        *gattr = NULL;

        if (sysfs_create_group(&mod->mkobj.kobj, &sect_attrs->grp))
                goto out;

        mod->sect_attrs = sect_attrs;
        return;
out:
        free_sect_attrs(sect_attrs);
}

static void remove_sect_attrs(struct module *mod)
{
        if (mod->sect_attrs) {
                sysfs_remove_group(&mod->mkobj.kobj,
                                   &mod->sect_attrs->grp);
                /* We are positive that no one is using any sect attrs
                 * at this point.  Deallocate immediately. */
                free_sect_attrs(mod->sect_attrs);
                mod->sect_attrs = NULL;
        }
}

/*
 * /sys/module/foo/notes/.section.name gives contents of SHT_NOTE sections.
 */

struct module_notes_attrs {
        struct kobject *dir;
        unsigned int notes;
        struct bin_attribute attrs[0];
};

static ssize_t module_notes_read(struct file *filp, struct kobject *kobj,
                                 struct bin_attribute *bin_attr,
                                 char *buf, loff_t pos, size_t count)
{
        /*
         * The caller checked the pos and count against our size.
         */
        memcpy(buf, bin_attr->private + pos, count);
        return count;
}

static void free_notes_attrs(struct module_notes_attrs *notes_attrs,
                             unsigned int i)
{
        if (notes_attrs->dir) {
                while (i-- > 0)
                        sysfs_remove_bin_file(notes_attrs->dir,
                                              &notes_attrs->attrs[i]);
                kobject_put(notes_attrs->dir);
        }
        kfree(notes_attrs);
}

static void add_notes_attrs(struct module *mod, const struct load_info *info)
{
        unsigned int notes, loaded, i;
        struct module_notes_attrs *notes_attrs;
        struct bin_attribute *nattr;

        /* failed to create section attributes, so can't create notes */
        if (!mod->sect_attrs)
                return;

        /* Count notes sections and allocate structures.  */
        notes = 0;
        for (i = 0; i < info->hdr->e_shnum; i++)
                if (!sect_empty(&info->sechdrs[i]) &&
                    (info->sechdrs[i].sh_type == SHT_NOTE))
                        ++notes;

        if (notes == 0)
                return;

        notes_attrs = kzalloc(sizeof(*notes_attrs)
                              + notes * sizeof(notes_attrs->attrs[0]),
                              GFP_KERNEL);
        if (notes_attrs == NULL)
                return;

        notes_attrs->notes = notes;
        nattr = &notes_attrs->attrs[0];
        for (loaded = i = 0; i < info->hdr->e_shnum; ++i) {
                if (sect_empty(&info->sechdrs[i]))
                        continue;
                if (info->sechdrs[i].sh_type == SHT_NOTE) {
                        sysfs_bin_attr_init(nattr);
                        nattr->attr.name = mod->sect_attrs->attrs[loaded].name;
                        nattr->attr.mode = S_IRUGO;
                        nattr->size = info->sechdrs[i].sh_size;
                        nattr->private = (void *) info->sechdrs[i].sh_addr;
                        nattr->read = module_notes_read;
                        ++nattr;
                }
                ++loaded;
        }

        notes_attrs->dir = kobject_create_and_add("notes", &mod->mkobj.kobj);
        if (!notes_attrs->dir)
                goto out;

        for (i = 0; i < notes; ++i)
                if (sysfs_create_bin_file(notes_attrs->dir,
                                          &notes_attrs->attrs[i]))
                        goto out;

        mod->notes_attrs = notes_attrs;
        return;

out:
        free_notes_attrs(notes_attrs, i);
}

static void remove_notes_attrs(struct module *mod)
{
        if (mod->notes_attrs)
                free_notes_attrs(mod->notes_attrs, mod->notes_attrs->notes);
}

#else

static inline void add_sect_attrs(struct module *mod,
                                  const struct load_info *info)
{
}

static inline void remove_sect_attrs(struct module *mod)
{
}

static inline void add_notes_attrs(struct module *mod,
                                   const struct load_info *info)
{
}

static inline void remove_notes_attrs(struct module *mod)
{
}
#endif /* CONFIG_KALLSYMS */

static void add_usage_links(struct module *mod)
{
#ifdef CONFIG_MODULE_UNLOAD
        struct module_use *use;
        int nowarn;

        mutex_lock(&module_mutex);
        list_for_each_entry(use, &mod->target_list, target_list) {
                nowarn = sysfs_create_link(use->target->holders_dir,
                                           &mod->mkobj.kobj, mod->name);
        }
        mutex_unlock(&module_mutex);
#endif
}

static void del_usage_links(struct module *mod)
{
#ifdef CONFIG_MODULE_UNLOAD
        struct module_use *use;

        mutex_lock(&module_mutex);
        list_for_each_entry(use, &mod->target_list, target_list)
                sysfs_remove_link(use->target->holders_dir, mod->name);
        mutex_unlock(&module_mutex);
#endif
}

static int module_add_modinfo_attrs(struct module *mod)
{
        struct module_attribute *attr;
        struct module_attribute *temp_attr;
        int error = 0;
        int i;

        mod->modinfo_attrs = kzalloc((sizeof(struct module_attribute) *
                                        (ARRAY_SIZE(modinfo_attrs) + 1)),
                                        GFP_KERNEL);
        if (!mod->modinfo_attrs)
                return -ENOMEM;

        temp_attr = mod->modinfo_attrs;
        for (i = 0; (attr = modinfo_attrs[i]) && !error; i++) {
                if (!attr->test ||
                    (attr->test && attr->test(mod))) {
                        memcpy(temp_attr, attr, sizeof(*temp_attr));
                        sysfs_attr_init(&temp_attr->attr);
                        error = sysfs_create_file(&mod->mkobj.kobj, &temp_attr->attr);
                        ++temp_attr;
                }
        }
        return error;
}

static void module_remove_modinfo_attrs(struct module *mod)
{
        struct module_attribute *attr;
        int i;

        for (i = 0; (attr = &mod->modinfo_attrs[i]); i++) {
                /* pick a field to test for end of list */
                if (!attr->attr.name)
                        break;
                sysfs_remove_file(&mod->mkobj.kobj, &attr->attr);
                if (attr->free)
                        attr->free(mod);
        }
        kfree(mod->modinfo_attrs);
}

static int mod_sysfs_init(struct module *mod)
{
        int err;
        struct kobject *kobj;

        if (!module_sysfs_initialized) {
                printk(KERN_ERR "%s: module sysfs not initialized\n",
                       mod->name);
                err = -EINVAL;
                goto out;
        }

        kobj = kset_find_obj(module_kset, mod->name);
        if (kobj) {
                printk(KERN_ERR "%s: module is already loaded\n", mod->name);
                kobject_put(kobj);
                err = -EINVAL;
                goto out;
        }

        mod->mkobj.mod = mod;

        memset(&mod->mkobj.kobj, 0, sizeof(mod->mkobj.kobj));
        mod->mkobj.kobj.kset = module_kset;
        err = kobject_init_and_add(&mod->mkobj.kobj, &module_ktype, NULL,
                                   "%s", mod->name);
        if (err)
                kobject_put(&mod->mkobj.kobj);

        /* delay uevent until full sysfs population */
out:
        return err;
}

static int mod_sysfs_setup(struct module *mod,
                           const struct load_info *info,
                           struct kernel_param *kparam,
                           unsigned int num_params)
{
        int err;

        err = mod_sysfs_init(mod);
        if (err)
                goto out;

        mod->holders_dir = kobject_create_and_add("holders", &mod->mkobj.kobj);
        if (!mod->holders_dir) {
                err = -ENOMEM;
                goto out_unreg;
        }

        err = module_param_sysfs_setup(mod, kparam, num_params);
        if (err)
                goto out_unreg_holders;

        err = module_add_modinfo_attrs(mod);
        if (err)
                goto out_unreg_param;

        add_usage_links(mod);
        add_sect_attrs(mod, info);
        add_notes_attrs(mod, info);

        kobject_uevent(&mod->mkobj.kobj, KOBJ_ADD);
        return 0;

out_unreg_param:
        module_param_sysfs_remove(mod);
out_unreg_holders:
        kobject_put(mod->holders_dir);
out_unreg:
        kobject_put(&mod->mkobj.kobj);
out:
        return err;
}
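
/*
 * Note the error labels above unwind the successfully completed setup
 * steps in exact reverse order (params, then the holders dir, then the
 * kobject itself), so a failure at any stage leaves no stale sysfs
 * entries behind.
 */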

static void mod_sysfs_fini(struct module *mod)
{
        remove_notes_attrs(mod);
        remove_sect_attrs(mod);
        kobject_put(&mod->mkobj.kobj);
}

#else /* !CONFIG_SYSFS */

static int mod_sysfs_setup(struct module *mod,
                           const struct load_info *info,
                           struct kernel_param *kparam,
                           unsigned int num_params)
{
        return 0;
}

static void mod_sysfs_fini(struct module *mod)
{
}

static void module_remove_modinfo_attrs(struct module *mod)
{
}

static void del_usage_links(struct module *mod)
{
}

#endif /* CONFIG_SYSFS */

static void mod_sysfs_teardown(struct module *mod)
{
        del_usage_links(mod);
        module_remove_modinfo_attrs(mod);
        module_param_sysfs_remove(mod);
        kobject_put(mod->mkobj.drivers_dir);
        kobject_put(mod->holders_dir);
        mod_sysfs_fini(mod);
}

/*
 * unlink the module while the whole machine is stopped with interrupts off
 *  - this defends against kallsyms not taking locks
 */
static int __unlink_module(void *_mod)
{
        struct module *mod = _mod;
        list_del(&mod->list);
        module_bug_cleanup(mod);
        return 0;
}

#ifdef CONFIG_DEBUG_SET_MODULE_RONX
/*
 * LKM RO/NX protection: protect module's text/ro-data
 * from modification and any data from execution.
 */
void set_page_attributes(void *start, void *end, int (*set)(unsigned long start, int num_pages))
{
        unsigned long begin_pfn = PFN_DOWN((unsigned long)start);
        unsigned long end_pfn = PFN_DOWN((unsigned long)end);

        if (end_pfn > begin_pfn)
                set(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn);
}

static void set_section_ro_nx(void *base,
                        unsigned long text_size,
                        unsigned long ro_size,
                        unsigned long total_size)
{
        /* begin and end PFNs of the current subsection */
        unsigned long begin_pfn;
        unsigned long end_pfn;

        /*
         * Set RO for module text and RO-data:
         * - Always protect first page.
         * - Do not protect last partial page.
         */
        if (ro_size > 0)
                set_page_attributes(base, base + ro_size, set_memory_ro);

        /*
         * Set NX permissions for module data:
         * - Do not protect first partial page.
         * - Always protect last page.
         */
        if (total_size > text_size) {
                begin_pfn = PFN_UP((unsigned long)base + text_size);
                end_pfn = PFN_UP((unsigned long)base + total_size);
                if (end_pfn > begin_pfn)
                        set_memory_nx(begin_pfn << PAGE_SHIFT, end_pfn - begin_pfn);
        }
}

static void unset_module_core_ro_nx(struct module *mod)
{
        set_page_attributes(mod->module_core + mod->core_text_size,
                mod->module_core + mod->core_size,
                set_memory_x);
        set_page_attributes(mod->module_core,
                mod->module_core + mod->core_ro_size,
                set_memory_rw);
}

static void unset_module_init_ro_nx(struct module *mod)
{
        set_page_attributes(mod->module_init + mod->init_text_size,
                mod->module_init + mod->init_size,
                set_memory_x);
        set_page_attributes(mod->module_init,
                mod->module_init + mod->init_ro_size,
                set_memory_rw);
}

/* Iterate through all modules and set each module's text as RW */
void set_all_modules_text_rw(void)
{
        struct module *mod;

        mutex_lock(&module_mutex);
        list_for_each_entry_rcu(mod, &modules, list) {
                if (mod->state == MODULE_STATE_UNFORMED)
                        continue;
                if ((mod->module_core) && (mod->core_text_size)) {
                        set_page_attributes(mod->module_core,
                                mod->module_core + mod->core_text_size,
                                set_memory_rw);
                }
                if ((mod->module_init) && (mod->init_text_size)) {
                        set_page_attributes(mod->module_init,
                                mod->module_init + mod->init_text_size,
                                set_memory_rw);
                }
        }
        mutex_unlock(&module_mutex);
}

/* Iterate through all modules and set each module's text as RO */
void set_all_modules_text_ro(void)
{
        struct module *mod;

        mutex_lock(&module_mutex);
        list_for_each_entry_rcu(mod, &modules, list) {
                if (mod->state == MODULE_STATE_UNFORMED)
                        continue;
                if ((mod->module_core) && (mod->core_text_size)) {
                        set_page_attributes(mod->module_core,
                                mod->module_core + mod->core_text_size,
                                set_memory_ro);
                }
                if ((mod->module_init) && (mod->init_text_size)) {
                        set_page_attributes(mod->module_init,
                                mod->module_init + mod->init_text_size,
                                set_memory_ro);
                }
        }
        mutex_unlock(&module_mutex);
}
#else
static inline void set_section_ro_nx(void *base, unsigned long text_size, unsigned long ro_size, unsigned long total_size) { }
static void unset_module_core_ro_nx(struct module *mod) { }
static void unset_module_init_ro_nx(struct module *mod) { }
#endif

void __weak module_free(struct module *mod, void *module_region)
{
        vfree(module_region);
}

void __weak module_arch_cleanup(struct module *mod)
{
}

/* Free a module, remove from lists, etc. */
static void free_module(struct module *mod)
{
        trace_module_free(mod);

        mod_sysfs_teardown(mod);

        /* We leave it in list to prevent duplicate loads, but make sure
         * that no one uses it while it's being deconstructed. */
        mod->state = MODULE_STATE_UNFORMED;

        /* Remove dynamic debug info */
        ddebug_remove_module(mod->name);

        /* Arch-specific cleanup. */
        module_arch_cleanup(mod);

        /* Module unload stuff */
        module_unload_free(mod);

        /* Free any allocated parameters. */
        destroy_params(mod->kp, mod->num_kp);

        /* Now we can delete it from the lists */
        mutex_lock(&module_mutex);
        stop_machine(__unlink_module, mod, NULL);
        mutex_unlock(&module_mutex);

        /* This may be NULL, but that's OK */
        unset_module_init_ro_nx(mod);
        module_free(mod, mod->module_init);
        kfree(mod->args);
        percpu_modfree(mod);

        /* Free lock-classes: */
        lockdep_free_key_range(mod->module_core, mod->core_size);

        /* Finally, free the core (containing the module structure) */
        unset_module_core_ro_nx(mod);
        module_free(mod, mod->module_core);

#ifdef CONFIG_MPU
        update_protections(current->mm);
#endif
}

void *__symbol_get(const char *symbol)
{
        struct module *owner;
        const struct kernel_symbol *sym;

        preempt_disable();
        sym = find_symbol(symbol, &owner, NULL, true, true);
        if (sym && strong_try_module_get(owner))
                sym = NULL;
        preempt_enable();

        return sym ? (void *)sym->value : NULL;
}
EXPORT_SYMBOL_GPL(__symbol_get);

/*
 * Ensure that an exported symbol [global namespace] does not already exist
 * in the kernel or in some other module's exported symbol table.
 *
 * You must hold the module_mutex.
 */
static int verify_export_symbols(struct module *mod)
{
        unsigned int i;
        struct module *owner;
        const struct kernel_symbol *s;
        struct {
                const struct kernel_symbol *sym;
                unsigned int num;
        } arr[] = {
                { mod->syms, mod->num_syms },
                { mod->gpl_syms, mod->num_gpl_syms },
                { mod->gpl_future_syms, mod->num_gpl_future_syms },
#ifdef CONFIG_UNUSED_SYMBOLS
                { mod->unused_syms, mod->num_unused_syms },
                { mod->unused_gpl_syms, mod->num_unused_gpl_syms },
#endif
        };

        for (i = 0; i < ARRAY_SIZE(arr); i++) {
                for (s = arr[i].sym; s < arr[i].sym + arr[i].num; s++) {
                        if (find_symbol(s->name, &owner, NULL, true, false)) {
                                printk(KERN_ERR
                                       "%s: exports duplicate symbol %s"
                                       " (owned by %s)\n",
                                       mod->name, s->name, module_name(owner));
                                return -ENOEXEC;
                        }
                }
        }
        return 0;
}

/* Change all symbols so that st_value encodes the pointer directly. */
static int simplify_symbols(struct module *mod, const struct load_info *info)
{
        Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
        Elf_Sym *sym = (void *)symsec->sh_addr;
        unsigned long secbase;
        unsigned int i;
        int ret = 0;
        const struct kernel_symbol *ksym;

        for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
                const char *name = info->strtab + sym[i].st_name;

                switch (sym[i].st_shndx) {
                case SHN_COMMON:
                        /* We compiled with -fno-common.  These are not
                           supposed to happen.  */
                        pr_debug("Common symbol: %s\n", name);
                        printk("%s: please compile with -fno-common\n",
                               mod->name);
                        ret = -ENOEXEC;
                        break;

                case SHN_ABS:
                        /* Don't need to do anything */
                        pr_debug("Absolute symbol: 0x%08lx\n",
                                 (long)sym[i].st_value);
                        break;

                case SHN_UNDEF:
                        ksym = resolve_symbol_wait(mod, info, name);
                        /* Ok if resolved.  */
                        if (ksym && !IS_ERR(ksym)) {
                                sym[i].st_value = ksym->value;
                                break;
                        }

                        /* Ok if weak.  */
                        if (!ksym && ELF_ST_BIND(sym[i].st_info) == STB_WEAK)
                                break;

                        printk(KERN_WARNING "%s: Unknown symbol %s (err %li)\n",
                               mod->name, name, PTR_ERR(ksym));
                        ret = PTR_ERR(ksym) ?: -ENOENT;
                        break;

                default:
                        /* Divert to percpu allocation if a percpu var. */
                        if (sym[i].st_shndx == info->index.pcpu)
                                secbase = (unsigned long)mod_percpu(mod);
                        else
                                secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
                        sym[i].st_value += secbase;
                        break;
                }
        }

        return ret;
}

static int apply_relocations(struct module *mod, const struct load_info *info)
{
        unsigned int i;
        int err = 0;

        /* Now do relocations. */
        for (i = 1; i < info->hdr->e_shnum; i++) {
                unsigned int infosec = info->sechdrs[i].sh_info;

                /* Not a valid relocation section? */
                if (infosec >= info->hdr->e_shnum)
                        continue;

                /* Don't bother with non-allocated sections */
                if (!(info->sechdrs[infosec].sh_flags & SHF_ALLOC))
                        continue;

                if (info->sechdrs[i].sh_type == SHT_REL)
                        err = apply_relocate(info->sechdrs, info->strtab,
                                             info->index.sym, i, mod);
                else if (info->sechdrs[i].sh_type == SHT_RELA)
                        err = apply_relocate_add(info->sechdrs, info->strtab,
                                                 info->index.sym, i, mod);
                if (err < 0)
                        break;
        }
        return err;
}

/* Additional bytes needed by arch in front of individual sections */
unsigned int __weak arch_mod_section_prepend(struct module *mod,
                                             unsigned int section)
{
        /* default implementation just returns zero */
        return 0;
}

/* Update size with this section: return offset. */
static long get_offset(struct module *mod, unsigned int *size,
                       Elf_Shdr *sechdr, unsigned int section)
{
        long ret;

        *size += arch_mod_section_prepend(mod, section);
        ret = ALIGN(*size, sechdr->sh_addralign ?: 1);
        *size = ret + sechdr->sh_size;
        return ret;
}
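
/*
 * Example: if *size is currently 10 and the incoming section has
 * sh_addralign == 8 and sh_size == 100, the section is placed at offset
 * ALIGN(10, 8) == 16 and *size becomes 116.
 */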
2075 /* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
2076 might -- code, read-only data, read-write data, small data. Tally
2077 sizes, and place the offsets into sh_entsize fields: high bit means it
2078 belongs in init. */
2079 static void layout_sections(struct module *mod, struct load_info *info)
2081 static unsigned long const masks[][2] = {
2082 /* NOTE: all executable code must be the first section
2083 * in this array; otherwise modify the text_size
2084 * finder in the two loops below */
2085 { SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL },
2086 { SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL },
2087 { SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL },
2088 { ARCH_SHF_SMALL | SHF_ALLOC, 0 }
2090 unsigned int m, i;
2092 for (i = 0; i < info->hdr->e_shnum; i++)
2093 info->sechdrs[i].sh_entsize = ~0UL;
2095 pr_debug("Core section allocation order:\n");
2096 for (m = 0; m < ARRAY_SIZE(masks); ++m) {
2097 for (i = 0; i < info->hdr->e_shnum; ++i) {
2098 Elf_Shdr *s = &info->sechdrs[i];
2099 const char *sname = info->secstrings + s->sh_name;
2101 if ((s->sh_flags & masks[m][0]) != masks[m][0]
2102 || (s->sh_flags & masks[m][1])
2103 || s->sh_entsize != ~0UL
2104 || strstarts(sname, ".init"))
2105 continue;
2106 s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
2107 pr_debug("\t%s\n", sname);
2109 switch (m) {
2110 case 0: /* executable */
2111 mod->core_size = debug_align(mod->core_size);
2112 mod->core_text_size = mod->core_size;
2113 break;
2114 case 1: /* RO: text and ro-data */
2115 mod->core_size = debug_align(mod->core_size);
2116 mod->core_ro_size = mod->core_size;
2117 break;
2118 case 3: /* whole core */
2119 mod->core_size = debug_align(mod->core_size);
2120 break;
2124 pr_debug("Init section allocation order:\n");
2125 for (m = 0; m < ARRAY_SIZE(masks); ++m) {
2126 for (i = 0; i < info->hdr->e_shnum; ++i) {
2127 Elf_Shdr *s = &info->sechdrs[i];
2128 const char *sname = info->secstrings + s->sh_name;
2130 if ((s->sh_flags & masks[m][0]) != masks[m][0]
2131 || (s->sh_flags & masks[m][1])
2132 || s->sh_entsize != ~0UL
2133 || !strstarts(sname, ".init"))
2134 continue;
2135 s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
2136 | INIT_OFFSET_MASK);
2137 pr_debug("\t%s\n", sname);
2139 switch (m) {
2140 case 0: /* executable */
2141 mod->init_size = debug_align(mod->init_size);
2142 mod->init_text_size = mod->init_size;
2143 break;
2144 case 1: /* RO: text and ro-data */
2145 mod->init_size = debug_align(mod->init_size);
2146 mod->init_ro_size = mod->init_size;
2147 break;
2148 case 3: /* whole init */
2149 mod->init_size = debug_align(mod->init_size);
2150 break;
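/*
 * Resulting core layout (a sketch; with CONFIG_DEBUG_SET_MODULE_RONX=y
 * each boundary below is additionally page-aligned via debug_align()):
 *
 *	module_core
 *	+--------------------+ <- offset 0
 *	| executable text    |
 *	+--------------------+ <- core_text_size
 *	| read-only data     |
 *	+--------------------+ <- core_ro_size
 *	| read-write data,   |
 *	| small sections     |
 *	+--------------------+ <- core_size
 *
 * The ".init" sections are laid out the same way into init_size, with
 * INIT_OFFSET_MASK set on their offsets.
 */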
2155 static void set_license(struct module *mod, const char *license)
2157 if (!license)
2158 license = "unspecified";
2160 if (!license_is_gpl_compatible(license)) {
2161 if (!test_taint(TAINT_PROPRIETARY_MODULE))
2162 printk(KERN_WARNING "%s: module license '%s' taints "
2163 "kernel.\n", mod->name, license);
2164 add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
2165 LOCKDEP_NOW_UNRELIABLE);
2169 /* Parse tag=value strings from .modinfo section */
2170 static char *next_string(char *string, unsigned long *secsize)
2172 /* Skip non-zero chars */
2173 while (string[0]) {
2174 string++;
2175 if ((*secsize)-- <= 1)
2176 return NULL;
2179 /* Skip any zero padding. */
2180 while (!string[0]) {
2181 string++;
2182 if ((*secsize)-- <= 1)
2183 return NULL;
2185 return string;
2188 static char *get_modinfo(struct load_info *info, const char *tag)
2190 char *p;
2191 unsigned int taglen = strlen(tag);
2192 Elf_Shdr *infosec = &info->sechdrs[info->index.info];
2193 unsigned long size = infosec->sh_size;
2195 for (p = (char *)infosec->sh_addr; p; p = next_string(p, &size)) {
2196 if (strncmp(p, tag, taglen) == 0 && p[taglen] == '=')
2197 return p + taglen + 1;
2199 return NULL;
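/*
 * The .modinfo section is a run of NUL-terminated "tag=value" strings,
 * e.g. (illustrative contents):
 *
 *	"license=GPL\0author=Jane Doe\0description=Example driver\0"
 *
 * With that data, get_modinfo(info, "license") returns a pointer to
 * the string "GPL".
 */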
2202 static void setup_modinfo(struct module *mod, struct load_info *info)
2204 struct module_attribute *attr;
2205 int i;
2207 for (i = 0; (attr = modinfo_attrs[i]); i++) {
2208 if (attr->setup)
2209 attr->setup(mod, get_modinfo(info, attr->attr.name));
2213 static void free_modinfo(struct module *mod)
2215 struct module_attribute *attr;
2216 int i;
2218 for (i = 0; (attr = modinfo_attrs[i]); i++) {
2219 if (attr->free)
2220 attr->free(mod);
2224 #ifdef CONFIG_KALLSYMS
2226 /* lookup symbol in given range of kernel_symbols */
2227 static const struct kernel_symbol *lookup_symbol(const char *name,
2228 const struct kernel_symbol *start,
2229 const struct kernel_symbol *stop)
2231 return bsearch(name, start, stop - start,
2232 sizeof(struct kernel_symbol), cmp_name);
2235 static int is_exported(const char *name, unsigned long value,
2236 const struct module *mod)
2238 const struct kernel_symbol *ks;
2239 if (!mod)
2240 ks = lookup_symbol(name, __start___ksymtab, __stop___ksymtab);
2241 else
2242 ks = lookup_symbol(name, mod->syms, mod->syms + mod->num_syms);
2243 return ks != NULL && ks->value == value;
2246 /* As per nm */
2247 static char elf_type(const Elf_Sym *sym, const struct load_info *info)
2249 const Elf_Shdr *sechdrs = info->sechdrs;
2251 if (ELF_ST_BIND(sym->st_info) == STB_WEAK) {
2252 if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT)
2253 return 'v';
2254 else
2255 return 'w';
2257 if (sym->st_shndx == SHN_UNDEF)
2258 return 'U';
2259 if (sym->st_shndx == SHN_ABS)
2260 return 'a';
2261 if (sym->st_shndx >= SHN_LORESERVE)
2262 return '?';
2263 if (sechdrs[sym->st_shndx].sh_flags & SHF_EXECINSTR)
2264 return 't';
2265 if (sechdrs[sym->st_shndx].sh_flags & SHF_ALLOC
2266 && sechdrs[sym->st_shndx].sh_type != SHT_NOBITS) {
2267 if (!(sechdrs[sym->st_shndx].sh_flags & SHF_WRITE))
2268 return 'r';
2269 else if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2270 return 'g';
2271 else
2272 return 'd';
2274 if (sechdrs[sym->st_shndx].sh_type == SHT_NOBITS) {
2275 if (sechdrs[sym->st_shndx].sh_flags & ARCH_SHF_SMALL)
2276 return 's';
2277 else
2278 return 'b';
2280 if (strstarts(info->secstrings + sechdrs[sym->st_shndx].sh_name,
2281 ".debug")) {
2282 return 'n';
2284 return '?';
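/*
 * Legend for the nm(1)-style characters returned above:
 *
 *	'v'/'w'	weak object / weak symbol	'U'	undefined
 *	'a'	absolute			't'	text
 *	'r'	read-only data			'g'	small initialized data
 *	'd'	initialized data		's'	small BSS
 *	'b'	BSS				'n'	debug sections
 *	'?'	anything else (e.g. SHN_LORESERVE and above)
 */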
2287 static bool is_core_symbol(const Elf_Sym *src, const Elf_Shdr *sechdrs,
2288 unsigned int shnum)
2290 const Elf_Shdr *sec;
2292 if (src->st_shndx == SHN_UNDEF
2293 || src->st_shndx >= shnum
2294 || !src->st_name)
2295 return false;
2297 sec = sechdrs + src->st_shndx;
2298 if (!(sec->sh_flags & SHF_ALLOC)
2299 #ifndef CONFIG_KALLSYMS_ALL
2300 || !(sec->sh_flags & SHF_EXECINSTR)
2301 #endif
2302 || (sec->sh_entsize & INIT_OFFSET_MASK))
2303 return false;
2305 return true;
2309 * We only allocate and copy the strings needed by the parts of symtab
2310 * we keep. This is simple, but has the effect of making multiple
2311 * copies of duplicates. We could be more sophisticated, see
2312 * linux-kernel thread starting with
2313 * <73defb5e4bca04a6431392cc341112b1@localhost>.
2315 static void layout_symtab(struct module *mod, struct load_info *info)
2317 Elf_Shdr *symsect = info->sechdrs + info->index.sym;
2318 Elf_Shdr *strsect = info->sechdrs + info->index.str;
2319 const Elf_Sym *src;
2320 unsigned int i, nsrc, ndst, strtab_size = 0;
2322 /* Put symbol section at end of init part of module. */
2323 symsect->sh_flags |= SHF_ALLOC;
2324 symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
2325 info->index.sym) | INIT_OFFSET_MASK;
2326 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
2328 src = (void *)info->hdr + symsect->sh_offset;
2329 nsrc = symsect->sh_size / sizeof(*src);
2331 /* Compute total space required for the core symbols' strtab. */
2332 for (ndst = i = 0; i < nsrc; i++) {
2333 if (i == 0 ||
2334 is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
2335 strtab_size += strlen(&info->strtab[src[i].st_name])+1;
2336 ndst++;
2340 /* Append room for core symbols at end of core part. */
2341 info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
2342 info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
2343 mod->core_size += strtab_size;
2345 /* Put string table section at end of init part of module. */
2346 strsect->sh_flags |= SHF_ALLOC;
2347 strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
2348 info->index.str) | INIT_OFFSET_MASK;
2349 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
2352 static void add_kallsyms(struct module *mod, const struct load_info *info)
2354 unsigned int i, ndst;
2355 const Elf_Sym *src;
2356 Elf_Sym *dst;
2357 char *s;
2358 Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
2360 mod->symtab = (void *)symsec->sh_addr;
2361 mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
2362 /* Make sure we get permanent strtab: don't use info->strtab. */
2363 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
2365 /* Set types up while we still have access to sections. */
2366 for (i = 0; i < mod->num_symtab; i++)
2367 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
2369 mod->core_symtab = dst = mod->module_core + info->symoffs;
2370 mod->core_strtab = s = mod->module_core + info->stroffs;
2371 src = mod->symtab;
2372 for (ndst = i = 0; i < mod->num_symtab; i++) {
2373 if (i == 0 ||
2374 is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum)) {
2375 dst[ndst] = src[i];
2376 dst[ndst++].st_name = s - mod->core_strtab;
2377 s += strlcpy(s, &mod->strtab[src[i].st_name],
2378 KSYM_NAME_LEN) + 1;
2381 mod->core_num_syms = ndst;
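/*
 * After add_kallsyms() the module carries two symbol tables: the full
 * one living in the init region, and the compacted core copy built
 * above, holding only the symbols is_core_symbol() accepted plus their
 * strings. do_init_module() switches mod->symtab over to the core copy
 * just before the init region is freed.
 */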
2383 #else
2384 static inline void layout_symtab(struct module *mod, struct load_info *info)
2388 static void add_kallsyms(struct module *mod, const struct load_info *info)
2391 #endif /* CONFIG_KALLSYMS */
2393 static void dynamic_debug_setup(struct _ddebug *debug, unsigned int num)
2395 if (!debug)
2396 return;
2397 #ifdef CONFIG_DYNAMIC_DEBUG
2398 if (ddebug_add_module(debug, num, debug->modname))
2399 printk(KERN_ERR "dynamic debug error adding module: %s\n",
2400 debug->modname);
2401 #endif
2404 static void dynamic_debug_remove(struct _ddebug *debug)
2406 if (debug)
2407 ddebug_remove_module(debug->modname);
2410 void * __weak module_alloc(unsigned long size)
2412 return vmalloc_exec(size);
2415 static void *module_alloc_update_bounds(unsigned long size)
2417 void *ret = module_alloc(size);
2419 if (ret) {
2420 mutex_lock(&module_mutex);
2421 /* Update module bounds. */
2422 if ((unsigned long)ret < module_addr_min)
2423 module_addr_min = (unsigned long)ret;
2424 if ((unsigned long)ret + size > module_addr_max)
2425 module_addr_max = (unsigned long)ret + size;
2426 mutex_unlock(&module_mutex);
2428 return ret;
2431 #ifdef CONFIG_DEBUG_KMEMLEAK
2432 static void kmemleak_load_module(const struct module *mod,
2433 const struct load_info *info)
2435 unsigned int i;
2437 /* only scan the sections containing data */
2438 kmemleak_scan_area(mod, sizeof(struct module), GFP_KERNEL);
2440 for (i = 1; i < info->hdr->e_shnum; i++) {
2441 /* Scan all writable sections that are not executable */
2442 if (!(info->sechdrs[i].sh_flags & SHF_ALLOC) ||
2443 !(info->sechdrs[i].sh_flags & SHF_WRITE) ||
2444 (info->sechdrs[i].sh_flags & SHF_EXECINSTR))
2445 continue;
2447 kmemleak_scan_area((void *)info->sechdrs[i].sh_addr,
2448 info->sechdrs[i].sh_size, GFP_KERNEL);
2451 #else
2452 static inline void kmemleak_load_module(const struct module *mod,
2453 const struct load_info *info)
2456 #endif
2458 #ifdef CONFIG_MODULE_SIG
2459 static int module_sig_check(struct load_info *info)
2461 int err = -ENOKEY;
2462 const unsigned long markerlen = sizeof(MODULE_SIG_STRING) - 1;
2463 const void *mod = info->hdr;
2465 if (info->len > markerlen &&
2466 memcmp(mod + info->len - markerlen, MODULE_SIG_STRING, markerlen) == 0) {
2467 /* We truncate the module to discard the signature */
2468 info->len -= markerlen;
2469 err = mod_verify_sig(mod, &info->len);
2472 if (!err) {
2473 info->sig_ok = true;
2474 return 0;
2477 /* Not having a signature is only an error if we're strict. */
2478 if (err < 0 && fips_enabled)
2479 panic("Module verification failed with error %d in FIPS mode\n",
2480 err);
2481 if (err == -ENOKEY && !sig_enforce)
2482 err = 0;
2484 return err;
2486 #else /* !CONFIG_MODULE_SIG */
2487 static int module_sig_check(struct load_info *info)
2489 return 0;
2491 #endif /* !CONFIG_MODULE_SIG */
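/*
 * Layout of a signed module as consumed above (a sketch; the exact
 * trailer format is defined by scripts/sign-file and mod_verify_sig()):
 *
 *	+---------------------------+
 *	| module image (ELF)        |
 *	+---------------------------+
 *	| signature data            |
 *	+---------------------------+
 *	| struct module_signature   |
 *	+---------------------------+
 *	| MODULE_SIG_STRING marker  |
 *	+---------------------------+
 *
 * module_sig_check() strips the trailing marker; mod_verify_sig() then
 * strips the signature, leaving info->len covering just the ELF image.
 */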
2493 /* Sanity checks against invalid binaries, wrong arch, weird elf version. */
2494 static int elf_header_check(struct load_info *info)
2496 if (info->len < sizeof(*(info->hdr)))
2497 return -ENOEXEC;
2499 if (memcmp(info->hdr->e_ident, ELFMAG, SELFMAG) != 0
2500 || info->hdr->e_type != ET_REL
2501 || !elf_check_arch(info->hdr)
2502 || info->hdr->e_shentsize != sizeof(Elf_Shdr))
2503 return -ENOEXEC;
2505 if (info->hdr->e_shoff >= info->len
2506 || (info->hdr->e_shnum * sizeof(Elf_Shdr) >
2507 info->len - info->hdr->e_shoff))
2508 return -ENOEXEC;
2510 return 0;
2513 /* Sets info->hdr and info->len. */
2514 static int copy_module_from_user(const void __user *umod, unsigned long len,
2515 struct load_info *info)
2517 int err;
2519 info->len = len;
2520 if (info->len < sizeof(*(info->hdr)))
2521 return -ENOEXEC;
2523 err = security_kernel_module_from_file(NULL);
2524 if (err)
2525 return err;
2527 /* Suck in entire file: we'll want most of it. */
2528 info->hdr = vmalloc(info->len);
2529 if (!info->hdr)
2530 return -ENOMEM;
2532 if (copy_from_user(info->hdr, umod, info->len) != 0) {
2533 vfree(info->hdr);
2534 return -EFAULT;
2537 return 0;
2540 /* Sets info->hdr and info->len. */
2541 static int copy_module_from_fd(int fd, struct load_info *info)
2543 struct file *file;
2544 int err;
2545 struct kstat stat;
2546 loff_t pos;
2547 ssize_t bytes = 0;
2549 file = fget(fd);
2550 if (!file)
2551 return -ENOEXEC;
2553 err = security_kernel_module_from_file(file);
2554 if (err)
2555 goto out;
2557 err = vfs_getattr(&file->f_path, &stat);
2558 if (err)
2559 goto out;
2561 if (stat.size > INT_MAX) {
2562 err = -EFBIG;
2563 goto out;
2566 /* Don't hand 0 to vmalloc, it whines. */
2567 if (stat.size == 0) {
2568 err = -EINVAL;
2569 goto out;
2572 info->hdr = vmalloc(stat.size);
2573 if (!info->hdr) {
2574 err = -ENOMEM;
2575 goto out;
2578 pos = 0;
2579 while (pos < stat.size) {
2580 bytes = kernel_read(file, pos, (char *)(info->hdr) + pos,
2581 stat.size - pos);
2582 if (bytes < 0) {
2583 vfree(info->hdr);
2584 err = bytes;
2585 goto out;
2587 if (bytes == 0)
2588 break;
2589 pos += bytes;
2591 info->len = pos;
2593 out:
2594 fput(file);
2595 return err;
2598 static void free_copy(struct load_info *info)
2600 vfree(info->hdr);
2603 static int rewrite_section_headers(struct load_info *info, int flags)
2605 unsigned int i;
2607 /* This should always be true, but let's be sure. */
2608 info->sechdrs[0].sh_addr = 0;
2610 for (i = 1; i < info->hdr->e_shnum; i++) {
2611 Elf_Shdr *shdr = &info->sechdrs[i];
2612 if (shdr->sh_type != SHT_NOBITS
2613 && info->len < shdr->sh_offset + shdr->sh_size) {
2614 printk(KERN_ERR "Module len %lu truncated\n",
2615 info->len);
2616 return -ENOEXEC;
2619 /* Mark each section's sh_addr with its address in the
2620 temporary image. */
2621 shdr->sh_addr = (size_t)info->hdr + shdr->sh_offset;
2623 #ifndef CONFIG_MODULE_UNLOAD
2624 /* Don't load .exit sections */
2625 if (strstarts(info->secstrings+shdr->sh_name, ".exit"))
2626 shdr->sh_flags &= ~(unsigned long)SHF_ALLOC;
2627 #endif
2630 /* Track but don't keep modinfo and version sections. */
2631 if (flags & MODULE_INIT_IGNORE_MODVERSIONS)
2632 info->index.vers = 0; /* Pretend no __versions section! */
2633 else
2634 info->index.vers = find_sec(info, "__versions");
2635 info->index.info = find_sec(info, ".modinfo");
2636 info->sechdrs[info->index.info].sh_flags &= ~(unsigned long)SHF_ALLOC;
2637 info->sechdrs[info->index.vers].sh_flags &= ~(unsigned long)SHF_ALLOC;
2638 return 0;
2642 * Set up our basic convenience variables (pointers to section headers,
2643 * the module section index, etc.), and do some basic section
2644 * verification.
2646 * Return the temporary module pointer (we'll replace it with the final
2647 * one when we move the module sections around).
2649 static struct module *setup_load_info(struct load_info *info, int flags)
2651 unsigned int i;
2652 int err;
2653 struct module *mod;
2655 /* Set up the convenience variables */
2656 info->sechdrs = (void *)info->hdr + info->hdr->e_shoff;
2657 info->secstrings = (void *)info->hdr
2658 + info->sechdrs[info->hdr->e_shstrndx].sh_offset;
2660 err = rewrite_section_headers(info, flags);
2661 if (err)
2662 return ERR_PTR(err);
2664 /* Find internal symbols and strings. */
2665 for (i = 1; i < info->hdr->e_shnum; i++) {
2666 if (info->sechdrs[i].sh_type == SHT_SYMTAB) {
2667 info->index.sym = i;
2668 info->index.str = info->sechdrs[i].sh_link;
2669 info->strtab = (char *)info->hdr
2670 + info->sechdrs[info->index.str].sh_offset;
2671 break;
2675 info->index.mod = find_sec(info, ".gnu.linkonce.this_module");
2676 if (!info->index.mod) {
2677 printk(KERN_WARNING "No module found in object\n");
2678 return ERR_PTR(-ENOEXEC);
2680 /* This is temporary: point mod into copy of data. */
2681 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
2683 if (info->index.sym == 0) {
2684 printk(KERN_WARNING "%s: module has no symbols (stripped?)\n",
2685 mod->name);
2686 return ERR_PTR(-ENOEXEC);
2689 info->index.pcpu = find_pcpusec(info);
2691 /* Check module struct version now, before we try to use module. */
2692 if (!check_modstruct_version(info->sechdrs, info->index.vers, mod))
2693 return ERR_PTR(-ENOEXEC);
2695 return mod;
2698 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
2700 const char *modmagic = get_modinfo(info, "vermagic");
2701 int err;
2703 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
2704 modmagic = NULL;
2706 /* This is allowed: modprobe --force will invalidate it. */
2707 if (!modmagic) {
2708 err = try_to_force_load(mod, "bad vermagic");
2709 if (err)
2710 return err;
2711 } else if (!same_magic(modmagic, vermagic, info->index.vers)) {
2712 printk(KERN_ERR "%s: version magic '%s' should be '%s'\n",
2713 mod->name, modmagic, vermagic);
2714 return -ENOEXEC;
2717 if (!get_modinfo(info, "intree"))
2718 add_taint_module(mod, TAINT_OOT_MODULE, LOCKDEP_STILL_OK);
2720 if (get_modinfo(info, "staging")) {
2721 add_taint_module(mod, TAINT_CRAP, LOCKDEP_STILL_OK);
2722 printk(KERN_WARNING "%s: module is from the staging directory,"
2723 " the quality is unknown, you have been warned.\n",
2724 mod->name);
2727 /* Set up license info based on the info section */
2728 set_license(mod, get_modinfo(info, "license"));
2730 return 0;
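/*
 * vermagic is the kernel release plus a few configuration flags (see
 * linux/vermagic.h); an illustrative value might be:
 *
 *	"3.11.0 SMP mod_unload modversions "
 *
 * A mismatch fails with -ENOEXEC. modprobe --force instead strips the
 * module's vermagic, which takes the try_to_force_load() path above
 * and taints the kernel.
 */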
2733 static void find_module_sections(struct module *mod, struct load_info *info)
2735 mod->kp = section_objs(info, "__param",
2736 sizeof(*mod->kp), &mod->num_kp);
2737 mod->syms = section_objs(info, "__ksymtab",
2738 sizeof(*mod->syms), &mod->num_syms);
2739 mod->crcs = section_addr(info, "__kcrctab");
2740 mod->gpl_syms = section_objs(info, "__ksymtab_gpl",
2741 sizeof(*mod->gpl_syms),
2742 &mod->num_gpl_syms);
2743 mod->gpl_crcs = section_addr(info, "__kcrctab_gpl");
2744 mod->gpl_future_syms = section_objs(info,
2745 "__ksymtab_gpl_future",
2746 sizeof(*mod->gpl_future_syms),
2747 &mod->num_gpl_future_syms);
2748 mod->gpl_future_crcs = section_addr(info, "__kcrctab_gpl_future");
2750 #ifdef CONFIG_UNUSED_SYMBOLS
2751 mod->unused_syms = section_objs(info, "__ksymtab_unused",
2752 sizeof(*mod->unused_syms),
2753 &mod->num_unused_syms);
2754 mod->unused_crcs = section_addr(info, "__kcrctab_unused");
2755 mod->unused_gpl_syms = section_objs(info, "__ksymtab_unused_gpl",
2756 sizeof(*mod->unused_gpl_syms),
2757 &mod->num_unused_gpl_syms);
2758 mod->unused_gpl_crcs = section_addr(info, "__kcrctab_unused_gpl");
2759 #endif
2760 #ifdef CONFIG_CONSTRUCTORS
2761 mod->ctors = section_objs(info, ".ctors",
2762 sizeof(*mod->ctors), &mod->num_ctors);
2763 #endif
2765 #ifdef CONFIG_TRACEPOINTS
2766 mod->tracepoints_ptrs = section_objs(info, "__tracepoints_ptrs",
2767 sizeof(*mod->tracepoints_ptrs),
2768 &mod->num_tracepoints);
2769 #endif
2770 #ifdef HAVE_JUMP_LABEL
2771 mod->jump_entries = section_objs(info, "__jump_table",
2772 sizeof(*mod->jump_entries),
2773 &mod->num_jump_entries);
2774 #endif
2775 #ifdef CONFIG_EVENT_TRACING
2776 mod->trace_events = section_objs(info, "_ftrace_events",
2777 sizeof(*mod->trace_events),
2778 &mod->num_trace_events);
2779 #endif
2780 #ifdef CONFIG_TRACING
2781 mod->trace_bprintk_fmt_start = section_objs(info, "__trace_printk_fmt",
2782 sizeof(*mod->trace_bprintk_fmt_start),
2783 &mod->num_trace_bprintk_fmt);
2784 #endif
2785 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
2786 /* sechdrs[0].sh_size is always zero */
2787 mod->ftrace_callsites = section_objs(info, "__mcount_loc",
2788 sizeof(*mod->ftrace_callsites),
2789 &mod->num_ftrace_callsites);
2790 #endif
2792 mod->extable = section_objs(info, "__ex_table",
2793 sizeof(*mod->extable), &mod->num_exentries);
2795 if (section_addr(info, "__obsparm"))
2796 printk(KERN_WARNING "%s: Ignoring obsolete parameters\n",
2797 mod->name);
2799 info->debug = section_objs(info, "__verbose",
2800 sizeof(*info->debug), &info->num_debug);
2803 static int move_module(struct module *mod, struct load_info *info)
2805 int i;
2806 void *ptr;
2808 /* Do the allocs. */
2809 ptr = module_alloc_update_bounds(mod->core_size);
2811 * The pointer to this block is stored in the module structure
2812 * which is inside the block. Just mark it as not being a
2813 * leak.
2815 kmemleak_not_leak(ptr);
2816 if (!ptr)
2817 return -ENOMEM;
2819 memset(ptr, 0, mod->core_size);
2820 mod->module_core = ptr;
2822 if (mod->init_size) {
2823 ptr = module_alloc_update_bounds(mod->init_size);
2825 * The pointer to this block is stored in the module structure
2826 * which is inside the block. This block doesn't need to be
2827 * scanned as it contains data and code that will be freed
2828 * after the module is initialized.
2830 kmemleak_ignore(ptr);
2831 if (!ptr) {
2832 module_free(mod, mod->module_core);
2833 return -ENOMEM;
2835 memset(ptr, 0, mod->init_size);
2836 mod->module_init = ptr;
2837 } else
2838 mod->module_init = NULL;
2840 /* Transfer each section which specifies SHF_ALLOC */
2841 pr_debug("final section addresses:\n");
2842 for (i = 0; i < info->hdr->e_shnum; i++) {
2843 void *dest;
2844 Elf_Shdr *shdr = &info->sechdrs[i];
2846 if (!(shdr->sh_flags & SHF_ALLOC))
2847 continue;
2849 if (shdr->sh_entsize & INIT_OFFSET_MASK)
2850 dest = mod->module_init
2851 + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
2852 else
2853 dest = mod->module_core + shdr->sh_entsize;
2855 if (shdr->sh_type != SHT_NOBITS)
2856 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
2857 /* Update sh_addr to point to copy in image. */
2858 shdr->sh_addr = (unsigned long)dest;
2859 pr_debug("\t0x%lx %s\n",
2860 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
2863 return 0;
2866 static int check_module_license_and_versions(struct module *mod)
2869 * ndiswrapper is under GPL by itself, but loads proprietary modules.
2870 * Don't use add_taint_module(), as it would prevent ndiswrapper from
2871 * using GPL-only symbols it needs.
2873 if (strcmp(mod->name, "ndiswrapper") == 0)
2874 add_taint(TAINT_PROPRIETARY_MODULE, LOCKDEP_NOW_UNRELIABLE);
2876 /* driverloader was caught wrongly pretending to be under GPL */
2877 if (strcmp(mod->name, "driverloader") == 0)
2878 add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
2879 LOCKDEP_NOW_UNRELIABLE);
2881 /* lve claims to be GPL but upstream won't provide source */
2882 if (strcmp(mod->name, "lve") == 0)
2883 add_taint_module(mod, TAINT_PROPRIETARY_MODULE,
2884 LOCKDEP_NOW_UNRELIABLE);
2886 #ifdef CONFIG_MODVERSIONS
2887 if ((mod->num_syms && !mod->crcs)
2888 || (mod->num_gpl_syms && !mod->gpl_crcs)
2889 || (mod->num_gpl_future_syms && !mod->gpl_future_crcs)
2890 #ifdef CONFIG_UNUSED_SYMBOLS
2891 || (mod->num_unused_syms && !mod->unused_crcs)
2892 || (mod->num_unused_gpl_syms && !mod->unused_gpl_crcs)
2893 #endif
2895 return try_to_force_load(mod,
2896 "no versions for exported symbols");
2898 #endif
2899 return 0;
2902 static void flush_module_icache(const struct module *mod)
2904 mm_segment_t old_fs;
2906 /* flush the icache in the correct context */
2907 old_fs = get_fs();
2908 set_fs(KERNEL_DS);
2911 * Flush the instruction cache, since we've played with text.
2912 * Do it before processing module parameters, so the module
2913 * can provide parameter accessor functions of its own.
2915 if (mod->module_init)
2916 flush_icache_range((unsigned long)mod->module_init,
2917 (unsigned long)mod->module_init
2918 + mod->init_size);
2919 flush_icache_range((unsigned long)mod->module_core,
2920 (unsigned long)mod->module_core + mod->core_size);
2922 set_fs(old_fs);
2925 int __weak module_frob_arch_sections(Elf_Ehdr *hdr,
2926 Elf_Shdr *sechdrs,
2927 char *secstrings,
2928 struct module *mod)
2930 return 0;
2933 static struct module *layout_and_allocate(struct load_info *info, int flags)
2935 /* Module within temporary copy. */
2936 struct module *mod;
2937 int err;
2939 mod = setup_load_info(info, flags);
2940 if (IS_ERR(mod))
2941 return mod;
2943 err = check_modinfo(mod, info, flags);
2944 if (err)
2945 return ERR_PTR(err);
2947 /* Allow arches to frob section contents and sizes. */
2948 err = module_frob_arch_sections(info->hdr, info->sechdrs,
2949 info->secstrings, mod);
2950 if (err < 0)
2951 return ERR_PTR(err);
2953 /* We will do a special allocation for per-cpu sections later. */
2954 info->sechdrs[info->index.pcpu].sh_flags &= ~(unsigned long)SHF_ALLOC;
2956 /* Determine total sizes, and put offsets in sh_entsize. For now
2957 this is done generically; there don't appear to be any
2958 special cases for the architectures. */
2959 layout_sections(mod, info);
2960 layout_symtab(mod, info);
2962 /* Allocate and move to the final place */
2963 err = move_module(mod, info);
2964 if (err)
2965 return ERR_PTR(err);
2967 /* Module has been copied to its final place now: return it. */
2968 mod = (void *)info->sechdrs[info->index.mod].sh_addr;
2969 kmemleak_load_module(mod, info);
2970 return mod;
2973 /* mod is no longer valid after this! */
2974 static void module_deallocate(struct module *mod, struct load_info *info)
2976 percpu_modfree(mod);
2977 module_free(mod, mod->module_init);
2978 module_free(mod, mod->module_core);
2981 int __weak module_finalize(const Elf_Ehdr *hdr,
2982 const Elf_Shdr *sechdrs,
2983 struct module *me)
2985 return 0;
2988 static int post_relocation(struct module *mod, const struct load_info *info)
2990 /* Sort exception table now relocations are done. */
2991 sort_extable(mod->extable, mod->extable + mod->num_exentries);
2993 /* Copy relocated percpu area over. */
2994 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
2995 info->sechdrs[info->index.pcpu].sh_size);
2997 /* Setup kallsyms-specific fields. */
2998 add_kallsyms(mod, info);
3000 /* Arch-specific module finalizing. */
3001 return module_finalize(info->hdr, info->sechdrs, mod);
3004 /* Is this module of this name done loading? No locks held. */
3005 static bool finished_loading(const char *name)
3007 struct module *mod;
3008 bool ret;
3010 mutex_lock(&module_mutex);
3011 mod = find_module_all(name, strlen(name), true);
3012 ret = !mod || mod->state == MODULE_STATE_LIVE
3013 || mod->state == MODULE_STATE_GOING;
3014 mutex_unlock(&module_mutex);
3016 return ret;
3019 /* Call module constructors. */
3020 static void do_mod_ctors(struct module *mod)
3022 #ifdef CONFIG_CONSTRUCTORS
3023 unsigned long i;
3025 for (i = 0; i < mod->num_ctors; i++)
3026 mod->ctors[i]();
3027 #endif
3030 /* This is where the real work happens */
3031 static int do_init_module(struct module *mod)
3033 int ret = 0;
3036 * We want to find out whether @mod uses async during init. Clear
3037 * PF_USED_ASYNC. async_schedule*() will set it.
3039 current->flags &= ~PF_USED_ASYNC;
3041 blocking_notifier_call_chain(&module_notify_list,
3042 MODULE_STATE_COMING, mod);
3044 /* Set RO and NX regions for core */
3045 set_section_ro_nx(mod->module_core,
3046 mod->core_text_size,
3047 mod->core_ro_size,
3048 mod->core_size);
3050 /* Set RO and NX regions for init */
3051 set_section_ro_nx(mod->module_init,
3052 mod->init_text_size,
3053 mod->init_ro_size,
3054 mod->init_size);
3056 do_mod_ctors(mod);
3057 /* Start the module */
3058 if (mod->init != NULL)
3059 ret = do_one_initcall(mod->init);
3060 if (ret < 0) {
3061 /* Init routine failed: abort. Try to protect us from
3062 buggy refcounters. */
3063 mod->state = MODULE_STATE_GOING;
3064 synchronize_sched();
3065 module_put(mod);
3066 blocking_notifier_call_chain(&module_notify_list,
3067 MODULE_STATE_GOING, mod);
3068 free_module(mod);
3069 wake_up_all(&module_wq);
3070 return ret;
3072 if (ret > 0) {
3073 printk(KERN_WARNING
3074 "%s: '%s'->init suspiciously returned %d, it should follow 0/-E convention\n"
3075 "%s: loading module anyway...\n",
3076 __func__, mod->name, ret,
3077 __func__);
3078 dump_stack();
3081 /* Now it's a first class citizen! */
3082 mod->state = MODULE_STATE_LIVE;
3083 blocking_notifier_call_chain(&module_notify_list,
3084 MODULE_STATE_LIVE, mod);
3087 * We need to finish all async code before the module init sequence
3088 * is done. This has the potential to deadlock. For example, a newly
3089 * detected block device can trigger request_module() of the
3090 * default iosched from an async probing task. Once the userland helper
3091 * reaches here, async_synchronize_full() will wait on the async
3092 * task waiting on request_module() and deadlock.
3094 * This deadlock is avoided by performing async_synchronize_full()
3095 * iff module init queued any async jobs. This isn't a full
3096 * solution as it will still deadlock if module loading from
3097 * async jobs nests more than once; however, due to the various
3098 * constraints, this hack seems to be the best option for now.
3099 * Please refer to the following thread for details.
3101 * http://thread.gmane.org/gmane.linux.kernel/1420814
3103 if (current->flags & PF_USED_ASYNC)
3104 async_synchronize_full();
3106 mutex_lock(&module_mutex);
3107 /* Drop initial reference. */
3108 module_put(mod);
3109 trim_init_extable(mod);
3110 #ifdef CONFIG_KALLSYMS
3111 mod->num_symtab = mod->core_num_syms;
3112 mod->symtab = mod->core_symtab;
3113 mod->strtab = mod->core_strtab;
3114 #endif
3115 unset_module_init_ro_nx(mod);
3116 module_free(mod, mod->module_init);
3117 mod->module_init = NULL;
3118 mod->init_size = 0;
3119 mod->init_ro_size = 0;
3120 mod->init_text_size = 0;
3121 mutex_unlock(&module_mutex);
3122 wake_up_all(&module_wq);
3124 return 0;
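/*
 * Module lifecycle, as driven by this file:
 *
 *	UNFORMED -> COMING -> LIVE -> GOING -> (freed)
 *
 * add_unformed_module() inserts the module as UNFORMED,
 * complete_formation() advances it to COMING, and do_init_module()
 * above makes it LIVE (or GOING, if the init routine fails).
 */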
3127 static int may_init_module(void)
3129 if (!capable(CAP_SYS_MODULE) || modules_disabled)
3130 return -EPERM;
3132 return 0;
3136 * We try to place it in the list now to make sure it's unique before
3137 * we dedicate too many resources; in particular, to avoid exhausting
3138 * temporary percpu memory.
3140 static int add_unformed_module(struct module *mod)
3142 int err;
3143 struct module *old;
3145 mod->state = MODULE_STATE_UNFORMED;
3147 again:
3148 mutex_lock(&module_mutex);
3149 old = find_module_all(mod->name, strlen(mod->name), true);
3150 if (old != NULL) {
3151 if (old->state == MODULE_STATE_COMING
3152 || old->state == MODULE_STATE_UNFORMED) {
3153 /* Wait in case it fails to load. */
3154 mutex_unlock(&module_mutex);
3155 err = wait_event_interruptible(module_wq,
3156 finished_loading(mod->name));
3157 if (err)
3158 goto out_unlocked;
3159 goto again;
3161 err = -EEXIST;
3162 goto out;
3164 list_add_rcu(&mod->list, &modules);
3165 err = 0;
3167 out:
3168 mutex_unlock(&module_mutex);
3169 out_unlocked:
3170 return err;
3173 static int complete_formation(struct module *mod, struct load_info *info)
3175 int err;
3177 mutex_lock(&module_mutex);
3179 /* Find duplicate symbols (must be called under lock). */
3180 err = verify_export_symbols(mod);
3181 if (err < 0)
3182 goto out;
3184 /* This relies on module_mutex for list integrity. */
3185 module_bug_finalize(info->hdr, info->sechdrs, mod);
3187 /* Mark state as coming so strong_try_module_get() ignores us,
3188 * but kallsyms etc. can see us. */
3189 mod->state = MODULE_STATE_COMING;
3191 out:
3192 mutex_unlock(&module_mutex);
3193 return err;
3196 static int unknown_module_param_cb(char *param, char *val, const char *modname)
3198 /* Check for magic 'dyndbg' arg */
3199 int ret = ddebug_dyndbg_module_param_cb(param, val, modname);
3200 if (ret != 0) {
3201 printk(KERN_WARNING "%s: unknown parameter '%s' ignored\n",
3202 modname, param);
3204 return 0;
3207 /* Allocate and load the module: note that the size of section 0 is
3208 always zero, and we rely on this for optional sections. */
3209 static int load_module(struct load_info *info, const char __user *uargs,
3210 int flags)
3212 struct module *mod;
3213 long err;
3215 err = module_sig_check(info);
3216 if (err)
3217 goto free_copy;
3219 err = elf_header_check(info);
3220 if (err)
3221 goto free_copy;
3223 /* Figure out module layout, and allocate all the memory. */
3224 mod = layout_and_allocate(info, flags);
3225 if (IS_ERR(mod)) {
3226 err = PTR_ERR(mod);
3227 goto free_copy;
3230 /* Reserve our place in the list. */
3231 err = add_unformed_module(mod);
3232 if (err)
3233 goto free_module;
3235 #ifdef CONFIG_MODULE_SIG
3236 mod->sig_ok = info->sig_ok;
3237 if (!mod->sig_ok) {
3238 printk_once(KERN_NOTICE
3239 "%s: module verification failed: signature and/or"
3240 " required key missing - tainting kernel\n",
3241 mod->name);
3242 add_taint_module(mod, TAINT_FORCED_MODULE, LOCKDEP_STILL_OK);
3244 #endif
3246 /* To avoid stressing percpu allocator, do this once we're unique. */
3247 err = percpu_modalloc(mod, info);
3248 if (err)
3249 goto unlink_mod;
3251 /* Now module is in final location, initialize linked lists, etc. */
3252 err = module_unload_init(mod);
3253 if (err)
3254 goto unlink_mod;
3256 /* Now that everything is in its final location, we can
3257 * find the optional sections. */
3258 find_module_sections(mod, info);
3260 err = check_module_license_and_versions(mod);
3261 if (err)
3262 goto free_unload;
3264 /* Set up MODINFO_ATTR fields */
3265 setup_modinfo(mod, info);
3267 /* Fix up syms, so that st_value is a pointer to location. */
3268 err = simplify_symbols(mod, info);
3269 if (err < 0)
3270 goto free_modinfo;
3272 err = apply_relocations(mod, info);
3273 if (err < 0)
3274 goto free_modinfo;
3276 err = post_relocation(mod, info);
3277 if (err < 0)
3278 goto free_modinfo;
3280 flush_module_icache(mod);
3282 /* Now copy in args */
3283 mod->args = strndup_user(uargs, ~0UL >> 1);
3284 if (IS_ERR(mod->args)) {
3285 err = PTR_ERR(mod->args);
3286 goto free_arch_cleanup;
3289 dynamic_debug_setup(info->debug, info->num_debug);
3291 /* Finally it's fully formed, ready to start executing. */
3292 err = complete_formation(mod, info);
3293 if (err)
3294 goto ddebug_cleanup;
3296 /* Module is ready to execute: parsing args may do that. */
3297 err = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
3298 -32768, 32767, unknown_module_param_cb);
3299 if (err < 0)
3300 goto bug_cleanup;
3302 /* Link in to sysfs. */
3303 err = mod_sysfs_setup(mod, info, mod->kp, mod->num_kp);
3304 if (err < 0)
3305 goto bug_cleanup;
3307 /* Get rid of temporary copy. */
3308 free_copy(info);
3310 /* Done! */
3311 trace_module_load(mod);
3313 return do_init_module(mod);
3315 bug_cleanup:
3316 /* module_bug_cleanup needs module_mutex protection */
3317 mutex_lock(&module_mutex);
3318 module_bug_cleanup(mod);
3319 mutex_unlock(&module_mutex);
3320 ddebug_cleanup:
3321 dynamic_debug_remove(info->debug);
3322 synchronize_sched();
3323 kfree(mod->args);
3324 free_arch_cleanup:
3325 module_arch_cleanup(mod);
3326 free_modinfo:
3327 free_modinfo(mod);
3328 free_unload:
3329 module_unload_free(mod);
3330 unlink_mod:
3331 mutex_lock(&module_mutex);
3332 /* Unlink carefully: kallsyms could be walking list. */
3333 list_del_rcu(&mod->list);
3334 wake_up_all(&module_wq);
3335 mutex_unlock(&module_mutex);
3336 free_module:
3337 module_deallocate(mod, info);
3338 free_copy:
3339 free_copy(info);
3340 return err;
3343 SYSCALL_DEFINE3(init_module, void __user *, umod,
3344 unsigned long, len, const char __user *, uargs)
3346 int err;
3347 struct load_info info = { };
3349 err = may_init_module();
3350 if (err)
3351 return err;
3353 pr_debug("init_module: umod=%p, len=%lu, uargs=%p\n",
3354 umod, len, uargs);
3356 err = copy_module_from_user(umod, len, &info);
3357 if (err)
3358 return err;
3360 return load_module(&info, uargs, 0);
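/*
 * Userspace usage sketch (not kernel code; error handling omitted):
 * read the whole .ko image into memory, then hand it to init_module(2).
 *
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	static int load(void *image, unsigned long len)
 *	{
 *		return syscall(__NR_init_module, image, len, "");
 *	}
 *
 * The last argument is the parameter string later fed to parse_args()
 * by load_module().
 */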
3363 SYSCALL_DEFINE3(finit_module, int, fd, const char __user *, uargs, int, flags)
3365 int err;
3366 struct load_info info = { };
3368 err = may_init_module();
3369 if (err)
3370 return err;
3372 pr_debug("finit_module: fd=%d, uargs=%p, flags=%i\n", fd, uargs, flags);
3374 if (flags & ~(MODULE_INIT_IGNORE_MODVERSIONS
3375 |MODULE_INIT_IGNORE_VERMAGIC))
3376 return -EINVAL;
3378 err = copy_module_from_fd(fd, &info);
3379 if (err)
3380 return err;
3382 return load_module(&info, uargs, flags);
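/*
 * Userspace usage sketch (not kernel code; error handling omitted):
 * finit_module(2) passes an open fd instead of a copied image, so the
 * kernel reads the file itself.
 *
 *	#include <fcntl.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int fd = open("example.ko", O_RDONLY);	/* hypothetical path */
 *	syscall(__NR_finit_module, fd, "", 0);
 *
 * flags may combine MODULE_INIT_IGNORE_MODVERSIONS and
 * MODULE_INIT_IGNORE_VERMAGIC, as validated above.
 */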
3385 static inline int within(unsigned long addr, void *start, unsigned long size)
3387 return ((void *)addr >= start && (void *)addr < start + size);
3390 #ifdef CONFIG_KALLSYMS
3392 * This ignores the intensely annoying "mapping symbols" found
3393 * in ARM ELF files: $a, $t and $d.
3395 static inline int is_arm_mapping_symbol(const char *str)
3397 return str[0] == '$' && strchr("atd", str[1])
3398 && (str[2] == '\0' || str[2] == '.');
3401 static const char *get_ksymbol(struct module *mod,
3402 unsigned long addr,
3403 unsigned long *size,
3404 unsigned long *offset)
3406 unsigned int i, best = 0;
3407 unsigned long nextval;
3409 /* At worst, the next value is at the end of the module */
3410 if (within_module_init(addr, mod))
3411 nextval = (unsigned long)mod->module_init+mod->init_text_size;
3412 else
3413 nextval = (unsigned long)mod->module_core+mod->core_text_size;
3415 /* Scan for closest preceding symbol, and next symbol. (ELF
3416 starts real symbols at 1). */
3417 for (i = 1; i < mod->num_symtab; i++) {
3418 if (mod->symtab[i].st_shndx == SHN_UNDEF)
3419 continue;
3421 /* We ignore unnamed symbols: they're uninformative
3422 * and inserted at a whim. */
3423 if (mod->symtab[i].st_value <= addr
3424 && mod->symtab[i].st_value > mod->symtab[best].st_value
3425 && *(mod->strtab + mod->symtab[i].st_name) != '\0'
3426 && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
3427 best = i;
3428 if (mod->symtab[i].st_value > addr
3429 && mod->symtab[i].st_value < nextval
3430 && *(mod->strtab + mod->symtab[i].st_name) != '\0'
3431 && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
3432 nextval = mod->symtab[i].st_value;
3435 if (!best)
3436 return NULL;
3438 if (size)
3439 *size = nextval - mod->symtab[best].st_value;
3440 if (offset)
3441 *offset = addr - mod->symtab[best].st_value;
3442 return mod->strtab + mod->symtab[best].st_name;
3445 /* For kallsyms to ask for address resolution. NULL means not found.
3446 * Be careful not to lock (to avoid deadlock on oopses); simply disable preemption. */
3447 const char *module_address_lookup(unsigned long addr,
3448 unsigned long *size,
3449 unsigned long *offset,
3450 char **modname,
3451 char *namebuf)
3453 struct module *mod;
3454 const char *ret = NULL;
3456 preempt_disable();
3457 list_for_each_entry_rcu(mod, &modules, list) {
3458 if (mod->state == MODULE_STATE_UNFORMED)
3459 continue;
3460 if (within_module_init(addr, mod) ||
3461 within_module_core(addr, mod)) {
3462 if (modname)
3463 *modname = mod->name;
3464 ret = get_ksymbol(mod, addr, size, offset);
3465 break;
3468 /* Make a copy in here where it's safe */
3469 if (ret) {
3470 strncpy(namebuf, ret, KSYM_NAME_LEN - 1);
3471 ret = namebuf;
3473 preempt_enable();
3474 return ret;
3477 int lookup_module_symbol_name(unsigned long addr, char *symname)
3479 struct module *mod;
3481 preempt_disable();
3482 list_for_each_entry_rcu(mod, &modules, list) {
3483 if (mod->state == MODULE_STATE_UNFORMED)
3484 continue;
3485 if (within_module_init(addr, mod) ||
3486 within_module_core(addr, mod)) {
3487 const char *sym;
3489 sym = get_ksymbol(mod, addr, NULL, NULL);
3490 if (!sym)
3491 goto out;
3492 strlcpy(symname, sym, KSYM_NAME_LEN);
3493 preempt_enable();
3494 return 0;
3497 out:
3498 preempt_enable();
3499 return -ERANGE;
3502 int lookup_module_symbol_attrs(unsigned long addr, unsigned long *size,
3503 unsigned long *offset, char *modname, char *name)
3505 struct module *mod;
3507 preempt_disable();
3508 list_for_each_entry_rcu(mod, &modules, list) {
3509 if (mod->state == MODULE_STATE_UNFORMED)
3510 continue;
3511 if (within_module_init(addr, mod) ||
3512 within_module_core(addr, mod)) {
3513 const char *sym;
3515 sym = get_ksymbol(mod, addr, size, offset);
3516 if (!sym)
3517 goto out;
3518 if (modname)
3519 strlcpy(modname, mod->name, MODULE_NAME_LEN);
3520 if (name)
3521 strlcpy(name, sym, KSYM_NAME_LEN);
3522 preempt_enable();
3523 return 0;
3526 out:
3527 preempt_enable();
3528 return -ERANGE;
3531 int module_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
3532 char *name, char *module_name, int *exported)
3534 struct module *mod;
3536 preempt_disable();
3537 list_for_each_entry_rcu(mod, &modules, list) {
3538 if (mod->state == MODULE_STATE_UNFORMED)
3539 continue;
3540 if (symnum < mod->num_symtab) {
3541 *value = mod->symtab[symnum].st_value;
3542 *type = mod->symtab[symnum].st_info;
3543 strlcpy(name, mod->strtab + mod->symtab[symnum].st_name,
3544 KSYM_NAME_LEN);
3545 strlcpy(module_name, mod->name, MODULE_NAME_LEN);
3546 *exported = is_exported(name, *value, mod);
3547 preempt_enable();
3548 return 0;
3550 symnum -= mod->num_symtab;
3552 preempt_enable();
3553 return -ERANGE;
3556 static unsigned long mod_find_symname(struct module *mod, const char *name)
3558 unsigned int i;
3560 for (i = 0; i < mod->num_symtab; i++)
3561 if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 &&
3562 mod->symtab[i].st_info != 'U')
3563 return mod->symtab[i].st_value;
3564 return 0;
3567 /* Look for this name: can be of form module:name. */
3568 unsigned long module_kallsyms_lookup_name(const char *name)
3570 struct module *mod;
3571 char *colon;
3572 unsigned long ret = 0;
3574 /* Don't lock: we're in enough trouble already. */
3575 preempt_disable();
3576 if ((colon = strchr(name, ':')) != NULL) {
3577 if ((mod = find_module_all(name, colon - name, false)) != NULL)
3578 ret = mod_find_symname(mod, colon+1);
3579 } else {
3580 list_for_each_entry_rcu(mod, &modules, list) {
3581 if (mod->state == MODULE_STATE_UNFORMED)
3582 continue;
3583 if ((ret = mod_find_symname(mod, name)) != 0)
3584 break;
3587 preempt_enable();
3588 return ret;
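/*
 * Example (illustrative): "usbcore:usb_register_driver" restricts the
 * search to the module named "usbcore", while a bare
 * "usb_register_driver" walks the symbol table of every module in the
 * list (skipping unformed ones) in list order.
 */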
3591 int module_kallsyms_on_each_symbol(int (*fn)(void *, const char *,
3592 struct module *, unsigned long),
3593 void *data)
3595 struct module *mod;
3596 unsigned int i;
3597 int ret;
3599 list_for_each_entry(mod, &modules, list) {
3600 if (mod->state == MODULE_STATE_UNFORMED)
3601 continue;
3602 for (i = 0; i < mod->num_symtab; i++) {
3603 ret = fn(data, mod->strtab + mod->symtab[i].st_name,
3604 mod, mod->symtab[i].st_value);
3605 if (ret != 0)
3606 return ret;
3609 return 0;
3611 #endif /* CONFIG_KALLSYMS */
3613 static char *module_flags(struct module *mod, char *buf)
3615 int bx = 0;
3617 BUG_ON(mod->state == MODULE_STATE_UNFORMED);
3618 if (mod->taints ||
3619 mod->state == MODULE_STATE_GOING ||
3620 mod->state == MODULE_STATE_COMING) {
3621 buf[bx++] = '(';
3622 bx += module_flags_taint(mod, buf + bx);
3623 /* Show a - for module-is-being-unloaded */
3624 if (mod->state == MODULE_STATE_GOING)
3625 buf[bx++] = '-';
3626 /* Show a + for module-is-being-loaded */
3627 if (mod->state == MODULE_STATE_COMING)
3628 buf[bx++] = '+';
3629 buf[bx++] = ')';
3631 buf[bx] = '\0';
3633 return buf;
3636 #ifdef CONFIG_PROC_FS
3637 /* Called by the /proc file system to return a list of modules. */
3638 static void *m_start(struct seq_file *m, loff_t *pos)
3640 mutex_lock(&module_mutex);
3641 return seq_list_start(&modules, *pos);
3644 static void *m_next(struct seq_file *m, void *p, loff_t *pos)
3646 return seq_list_next(p, &modules, pos);
3649 static void m_stop(struct seq_file *m, void *p)
3651 mutex_unlock(&module_mutex);
3654 static int m_show(struct seq_file *m, void *p)
3656 struct module *mod = list_entry(p, struct module, list);
3657 char buf[8];
3659 /* We always ignore unformed modules. */
3660 if (mod->state == MODULE_STATE_UNFORMED)
3661 return 0;
3663 seq_printf(m, "%s %u",
3664 mod->name, mod->init_size + mod->core_size);
3665 print_unload_info(m, mod);
3667 /* Informative for users. */
3668 seq_printf(m, " %s",
3669 mod->state == MODULE_STATE_GOING ? "Unloading":
3670 mod->state == MODULE_STATE_COMING ? "Loading":
3671 "Live");
3672 /* Used by oprofile and other similar tools. */
3673 seq_printf(m, " 0x%pK", mod->module_core);
3675 /* Taints info */
3676 if (mod->taints)
3677 seq_printf(m, " %s", module_flags(mod, buf));
3679 seq_printf(m, "\n");
3680 return 0;
3683 /* Format: modulename size refcount deps address
3685 Where refcount is a number or -, and deps is a comma-separated list
3686 of depends or -.
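/*
 * Illustrative /proc/modules line (fields as described above):
 *
 *	example_mod 12345 2 dep_a,dep_b, Live 0xffffffffa0000000 (O)
 *
 * "(O)" is the module_flags() string: taint characters plus '-' while
 * unloading and '+' while loading.
 */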
3688 static const struct seq_operations modules_op = {
3689 .start = m_start,
3690 .next = m_next,
3691 .stop = m_stop,
3692 .show = m_show
3695 static int modules_open(struct inode *inode, struct file *file)
3697 return seq_open(file, &modules_op);
3700 static const struct file_operations proc_modules_operations = {
3701 .open = modules_open,
3702 .read = seq_read,
3703 .llseek = seq_lseek,
3704 .release = seq_release,
3707 static int __init proc_modules_init(void)
3709 proc_create("modules", 0, NULL, &proc_modules_operations);
3710 return 0;
3712 module_init(proc_modules_init);
3713 #endif
3715 /* Given an address, look for it in the module exception tables. */
3716 const struct exception_table_entry *search_module_extables(unsigned long addr)
3718 const struct exception_table_entry *e = NULL;
3719 struct module *mod;
3721 preempt_disable();
3722 list_for_each_entry_rcu(mod, &modules, list) {
3723 if (mod->state == MODULE_STATE_UNFORMED)
3724 continue;
3725 if (mod->num_exentries == 0)
3726 continue;
3728 e = search_extable(mod->extable,
3729 mod->extable + mod->num_exentries - 1,
3730 addr);
3731 if (e)
3732 break;
3734 preempt_enable();
3736 /* Now, if we found one, we are currently running inside it, so
3737 the module cannot be unloaded out from under us: no refcnt needed. */
3738 return e;
3742 * is_module_address - is this address inside a module?
3743 * @addr: the address to check.
3745 * See is_module_text_address() if you simply want to see if the address
3746 * is code (not data).
3748 bool is_module_address(unsigned long addr)
3750 bool ret;
3752 preempt_disable();
3753 ret = __module_address(addr) != NULL;
3754 preempt_enable();
3756 return ret;
3760 * __module_address - get the module which contains an address.
3761 * @addr: the address.
3763 * Must be called with preempt disabled or module mutex held so that
3764 * module doesn't get freed during this.
3766 struct module *__module_address(unsigned long addr)
3768 struct module *mod;
3770 if (addr < module_addr_min || addr > module_addr_max)
3771 return NULL;
3773 list_for_each_entry_rcu(mod, &modules, list) {
3774 if (mod->state == MODULE_STATE_UNFORMED)
3775 continue;
3776 if (within_module_core(addr, mod)
3777 || within_module_init(addr, mod))
3778 return mod;
3780 return NULL;
3782 EXPORT_SYMBOL_GPL(__module_address);
3785 * is_module_text_address - is this address inside module code?
3786 * @addr: the address to check.
3788 * See is_module_address() if you simply want to see if the address is
3789 * anywhere in a module. See kernel_text_address() for testing if an
3790 * address corresponds to kernel or module code.
3792 bool is_module_text_address(unsigned long addr)
3794 bool ret;
3796 preempt_disable();
3797 ret = __module_text_address(addr) != NULL;
3798 preempt_enable();
3800 return ret;
3804 * __module_text_address - get the module whose code contains an address.
3805 * @addr: the address.
3807 * Must be called with preempt disabled or module mutex held so that
3808 * module doesn't get freed during this.
3810 struct module *__module_text_address(unsigned long addr)
3812 struct module *mod = __module_address(addr);
3813 if (mod) {
3814 /* Make sure it's within the text section. */
3815 if (!within(addr, mod->module_init, mod->init_text_size)
3816 && !within(addr, mod->module_core, mod->core_text_size))
3817 mod = NULL;
3819 return mod;
3821 EXPORT_SYMBOL_GPL(__module_text_address);
3823 /* Don't grab lock, we're oopsing. */
3824 void print_modules(void)
3826 struct module *mod;
3827 char buf[8];
3829 printk(KERN_DEFAULT "Modules linked in:");
3830 /* Most callers should already have preempt disabled, but make sure */
3831 preempt_disable();
3832 list_for_each_entry_rcu(mod, &modules, list) {
3833 if (mod->state == MODULE_STATE_UNFORMED)
3834 continue;
3835 printk(" %s%s", mod->name, module_flags(mod, buf));
3837 preempt_enable();
3838 if (last_unloaded_module[0])
3839 printk(" [last unloaded: %s]", last_unloaded_module);
3840 printk("\n");
3843 #ifdef CONFIG_MODVERSIONS
3844 /* Generate the signature for all relevant module structures here.
3845 * If these change, we don't want to try to parse the module. */
3846 void module_layout(struct module *mod,
3847 struct modversion_info *ver,
3848 struct kernel_param *kp,
3849 struct kernel_symbol *ks,
3850 struct tracepoint * const *tp)
3853 EXPORT_SYMBOL(module_layout);
3854 #endif