1 #include <linux/config.h>
3 #include <linux/module.h>
4 #include <asm/module.h>
5 #include <asm/uaccess.h>
6 #include <linux/kallsyms.h>
7 #include <linux/vmalloc.h>
8 #include <linux/smp_lock.h>
9 #include <asm/pgalloc.h>
10 #include <linux/init.h>
11 #include <linux/slab.h>
12 #include <linux/kmod.h>
13 #include <linux/seq_file.h>
15 #include <asm/cacheflush.h>
18 * Originally by Anonymous (as far as I know...)
19 * Linux version by Bas Laarhoven <bas@vimec.nl>
20 * 0.99.14 version by Jon Tombs <jon@gtex02.us.es>,
21 * Heavily modified by Bjorn Ekwall <bj0rn@blox.se> May 1994 (C)
22 * Rewritten by Richard Henderson <rth@tamu.edu> Dec 1996
23 * Add MOD_INITIALIZING Keith Owens <kaos@ocs.com.au> Nov 1999
24 * Add kallsyms support, Keith Owens <kaos@ocs.com.au> Apr 2000
25 * Add asm/module support, IA64 has special requirements. Keith Owens <kaos@ocs.com.au> Sep 2000
26 * Fix assorted bugs in module verification. Keith Owens <kaos@ocs.com.au> Sep 2000
27 * Fix sys_init_module race, Andrew Morton <andrewm@uow.edu.au> Oct 2000
28 * http://www.uwsg.iu.edu/hypermail/linux/kernel/0008.3/0379.html
29 * Replace xxx_module_symbol with inter_module_xxx. Keith Owens <kaos@ocs.com.au> Oct 2000
30 * Add a module list lock for kernel fault race fixing. Alan Cox <alan@redhat.com>
32 * This source is covered by the GNU GPL, the same as all kernel sources.
35 #if defined(CONFIG_MODULES) || defined(CONFIG_KALLSYMS)
37 extern struct module_symbol __start___ksymtab
[];
38 extern struct module_symbol __stop___ksymtab
[];
40 extern const struct exception_table_entry __start___ex_table
[];
41 extern const struct exception_table_entry __stop___ex_table
[];
43 extern const char __start___kallsyms
[] __attribute__((weak
));
44 extern const char __stop___kallsyms
[] __attribute__((weak
));
46 /* modutils uses these exported symbols to figure out if
47 kallsyms support is present */
49 EXPORT_SYMBOL(__start___kallsyms
);
50 EXPORT_SYMBOL(__stop___kallsyms
);
52 struct module kernel_module
=
54 .size_of_struct
= sizeof(struct module
),
56 .uc
= {ATOMIC_INIT(1)},
58 .syms
= __start___ksymtab
,
59 .ex_table_start
= __start___ex_table
,
60 .ex_table_end
= __stop___ex_table
,
61 .kallsyms_start
= __start___kallsyms
,
62 .kallsyms_end
= __stop___kallsyms
,
65 struct module
*module_list
= &kernel_module
;
67 #endif /* defined(CONFIG_MODULES) || defined(CONFIG_KALLSYMS) */
69 /* inter_module functions are always available, even when the kernel is
70 * compiled without modules. Consumers of inter_module_xxx routines
71 * will always work, even when both are built into the kernel, this
72 * approach removes lots of #ifdefs in mainline code.
75 static struct list_head ime_list
= LIST_HEAD_INIT(ime_list
);
76 static spinlock_t ime_lock
= SPIN_LOCK_UNLOCKED
;
77 static int kmalloc_failed
;
80 * This lock prevents modifications that might race the kernel fault
81 * fixups. It does not prevent reader walks that the modules code
82 * does. The kernel lock does that.
84 * Since vmalloc fault fixups occur in any context this lock is taken
85 * irqsave at all times.
88 spinlock_t modlist_lock
= SPIN_LOCK_UNLOCKED
;
91 * inter_module_register - register a new set of inter module data.
92 * @im_name: an arbitrary string to identify the data, must be unique
93 * @owner: module that is registering the data, always use THIS_MODULE
94 * @userdata: pointer to arbitrary userdata to be registered
96 * Description: Check that the im_name has not already been registered,
97 * complain if it has. For new data, add it to the inter_module_entry
100 void inter_module_register(const char *im_name
, struct module
*owner
, const void *userdata
)
102 struct list_head
*tmp
;
103 struct inter_module_entry
*ime
, *ime_new
;
105 if (!(ime_new
= kmalloc(sizeof(*ime
), GFP_KERNEL
))) {
106 /* Overloaded kernel, not fatal */
108 "Aiee, inter_module_register: cannot kmalloc entry for '%s'\n",
113 memset(ime_new
, 0, sizeof(*ime_new
));
114 ime_new
->im_name
= im_name
;
115 ime_new
->owner
= owner
;
116 ime_new
->userdata
= userdata
;
118 spin_lock(&ime_lock
);
119 list_for_each(tmp
, &ime_list
) {
120 ime
= list_entry(tmp
, struct inter_module_entry
, list
);
121 if (strcmp(ime
->im_name
, im_name
) == 0) {
122 spin_unlock(&ime_lock
);
124 /* Program logic error, fatal */
125 printk(KERN_ERR
"inter_module_register: duplicate im_name '%s'", im_name
);
129 list_add(&(ime_new
->list
), &ime_list
);
130 spin_unlock(&ime_lock
);
134 * inter_module_unregister - unregister a set of inter module data.
135 * @im_name: an arbitrary string to identify the data, must be unique
137 * Description: Check that the im_name has been registered, complain if
138 * it has not. For existing data, remove it from the
139 * inter_module_entry list.
141 void inter_module_unregister(const char *im_name
)
143 struct list_head
*tmp
;
144 struct inter_module_entry
*ime
;
146 spin_lock(&ime_lock
);
147 list_for_each(tmp
, &ime_list
) {
148 ime
= list_entry(tmp
, struct inter_module_entry
, list
);
149 if (strcmp(ime
->im_name
, im_name
) == 0) {
150 list_del(&(ime
->list
));
151 spin_unlock(&ime_lock
);
156 spin_unlock(&ime_lock
);
157 if (kmalloc_failed
) {
159 "inter_module_unregister: no entry for '%s', "
160 "probably caused by previous kmalloc failure\n",
165 /* Program logic error, fatal */
166 printk(KERN_ERR
"inter_module_unregister: no entry for '%s'", im_name
);
172 * inter_module_get - return arbitrary userdata from another module.
173 * @im_name: an arbitrary string to identify the data, must be unique
175 * Description: If the im_name has not been registered, return NULL.
176 * Try to increment the use count on the owning module, if that fails
177 * then return NULL. Otherwise return the userdata.
179 const void *inter_module_get(const char *im_name
)
181 struct list_head
*tmp
;
182 struct inter_module_entry
*ime
;
183 const void *result
= NULL
;
185 spin_lock(&ime_lock
);
186 list_for_each(tmp
, &ime_list
) {
187 ime
= list_entry(tmp
, struct inter_module_entry
, list
);
188 if (strcmp(ime
->im_name
, im_name
) == 0) {
189 if (try_inc_mod_count(ime
->owner
))
190 result
= ime
->userdata
;
194 spin_unlock(&ime_lock
);
/**
 * inter_module_get_request - im get with automatic request_module.
 * @im_name: an arbitrary string to identify the data, must be unique
 * @modname: module that is expected to register im_name
 *
 * Description: If inter_module_get fails, do request_module then retry.
 */
const void *inter_module_get_request(const char *im_name, const char *modname)
{
	const void *result = inter_module_get(im_name);
	if (!result) {
		/* Not yet registered; try loading the expected provider once. */
		request_module(modname);
		result = inter_module_get(im_name);
	}
	return(result);
}
216 * inter_module_put - release use of data from another module.
217 * @im_name: an arbitrary string to identify the data, must be unique
219 * Description: If the im_name has not been registered, complain,
220 * otherwise decrement the use count on the owning module.
222 void inter_module_put(const char *im_name
)
224 struct list_head
*tmp
;
225 struct inter_module_entry
*ime
;
227 spin_lock(&ime_lock
);
228 list_for_each(tmp
, &ime_list
) {
229 ime
= list_entry(tmp
, struct inter_module_entry
, list
);
230 if (strcmp(ime
->im_name
, im_name
) == 0) {
232 __MOD_DEC_USE_COUNT(ime
->owner
);
233 spin_unlock(&ime_lock
);
237 spin_unlock(&ime_lock
);
238 printk(KERN_ERR
"inter_module_put: no entry for '%s'", im_name
);
243 #if defined(CONFIG_MODULES) /* The rest of the source */
/* Forward declarations for the module syscall helpers below. */
static long get_mod_name(const char *user_name, char **buf);
static void put_mod_name(char *buf);
struct module *find_module(const char *name);
void free_module(struct module *, int tag_freed);
252 * Called at boot time
255 void __init
init_modules(void)
257 kernel_module
.nsyms
= __stop___ksymtab
- __start___ksymtab
;
259 arch_init_modules(&kernel_module
);
263 * Copy the name of a module from user space.
267 get_mod_name(const char *user_name
, char **buf
)
272 page
= __get_free_page(GFP_KERNEL
);
276 retval
= strncpy_from_user((char *)page
, user_name
, PAGE_SIZE
);
278 if (retval
< PAGE_SIZE
) {
282 retval
= -ENAMETOOLONG
;
/* Release a name obtained from get_mod_name. */
static inline void
put_mod_name(char *buf)
{
	free_page((unsigned long)buf);
}
297 * Allocate space for a module.
300 asmlinkage
unsigned long
301 sys_create_module(const char *name_user
, size_t size
)
308 if (!capable(CAP_SYS_MODULE
))
311 if ((namelen
= get_mod_name(name_user
, &name
)) < 0) {
315 if (size
< sizeof(struct module
)+namelen
) {
319 if (find_module(name
) != NULL
) {
323 if ((mod
= (struct module
*)module_map(size
)) == NULL
) {
328 memset(mod
, 0, sizeof(*mod
));
329 mod
->size_of_struct
= sizeof(*mod
);
330 mod
->name
= (char *)(mod
+ 1);
332 memcpy((char*)(mod
+1), name
, namelen
+1);
336 spin_lock_irqsave(&modlist_lock
, flags
);
337 mod
->next
= module_list
;
338 module_list
= mod
; /* link it in */
339 spin_unlock_irqrestore(&modlist_lock
, flags
);
351 * Initialize a module.
355 sys_init_module(const char *name_user
, struct module
*mod_user
)
357 struct module mod_tmp
, *mod
;
358 char *name
, *n_name
, *name_tmp
= NULL
;
359 long namelen
, n_namelen
, i
, error
;
360 unsigned long mod_user_size
;
361 struct module_ref
*dep
;
363 if (!capable(CAP_SYS_MODULE
))
366 if ((namelen
= get_mod_name(name_user
, &name
)) < 0) {
370 if ((mod
= find_module(name
)) == NULL
) {
375 /* Check module header size. We allow a bit of slop over the
376 size we are familiar with to cope with a version of insmod
377 for a newer kernel. But don't over do it. */
378 if ((error
= get_user(mod_user_size
, &mod_user
->size_of_struct
)) != 0)
380 if (mod_user_size
< (unsigned long)&((struct module
*)0L)->persist_start
381 || mod_user_size
> sizeof(struct module
) + 16*sizeof(void*)) {
382 printk(KERN_ERR
"init_module: Invalid module header size.\n"
383 KERN_ERR
"A new version of the modutils is likely "
389 /* Hold the current contents while we play with the user's idea
392 name_tmp
= kmalloc(strlen(mod
->name
) + 1, GFP_KERNEL
); /* Where's kstrdup()? */
393 if (name_tmp
== NULL
) {
397 strcpy(name_tmp
, mod
->name
);
399 error
= copy_from_user(mod
, mod_user
, mod_user_size
);
405 /* Sanity check the size of the module. */
408 if (mod
->size
> mod_tmp
.size
) {
409 printk(KERN_ERR
"init_module: Size of initialized module "
410 "exceeds size of created module.\n");
414 /* Make sure all interesting pointers are sane. */
416 if (!mod_bound(mod
->name
, namelen
, mod
)) {
417 printk(KERN_ERR
"init_module: mod->name out of bounds.\n");
420 if (mod
->nsyms
&& !mod_bound(mod
->syms
, mod
->nsyms
, mod
)) {
421 printk(KERN_ERR
"init_module: mod->syms out of bounds.\n");
424 if (mod
->ndeps
&& !mod_bound(mod
->deps
, mod
->ndeps
, mod
)) {
425 printk(KERN_ERR
"init_module: mod->deps out of bounds.\n");
428 if (mod
->init
&& !mod_bound(mod
->init
, 0, mod
)) {
429 printk(KERN_ERR
"init_module: mod->init out of bounds.\n");
432 if (mod
->cleanup
&& !mod_bound(mod
->cleanup
, 0, mod
)) {
433 printk(KERN_ERR
"init_module: mod->cleanup out of bounds.\n");
436 if (mod
->ex_table_start
> mod
->ex_table_end
437 || (mod
->ex_table_start
&&
438 !((unsigned long)mod
->ex_table_start
>= ((unsigned long)mod
+ mod
->size_of_struct
)
439 && ((unsigned long)mod
->ex_table_end
440 < (unsigned long)mod
+ mod
->size
)))
441 || (((unsigned long)mod
->ex_table_start
442 - (unsigned long)mod
->ex_table_end
)
443 % sizeof(struct exception_table_entry
))) {
444 printk(KERN_ERR
"init_module: mod->ex_table_* invalid.\n");
447 if (mod
->flags
& ~MOD_AUTOCLEAN
) {
448 printk(KERN_ERR
"init_module: mod->flags invalid.\n");
451 if (mod_member_present(mod
, can_unload
)
452 && mod
->can_unload
&& !mod_bound(mod
->can_unload
, 0, mod
)) {
453 printk(KERN_ERR
"init_module: mod->can_unload out of bounds.\n");
456 if (mod_member_present(mod
, kallsyms_end
)) {
457 if (mod
->kallsyms_end
&&
458 (!mod_bound(mod
->kallsyms_start
, 0, mod
) ||
459 !mod_bound(mod
->kallsyms_end
, 0, mod
))) {
460 printk(KERN_ERR
"init_module: mod->kallsyms out of bounds.\n");
463 if (mod
->kallsyms_start
> mod
->kallsyms_end
) {
464 printk(KERN_ERR
"init_module: mod->kallsyms invalid.\n");
468 if (mod_member_present(mod
, archdata_end
)) {
469 if (mod
->archdata_end
&&
470 (!mod_bound(mod
->archdata_start
, 0, mod
) ||
471 !mod_bound(mod
->archdata_end
, 0, mod
))) {
472 printk(KERN_ERR
"init_module: mod->archdata out of bounds.\n");
475 if (mod
->archdata_start
> mod
->archdata_end
) {
476 printk(KERN_ERR
"init_module: mod->archdata invalid.\n");
480 if (mod_member_present(mod
, kernel_data
) && mod
->kernel_data
) {
481 printk(KERN_ERR
"init_module: mod->kernel_data must be zero.\n");
485 /* Check that the user isn't doing something silly with the name. */
487 if ((n_namelen
= get_mod_name(mod
->name
- (unsigned long)mod
488 + (unsigned long)mod_user
,
490 printk(KERN_ERR
"init_module: get_mod_name failure.\n");
494 if (namelen
!= n_namelen
|| strcmp(n_name
, mod_tmp
.name
) != 0) {
495 printk(KERN_ERR
"init_module: changed module name to "
497 n_name
, mod_tmp
.name
);
501 /* Ok, that's about all the sanity we can stomach; copy the rest. */
503 if (copy_from_user((char *)mod
+mod_user_size
,
504 (char *)mod_user
+mod_user_size
,
505 mod
->size
-mod_user_size
)) {
510 if (module_arch_init(mod
))
513 /* On some machines it is necessary to do something here
514 to make the I and D caches consistent. */
515 flush_icache_range((unsigned long)mod
, (unsigned long)mod
+ mod
->size
);
517 mod
->next
= mod_tmp
.next
;
520 /* Sanity check the module's dependents */
521 for (i
= 0, dep
= mod
->deps
; i
< mod
->ndeps
; ++i
, ++dep
) {
522 struct module
*o
, *d
= dep
->dep
;
524 /* Make sure the indicated dependencies are really modules. */
526 printk(KERN_ERR
"init_module: self-referential "
527 "dependency in mod->deps.\n");
531 /* Scan the current modules for this dependency */
532 for (o
= module_list
; o
!= &kernel_module
&& o
!= d
; o
= o
->next
)
536 printk(KERN_ERR
"init_module: found dependency that is "
537 "(no longer?) a module.\n");
542 /* Update module references. */
543 for (i
= 0, dep
= mod
->deps
; i
< mod
->ndeps
; ++i
, ++dep
) {
544 struct module
*d
= dep
->dep
;
547 dep
->next_ref
= d
->refs
;
549 /* Being referenced by a dependent module counts as a
550 use as far as kmod is concerned. */
551 d
->flags
|= MOD_USED_ONCE
;
554 /* Free our temporary memory. */
555 put_mod_name(n_name
);
558 /* Initialize the module. */
559 atomic_set(&mod
->uc
.usecount
,1);
560 mod
->flags
|= MOD_INITIALIZING
;
561 if (mod
->init
&& (error
= mod
->init()) != 0) {
562 atomic_set(&mod
->uc
.usecount
,0);
563 mod
->flags
&= ~MOD_INITIALIZING
;
564 if (error
> 0) /* Buggy module */
568 atomic_dec(&mod
->uc
.usecount
);
570 /* And set it running. */
571 mod
->flags
= (mod
->flags
| MOD_RUNNING
) & ~MOD_INITIALIZING
;
576 put_mod_name(n_name
);
579 strcpy((char *)mod
->name
, name_tmp
); /* We know there is room for this */
588 static spinlock_t unload_lock
= SPIN_LOCK_UNLOCKED
;
589 int try_inc_mod_count(struct module
*mod
)
593 spin_lock(&unload_lock
);
594 if (mod
->flags
& MOD_DELETED
)
597 __MOD_INC_USE_COUNT(mod
);
598 spin_unlock(&unload_lock
);
604 sys_delete_module(const char *name_user
)
606 struct module
*mod
, *next
;
609 int something_changed
;
611 if (!capable(CAP_SYS_MODULE
))
616 if ((error
= get_mod_name(name_user
, &name
)) < 0)
619 if ((mod
= find_module(name
)) == NULL
) {
625 if (mod
->refs
!= NULL
)
628 spin_lock(&unload_lock
);
629 if (!__MOD_IN_USE(mod
)) {
630 mod
->flags
|= MOD_DELETED
;
631 spin_unlock(&unload_lock
);
635 spin_unlock(&unload_lock
);
640 /* Do automatic reaping */
642 something_changed
= 0;
644 for (mod
= module_list
; mod
!= &kernel_module
; mod
= next
) {
646 spin_lock(&unload_lock
);
647 if (mod
->refs
== NULL
648 && (mod
->flags
& MOD_AUTOCLEAN
)
649 && (mod
->flags
& MOD_RUNNING
)
650 && !(mod
->flags
& MOD_DELETED
)
651 && (mod
->flags
& MOD_USED_ONCE
)
652 && !__MOD_IN_USE(mod
)) {
653 if ((mod
->flags
& MOD_VISITED
)
654 && !(mod
->flags
& MOD_JUST_FREED
)) {
655 spin_unlock(&unload_lock
);
656 mod
->flags
&= ~MOD_VISITED
;
658 mod
->flags
|= MOD_DELETED
;
659 spin_unlock(&unload_lock
);
661 something_changed
= 1;
664 spin_unlock(&unload_lock
);
668 if (something_changed
)
671 for (mod
= module_list
; mod
!= &kernel_module
; mod
= mod
->next
)
672 mod
->flags
&= ~MOD_JUST_FREED
;
680 /* Query various bits about modules. */
683 qm_modules(char *buf
, size_t bufsize
, size_t *ret
)
686 size_t nmod
, space
, len
;
690 for (mod
=module_list
; mod
!= &kernel_module
; mod
=mod
->next
, ++nmod
) {
691 len
= strlen(mod
->name
)+1;
693 goto calc_space_needed
;
694 if (copy_to_user(buf
, mod
->name
, len
))
701 if (put_user(nmod
, ret
))
708 while ((mod
= mod
->next
) != &kernel_module
)
709 space
+= strlen(mod
->name
)+1;
711 if (put_user(space
, ret
))
718 qm_deps(struct module
*mod
, char *buf
, size_t bufsize
, size_t *ret
)
720 size_t i
, space
, len
;
722 if (mod
== &kernel_module
)
724 if (!MOD_CAN_QUERY(mod
))
725 if (put_user(0, ret
))
731 for (i
= 0; i
< mod
->ndeps
; ++i
) {
732 const char *dep_name
= mod
->deps
[i
].dep
->name
;
734 len
= strlen(dep_name
)+1;
736 goto calc_space_needed
;
737 if (copy_to_user(buf
, dep_name
, len
))
744 if (put_user(i
, ret
))
751 while (++i
< mod
->ndeps
)
752 space
+= strlen(mod
->deps
[i
].dep
->name
)+1;
754 if (put_user(space
, ret
))
761 qm_refs(struct module
*mod
, char *buf
, size_t bufsize
, size_t *ret
)
763 size_t nrefs
, space
, len
;
764 struct module_ref
*ref
;
766 if (mod
== &kernel_module
)
768 if (!MOD_CAN_QUERY(mod
))
769 if (put_user(0, ret
))
775 for (nrefs
= 0, ref
= mod
->refs
; ref
; ++nrefs
, ref
= ref
->next_ref
) {
776 const char *ref_name
= ref
->ref
->name
;
778 len
= strlen(ref_name
)+1;
780 goto calc_space_needed
;
781 if (copy_to_user(buf
, ref_name
, len
))
788 if (put_user(nrefs
, ret
))
795 while ((ref
= ref
->next_ref
) != NULL
)
796 space
+= strlen(ref
->ref
->name
)+1;
798 if (put_user(space
, ret
))
805 qm_symbols(struct module
*mod
, char *buf
, size_t bufsize
, size_t *ret
)
807 size_t i
, space
, len
;
808 struct module_symbol
*s
;
812 if (!MOD_CAN_QUERY(mod
))
813 if (put_user(0, ret
))
818 space
= mod
->nsyms
* 2*sizeof(void *);
824 goto calc_space_needed
;
826 if (!access_ok(VERIFY_WRITE
, buf
, space
))
830 vals
= (unsigned long *)buf
;
833 for (; i
< mod
->nsyms
; ++i
, ++s
, vals
+= 2) {
834 len
= strlen(s
->name
)+1;
836 goto calc_space_needed
;
838 if (copy_to_user(strings
, s
->name
, len
)
839 || __put_user(s
->value
, vals
+0)
840 || __put_user(space
, vals
+1))
847 if (put_user(i
, ret
))
853 for (; i
< mod
->nsyms
; ++i
, ++s
)
854 space
+= strlen(s
->name
)+1;
856 if (put_user(space
, ret
))
863 qm_info(struct module
*mod
, char *buf
, size_t bufsize
, size_t *ret
)
867 if (mod
== &kernel_module
)
870 if (sizeof(struct module_info
) <= bufsize
) {
871 struct module_info info
;
872 info
.addr
= (unsigned long)mod
;
873 info
.size
= mod
->size
;
874 info
.flags
= mod
->flags
;
876 /* usecount is one too high here - report appropriately to
877 compensate for locking */
878 info
.usecount
= (mod_member_present(mod
, can_unload
)
879 && mod
->can_unload
? -1 : atomic_read(&mod
->uc
.usecount
)-1);
881 if (copy_to_user(buf
, &info
, sizeof(struct module_info
)))
886 if (put_user(sizeof(struct module_info
), ret
))
893 sys_query_module(const char *name_user
, int which
, char *buf
, size_t bufsize
,
900 if (name_user
== NULL
)
901 mod
= &kernel_module
;
906 if ((namelen
= get_mod_name(name_user
, &name
)) < 0) {
911 if ((mod
= find_module(name
)) == NULL
) {
918 /* __MOD_ touches the flags. We must avoid that */
920 atomic_inc(&mod
->uc
.usecount
);
928 err
= qm_modules(buf
, bufsize
, ret
);
931 err
= qm_deps(mod
, buf
, bufsize
, ret
);
934 err
= qm_refs(mod
, buf
, bufsize
, ret
);
937 err
= qm_symbols(mod
, buf
, bufsize
, ret
);
940 err
= qm_info(mod
, buf
, bufsize
, ret
);
946 atomic_dec(&mod
->uc
.usecount
);
954 * Copy the kernel symbol table to user space. If the argument is
955 * NULL, just return the size of the table.
957 * This call is obsolete. New programs should use query_module+QM_SYMBOLS
958 * which does not arbitrarily limit the length of symbols.
962 sys_get_kernel_syms(struct kernel_sym
*table
)
966 struct kernel_sym ksym
;
969 for (mod
= module_list
, i
= 0; mod
; mod
= mod
->next
) {
970 /* include the count for the module name! */
977 /* So that we don't give the user our stack content */
978 memset (&ksym
, 0, sizeof (ksym
));
980 for (mod
= module_list
, i
= 0; mod
; mod
= mod
->next
) {
981 struct module_symbol
*msym
;
984 if (!MOD_CAN_QUERY(mod
))
987 /* magic: write module info as a pseudo symbol */
988 ksym
.value
= (unsigned long)mod
;
990 strncpy(ksym
.name
+1, mod
->name
, sizeof(ksym
.name
)-1);
991 ksym
.name
[sizeof(ksym
.name
)-1] = '\0';
993 if (copy_to_user(table
, &ksym
, sizeof(ksym
)) != 0)
1000 for (j
= 0, msym
= mod
->syms
; j
< mod
->nsyms
; ++j
, ++msym
) {
1001 ksym
.value
= msym
->value
;
1002 strncpy(ksym
.name
, msym
->name
, sizeof(ksym
.name
));
1003 ksym
.name
[sizeof(ksym
.name
)-1] = '\0';
1005 if (copy_to_user(table
, &ksym
, sizeof(ksym
)) != 0)
1016 * Look for a module by name, ignoring modules marked for deletion.
1020 find_module(const char *name
)
1024 for (mod
= module_list
; mod
; mod
= mod
->next
) {
1025 if (mod
->flags
& MOD_DELETED
)
1027 if (!strcmp(mod
->name
, name
))
1035 * Free the given module.
1039 free_module(struct module
*mod
, int tag_freed
)
1041 struct module_ref
*dep
;
1043 unsigned long flags
;
1045 /* Let the module clean up. */
1047 if (mod
->flags
& MOD_RUNNING
)
1051 mod
->flags
&= ~MOD_RUNNING
;
1054 /* Remove the module from the dependency lists. */
1056 for (i
= 0, dep
= mod
->deps
; i
< mod
->ndeps
; ++i
, ++dep
) {
1057 struct module_ref
**pp
;
1058 for (pp
= &dep
->dep
->refs
; *pp
!= dep
; pp
= &(*pp
)->next_ref
)
1060 *pp
= dep
->next_ref
;
1061 if (tag_freed
&& dep
->dep
->refs
== NULL
)
1062 dep
->dep
->flags
|= MOD_JUST_FREED
;
1065 /* And from the main module list. */
1067 spin_lock_irqsave(&modlist_lock
, flags
);
1068 if (mod
== module_list
) {
1069 module_list
= mod
->next
;
1072 for (p
= module_list
; p
->next
!= mod
; p
= p
->next
)
1074 p
->next
= mod
->next
;
1076 spin_unlock_irqrestore(&modlist_lock
, flags
);
1078 /* And free the memory. */
1084 * Called by the /proc file system to return a current list of modules.
1086 static void *m_start(struct seq_file
*m
, loff_t
*pos
)
1091 for (v
= module_list
; v
&& n
--; v
= v
->next
)
1095 static void *m_next(struct seq_file
*m
, void *p
, loff_t
*pos
)
1097 struct module
*v
= p
;
1101 static void m_stop(struct seq_file
*m
, void *p
)
1105 static int m_show(struct seq_file
*m
, void *p
)
1107 struct module
*mod
= p
;
1108 struct module_ref
*ref
= mod
->refs
;
1110 if (mod
== &kernel_module
)
1113 seq_printf(m
, "%-20s%8lu", mod
->name
, mod
->size
);
1114 if (mod
->flags
& MOD_RUNNING
)
1115 seq_printf(m
, "%4ld",
1116 (mod_member_present(mod
, can_unload
)
1118 ? -1L : (long)atomic_read(&mod
->uc
.usecount
)));
1120 if (mod
->flags
& MOD_DELETED
)
1121 seq_puts(m
, " (deleted)");
1122 else if (mod
->flags
& MOD_RUNNING
) {
1123 if (mod
->flags
& MOD_AUTOCLEAN
)
1124 seq_puts(m
, " (autoclean)");
1125 if (!(mod
->flags
& MOD_USED_ONCE
))
1126 seq_puts(m
, " (unused)");
1127 } else if (mod
->flags
& MOD_INITIALIZING
)
1128 seq_puts(m
, " (initializing)");
1130 seq_puts(m
, " (uninitialized)");
1134 for (c
= '[' ; ref
; c
= ' ', ref
= ref
->next_ref
)
1135 seq_printf(m
, "%c%s", c
, ref
->ref
->name
);
1141 struct seq_operations modules_op
= {
1149 * Called by the /proc file system to return a current list of ksyms.
1159 static void *s_start(struct seq_file
*m
, loff_t
*pos
)
1161 struct mod_sym
*p
= kmalloc(sizeof(*p
), GFP_KERNEL
);
1166 return ERR_PTR(-ENOMEM
);
1168 for (v
= module_list
, n
= *pos
; v
; n
-= v
->nsyms
, v
= v
->next
) {
1180 static void *s_next(struct seq_file
*m
, void *p
, loff_t
*pos
)
1182 struct mod_sym
*v
= p
;
1184 if (++v
->index
>= v
->mod
->nsyms
) {
1186 v
->mod
= v
->mod
->next
;
1192 } while (!v
->mod
->nsyms
);
1198 static void s_stop(struct seq_file
*m
, void *p
)
1200 if (p
&& !IS_ERR(p
)) {
1206 static int s_show(struct seq_file
*m
, void *p
)
1208 struct mod_sym
*v
= p
;
1209 struct module_symbol
*sym
;
1211 if (!MOD_CAN_QUERY(v
->mod
))
1213 sym
= &v
->mod
->syms
[v
->index
];
1215 seq_printf(m
, "%0*lx %s\t[%s]\n", (int)(2*sizeof(void*)),
1216 sym
->value
, sym
->name
, v
->mod
->name
);
1218 seq_printf(m
, "%0*lx %s\n", (int)(2*sizeof(void*)),
1219 sym
->value
, sym
->name
);
1223 struct seq_operations ksyms_op
= {
1230 #define MODLIST_SIZE 4096
1233 * this function isn't smp safe but that's not really a problem; it's
1234 * called from oops context only and any locking could actually prevent
1235 * the oops from going out; the line that is generated is informational
1236 * only and should NEVER prevent the real oops from going out.
1238 void print_modules(void)
1240 static char modlist
[MODLIST_SIZE
];
1241 struct module
*this_mod
;
1244 this_mod
= module_list
;
1247 pos
+= snprintf(modlist
+pos
, MODLIST_SIZE
-pos
-1,
1248 "%s ", this_mod
->name
);
1249 this_mod
= this_mod
->next
;
1251 printk("%s\n",modlist
);
1254 #else /* CONFIG_MODULES */
1256 /* Dummy syscalls for people who don't want modules */
1258 asmlinkage
unsigned long
1259 sys_create_module(const char *name_user
, size_t size
)
1265 sys_init_module(const char *name_user
, struct module
*mod_user
)
1271 sys_delete_module(const char *name_user
)
1277 sys_query_module(const char *name_user
, int which
, char *buf
, size_t bufsize
,
1280 /* Let the program know about the new interface. Not that
1281 it'll do them much good. */
1289 sys_get_kernel_syms(struct kernel_sym
*table
)
1294 int try_inc_mod_count(struct module
*mod
)
1299 void print_modules(void)
1303 #endif /* CONFIG_MODULES */
1306 #if defined(CONFIG_MODULES) || defined(CONFIG_KALLSYMS)
1308 #define MAX_SYMBOL_SIZE 512
1311 address_to_exported_symbol(unsigned long address
, const char **mod_name
,
1312 const char **sym_name
, unsigned long *sym_start
,
1313 unsigned long *sym_end
)
1315 struct module
*this_mod
;
1318 for (this_mod
= module_list
; this_mod
; this_mod
= this_mod
->next
) {
1319 /* walk the symbol list of this module. Only symbols
1320 who's address is smaller than the searched for address
1321 are relevant; and only if it's better than the best so far */
1322 for (i
= 0; i
< this_mod
->nsyms
; i
++)
1323 if ((this_mod
->syms
[i
].value
<= address
) &&
1324 (*sym_start
< this_mod
->syms
[i
].value
)) {
1325 *sym_start
= this_mod
->syms
[i
].value
;
1326 *sym_name
= this_mod
->syms
[i
].name
;
1327 *mod_name
= this_mod
->name
;
1328 if (i
+ 1 < this_mod
->nsyms
)
1329 *sym_end
= this_mod
->syms
[i
+1].value
;
1331 *sym_end
= (unsigned long) this_mod
+ this_mod
->size
;
1337 print_symbol(const char *fmt
, unsigned long address
)
1339 /* static to not take up stackspace; if we race here too bad */
1340 static char buffer
[MAX_SYMBOL_SIZE
];
1342 const char *mod_name
= NULL
, *sec_name
= NULL
, *sym_name
= NULL
;
1343 unsigned long mod_start
, mod_end
, sec_start
, sec_end
,
1347 memset(buffer
, 0, MAX_SYMBOL_SIZE
);
1350 if (!kallsyms_address_to_symbol(address
, &mod_name
, &mod_start
, &mod_end
, &sec_name
, &sec_start
, &sec_end
, &sym_name
, &sym_start
, &sym_end
)) {
1352 address_to_exported_symbol(address
, &mod_name
, &sym_name
, &sym_start
, &sym_end
);
1357 snprintf(buffer
, MAX_SYMBOL_SIZE
- 1, "%s%s+%#x/%#x [%s]",
1359 (unsigned int)(address
- sym_start
),
1360 (unsigned int)(sym_end
- sym_start
),
1363 snprintf(buffer
, MAX_SYMBOL_SIZE
- 1, "%s%s+%#x/%#x",
1365 (unsigned int)(address
- sym_start
),
1366 (unsigned int)(sym_end
- sym_start
));
1367 printk(fmt
, buffer
);
1371 printk(fmt
, "[unresolved]");