/*
 *	fs/proc/kcore.c kernel ELF core dumper
 *
 *	Modelled on fs/exec.c:aout_core_dump()
 *	Jeremy Fitzhardinge <jeremy@sw.oz.au>
 *	ELF version written by David Howells <David.Howells@nexor.co.uk>
 *	Modified and incorporated into 2.3.x by Tigran Aivazian <tigran@veritas.com>
 *	Support to dump vmalloc'd areas (ELF only), Tigran Aivazian <tigran@veritas.com>
 *	Safe accesses to vmalloc/direct-mapped discontiguous areas, Kanoj Sarcar <kanoj@sgi.com>
 */

#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/user.h>
#include <linux/capability.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <linux/list.h>
#include <linux/ioport.h>
#include <linux/memory.h>
#include <asm/sections.h>

#define CORE_STR "CORE"

#ifndef ELF_CORE_EFLAGS
#define ELF_CORE_EFLAGS	0
#endif

static struct proc_dir_entry *proc_root_kcore;

#ifndef kc_vaddr_to_offset
#define	kc_vaddr_to_offset(v) ((v) - PAGE_OFFSET)
#endif
#ifndef	kc_offset_to_vaddr
#define	kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
#endif
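
/*
 * File offsets past the ELF header map linearly onto kernel virtual
 * addresses: with the default macros above, file offset (elf_buflen + x)
 * reads the memory at PAGE_OFFSET + x (see read_kcore() and the PT_LOAD
 * setup below). Architectures with a different layout override these.
 */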

/* An ELF note in memory */
struct memelfnote
{
	const char *name;
	int type;
	unsigned int datasz;
	void *data;
};

static LIST_HEAD(kclist_head);
static DEFINE_RWLOCK(kclist_lock);
static int kcore_need_update = 1;

void
kclist_add(struct kcore_list *new, void *addr, size_t size, int type)
{
	new->addr = (unsigned long)addr;
	new->size = size;
	new->type = type;

	write_lock(&kclist_lock);
	list_add_tail(&new->list, &kclist_head);
	write_unlock(&kclist_lock);
}

static size_t get_kcore_size(int *nphdr, size_t *elf_buflen)
{
	size_t try, size = 0;
	struct kcore_list *m;

	*nphdr = 1; /* PT_NOTE */

	list_for_each_entry(m, &kclist_head, list) {
		try = kc_vaddr_to_offset((size_t)m->addr + m->size);
		if (try > size)
			size = try;
		*nphdr = *nphdr + 1;
	}
	*elf_buflen =	sizeof(struct elfhdr) +
			(*nphdr + 2)*sizeof(struct elf_phdr) +
			3 * ((sizeof(struct elf_note)) +
			     roundup(sizeof(CORE_STR), 4)) +
			roundup(sizeof(struct elf_prstatus), 4) +
			roundup(sizeof(struct elf_prpsinfo), 4) +
			roundup(sizeof(struct task_struct), 4);
	*elf_buflen = PAGE_ALIGN(*elf_buflen);
	return size + *elf_buflen;
}

static void free_kclist_ents(struct list_head *head)
{
	struct kcore_list *tmp, *pos;

	list_for_each_entry_safe(pos, tmp, head, list) {
		list_del(&pos->list);
		kfree(pos);
	}
}

/*
 * Replace all KCORE_RAM/KCORE_VMEMMAP information with passed list.
 */
static void __kcore_update_ram(struct list_head *list)
{
	int nphdr;
	size_t size;
	struct kcore_list *tmp, *pos;
	LIST_HEAD(garbage);

	write_lock(&kclist_lock);
	if (kcore_need_update) {
		list_for_each_entry_safe(pos, tmp, &kclist_head, list) {
			if (pos->type == KCORE_RAM
				|| pos->type == KCORE_VMEMMAP)
				list_move(&pos->list, &garbage);
		}
		list_splice_tail(list, &kclist_head);
	} else
		list_splice(list, &garbage);
	kcore_need_update = 0;
	proc_root_kcore->size = get_kcore_size(&nphdr, &size);
	write_unlock(&kclist_lock);

	free_kclist_ents(&garbage);
}

#ifdef CONFIG_HIGHMEM
/*
 * If no highmem, we can assume [0...max_low_pfn) is a continuous range of
 * memory, because the memory hole is not as big as in the !HIGHMEM case.
 * (HIGHMEM is special because part of memory is _invisible_ to the kernel.)
 */
static int kcore_update_ram(void)
{
	LIST_HEAD(head);
	struct kcore_list *ent;
	int ret = 0;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)__va(0);
	ent->size = max_low_pfn << PAGE_SHIFT;
	ent->type = KCORE_RAM;
	list_add(&ent->list, &head);
	__kcore_update_ram(&head);
	return ret;
}

#else /* !CONFIG_HIGHMEM */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/* calculate vmemmap's address from given system ram pfn and register it */
int get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
	unsigned long pfn = __pa(ent->addr) >> PAGE_SHIFT;
	unsigned long nr_pages = ent->size >> PAGE_SHIFT;
	unsigned long start, end;
	struct kcore_list *vmm, *tmp;

	start = ((unsigned long)pfn_to_page(pfn)) & PAGE_MASK;
	end = ((unsigned long)pfn_to_page(pfn + nr_pages)) - 1;
	end = ALIGN(end, PAGE_SIZE);
	/* overlap check (because we have to align pages) */
	list_for_each_entry(tmp, head, list) {
		if (tmp->type != KCORE_VMEMMAP)
			continue;
		if (start < tmp->addr + tmp->size)
			if (end > tmp->addr)
				end = tmp->addr;
	}
	if (start < end) {
		vmm = kmalloc(sizeof(*vmm), GFP_KERNEL);
		if (!vmm)
			return 0;
		vmm->addr = start;
		vmm->size = end - start;
		vmm->type = KCORE_VMEMMAP;
		list_add_tail(&vmm->list, head);
	}
	return 1;
}
#else
int get_sparsemem_vmemmap_info(struct kcore_list *ent, struct list_head *head)
{
	return 1;
}
#endif

static int
kclist_add_private(unsigned long pfn, unsigned long nr_pages, void *arg)
{
	struct list_head *head = (struct list_head *)arg;
	struct kcore_list *ent;

	ent = kmalloc(sizeof(*ent), GFP_KERNEL);
	if (!ent)
		return -ENOMEM;
	ent->addr = (unsigned long)__va((pfn << PAGE_SHIFT));
	ent->size = nr_pages << PAGE_SHIFT;

	/* Sanity check: can happen on a 32-bit arch...maybe */
	if (ent->addr < (unsigned long) __va(0))
		goto free_out;

	/* cut the not-mapped area (taken from ppc32 code) */
	if (ULONG_MAX - ent->addr < ent->size)
		ent->size = ULONG_MAX - ent->addr;

	/* cut when vmalloc() area is higher than direct-map area */
	if (VMALLOC_START > (unsigned long)__va(0)) {
		if (ent->addr > VMALLOC_START)
			goto free_out;
		if (VMALLOC_START - ent->addr < ent->size)
			ent->size = VMALLOC_START - ent->addr;
	}

	ent->type = KCORE_RAM;
	list_add_tail(&ent->list, head);

	if (!get_sparsemem_vmemmap_info(ent, head)) {
		list_del(&ent->list);
		goto free_out;
	}

	return 0;
free_out:
	kfree(ent);
	return 1;
}

static int kcore_update_ram(void)
{
	int nid, ret;
	unsigned long end_pfn;
	LIST_HEAD(head);

	/* Not initialized....update now */
	/* find out "max pfn" */
	end_pfn = 0;
	for_each_node_state(nid, N_HIGH_MEMORY) {
		unsigned long node_end;
		node_end = NODE_DATA(nid)->node_start_pfn +
			NODE_DATA(nid)->node_spanned_pages;
		if (end_pfn < node_end)
			end_pfn = node_end;
	}
	/* scan 0 to max_pfn */
	ret = walk_system_ram_range(0, end_pfn, &head, kclist_add_private);
	if (ret) {
		free_kclist_ents(&head);
		return -ENOMEM;
	}
	__kcore_update_ram(&head);
	return ret;
}
#endif /* CONFIG_HIGHMEM */
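
/*
 * To summarize the two kcore_update_ram() flavours above: with HIGHMEM,
 * only the direct-mapped [0, max_low_pfn) range is registered as a
 * single KCORE_RAM entry; without HIGHMEM, walk_system_ram_range()
 * registers one entry per System RAM range (plus its vmemmap region
 * when CONFIG_SPARSEMEM_VMEMMAP is set).
 */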

/*****************************************************************************/
/*
 * determine size of ELF note
 */
static int notesize(struct memelfnote *en)
{
	int sz;

	sz = sizeof(struct elf_note);
	sz += roundup((strlen(en->name) + 1), 4);
	sz += roundup(en->datasz, 4);

	return sz;
} /* end notesize() */

/*****************************************************************************/
/*
 * store a note in the header buffer
 */
static char *storenote(struct memelfnote *men, char *bufp)
{
	struct elf_note en;

#define DUMP_WRITE(addr,nr) do { memcpy(bufp,addr,nr); bufp += nr; } while(0)

	en.n_namesz = strlen(men->name) + 1;
	en.n_descsz = men->datasz;
	en.n_type = men->type;

	DUMP_WRITE(&en, sizeof(en));
	DUMP_WRITE(men->name, en.n_namesz);

	/* XXX - cast from long long to long to avoid need for libgcc.a */
	bufp = (char*) roundup((unsigned long)bufp,4);
	DUMP_WRITE(men->data, men->datasz);
	bufp = (char*) roundup((unsigned long)bufp,4);

#undef DUMP_WRITE

	return bufp;
} /* end storenote() */
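
/*
 * The layout storenote() emits is the standard ELF note format: an
 * elf_note header, the NUL-terminated name padded to 4 bytes, then the
 * descriptor data padded to 4 bytes.
 */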

/*
 * store an ELF coredump header in the supplied buffer
 * nphdr is the number of elf_phdr to insert
 */
static void elf_kcore_store_hdr(char *bufp, int nphdr, int dataoff)
{
	struct elf_prstatus prstatus;	/* NT_PRSTATUS */
	struct elf_prpsinfo prpsinfo;	/* NT_PRPSINFO */
	struct elf_phdr *nhdr, *phdr;
	struct elfhdr *elf;
	struct memelfnote notes[3];
	off_t offset = 0;
	struct kcore_list *m;

	/* setup ELF header */
	elf = (struct elfhdr *) bufp;
	bufp += sizeof(struct elfhdr);
	offset += sizeof(struct elfhdr);
	memcpy(elf->e_ident, ELFMAG, SELFMAG);
	elf->e_ident[EI_CLASS]	= ELF_CLASS;
	elf->e_ident[EI_DATA]	= ELF_DATA;
	elf->e_ident[EI_VERSION]= EV_CURRENT;
	elf->e_ident[EI_OSABI]	= ELF_OSABI;
	memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
	elf->e_type	= ET_CORE;
	elf->e_machine	= ELF_ARCH;
	elf->e_version	= EV_CURRENT;
	elf->e_entry	= 0;
	elf->e_phoff	= sizeof(struct elfhdr);
	elf->e_shoff	= 0;
	elf->e_flags	= ELF_CORE_EFLAGS;
	elf->e_ehsize	= sizeof(struct elfhdr);
	elf->e_phentsize= sizeof(struct elf_phdr);
	elf->e_phnum	= nphdr;
	elf->e_shentsize= 0;
	elf->e_shnum	= 0;
	elf->e_shstrndx	= 0;

	/* setup ELF PT_NOTE program header */
	nhdr = (struct elf_phdr *) bufp;
	bufp += sizeof(struct elf_phdr);
	offset += sizeof(struct elf_phdr);
	nhdr->p_type	= PT_NOTE;
	nhdr->p_offset	= 0;
	nhdr->p_vaddr	= 0;
	nhdr->p_paddr	= 0;
	nhdr->p_filesz	= 0;
	nhdr->p_memsz	= 0;
	nhdr->p_flags	= 0;
	nhdr->p_align	= 0;

	/* setup ELF PT_LOAD program header for every area */
	list_for_each_entry(m, &kclist_head, list) {
		phdr = (struct elf_phdr *) bufp;
		bufp += sizeof(struct elf_phdr);
		offset += sizeof(struct elf_phdr);

		phdr->p_type	= PT_LOAD;
		phdr->p_flags	= PF_R|PF_W|PF_X;
		phdr->p_offset	= kc_vaddr_to_offset(m->addr) + dataoff;
		phdr->p_vaddr	= (size_t)m->addr;
		phdr->p_paddr	= 0;
		phdr->p_filesz	= phdr->p_memsz = m->size;
		phdr->p_align	= PAGE_SIZE;
	}

	/*
	 * Set up the notes in similar form to SVR4 core dumps made
	 * with info from their /proc.
	 */
	nhdr->p_offset	= offset;

	/* set up the process status */
	notes[0].name	= CORE_STR;
	notes[0].type	= NT_PRSTATUS;
	notes[0].datasz	= sizeof(struct elf_prstatus);
	notes[0].data	= &prstatus;

	memset(&prstatus, 0, sizeof(struct elf_prstatus));

	nhdr->p_filesz	= notesize(&notes[0]);
	bufp = storenote(&notes[0], bufp);

	/* set up the process info */
	notes[1].name	= CORE_STR;
	notes[1].type	= NT_PRPSINFO;
	notes[1].datasz	= sizeof(struct elf_prpsinfo);
	notes[1].data	= &prpsinfo;

	memset(&prpsinfo, 0, sizeof(struct elf_prpsinfo));
	prpsinfo.pr_state	= 0;
	prpsinfo.pr_sname	= 'R';
	prpsinfo.pr_zomb	= 0;

	strcpy(prpsinfo.pr_fname, "vmlinux");
	strncpy(prpsinfo.pr_psargs, saved_command_line, ELF_PRARGSZ);

	nhdr->p_filesz	+= notesize(&notes[1]);
	bufp = storenote(&notes[1], bufp);

	/* set up the task structure */
	notes[2].name	= CORE_STR;
	notes[2].type	= NT_TASKSTRUCT;
	notes[2].datasz	= sizeof(struct task_struct);
	notes[2].data	= current;

	nhdr->p_filesz	+= notesize(&notes[2]);
	bufp = storenote(&notes[2], bufp);

} /* end elf_kcore_store_hdr() */
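
/*
 * Resulting /proc/kcore image, for reference:
 *
 *	elfhdr
 *	PT_NOTE program header
 *	one PT_LOAD program header per kcore_list entry
 *	NT_PRSTATUS, NT_PRPSINFO, NT_TASKSTRUCT notes
 *	(zero padding up to the page-aligned elf_buflen)
 *	memory contents, located via the PT_LOAD file offsets
 */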

/*****************************************************************************/
/*
 * read from the ELF header and then kernel memory
 */
static ssize_t
read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
{
	ssize_t acc = 0;
	size_t size, tsz;
	size_t elf_buflen;
	int nphdr;
	unsigned long start;

	read_lock(&kclist_lock);
	size = get_kcore_size(&nphdr, &elf_buflen);

	if (buflen == 0 || *fpos >= size) {
		read_unlock(&kclist_lock);
		return 0;
	}

	/* trim buflen to not go beyond EOF */
	if (buflen > size - *fpos)
		buflen = size - *fpos;

	/* construct an ELF core header if we'll need some of it */
	if (*fpos < elf_buflen) {
		char *elf_buf;

		tsz = elf_buflen - *fpos;
		if (buflen < tsz)
			tsz = buflen;
		elf_buf = kzalloc(elf_buflen, GFP_ATOMIC);
		if (!elf_buf) {
			read_unlock(&kclist_lock);
			return -ENOMEM;
		}
		elf_kcore_store_hdr(elf_buf, nphdr, elf_buflen);
		read_unlock(&kclist_lock);
		if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
			kfree(elf_buf);
			return -EFAULT;
		}
		kfree(elf_buf);
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	} else
		read_unlock(&kclist_lock);

	/*
	 * Check to see if our file offset matches with any of
	 * the addresses in the elf_phdr on our list.
	 */
	start = kc_offset_to_vaddr(*fpos - elf_buflen);
	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
		tsz = buflen;

	while (buflen) {
		struct kcore_list *m;

		read_lock(&kclist_lock);
		list_for_each_entry(m, &kclist_head, list) {
			if (start >= m->addr && start < (m->addr+m->size))
				break;
		}
		read_unlock(&kclist_lock);

		if (&m->list == &kclist_head) {
			/* area not covered by any PT_LOAD entry: zero-fill */
			if (clear_user(buffer, tsz))
				return -EFAULT;
		} else if (is_vmalloc_or_module_addr((void *)start)) {
			char *elf_buf;

			elf_buf = kzalloc(tsz, GFP_KERNEL);
			if (!elf_buf)
				return -ENOMEM;
			vread(elf_buf, (char *)start, tsz);
			/* we have to zero-fill user buffer even if no read */
			if (copy_to_user(buffer, elf_buf, tsz)) {
				kfree(elf_buf);
				return -EFAULT;
			}
			kfree(elf_buf);
		} else {
			if (kern_addr_valid(start)) {
				unsigned long n;

				n = copy_to_user(buffer, (char *)start, tsz);
				/*
				 * We cannot distinguish between fault on source
				 * and fault on destination. When this happens
				 * we clear too and hope it will trigger the
				 * EFAULT again.
				 */
				if (n) {
					if (clear_user(buffer + tsz - n, n))
						return -EFAULT;
				}
			} else {
				if (clear_user(buffer, tsz))
					return -EFAULT;
			}
		}
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;
		start += tsz;
		tsz = (buflen > PAGE_SIZE ? PAGE_SIZE : buflen);
	}

	return acc;
}
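
/*
 * Typical consumer of this interface: inspecting the running kernel
 * with something like "gdb vmlinux /proc/kcore". The reader must hold
 * CAP_SYS_RAWIO, enforced in open_kcore() below.
 */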

static int open_kcore(struct inode *inode, struct file *filp)
{
	if (!capable(CAP_SYS_RAWIO))
		return -EPERM;
	if (kcore_need_update)
		kcore_update_ram();
	if (i_size_read(inode) != proc_root_kcore->size) {
		mutex_lock(&inode->i_mutex);
		i_size_write(inode, proc_root_kcore->size);
		mutex_unlock(&inode->i_mutex);
	}
	return 0;
}

static const struct file_operations proc_kcore_operations = {
	.read		= read_kcore,
	.open		= open_kcore,
	.llseek		= generic_file_llseek,
};

#ifdef CONFIG_MEMORY_HOTPLUG
/* just remember that we have to update kcore */
static int __meminit kcore_callback(struct notifier_block *self,
				    unsigned long action, void *arg)
{
	switch (action) {
	case MEM_ONLINE:
	case MEM_OFFLINE:
		write_lock(&kclist_lock);
		kcore_need_update = 1;
		write_unlock(&kclist_lock);
	}
	return NOTIFY_OK;
}
#endif

static struct kcore_list kcore_vmalloc;

#ifdef CONFIG_ARCH_PROC_KCORE_TEXT
static struct kcore_list kcore_text;
/*
 * If defined, a special segment is used for mapping the kernel text
 * instead of the direct-map area. We need to create a special TEXT
 * section for it.
 */
static void __init proc_kcore_text_init(void)
{
	kclist_add(&kcore_text, _text, _end - _text, KCORE_TEXT);
}
#else
static void __init proc_kcore_text_init(void)
{
}
#endif

#if defined(CONFIG_MODULES) && defined(MODULES_VADDR)
/*
 * MODULES_VADDR has no intersection with VMALLOC_ADDR.
 */
struct kcore_list kcore_modules;
static void __init add_modules_range(void)
{
	kclist_add(&kcore_modules, (void *)MODULES_VADDR,
			MODULES_END - MODULES_VADDR, KCORE_VMALLOC);
}
#else
static void __init add_modules_range(void)
{
}
#endif

static int __init proc_kcore_init(void)
{
	proc_root_kcore = proc_create("kcore", S_IRUSR, NULL,
				      &proc_kcore_operations);
	if (!proc_root_kcore) {
		printk(KERN_ERR "couldn't create /proc/kcore\n");
		return 0; /* Always returns 0. */
	}
	/* Store text area if it's special */
	proc_kcore_text_init();
	/* Store vmalloc area */
	kclist_add(&kcore_vmalloc, (void *)VMALLOC_START,
		VMALLOC_END - VMALLOC_START, KCORE_VMALLOC);
	add_modules_range();
	/* Store direct-map area from physical memory map */
	kcore_update_ram();
	hotplug_memory_notifier(kcore_callback, 0);

	return 0;
}
module_init(proc_kcore_init);