/*
 *	fs/proc/vmcore.c Interface for accessing the crash
 * 				 dump from the system's previous life.
 * 	Heavily borrowed from fs/proc/kcore.c
 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *	Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */
11 #include <linux/proc_fs.h>
12 #include <linux/user.h>
13 #include <linux/elf.h>
14 #include <linux/elfcore.h>
15 #include <linux/highmem.h>
16 #include <linux/bootmem.h>
17 #include <linux/init.h>
18 #include <linux/crash_dump.h>
19 #include <linux/list.h>
20 #include <asm/uaccess.h>
23 /* List representing chunks of contiguous memory areas and their offsets in
26 static LIST_HEAD(vmcore_list
);
28 /* Stores the pointer to the buffer containing kernel elf core headers. */
29 static char *elfcorebuf
;
30 static size_t elfcorebuf_sz
;
32 /* Total size of vmcore file. */
33 static u64 vmcore_size
;
35 static struct proc_dir_entry
*proc_vmcore
= NULL
;
37 /* Reads a page from the oldmem device from given offset. */
38 static ssize_t
read_from_oldmem(char *buf
, size_t count
,
39 u64
*ppos
, int userbuf
)
41 unsigned long pfn
, offset
;
43 ssize_t read
= 0, tmp
;
48 offset
= (unsigned long)(*ppos
% PAGE_SIZE
);
49 pfn
= (unsigned long)(*ppos
/ PAGE_SIZE
);
52 if (count
> (PAGE_SIZE
- offset
))
53 nr_bytes
= PAGE_SIZE
- offset
;
57 tmp
= copy_oldmem_page(pfn
, buf
, nr_bytes
, offset
, userbuf
);
71 /* Maps vmcore file offset to respective physical address in memroy. */
72 static u64
map_offset_to_paddr(loff_t offset
, struct list_head
*vc_list
,
73 struct vmcore
**m_ptr
)
78 list_for_each_entry(m
, vc_list
, list
) {
81 end
= m
->offset
+ m
->size
- 1;
82 if (offset
>= start
&& offset
<= end
) {
83 paddr
= m
->paddr
+ offset
- start
;
92 /* Read from the ELF header and then the crash dump. On error, negative value is
93 * returned otherwise number of bytes read are returned.
95 static ssize_t
read_vmcore(struct file
*file
, char __user
*buffer
,
96 size_t buflen
, loff_t
*fpos
)
101 struct vmcore
*curr_m
= NULL
;
103 if (buflen
== 0 || *fpos
>= vmcore_size
)
106 /* trim buflen to not go beyond EOF */
107 if (buflen
> vmcore_size
- *fpos
)
108 buflen
= vmcore_size
- *fpos
;
110 /* Read ELF core header */
111 if (*fpos
< elfcorebuf_sz
) {
112 tsz
= elfcorebuf_sz
- *fpos
;
115 if (copy_to_user(buffer
, elfcorebuf
+ *fpos
, tsz
))
122 /* leave now if filled buffer already */
127 start
= map_offset_to_paddr(*fpos
, &vmcore_list
, &curr_m
);
130 if ((tsz
= (PAGE_SIZE
- (start
& ~PAGE_MASK
))) > buflen
)
133 /* Calculate left bytes in current memory segment. */
134 nr_bytes
= (curr_m
->size
- (start
- curr_m
->paddr
));
139 tmp
= read_from_oldmem(buffer
, tsz
, &start
, 1);
146 if (start
>= (curr_m
->paddr
+ curr_m
->size
)) {
147 if (curr_m
->list
.next
== &vmcore_list
)
149 curr_m
= list_entry(curr_m
->list
.next
,
150 struct vmcore
, list
);
151 start
= curr_m
->paddr
;
153 if ((tsz
= (PAGE_SIZE
- (start
& ~PAGE_MASK
))) > buflen
)
155 /* Calculate left bytes in current memory segment. */
156 nr_bytes
= (curr_m
->size
- (start
- curr_m
->paddr
));
163 static const struct file_operations proc_vmcore_operations
= {
167 static struct vmcore
* __init
get_new_element(void)
171 p
= kmalloc(sizeof(*p
), GFP_KERNEL
);
173 memset(p
, 0, sizeof(*p
));
177 static u64 __init
get_vmcore_size_elf64(char *elfptr
)
181 Elf64_Ehdr
*ehdr_ptr
;
182 Elf64_Phdr
*phdr_ptr
;
184 ehdr_ptr
= (Elf64_Ehdr
*)elfptr
;
185 phdr_ptr
= (Elf64_Phdr
*)(elfptr
+ sizeof(Elf64_Ehdr
));
186 size
= sizeof(Elf64_Ehdr
) + ((ehdr_ptr
->e_phnum
) * sizeof(Elf64_Phdr
));
187 for (i
= 0; i
< ehdr_ptr
->e_phnum
; i
++) {
188 size
+= phdr_ptr
->p_memsz
;
194 static u64 __init
get_vmcore_size_elf32(char *elfptr
)
198 Elf32_Ehdr
*ehdr_ptr
;
199 Elf32_Phdr
*phdr_ptr
;
201 ehdr_ptr
= (Elf32_Ehdr
*)elfptr
;
202 phdr_ptr
= (Elf32_Phdr
*)(elfptr
+ sizeof(Elf32_Ehdr
));
203 size
= sizeof(Elf32_Ehdr
) + ((ehdr_ptr
->e_phnum
) * sizeof(Elf32_Phdr
));
204 for (i
= 0; i
< ehdr_ptr
->e_phnum
; i
++) {
205 size
+= phdr_ptr
->p_memsz
;
211 /* Merges all the PT_NOTE headers into one. */
212 static int __init
merge_note_headers_elf64(char *elfptr
, size_t *elfsz
,
213 struct list_head
*vc_list
)
215 int i
, nr_ptnote
=0, rc
=0;
217 Elf64_Ehdr
*ehdr_ptr
;
218 Elf64_Phdr phdr
, *phdr_ptr
;
219 Elf64_Nhdr
*nhdr_ptr
;
220 u64 phdr_sz
= 0, note_off
;
222 ehdr_ptr
= (Elf64_Ehdr
*)elfptr
;
223 phdr_ptr
= (Elf64_Phdr
*)(elfptr
+ sizeof(Elf64_Ehdr
));
224 for (i
= 0; i
< ehdr_ptr
->e_phnum
; i
++, phdr_ptr
++) {
228 u64 offset
, max_sz
, sz
, real_sz
= 0;
229 if (phdr_ptr
->p_type
!= PT_NOTE
)
232 max_sz
= phdr_ptr
->p_memsz
;
233 offset
= phdr_ptr
->p_offset
;
234 notes_section
= kmalloc(max_sz
, GFP_KERNEL
);
237 rc
= read_from_oldmem(notes_section
, max_sz
, &offset
, 0);
239 kfree(notes_section
);
242 nhdr_ptr
= notes_section
;
243 for (j
= 0; j
< max_sz
; j
+= sz
) {
244 if (nhdr_ptr
->n_namesz
== 0)
246 sz
= sizeof(Elf64_Nhdr
) +
247 ((nhdr_ptr
->n_namesz
+ 3) & ~3) +
248 ((nhdr_ptr
->n_descsz
+ 3) & ~3);
250 nhdr_ptr
= (Elf64_Nhdr
*)((char*)nhdr_ptr
+ sz
);
253 /* Add this contiguous chunk of notes section to vmcore list.*/
254 new = get_new_element();
256 kfree(notes_section
);
259 new->paddr
= phdr_ptr
->p_offset
;
261 list_add_tail(&new->list
, vc_list
);
263 kfree(notes_section
);
266 /* Prepare merged PT_NOTE program header. */
267 phdr
.p_type
= PT_NOTE
;
269 note_off
= sizeof(Elf64_Ehdr
) +
270 (ehdr_ptr
->e_phnum
- nr_ptnote
+1) * sizeof(Elf64_Phdr
);
271 phdr
.p_offset
= note_off
;
272 phdr
.p_vaddr
= phdr
.p_paddr
= 0;
273 phdr
.p_filesz
= phdr
.p_memsz
= phdr_sz
;
276 /* Add merged PT_NOTE program header*/
277 tmp
= elfptr
+ sizeof(Elf64_Ehdr
);
278 memcpy(tmp
, &phdr
, sizeof(phdr
));
281 /* Remove unwanted PT_NOTE program headers. */
282 i
= (nr_ptnote
- 1) * sizeof(Elf64_Phdr
);
284 memmove(tmp
, tmp
+i
, ((*elfsz
)-sizeof(Elf64_Ehdr
)-sizeof(Elf64_Phdr
)));
286 /* Modify e_phnum to reflect merged headers. */
287 ehdr_ptr
->e_phnum
= ehdr_ptr
->e_phnum
- nr_ptnote
+ 1;
292 /* Merges all the PT_NOTE headers into one. */
293 static int __init
merge_note_headers_elf32(char *elfptr
, size_t *elfsz
,
294 struct list_head
*vc_list
)
296 int i
, nr_ptnote
=0, rc
=0;
298 Elf32_Ehdr
*ehdr_ptr
;
299 Elf32_Phdr phdr
, *phdr_ptr
;
300 Elf32_Nhdr
*nhdr_ptr
;
301 u64 phdr_sz
= 0, note_off
;
303 ehdr_ptr
= (Elf32_Ehdr
*)elfptr
;
304 phdr_ptr
= (Elf32_Phdr
*)(elfptr
+ sizeof(Elf32_Ehdr
));
305 for (i
= 0; i
< ehdr_ptr
->e_phnum
; i
++, phdr_ptr
++) {
309 u64 offset
, max_sz
, sz
, real_sz
= 0;
310 if (phdr_ptr
->p_type
!= PT_NOTE
)
313 max_sz
= phdr_ptr
->p_memsz
;
314 offset
= phdr_ptr
->p_offset
;
315 notes_section
= kmalloc(max_sz
, GFP_KERNEL
);
318 rc
= read_from_oldmem(notes_section
, max_sz
, &offset
, 0);
320 kfree(notes_section
);
323 nhdr_ptr
= notes_section
;
324 for (j
= 0; j
< max_sz
; j
+= sz
) {
325 if (nhdr_ptr
->n_namesz
== 0)
327 sz
= sizeof(Elf32_Nhdr
) +
328 ((nhdr_ptr
->n_namesz
+ 3) & ~3) +
329 ((nhdr_ptr
->n_descsz
+ 3) & ~3);
331 nhdr_ptr
= (Elf32_Nhdr
*)((char*)nhdr_ptr
+ sz
);
334 /* Add this contiguous chunk of notes section to vmcore list.*/
335 new = get_new_element();
337 kfree(notes_section
);
340 new->paddr
= phdr_ptr
->p_offset
;
342 list_add_tail(&new->list
, vc_list
);
344 kfree(notes_section
);
347 /* Prepare merged PT_NOTE program header. */
348 phdr
.p_type
= PT_NOTE
;
350 note_off
= sizeof(Elf32_Ehdr
) +
351 (ehdr_ptr
->e_phnum
- nr_ptnote
+1) * sizeof(Elf32_Phdr
);
352 phdr
.p_offset
= note_off
;
353 phdr
.p_vaddr
= phdr
.p_paddr
= 0;
354 phdr
.p_filesz
= phdr
.p_memsz
= phdr_sz
;
357 /* Add merged PT_NOTE program header*/
358 tmp
= elfptr
+ sizeof(Elf32_Ehdr
);
359 memcpy(tmp
, &phdr
, sizeof(phdr
));
362 /* Remove unwanted PT_NOTE program headers. */
363 i
= (nr_ptnote
- 1) * sizeof(Elf32_Phdr
);
365 memmove(tmp
, tmp
+i
, ((*elfsz
)-sizeof(Elf32_Ehdr
)-sizeof(Elf32_Phdr
)));
367 /* Modify e_phnum to reflect merged headers. */
368 ehdr_ptr
->e_phnum
= ehdr_ptr
->e_phnum
- nr_ptnote
+ 1;
373 /* Add memory chunks represented by program headers to vmcore list. Also update
374 * the new offset fields of exported program headers. */
375 static int __init
process_ptload_program_headers_elf64(char *elfptr
,
377 struct list_head
*vc_list
)
380 Elf64_Ehdr
*ehdr_ptr
;
381 Elf64_Phdr
*phdr_ptr
;
385 ehdr_ptr
= (Elf64_Ehdr
*)elfptr
;
386 phdr_ptr
= (Elf64_Phdr
*)(elfptr
+ sizeof(Elf64_Ehdr
)); /* PT_NOTE hdr */
388 /* First program header is PT_NOTE header. */
389 vmcore_off
= sizeof(Elf64_Ehdr
) +
390 (ehdr_ptr
->e_phnum
) * sizeof(Elf64_Phdr
) +
391 phdr_ptr
->p_memsz
; /* Note sections */
393 for (i
= 0; i
< ehdr_ptr
->e_phnum
; i
++, phdr_ptr
++) {
394 if (phdr_ptr
->p_type
!= PT_LOAD
)
397 /* Add this contiguous chunk of memory to vmcore list.*/
398 new = get_new_element();
401 new->paddr
= phdr_ptr
->p_offset
;
402 new->size
= phdr_ptr
->p_memsz
;
403 list_add_tail(&new->list
, vc_list
);
405 /* Update the program header offset. */
406 phdr_ptr
->p_offset
= vmcore_off
;
407 vmcore_off
= vmcore_off
+ phdr_ptr
->p_memsz
;
412 static int __init
process_ptload_program_headers_elf32(char *elfptr
,
414 struct list_head
*vc_list
)
417 Elf32_Ehdr
*ehdr_ptr
;
418 Elf32_Phdr
*phdr_ptr
;
422 ehdr_ptr
= (Elf32_Ehdr
*)elfptr
;
423 phdr_ptr
= (Elf32_Phdr
*)(elfptr
+ sizeof(Elf32_Ehdr
)); /* PT_NOTE hdr */
425 /* First program header is PT_NOTE header. */
426 vmcore_off
= sizeof(Elf32_Ehdr
) +
427 (ehdr_ptr
->e_phnum
) * sizeof(Elf32_Phdr
) +
428 phdr_ptr
->p_memsz
; /* Note sections */
430 for (i
= 0; i
< ehdr_ptr
->e_phnum
; i
++, phdr_ptr
++) {
431 if (phdr_ptr
->p_type
!= PT_LOAD
)
434 /* Add this contiguous chunk of memory to vmcore list.*/
435 new = get_new_element();
438 new->paddr
= phdr_ptr
->p_offset
;
439 new->size
= phdr_ptr
->p_memsz
;
440 list_add_tail(&new->list
, vc_list
);
442 /* Update the program header offset */
443 phdr_ptr
->p_offset
= vmcore_off
;
444 vmcore_off
= vmcore_off
+ phdr_ptr
->p_memsz
;
449 /* Sets offset fields of vmcore elements. */
450 static void __init
set_vmcore_list_offsets_elf64(char *elfptr
,
451 struct list_head
*vc_list
)
454 Elf64_Ehdr
*ehdr_ptr
;
457 ehdr_ptr
= (Elf64_Ehdr
*)elfptr
;
459 /* Skip Elf header and program headers. */
460 vmcore_off
= sizeof(Elf64_Ehdr
) +
461 (ehdr_ptr
->e_phnum
) * sizeof(Elf64_Phdr
);
463 list_for_each_entry(m
, vc_list
, list
) {
464 m
->offset
= vmcore_off
;
465 vmcore_off
+= m
->size
;
469 /* Sets offset fields of vmcore elements. */
470 static void __init
set_vmcore_list_offsets_elf32(char *elfptr
,
471 struct list_head
*vc_list
)
474 Elf32_Ehdr
*ehdr_ptr
;
477 ehdr_ptr
= (Elf32_Ehdr
*)elfptr
;
479 /* Skip Elf header and program headers. */
480 vmcore_off
= sizeof(Elf32_Ehdr
) +
481 (ehdr_ptr
->e_phnum
) * sizeof(Elf32_Phdr
);
483 list_for_each_entry(m
, vc_list
, list
) {
484 m
->offset
= vmcore_off
;
485 vmcore_off
+= m
->size
;
489 static int __init
parse_crash_elf64_headers(void)
495 addr
= elfcorehdr_addr
;
497 /* Read Elf header */
498 rc
= read_from_oldmem((char*)&ehdr
, sizeof(Elf64_Ehdr
), &addr
, 0);
502 /* Do some basic Verification. */
503 if (memcmp(ehdr
.e_ident
, ELFMAG
, SELFMAG
) != 0 ||
504 (ehdr
.e_type
!= ET_CORE
) ||
505 !vmcore_elf_check_arch(&ehdr
) ||
506 ehdr
.e_ident
[EI_CLASS
] != ELFCLASS64
||
507 ehdr
.e_ident
[EI_VERSION
] != EV_CURRENT
||
508 ehdr
.e_version
!= EV_CURRENT
||
509 ehdr
.e_ehsize
!= sizeof(Elf64_Ehdr
) ||
510 ehdr
.e_phentsize
!= sizeof(Elf64_Phdr
) ||
512 printk(KERN_WARNING
"Warning: Core image elf header is not"
517 /* Read in all elf headers. */
518 elfcorebuf_sz
= sizeof(Elf64_Ehdr
) + ehdr
.e_phnum
* sizeof(Elf64_Phdr
);
519 elfcorebuf
= kmalloc(elfcorebuf_sz
, GFP_KERNEL
);
522 addr
= elfcorehdr_addr
;
523 rc
= read_from_oldmem(elfcorebuf
, elfcorebuf_sz
, &addr
, 0);
529 /* Merge all PT_NOTE headers into one. */
530 rc
= merge_note_headers_elf64(elfcorebuf
, &elfcorebuf_sz
, &vmcore_list
);
535 rc
= process_ptload_program_headers_elf64(elfcorebuf
, elfcorebuf_sz
,
541 set_vmcore_list_offsets_elf64(elfcorebuf
, &vmcore_list
);
545 static int __init
parse_crash_elf32_headers(void)
551 addr
= elfcorehdr_addr
;
553 /* Read Elf header */
554 rc
= read_from_oldmem((char*)&ehdr
, sizeof(Elf32_Ehdr
), &addr
, 0);
558 /* Do some basic Verification. */
559 if (memcmp(ehdr
.e_ident
, ELFMAG
, SELFMAG
) != 0 ||
560 (ehdr
.e_type
!= ET_CORE
) ||
561 !elf_check_arch(&ehdr
) ||
562 ehdr
.e_ident
[EI_CLASS
] != ELFCLASS32
||
563 ehdr
.e_ident
[EI_VERSION
] != EV_CURRENT
||
564 ehdr
.e_version
!= EV_CURRENT
||
565 ehdr
.e_ehsize
!= sizeof(Elf32_Ehdr
) ||
566 ehdr
.e_phentsize
!= sizeof(Elf32_Phdr
) ||
568 printk(KERN_WARNING
"Warning: Core image elf header is not"
573 /* Read in all elf headers. */
574 elfcorebuf_sz
= sizeof(Elf32_Ehdr
) + ehdr
.e_phnum
* sizeof(Elf32_Phdr
);
575 elfcorebuf
= kmalloc(elfcorebuf_sz
, GFP_KERNEL
);
578 addr
= elfcorehdr_addr
;
579 rc
= read_from_oldmem(elfcorebuf
, elfcorebuf_sz
, &addr
, 0);
585 /* Merge all PT_NOTE headers into one. */
586 rc
= merge_note_headers_elf32(elfcorebuf
, &elfcorebuf_sz
, &vmcore_list
);
591 rc
= process_ptload_program_headers_elf32(elfcorebuf
, elfcorebuf_sz
,
597 set_vmcore_list_offsets_elf32(elfcorebuf
, &vmcore_list
);
601 static int __init
parse_crash_elf_headers(void)
603 unsigned char e_ident
[EI_NIDENT
];
607 addr
= elfcorehdr_addr
;
608 rc
= read_from_oldmem(e_ident
, EI_NIDENT
, &addr
, 0);
611 if (memcmp(e_ident
, ELFMAG
, SELFMAG
) != 0) {
612 printk(KERN_WARNING
"Warning: Core image elf header"
617 if (e_ident
[EI_CLASS
] == ELFCLASS64
) {
618 rc
= parse_crash_elf64_headers();
622 /* Determine vmcore size. */
623 vmcore_size
= get_vmcore_size_elf64(elfcorebuf
);
624 } else if (e_ident
[EI_CLASS
] == ELFCLASS32
) {
625 rc
= parse_crash_elf32_headers();
629 /* Determine vmcore size. */
630 vmcore_size
= get_vmcore_size_elf32(elfcorebuf
);
632 printk(KERN_WARNING
"Warning: Core image elf header is not"
639 /* Init function for vmcore module. */
640 static int __init
vmcore_init(void)
644 /* If elfcorehdr= has been passed in cmdline, then capture the dump.*/
645 if (!(is_vmcore_usable()))
647 rc
= parse_crash_elf_headers();
649 printk(KERN_WARNING
"Kdump: vmcore not initialized\n");
653 proc_vmcore
= proc_create("vmcore", S_IRUSR
, NULL
, &proc_vmcore_operations
);
655 proc_vmcore
->size
= vmcore_size
;
658 module_init(vmcore_init
)