/*
 *	fs/proc/vmcore.c Interface for accessing the crash
 *				dump from the system's previous life.
 *	Heavily borrowed from fs/proc/kcore.c
 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *	Copyright (C) IBM Corporation, 2004. All rights reserved
 */
11 #include <linux/proc_fs.h>
12 #include <linux/user.h>
13 #include <linux/elf.h>
14 #include <linux/elfcore.h>
15 #include <linux/slab.h>
16 #include <linux/highmem.h>
17 #include <linux/bootmem.h>
18 #include <linux/init.h>
19 #include <linux/crash_dump.h>
20 #include <linux/list.h>
21 #include <asm/uaccess.h>
24 /* List representing chunks of contiguous memory areas and their offsets in
27 static LIST_HEAD(vmcore_list
);
29 /* Stores the pointer to the buffer containing kernel elf core headers. */
30 static char *elfcorebuf
;
31 static size_t elfcorebuf_sz
;
33 /* Total size of vmcore file. */
34 static u64 vmcore_size
;
36 static struct proc_dir_entry
*proc_vmcore
= NULL
;
38 /* Reads a page from the oldmem device from given offset. */
39 static ssize_t
read_from_oldmem(char *buf
, size_t count
,
40 u64
*ppos
, int userbuf
)
42 unsigned long pfn
, offset
;
44 ssize_t read
= 0, tmp
;
49 offset
= (unsigned long)(*ppos
% PAGE_SIZE
);
50 pfn
= (unsigned long)(*ppos
/ PAGE_SIZE
);
53 if (count
> (PAGE_SIZE
- offset
))
54 nr_bytes
= PAGE_SIZE
- offset
;
58 tmp
= copy_oldmem_page(pfn
, buf
, nr_bytes
, offset
, userbuf
);
72 /* Maps vmcore file offset to respective physical address in memroy. */
73 static u64
map_offset_to_paddr(loff_t offset
, struct list_head
*vc_list
,
74 struct vmcore
**m_ptr
)
79 list_for_each_entry(m
, vc_list
, list
) {
82 end
= m
->offset
+ m
->size
- 1;
83 if (offset
>= start
&& offset
<= end
) {
84 paddr
= m
->paddr
+ offset
- start
;
93 /* Read from the ELF header and then the crash dump. On error, negative value is
94 * returned otherwise number of bytes read are returned.
96 static ssize_t
read_vmcore(struct file
*file
, char __user
*buffer
,
97 size_t buflen
, loff_t
*fpos
)
102 struct vmcore
*curr_m
= NULL
;
104 if (buflen
== 0 || *fpos
>= vmcore_size
)
107 /* trim buflen to not go beyond EOF */
108 if (buflen
> vmcore_size
- *fpos
)
109 buflen
= vmcore_size
- *fpos
;
111 /* Read ELF core header */
112 if (*fpos
< elfcorebuf_sz
) {
113 tsz
= elfcorebuf_sz
- *fpos
;
116 if (copy_to_user(buffer
, elfcorebuf
+ *fpos
, tsz
))
123 /* leave now if filled buffer already */
128 start
= map_offset_to_paddr(*fpos
, &vmcore_list
, &curr_m
);
131 if ((tsz
= (PAGE_SIZE
- (start
& ~PAGE_MASK
))) > buflen
)
134 /* Calculate left bytes in current memory segment. */
135 nr_bytes
= (curr_m
->size
- (start
- curr_m
->paddr
));
140 tmp
= read_from_oldmem(buffer
, tsz
, &start
, 1);
147 if (start
>= (curr_m
->paddr
+ curr_m
->size
)) {
148 if (curr_m
->list
.next
== &vmcore_list
)
150 curr_m
= list_entry(curr_m
->list
.next
,
151 struct vmcore
, list
);
152 start
= curr_m
->paddr
;
154 if ((tsz
= (PAGE_SIZE
- (start
& ~PAGE_MASK
))) > buflen
)
156 /* Calculate left bytes in current memory segment. */
157 nr_bytes
= (curr_m
->size
- (start
- curr_m
->paddr
));
164 static const struct file_operations proc_vmcore_operations
= {
166 .llseek
= default_llseek
,
169 static struct vmcore
* __init
get_new_element(void)
171 return kzalloc(sizeof(struct vmcore
), GFP_KERNEL
);
174 static u64 __init
get_vmcore_size_elf64(char *elfptr
)
178 Elf64_Ehdr
*ehdr_ptr
;
179 Elf64_Phdr
*phdr_ptr
;
181 ehdr_ptr
= (Elf64_Ehdr
*)elfptr
;
182 phdr_ptr
= (Elf64_Phdr
*)(elfptr
+ sizeof(Elf64_Ehdr
));
183 size
= sizeof(Elf64_Ehdr
) + ((ehdr_ptr
->e_phnum
) * sizeof(Elf64_Phdr
));
184 for (i
= 0; i
< ehdr_ptr
->e_phnum
; i
++) {
185 size
+= phdr_ptr
->p_memsz
;
191 static u64 __init
get_vmcore_size_elf32(char *elfptr
)
195 Elf32_Ehdr
*ehdr_ptr
;
196 Elf32_Phdr
*phdr_ptr
;
198 ehdr_ptr
= (Elf32_Ehdr
*)elfptr
;
199 phdr_ptr
= (Elf32_Phdr
*)(elfptr
+ sizeof(Elf32_Ehdr
));
200 size
= sizeof(Elf32_Ehdr
) + ((ehdr_ptr
->e_phnum
) * sizeof(Elf32_Phdr
));
201 for (i
= 0; i
< ehdr_ptr
->e_phnum
; i
++) {
202 size
+= phdr_ptr
->p_memsz
;
208 /* Merges all the PT_NOTE headers into one. */
209 static int __init
merge_note_headers_elf64(char *elfptr
, size_t *elfsz
,
210 struct list_head
*vc_list
)
212 int i
, nr_ptnote
=0, rc
=0;
214 Elf64_Ehdr
*ehdr_ptr
;
215 Elf64_Phdr phdr
, *phdr_ptr
;
216 Elf64_Nhdr
*nhdr_ptr
;
217 u64 phdr_sz
= 0, note_off
;
219 ehdr_ptr
= (Elf64_Ehdr
*)elfptr
;
220 phdr_ptr
= (Elf64_Phdr
*)(elfptr
+ sizeof(Elf64_Ehdr
));
221 for (i
= 0; i
< ehdr_ptr
->e_phnum
; i
++, phdr_ptr
++) {
225 u64 offset
, max_sz
, sz
, real_sz
= 0;
226 if (phdr_ptr
->p_type
!= PT_NOTE
)
229 max_sz
= phdr_ptr
->p_memsz
;
230 offset
= phdr_ptr
->p_offset
;
231 notes_section
= kmalloc(max_sz
, GFP_KERNEL
);
234 rc
= read_from_oldmem(notes_section
, max_sz
, &offset
, 0);
236 kfree(notes_section
);
239 nhdr_ptr
= notes_section
;
240 for (j
= 0; j
< max_sz
; j
+= sz
) {
241 if (nhdr_ptr
->n_namesz
== 0)
243 sz
= sizeof(Elf64_Nhdr
) +
244 ((nhdr_ptr
->n_namesz
+ 3) & ~3) +
245 ((nhdr_ptr
->n_descsz
+ 3) & ~3);
247 nhdr_ptr
= (Elf64_Nhdr
*)((char*)nhdr_ptr
+ sz
);
250 /* Add this contiguous chunk of notes section to vmcore list.*/
251 new = get_new_element();
253 kfree(notes_section
);
256 new->paddr
= phdr_ptr
->p_offset
;
258 list_add_tail(&new->list
, vc_list
);
260 kfree(notes_section
);
263 /* Prepare merged PT_NOTE program header. */
264 phdr
.p_type
= PT_NOTE
;
266 note_off
= sizeof(Elf64_Ehdr
) +
267 (ehdr_ptr
->e_phnum
- nr_ptnote
+1) * sizeof(Elf64_Phdr
);
268 phdr
.p_offset
= note_off
;
269 phdr
.p_vaddr
= phdr
.p_paddr
= 0;
270 phdr
.p_filesz
= phdr
.p_memsz
= phdr_sz
;
273 /* Add merged PT_NOTE program header*/
274 tmp
= elfptr
+ sizeof(Elf64_Ehdr
);
275 memcpy(tmp
, &phdr
, sizeof(phdr
));
278 /* Remove unwanted PT_NOTE program headers. */
279 i
= (nr_ptnote
- 1) * sizeof(Elf64_Phdr
);
281 memmove(tmp
, tmp
+i
, ((*elfsz
)-sizeof(Elf64_Ehdr
)-sizeof(Elf64_Phdr
)));
283 /* Modify e_phnum to reflect merged headers. */
284 ehdr_ptr
->e_phnum
= ehdr_ptr
->e_phnum
- nr_ptnote
+ 1;
289 /* Merges all the PT_NOTE headers into one. */
290 static int __init
merge_note_headers_elf32(char *elfptr
, size_t *elfsz
,
291 struct list_head
*vc_list
)
293 int i
, nr_ptnote
=0, rc
=0;
295 Elf32_Ehdr
*ehdr_ptr
;
296 Elf32_Phdr phdr
, *phdr_ptr
;
297 Elf32_Nhdr
*nhdr_ptr
;
298 u64 phdr_sz
= 0, note_off
;
300 ehdr_ptr
= (Elf32_Ehdr
*)elfptr
;
301 phdr_ptr
= (Elf32_Phdr
*)(elfptr
+ sizeof(Elf32_Ehdr
));
302 for (i
= 0; i
< ehdr_ptr
->e_phnum
; i
++, phdr_ptr
++) {
306 u64 offset
, max_sz
, sz
, real_sz
= 0;
307 if (phdr_ptr
->p_type
!= PT_NOTE
)
310 max_sz
= phdr_ptr
->p_memsz
;
311 offset
= phdr_ptr
->p_offset
;
312 notes_section
= kmalloc(max_sz
, GFP_KERNEL
);
315 rc
= read_from_oldmem(notes_section
, max_sz
, &offset
, 0);
317 kfree(notes_section
);
320 nhdr_ptr
= notes_section
;
321 for (j
= 0; j
< max_sz
; j
+= sz
) {
322 if (nhdr_ptr
->n_namesz
== 0)
324 sz
= sizeof(Elf32_Nhdr
) +
325 ((nhdr_ptr
->n_namesz
+ 3) & ~3) +
326 ((nhdr_ptr
->n_descsz
+ 3) & ~3);
328 nhdr_ptr
= (Elf32_Nhdr
*)((char*)nhdr_ptr
+ sz
);
331 /* Add this contiguous chunk of notes section to vmcore list.*/
332 new = get_new_element();
334 kfree(notes_section
);
337 new->paddr
= phdr_ptr
->p_offset
;
339 list_add_tail(&new->list
, vc_list
);
341 kfree(notes_section
);
344 /* Prepare merged PT_NOTE program header. */
345 phdr
.p_type
= PT_NOTE
;
347 note_off
= sizeof(Elf32_Ehdr
) +
348 (ehdr_ptr
->e_phnum
- nr_ptnote
+1) * sizeof(Elf32_Phdr
);
349 phdr
.p_offset
= note_off
;
350 phdr
.p_vaddr
= phdr
.p_paddr
= 0;
351 phdr
.p_filesz
= phdr
.p_memsz
= phdr_sz
;
354 /* Add merged PT_NOTE program header*/
355 tmp
= elfptr
+ sizeof(Elf32_Ehdr
);
356 memcpy(tmp
, &phdr
, sizeof(phdr
));
359 /* Remove unwanted PT_NOTE program headers. */
360 i
= (nr_ptnote
- 1) * sizeof(Elf32_Phdr
);
362 memmove(tmp
, tmp
+i
, ((*elfsz
)-sizeof(Elf32_Ehdr
)-sizeof(Elf32_Phdr
)));
364 /* Modify e_phnum to reflect merged headers. */
365 ehdr_ptr
->e_phnum
= ehdr_ptr
->e_phnum
- nr_ptnote
+ 1;
370 /* Add memory chunks represented by program headers to vmcore list. Also update
371 * the new offset fields of exported program headers. */
372 static int __init
process_ptload_program_headers_elf64(char *elfptr
,
374 struct list_head
*vc_list
)
377 Elf64_Ehdr
*ehdr_ptr
;
378 Elf64_Phdr
*phdr_ptr
;
382 ehdr_ptr
= (Elf64_Ehdr
*)elfptr
;
383 phdr_ptr
= (Elf64_Phdr
*)(elfptr
+ sizeof(Elf64_Ehdr
)); /* PT_NOTE hdr */
385 /* First program header is PT_NOTE header. */
386 vmcore_off
= sizeof(Elf64_Ehdr
) +
387 (ehdr_ptr
->e_phnum
) * sizeof(Elf64_Phdr
) +
388 phdr_ptr
->p_memsz
; /* Note sections */
390 for (i
= 0; i
< ehdr_ptr
->e_phnum
; i
++, phdr_ptr
++) {
391 if (phdr_ptr
->p_type
!= PT_LOAD
)
394 /* Add this contiguous chunk of memory to vmcore list.*/
395 new = get_new_element();
398 new->paddr
= phdr_ptr
->p_offset
;
399 new->size
= phdr_ptr
->p_memsz
;
400 list_add_tail(&new->list
, vc_list
);
402 /* Update the program header offset. */
403 phdr_ptr
->p_offset
= vmcore_off
;
404 vmcore_off
= vmcore_off
+ phdr_ptr
->p_memsz
;
409 static int __init
process_ptload_program_headers_elf32(char *elfptr
,
411 struct list_head
*vc_list
)
414 Elf32_Ehdr
*ehdr_ptr
;
415 Elf32_Phdr
*phdr_ptr
;
419 ehdr_ptr
= (Elf32_Ehdr
*)elfptr
;
420 phdr_ptr
= (Elf32_Phdr
*)(elfptr
+ sizeof(Elf32_Ehdr
)); /* PT_NOTE hdr */
422 /* First program header is PT_NOTE header. */
423 vmcore_off
= sizeof(Elf32_Ehdr
) +
424 (ehdr_ptr
->e_phnum
) * sizeof(Elf32_Phdr
) +
425 phdr_ptr
->p_memsz
; /* Note sections */
427 for (i
= 0; i
< ehdr_ptr
->e_phnum
; i
++, phdr_ptr
++) {
428 if (phdr_ptr
->p_type
!= PT_LOAD
)
431 /* Add this contiguous chunk of memory to vmcore list.*/
432 new = get_new_element();
435 new->paddr
= phdr_ptr
->p_offset
;
436 new->size
= phdr_ptr
->p_memsz
;
437 list_add_tail(&new->list
, vc_list
);
439 /* Update the program header offset */
440 phdr_ptr
->p_offset
= vmcore_off
;
441 vmcore_off
= vmcore_off
+ phdr_ptr
->p_memsz
;
446 /* Sets offset fields of vmcore elements. */
447 static void __init
set_vmcore_list_offsets_elf64(char *elfptr
,
448 struct list_head
*vc_list
)
451 Elf64_Ehdr
*ehdr_ptr
;
454 ehdr_ptr
= (Elf64_Ehdr
*)elfptr
;
456 /* Skip Elf header and program headers. */
457 vmcore_off
= sizeof(Elf64_Ehdr
) +
458 (ehdr_ptr
->e_phnum
) * sizeof(Elf64_Phdr
);
460 list_for_each_entry(m
, vc_list
, list
) {
461 m
->offset
= vmcore_off
;
462 vmcore_off
+= m
->size
;
466 /* Sets offset fields of vmcore elements. */
467 static void __init
set_vmcore_list_offsets_elf32(char *elfptr
,
468 struct list_head
*vc_list
)
471 Elf32_Ehdr
*ehdr_ptr
;
474 ehdr_ptr
= (Elf32_Ehdr
*)elfptr
;
476 /* Skip Elf header and program headers. */
477 vmcore_off
= sizeof(Elf32_Ehdr
) +
478 (ehdr_ptr
->e_phnum
) * sizeof(Elf32_Phdr
);
480 list_for_each_entry(m
, vc_list
, list
) {
481 m
->offset
= vmcore_off
;
482 vmcore_off
+= m
->size
;
486 static int __init
parse_crash_elf64_headers(void)
492 addr
= elfcorehdr_addr
;
494 /* Read Elf header */
495 rc
= read_from_oldmem((char*)&ehdr
, sizeof(Elf64_Ehdr
), &addr
, 0);
499 /* Do some basic Verification. */
500 if (memcmp(ehdr
.e_ident
, ELFMAG
, SELFMAG
) != 0 ||
501 (ehdr
.e_type
!= ET_CORE
) ||
502 !vmcore_elf64_check_arch(&ehdr
) ||
503 ehdr
.e_ident
[EI_CLASS
] != ELFCLASS64
||
504 ehdr
.e_ident
[EI_VERSION
] != EV_CURRENT
||
505 ehdr
.e_version
!= EV_CURRENT
||
506 ehdr
.e_ehsize
!= sizeof(Elf64_Ehdr
) ||
507 ehdr
.e_phentsize
!= sizeof(Elf64_Phdr
) ||
509 printk(KERN_WARNING
"Warning: Core image elf header is not"
514 /* Read in all elf headers. */
515 elfcorebuf_sz
= sizeof(Elf64_Ehdr
) + ehdr
.e_phnum
* sizeof(Elf64_Phdr
);
516 elfcorebuf
= kmalloc(elfcorebuf_sz
, GFP_KERNEL
);
519 addr
= elfcorehdr_addr
;
520 rc
= read_from_oldmem(elfcorebuf
, elfcorebuf_sz
, &addr
, 0);
526 /* Merge all PT_NOTE headers into one. */
527 rc
= merge_note_headers_elf64(elfcorebuf
, &elfcorebuf_sz
, &vmcore_list
);
532 rc
= process_ptload_program_headers_elf64(elfcorebuf
, elfcorebuf_sz
,
538 set_vmcore_list_offsets_elf64(elfcorebuf
, &vmcore_list
);
542 static int __init
parse_crash_elf32_headers(void)
548 addr
= elfcorehdr_addr
;
550 /* Read Elf header */
551 rc
= read_from_oldmem((char*)&ehdr
, sizeof(Elf32_Ehdr
), &addr
, 0);
555 /* Do some basic Verification. */
556 if (memcmp(ehdr
.e_ident
, ELFMAG
, SELFMAG
) != 0 ||
557 (ehdr
.e_type
!= ET_CORE
) ||
558 !elf_check_arch(&ehdr
) ||
559 ehdr
.e_ident
[EI_CLASS
] != ELFCLASS32
||
560 ehdr
.e_ident
[EI_VERSION
] != EV_CURRENT
||
561 ehdr
.e_version
!= EV_CURRENT
||
562 ehdr
.e_ehsize
!= sizeof(Elf32_Ehdr
) ||
563 ehdr
.e_phentsize
!= sizeof(Elf32_Phdr
) ||
565 printk(KERN_WARNING
"Warning: Core image elf header is not"
570 /* Read in all elf headers. */
571 elfcorebuf_sz
= sizeof(Elf32_Ehdr
) + ehdr
.e_phnum
* sizeof(Elf32_Phdr
);
572 elfcorebuf
= kmalloc(elfcorebuf_sz
, GFP_KERNEL
);
575 addr
= elfcorehdr_addr
;
576 rc
= read_from_oldmem(elfcorebuf
, elfcorebuf_sz
, &addr
, 0);
582 /* Merge all PT_NOTE headers into one. */
583 rc
= merge_note_headers_elf32(elfcorebuf
, &elfcorebuf_sz
, &vmcore_list
);
588 rc
= process_ptload_program_headers_elf32(elfcorebuf
, elfcorebuf_sz
,
594 set_vmcore_list_offsets_elf32(elfcorebuf
, &vmcore_list
);
598 static int __init
parse_crash_elf_headers(void)
600 unsigned char e_ident
[EI_NIDENT
];
604 addr
= elfcorehdr_addr
;
605 rc
= read_from_oldmem(e_ident
, EI_NIDENT
, &addr
, 0);
608 if (memcmp(e_ident
, ELFMAG
, SELFMAG
) != 0) {
609 printk(KERN_WARNING
"Warning: Core image elf header"
614 if (e_ident
[EI_CLASS
] == ELFCLASS64
) {
615 rc
= parse_crash_elf64_headers();
619 /* Determine vmcore size. */
620 vmcore_size
= get_vmcore_size_elf64(elfcorebuf
);
621 } else if (e_ident
[EI_CLASS
] == ELFCLASS32
) {
622 rc
= parse_crash_elf32_headers();
626 /* Determine vmcore size. */
627 vmcore_size
= get_vmcore_size_elf32(elfcorebuf
);
629 printk(KERN_WARNING
"Warning: Core image elf header is not"
636 /* Init function for vmcore module. */
637 static int __init
vmcore_init(void)
641 /* If elfcorehdr= has been passed in cmdline, then capture the dump.*/
642 if (!(is_vmcore_usable()))
644 rc
= parse_crash_elf_headers();
646 printk(KERN_WARNING
"Kdump: vmcore not initialized\n");
650 proc_vmcore
= proc_create("vmcore", S_IRUSR
, NULL
, &proc_vmcore_operations
);
652 proc_vmcore
->size
= vmcore_size
;
655 module_init(vmcore_init
)