/*
 *	fs/proc/vmcore.c Interface for accessing the crash
 *				dump from the system's previous life.
 *	Heavily borrowed from fs/proc/kcore.c
 *	Created by: Hariprasad Nellitheertha (hari@in.ibm.com)
 *	Copyright (C) IBM Corporation, 2004. All rights reserved
 *
 */
#include <linux/proc_fs.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/highmem.h>
#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/list.h>
#include <asm/uaccess.h>
/* List representing chunks of contiguous memory areas and their offsets in
 * vmcore file.
 */
static LIST_HEAD(vmcore_list);
/* Stores the pointer to the buffer containing kernel elf core headers. */
static char *elfcorebuf;
static size_t elfcorebuf_sz;
/* Total size of vmcore file. */
static u64 vmcore_size;
static struct proc_dir_entry *proc_vmcore = NULL;
/* Reads a page from the oldmem device from given offset. */
static ssize_t read_from_oldmem(char *buf, size_t count,
				u64 *ppos, int userbuf)
{
	unsigned long pfn, offset;
	size_t nr_bytes;
	ssize_t read = 0, tmp;

	if (!count)
		return 0;

	offset = (unsigned long)(*ppos % PAGE_SIZE);
	pfn = (unsigned long)(*ppos / PAGE_SIZE);

	do {
		if (count > (PAGE_SIZE - offset))
			nr_bytes = PAGE_SIZE - offset;
		else
			nr_bytes = count;

		tmp = copy_oldmem_page(pfn, buf, nr_bytes, offset, userbuf);
		if (tmp < 0)
			return tmp;
		*ppos += nr_bytes;
		count -= nr_bytes;
		buf += nr_bytes;
		read += nr_bytes;
		++pfn;
		offset = 0;
	} while (count);

	return read;
}
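/*
 * read_from_oldmem() relies on copy_oldmem_page(), which every architecture
 * supplies for kdump.  For illustration only (not part of this file), here is
 * a minimal sketch of one possible implementation, assuming the old kernel's
 * page can simply be ioremap()ed and ignoring __iomem annotations; real
 * architectures use their own mapping primitives, and the name
 * example_copy_oldmem_page is hypothetical.
 */
#if 0
static ssize_t example_copy_oldmem_page(unsigned long pfn, char *buf,
				size_t csize, unsigned long offset,
				int userbuf)
{
	void *vaddr;

	if (!csize)
		return 0;

	/* Map one page of the crashed kernel's memory. */
	vaddr = ioremap(pfn << PAGE_SHIFT, PAGE_SIZE);
	if (!vaddr)
		return -ENOMEM;

	/* Copy the requested bytes to a user or kernel buffer. */
	if (userbuf) {
		if (copy_to_user(buf, vaddr + offset, csize)) {
			iounmap(vaddr);
			return -EFAULT;
		}
	} else
		memcpy(buf, vaddr + offset, csize);

	iounmap(vaddr);
	return csize;
}
#endif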
/* Maps vmcore file offset to respective physical address in memory. */
static u64 map_offset_to_paddr(loff_t offset, struct list_head *vc_list,
						struct vmcore **m_ptr)
{
	struct vmcore *m;
	u64 paddr;

	list_for_each_entry(m, vc_list, list) {
		u64 start, end;
		start = m->offset;
		end = m->offset + m->size - 1;
		if (offset >= start && offset <= end) {
			paddr = m->paddr + offset - start;
			*m_ptr = m;
			return paddr;
		}
	}
	*m_ptr = NULL;
	return 0;
}
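/*
 * Illustrative example (not in the original file, numbers made up): suppose
 * the first chunk on vmcore_list has offset=0x1000, size=0x200, paddr=0x4000
 * (the merged notes) and the second has offset=0x1200, size=0x100000,
 * paddr=0x100000 (a PT_LOAD region).  A file offset of 0x1300 falls inside
 * the second chunk, so the function returns
 * paddr = 0x100000 + (0x1300 - 0x1200) = 0x100100 and sets *m_ptr to that
 * chunk.
 */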
/* Read from the ELF header and then the crash dump. On error, negative value is
 * returned otherwise number of bytes read are returned.
 */
static ssize_t read_vmcore(struct file *file, char __user *buffer,
				size_t buflen, loff_t *fpos)
{
	ssize_t acc = 0, tmp;
	size_t tsz;
	u64 start, nr_bytes;
	struct vmcore *curr_m = NULL;

	if (buflen == 0 || *fpos >= vmcore_size)
		return 0;

	/* trim buflen to not go beyond EOF */
	if (buflen > vmcore_size - *fpos)
		buflen = vmcore_size - *fpos;

	/* Read ELF core header */
	if (*fpos < elfcorebuf_sz) {
		tsz = elfcorebuf_sz - *fpos;
		if (buflen < tsz)
			tsz = buflen;
		if (copy_to_user(buffer, elfcorebuf + *fpos, tsz))
			return -EFAULT;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;

		/* leave now if filled buffer already */
		if (buflen == 0)
			return acc;
	}

	start = map_offset_to_paddr(*fpos, &vmcore_list, &curr_m);
	if (!curr_m)
		return -EINVAL;
	if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
		tsz = buflen;

	/* Calculate left bytes in current memory segment. */
	nr_bytes = (curr_m->size - (start - curr_m->paddr));
	if (tsz > nr_bytes)
		tsz = nr_bytes;

	while (buflen) {
		tmp = read_from_oldmem(buffer, tsz, &start, 1);
		if (tmp < 0)
			return tmp;
		buflen -= tsz;
		*fpos += tsz;
		buffer += tsz;
		acc += tsz;
		if (start >= (curr_m->paddr + curr_m->size)) {
			if (curr_m->list.next == &vmcore_list)
				return acc;	/* EOF */
			curr_m = list_entry(curr_m->list.next,
						struct vmcore, list);
			start = curr_m->paddr;
		}
		if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
			tsz = buflen;
		/* Calculate left bytes in current memory segment. */
		nr_bytes = (curr_m->size - (start - curr_m->paddr));
		if (tsz > nr_bytes)
			tsz = nr_bytes;
	}
	return acc;
}
static const struct file_operations proc_vmcore_operations = {
	.read		= read_vmcore,
};
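/*
 * Usage sketch (not part of this file): from the capture kernel, user space
 * reads /proc/vmcore like an ordinary file; the first bytes are the rebuilt
 * ELF core header served from elfcorebuf.  A minimal standalone userspace
 * check, assuming a 64-bit dump, might look like the following; it is shown
 * purely for illustration.
 */
#if 0
#include <elf.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	Elf64_Ehdr ehdr;
	int fd = open("/proc/vmcore", O_RDONLY);

	if (fd < 0 || read(fd, &ehdr, sizeof(ehdr)) != sizeof(ehdr))
		return 1;
	/* The header must look like an ELF core dump. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) == 0 &&
	    ehdr.e_type == ET_CORE)
		printf("ELF core dump with %u program headers\n",
		       ehdr.e_phnum);
	close(fd);
	return 0;
}
#endif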
static struct vmcore* __init get_new_element(void)
{
	return kzalloc(sizeof(struct vmcore), GFP_KERNEL);
}
static u64 __init get_vmcore_size_elf64(char *elfptr)
{
	int i;
	u64 size;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));
	size = sizeof(Elf64_Ehdr) + ((ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++) {
		size += phdr_ptr->p_memsz;
		phdr_ptr++;
	}
	return size;
}
static u64 __init get_vmcore_size_elf32(char *elfptr)
{
	int i;
	u64 size;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));
	size = sizeof(Elf32_Ehdr) + ((ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++) {
		size += phdr_ptr->p_memsz;
		phdr_ptr++;
	}
	return size;
}
/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf64(char *elfptr, size_t *elfsz,
						struct list_head *vc_list)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr phdr, *phdr_ptr;
	Elf64_Nhdr *nhdr_ptr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		int j;
		void *notes_section;
		struct vmcore *new;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		nr_ptnote++;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		for (j = 0; j < max_sz; j += sz) {
			if (nhdr_ptr->n_namesz == 0)
				break;
			sz = sizeof(Elf64_Nhdr) +
				((nhdr_ptr->n_namesz + 3) & ~3) +
				((nhdr_ptr->n_descsz + 3) & ~3);
			real_sz += sz;
			nhdr_ptr = (Elf64_Nhdr *)((char *)nhdr_ptr + sz);
		}

		/* Add this contiguous chunk of notes section to vmcore list.*/
		new = get_new_element();
		if (!new) {
			kfree(notes_section);
			return -ENOMEM;
		}
		new->paddr = phdr_ptr->p_offset;
		new->size = real_sz;
		list_add_tail(&new->list, vc_list);
		phdr_sz += real_sz;
		kfree(notes_section);
	}

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type	= PT_NOTE;
	phdr.p_flags	= 0;
	note_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf64_Phdr);
	phdr.p_offset	= note_off;
	phdr.p_vaddr	= phdr.p_paddr = 0;
	phdr.p_filesz	= phdr.p_memsz = phdr_sz;
	phdr.p_align	= 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf64_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf64_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf64_Ehdr) - sizeof(Elf64_Phdr)));

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}
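/*
 * Illustrative example (not in the original file): each ELF note entry is an
 * Elf64_Nhdr followed by its name and descriptor, both padded to a 4-byte
 * boundary, which is what the ((x + 3) & ~3) rounding above computes.  For a
 * note with n_namesz = 5 ("CORE" plus NUL) and n_descsz = 336, the entry
 * size is sizeof(Elf64_Nhdr) + 8 + 336 = 12 + 8 + 336 = 356 bytes, so
 * nhdr_ptr advances by 356 to reach the next note.  The numbers are chosen
 * only to show the arithmetic.
 */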
/* Merges all the PT_NOTE headers into one. */
static int __init merge_note_headers_elf32(char *elfptr, size_t *elfsz,
						struct list_head *vc_list)
{
	int i, nr_ptnote = 0, rc = 0;
	char *tmp;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr phdr, *phdr_ptr;
	Elf32_Nhdr *nhdr_ptr;
	u64 phdr_sz = 0, note_off;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr));
	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		int j;
		void *notes_section;
		struct vmcore *new;
		u64 offset, max_sz, sz, real_sz = 0;
		if (phdr_ptr->p_type != PT_NOTE)
			continue;
		nr_ptnote++;
		max_sz = phdr_ptr->p_memsz;
		offset = phdr_ptr->p_offset;
		notes_section = kmalloc(max_sz, GFP_KERNEL);
		if (!notes_section)
			return -ENOMEM;
		rc = read_from_oldmem(notes_section, max_sz, &offset, 0);
		if (rc < 0) {
			kfree(notes_section);
			return rc;
		}
		nhdr_ptr = notes_section;
		for (j = 0; j < max_sz; j += sz) {
			if (nhdr_ptr->n_namesz == 0)
				break;
			sz = sizeof(Elf32_Nhdr) +
				((nhdr_ptr->n_namesz + 3) & ~3) +
				((nhdr_ptr->n_descsz + 3) & ~3);
			real_sz += sz;
			nhdr_ptr = (Elf32_Nhdr *)((char *)nhdr_ptr + sz);
		}

		/* Add this contiguous chunk of notes section to vmcore list.*/
		new = get_new_element();
		if (!new) {
			kfree(notes_section);
			return -ENOMEM;
		}
		new->paddr = phdr_ptr->p_offset;
		new->size = real_sz;
		list_add_tail(&new->list, vc_list);
		phdr_sz += real_sz;
		kfree(notes_section);
	}

	/* Prepare merged PT_NOTE program header. */
	phdr.p_type	= PT_NOTE;
	phdr.p_flags	= 0;
	note_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum - nr_ptnote + 1) * sizeof(Elf32_Phdr);
	phdr.p_offset	= note_off;
	phdr.p_vaddr	= phdr.p_paddr = 0;
	phdr.p_filesz	= phdr.p_memsz = phdr_sz;
	phdr.p_align	= 0;

	/* Add merged PT_NOTE program header. */
	tmp = elfptr + sizeof(Elf32_Ehdr);
	memcpy(tmp, &phdr, sizeof(phdr));
	tmp += sizeof(phdr);

	/* Remove unwanted PT_NOTE program headers. */
	i = (nr_ptnote - 1) * sizeof(Elf32_Phdr);
	*elfsz = *elfsz - i;
	memmove(tmp, tmp + i, ((*elfsz) - sizeof(Elf32_Ehdr) - sizeof(Elf32_Phdr)));

	/* Modify e_phnum to reflect merged headers. */
	ehdr_ptr->e_phnum = ehdr_ptr->e_phnum - nr_ptnote + 1;

	return 0;
}
/* Add memory chunks represented by program headers to vmcore list. Also update
 * the new offset fields of exported program headers. */
static int __init process_ptload_program_headers_elf64(char *elfptr,
						size_t elfsz,
						struct list_head *vc_list)
{
	int i;
	Elf64_Ehdr *ehdr_ptr;
	Elf64_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;
	phdr_ptr = (Elf64_Phdr *)(elfptr + sizeof(Elf64_Ehdr)); /* PT_NOTE hdr */

	/* First program header is PT_NOTE header. */
	vmcore_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr) +
			phdr_ptr->p_memsz; /* Note sections */

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		/* Add this contiguous chunk of memory to vmcore list.*/
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = phdr_ptr->p_offset;
		new->size = phdr_ptr->p_memsz;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off;
		vmcore_off = vmcore_off + phdr_ptr->p_memsz;
	}
	return 0;
}
static int __init process_ptload_program_headers_elf32(char *elfptr,
						size_t elfsz,
						struct list_head *vc_list)
{
	int i;
	Elf32_Ehdr *ehdr_ptr;
	Elf32_Phdr *phdr_ptr;
	loff_t vmcore_off;
	struct vmcore *new;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;
	phdr_ptr = (Elf32_Phdr *)(elfptr + sizeof(Elf32_Ehdr)); /* PT_NOTE hdr */

	/* First program header is PT_NOTE header. */
	vmcore_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr) +
			phdr_ptr->p_memsz; /* Note sections */

	for (i = 0; i < ehdr_ptr->e_phnum; i++, phdr_ptr++) {
		if (phdr_ptr->p_type != PT_LOAD)
			continue;

		/* Add this contiguous chunk of memory to vmcore list.*/
		new = get_new_element();
		if (!new)
			return -ENOMEM;
		new->paddr = phdr_ptr->p_offset;
		new->size = phdr_ptr->p_memsz;
		list_add_tail(&new->list, vc_list);

		/* Update the program header offset. */
		phdr_ptr->p_offset = vmcore_off;
		vmcore_off = vmcore_off + phdr_ptr->p_memsz;
	}
	return 0;
}
/* Sets offset fields of vmcore elements. */
static void __init set_vmcore_list_offsets_elf64(char *elfptr,
						struct list_head *vc_list)
{
	loff_t vmcore_off;
	Elf64_Ehdr *ehdr_ptr;
	struct vmcore *m;

	ehdr_ptr = (Elf64_Ehdr *)elfptr;

	/* Skip Elf header and program headers. */
	vmcore_off = sizeof(Elf64_Ehdr) +
			(ehdr_ptr->e_phnum) * sizeof(Elf64_Phdr);

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}
/* Sets offset fields of vmcore elements. */
static void __init set_vmcore_list_offsets_elf32(char *elfptr,
						struct list_head *vc_list)
{
	loff_t vmcore_off;
	Elf32_Ehdr *ehdr_ptr;
	struct vmcore *m;

	ehdr_ptr = (Elf32_Ehdr *)elfptr;

	/* Skip Elf header and program headers. */
	vmcore_off = sizeof(Elf32_Ehdr) +
			(ehdr_ptr->e_phnum) * sizeof(Elf32_Phdr);

	list_for_each_entry(m, vc_list, list) {
		m->offset = vmcore_off;
		vmcore_off += m->size;
	}
}
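/*
 * Resulting /proc/vmcore layout (illustrative summary, not in the original
 * file): after the helpers above have run, the exported file looks like
 *
 *	[ ELF header | merged PT_NOTE phdr + PT_LOAD phdrs | note data |
 *	  PT_LOAD memory chunks ... ]
 *
 * set_vmcore_list_offsets_elf64/32() walk vmcore_list in order and assign
 * each chunk the running file offset, so read_vmcore() can translate any
 * file position back to an old-memory physical address via
 * map_offset_to_paddr().
 */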
static int __init parse_crash_elf64_headers(void)
{
	int rc = 0;
	Elf64_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = read_from_oldmem((char *)&ehdr, sizeof(Elf64_Ehdr), &addr, 0);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!vmcore_elf_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS64 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf64_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf64_Phdr) ||
		ehdr.e_phnum == 0) {
		printk(KERN_WARNING "Warning: Core image elf header is not"
					" sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz = sizeof(Elf64_Ehdr) + ehdr.e_phnum * sizeof(Elf64_Phdr);
	elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
	if (rc < 0) {
		kfree(elfcorebuf);
		return rc;
	}

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf64(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
	if (rc) {
		kfree(elfcorebuf);
		return rc;
	}
	rc = process_ptload_program_headers_elf64(elfcorebuf, elfcorebuf_sz,
							&vmcore_list);
	if (rc) {
		kfree(elfcorebuf);
		return rc;
	}
	set_vmcore_list_offsets_elf64(elfcorebuf, &vmcore_list);
	return 0;
}
static int __init parse_crash_elf32_headers(void)
{
	int rc = 0;
	Elf32_Ehdr ehdr;
	u64 addr;

	addr = elfcorehdr_addr;

	/* Read Elf header */
	rc = read_from_oldmem((char *)&ehdr, sizeof(Elf32_Ehdr), &addr, 0);
	if (rc < 0)
		return rc;

	/* Do some basic verification. */
	if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0 ||
		(ehdr.e_type != ET_CORE) ||
		!elf_check_arch(&ehdr) ||
		ehdr.e_ident[EI_CLASS] != ELFCLASS32 ||
		ehdr.e_ident[EI_VERSION] != EV_CURRENT ||
		ehdr.e_version != EV_CURRENT ||
		ehdr.e_ehsize != sizeof(Elf32_Ehdr) ||
		ehdr.e_phentsize != sizeof(Elf32_Phdr) ||
		ehdr.e_phnum == 0) {
		printk(KERN_WARNING "Warning: Core image elf header is not"
					" sane\n");
		return -EINVAL;
	}

	/* Read in all elf headers. */
	elfcorebuf_sz = sizeof(Elf32_Ehdr) + ehdr.e_phnum * sizeof(Elf32_Phdr);
	elfcorebuf = kmalloc(elfcorebuf_sz, GFP_KERNEL);
	if (!elfcorebuf)
		return -ENOMEM;
	addr = elfcorehdr_addr;
	rc = read_from_oldmem(elfcorebuf, elfcorebuf_sz, &addr, 0);
	if (rc < 0) {
		kfree(elfcorebuf);
		return rc;
	}

	/* Merge all PT_NOTE headers into one. */
	rc = merge_note_headers_elf32(elfcorebuf, &elfcorebuf_sz, &vmcore_list);
	if (rc) {
		kfree(elfcorebuf);
		return rc;
	}
	rc = process_ptload_program_headers_elf32(elfcorebuf, elfcorebuf_sz,
							&vmcore_list);
	if (rc) {
		kfree(elfcorebuf);
		return rc;
	}
	set_vmcore_list_offsets_elf32(elfcorebuf, &vmcore_list);
	return 0;
}
static int __init parse_crash_elf_headers(void)
{
	unsigned char e_ident[EI_NIDENT];
	u64 addr;
	int rc = 0;

	addr = elfcorehdr_addr;
	rc = read_from_oldmem(e_ident, EI_NIDENT, &addr, 0);
	if (rc < 0)
		return rc;
	if (memcmp(e_ident, ELFMAG, SELFMAG) != 0) {
		printk(KERN_WARNING "Warning: Core image elf header"
					" not found\n");
		return -EINVAL;
	}

	if (e_ident[EI_CLASS] == ELFCLASS64) {
		rc = parse_crash_elf64_headers();
		if (rc)
			return rc;

		/* Determine vmcore size. */
		vmcore_size = get_vmcore_size_elf64(elfcorebuf);
	} else if (e_ident[EI_CLASS] == ELFCLASS32) {
		rc = parse_crash_elf32_headers();
		if (rc)
			return rc;

		/* Determine vmcore size. */
		vmcore_size = get_vmcore_size_elf32(elfcorebuf);
	} else {
		printk(KERN_WARNING "Warning: Core image elf header is not"
					" sane\n");
		return -EINVAL;
	}
	return 0;
}
/* Init function for vmcore module. */
static int __init vmcore_init(void)
{
	int rc = 0;

	/* If elfcorehdr= has been passed in cmdline, then capture the dump.*/
	if (!(is_vmcore_usable()))
		return rc;
	rc = parse_crash_elf_headers();
	if (rc) {
		printk(KERN_WARNING "Kdump: vmcore not initialized\n");
		return rc;
	}

	proc_vmcore = proc_create("vmcore", S_IRUSR, NULL, &proc_vmcore_operations);
	if (proc_vmcore)
		proc_vmcore->size = vmcore_size;
	return 0;
}
module_init(vmcore_init)
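/*
 * Usage note (not part of the original file): is_vmcore_usable() succeeds
 * only in a capture kernel booted by kexec with an elfcorehdr= parameter
 * pointing at the ELF headers prepared for the crashed kernel.  In that
 * environment the dump is typically saved with something like
 * "cp /proc/vmcore /var/crash/vmcore" or a filtering tool such as
 * makedumpfile, and can then be inspected with gdb or the crash utility.
 */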