/*
 * fs/proc/kcore.c kernel ELF/AOUT core dumper
 *
 * Modelled on fs/exec.c:aout_core_dump()
 * Jeremy Fitzhardinge <jeremy@sw.oz.au>
 * ELF version written by David Howells <David.Howells@nexor.co.uk>
 * Modified and incorporated into 2.3.x by Tigran Aivazian <tigran@sco.com>
 * Support to dump vmalloc'd areas (ELF only), Tigran Aivazian <tigran@sco.com>
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/elf.h>
#include <linux/elfcore.h>
#include <linux/vmalloc.h>
#include <asm/uaccess.h>
#include <asm/io.h>

static int open_kcore(struct inode * inode, struct file * filp)
{
        return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
}

static ssize_t read_kcore(struct file *, char *, size_t, loff_t *);

struct file_operations proc_kcore_operations = {
        read:   read_kcore,
        open:   open_kcore,
};
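
/*
 * The /proc/kcore entry itself is created elsewhere in fs/proc (proc_misc.c
 * in this kernel series) and wired to the operations above.  As a rough
 * sketch -- not the exact registration code -- it amounts to:
 *
 *      proc_root_kcore = create_proc_entry("kcore", S_IRUSR, NULL);
 *      if (proc_root_kcore)
 *              proc_root_kcore->proc_fops = &proc_kcore_operations;
 *
 * open_kcore() then limits access to tasks holding CAP_SYS_RAWIO.
 */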

#ifdef CONFIG_KCORE_AOUT
static ssize_t read_kcore(struct file *file, char *buf, size_t count, loff_t *ppos)
{
        unsigned long long p = *ppos, memsize;
        ssize_t read;
        ssize_t count1;
        char * pnt;
        struct user dump;
#if defined (__i386__) || defined (__mc68000__)
#       define FIRST_MAPPED     PAGE_SIZE       /* we don't have page 0 mapped on x86.. */
#else
#       define FIRST_MAPPED     0
#endif

        memset(&dump, 0, sizeof(struct user));
        dump.magic = CMAGIC;
        dump.u_dsize = (virt_to_phys(high_memory) >> PAGE_SHIFT);
#if defined (__i386__)
        dump.start_code = PAGE_OFFSET;
#endif
#ifdef __alpha__
        dump.start_data = PAGE_OFFSET;
#endif

        memsize = virt_to_phys(high_memory);
        if (p >= memsize)
                return 0;
        if (count > memsize - p)
                count = memsize - p;
        read = 0;

        if (p < sizeof(struct user) && count > 0) {
                count1 = count;
                if (p + count1 > sizeof(struct user))
                        count1 = sizeof(struct user)-p;
                pnt = (char *) &dump + p;
                if (copy_to_user(buf,(void *) pnt, count1))
                        return -EFAULT;
                buf += count1;
                p += count1;
                count -= count1;
                read += count1;
        }

        if (count > 0 && p < PAGE_SIZE + FIRST_MAPPED) {
                count1 = PAGE_SIZE + FIRST_MAPPED - p;
                if (count1 > count)
                        count1 = count;
                if (clear_user(buf, count1))
                        return -EFAULT;
                buf += count1;
                p += count1;
                count -= count1;
                read += count1;
        }
        if (count > 0) {
                if (copy_to_user(buf, (void *) (PAGE_OFFSET+p-PAGE_SIZE), count))
                        return -EFAULT;
                read += count;
        }
        *ppos += read;
        return read;
}
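
/*
 * Layout of the a.out-style image produced above, as implied by the code:
 * a synthesized struct user at file offset 0, zero fill up to
 * PAGE_SIZE + FIRST_MAPPED, and beyond that kernel memory read through the
 * direct mapping (file offset p is served from PAGE_OFFSET + p - PAGE_SIZE),
 * capped at virt_to_phys(high_memory).
 */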
#else /* CONFIG_KCORE_AOUT */

#define roundup(x, y)  ((((x)+((y)-1))/(y))*(y))

/* An ELF note in memory */
struct memelfnote
{
        const char *name;
        int type;
        unsigned int datasz;
        void *data;
};

extern char saved_command_line[];

static size_t get_kcore_size(int *num_vma, size_t *elf_buflen)
{
        size_t try, size = 0;
        struct vm_struct *m;

        *num_vma = 0;
        if (!vmlist) {
                *elf_buflen = PAGE_SIZE;
                return ((size_t)high_memory - PAGE_OFFSET + PAGE_SIZE);
        }

        for (m=vmlist; m; m=m->next) {
                if (m->flags & VM_IOREMAP) /* don't dump ioremap'd stuff! (TA) */
                        continue;
                try = (size_t)m->addr + m->size;
                if (try > size)
                        size = try;
                *num_vma = *num_vma + 1;
        }
        *elf_buflen = sizeof(struct elfhdr) +
                      (*num_vma + 2)*sizeof(struct elf_phdr) +
                      3 * sizeof(struct memelfnote);
        *elf_buflen = PAGE_ALIGN(*elf_buflen);
        return (size - PAGE_OFFSET + *elf_buflen);
}
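
/*
 * Illustrative sizing (assumed numbers, not from this file): with
 * PAGE_OFFSET = 0xc0000000, high_memory = 0xc8000000 (128MB) and an empty
 * vmlist, /proc/kcore reports 128MB + PAGE_SIZE.  With vmalloc'd areas
 * present it instead spans up to the highest vmalloc end address minus
 * PAGE_OFFSET, plus the page-aligned ELF header buffer returned in
 * *elf_buflen.
 */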

/*****************************************************************************/
/*
 * determine size of ELF note
 */
static int notesize(struct memelfnote *en)
{
        int sz;

        sz = sizeof(struct elf_note);
        sz += roundup(strlen(en->name), 4);
        sz += roundup(en->datasz, 4);

        return sz;
} /* end notesize() */

/*****************************************************************************/
/*
 * store a note in the header buffer
 */
static char *storenote(struct memelfnote *men, char *bufp)
{
        struct elf_note en;

#define DUMP_WRITE(addr,nr) do { memcpy(bufp,addr,nr); bufp += nr; } while(0)

        en.n_namesz = strlen(men->name);
        en.n_descsz = men->datasz;
        en.n_type = men->type;

        DUMP_WRITE(&en, sizeof(en));
        DUMP_WRITE(men->name, en.n_namesz);

        /* XXX - cast from long long to long to avoid need for libgcc.a */
        bufp = (char*) roundup((unsigned long)bufp,4);
        DUMP_WRITE(men->data, men->datasz);
        bufp = (char*) roundup((unsigned long)bufp,4);

#undef DUMP_WRITE

        return bufp;
} /* end storenote() */
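
/*
 * Byte layout emitted by storenote(): the struct elf_note header, then
 * n_namesz name bytes (strlen() only, no terminating NUL), then bufp is
 * rounded up to a 4-byte boundary, then n_descsz descriptor bytes, then a
 * final round-up.  notesize() above must stay in step with this arithmetic.
 */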

/*
 * store an ELF coredump header in the supplied buffer
 * num_vma is the number of elements in vmlist
 */
static void elf_kcore_store_hdr(char *bufp, int num_vma, int dataoff)
{
        struct elf_prstatus prstatus;   /* NT_PRSTATUS */
        struct elf_prpsinfo prpsinfo;   /* NT_PRPSINFO */
        struct elf_phdr *nhdr, *phdr;
        struct elfhdr *elf;
        struct memelfnote notes[3];
        off_t offset = 0;
        struct vm_struct *m;

        /* setup ELF header */
        elf = (struct elfhdr *) bufp;
        bufp += sizeof(struct elfhdr);
        offset += sizeof(struct elfhdr);
        memcpy(elf->e_ident, ELFMAG, SELFMAG);
        elf->e_ident[EI_CLASS]  = ELF_CLASS;
        elf->e_ident[EI_DATA]   = ELF_DATA;
        elf->e_ident[EI_VERSION]= EV_CURRENT;
        memset(elf->e_ident+EI_PAD, 0, EI_NIDENT-EI_PAD);
        elf->e_type     = ET_CORE;
        elf->e_machine  = ELF_ARCH;
        elf->e_version  = EV_CURRENT;
        elf->e_entry    = 0;
        elf->e_phoff    = sizeof(struct elfhdr);
        elf->e_shoff    = 0;
        elf->e_flags    = 0;
        elf->e_ehsize   = sizeof(struct elfhdr);
        elf->e_phentsize= sizeof(struct elf_phdr);
        elf->e_phnum    = 2 + num_vma;
        elf->e_shentsize= 0;
        elf->e_shnum    = 0;
        elf->e_shstrndx = 0;

        /* setup ELF PT_NOTE program header */
        nhdr = (struct elf_phdr *) bufp;
        bufp += sizeof(struct elf_phdr);
        offset += sizeof(struct elf_phdr);
        nhdr->p_type    = PT_NOTE;
        nhdr->p_offset  = 0;
        nhdr->p_vaddr   = 0;
        nhdr->p_paddr   = 0;
        nhdr->p_filesz  = 0;
        nhdr->p_memsz   = 0;
        nhdr->p_flags   = 0;
        nhdr->p_align   = 0;

        /* setup ELF PT_LOAD program header for the
         * virtual range 0xc0000000 -> high_memory */
        phdr = (struct elf_phdr *) bufp;
        bufp += sizeof(struct elf_phdr);
        offset += sizeof(struct elf_phdr);
        phdr->p_type    = PT_LOAD;
        phdr->p_flags   = PF_R|PF_W|PF_X;
        phdr->p_offset  = dataoff;
        phdr->p_vaddr   = PAGE_OFFSET;
        phdr->p_paddr   = __pa(PAGE_OFFSET);
        phdr->p_filesz  = phdr->p_memsz = ((unsigned long)high_memory - PAGE_OFFSET);
        phdr->p_align   = PAGE_SIZE;

        /* setup ELF PT_LOAD program header for every vmalloc'd area */
        for (m=vmlist; m; m=m->next) {
                if (m->flags & VM_IOREMAP) /* don't dump ioremap'd stuff! (TA) */
                        continue;

                phdr = (struct elf_phdr *) bufp;
                bufp += sizeof(struct elf_phdr);
                offset += sizeof(struct elf_phdr);

                phdr->p_type    = PT_LOAD;
                phdr->p_flags   = PF_R|PF_W|PF_X;
                phdr->p_offset  = (size_t)m->addr - PAGE_OFFSET + dataoff;
                phdr->p_vaddr   = (size_t)m->addr;
                phdr->p_paddr   = __pa(m->addr);
                phdr->p_filesz  = phdr->p_memsz = m->size;
                phdr->p_align   = PAGE_SIZE;
        }

        /*
         * Set up the notes in similar form to SVR4 core dumps made
         * with info from their /proc.
         */
        nhdr->p_offset  = offset;

        /* set up the process status */
        notes[0].name = "CORE";
        notes[0].type = NT_PRSTATUS;
        notes[0].datasz = sizeof(struct elf_prstatus);
        notes[0].data = &prstatus;

        memset(&prstatus, 0, sizeof(struct elf_prstatus));

        nhdr->p_filesz  = notesize(&notes[0]);
        bufp = storenote(&notes[0], bufp);

        /* set up the process info */
        notes[1].name   = "CORE";
        notes[1].type   = NT_PRPSINFO;
        notes[1].datasz = sizeof(struct elf_prpsinfo);
        notes[1].data   = &prpsinfo;

        memset(&prpsinfo, 0, sizeof(struct elf_prpsinfo));
        prpsinfo.pr_state       = 0;
        prpsinfo.pr_sname       = 'R';
        prpsinfo.pr_zomb        = 0;

        strcpy(prpsinfo.pr_fname, "vmlinux");
        strncpy(prpsinfo.pr_psargs, saved_command_line, ELF_PRARGSZ);

        nhdr->p_filesz  = notesize(&notes[1]);
        bufp = storenote(&notes[1], bufp);

        /* set up the task structure */
        notes[2].name   = "CORE";
        notes[2].type   = NT_TASKSTRUCT;
        notes[2].datasz = sizeof(struct task_struct);
        notes[2].data   = current;

        nhdr->p_filesz  = notesize(&notes[2]);
        bufp = storenote(&notes[2], bufp);

} /* end elf_kcore_store_hdr() */
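
/*
 * Resulting /proc/kcore layout, given that read_kcore() below passes
 * dataoff == elf_buflen:
 *
 *      [ elfhdr | PT_NOTE phdr | PT_LOAD phdrs | notes | pad to elf_buflen ]
 *      [ kernel memory: PAGE_OFFSET..high_memory, then each vmalloc area   ]
 *
 * A file offset X past the header therefore corresponds to kernel virtual
 * address PAGE_OFFSET + (X - elf_buflen), which is exactly how read_kcore()
 * resolves the data portion via __va(*fpos - elf_buflen).
 */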

/*****************************************************************************/
/*
 * read from the ELF header and then kernel memory
 */
static ssize_t read_kcore(struct file *file, char *buffer, size_t buflen, loff_t *fpos)
{
        ssize_t acc = 0;
        size_t size, tsz;
        size_t elf_buflen;
        int num_vma;

        read_lock(&vmlist_lock);
        proc_root_kcore->size = size = get_kcore_size(&num_vma, &elf_buflen);
        if (buflen == 0 || *fpos >= size) {
                read_unlock(&vmlist_lock);
                return 0;
        }

        /* trim buflen to not go beyond EOF */
        if (buflen > size - *fpos)
                buflen = size - *fpos;

        /* construct an ELF core header if we'll need some of it */
        if (*fpos < elf_buflen) {
                char * elf_buf;

                tsz = elf_buflen - *fpos;
                if (buflen < tsz)
                        tsz = buflen;
                elf_buf = kmalloc(elf_buflen, GFP_ATOMIC);
                if (!elf_buf) {
                        read_unlock(&vmlist_lock);
                        return -ENOMEM;
                }
                memset(elf_buf, 0, elf_buflen);
                elf_kcore_store_hdr(elf_buf, num_vma, elf_buflen);
                read_unlock(&vmlist_lock);
                if (copy_to_user(buffer, elf_buf + *fpos, tsz)) {
                        kfree(elf_buf);
                        return -EFAULT;
                }
                kfree(elf_buf);
                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                acc += tsz;

                /* leave now if filled buffer already */
                if (buflen == 0)
                        return acc;
        } else
                read_unlock(&vmlist_lock);

        /* where page 0 not mapped, write zeros into buffer */
#if defined (__i386__) || defined (__mc68000__)
        if (*fpos < PAGE_SIZE + elf_buflen) {
                /* work out how much to clear */
                tsz = PAGE_SIZE + elf_buflen - *fpos;
                if (buflen < tsz)
                        tsz = buflen;

                /* write zeros to buffer */
                if (clear_user(buffer, tsz))
                        return -EFAULT;
                buflen -= tsz;
                *fpos += tsz;
                buffer += tsz;
                acc += tsz;

                /* leave now if filled buffer already */
                if (buflen == 0)
                        return acc;
        }
#endif

        /* fill the remainder of the buffer from kernel VM space */
        if (copy_to_user(buffer, __va(*fpos - elf_buflen), buflen))
                return -EFAULT;

        acc += buflen;
        *fpos += buflen;
        return acc;
}
#endif /* CONFIG_KCORE_AOUT */
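
/*
 * Typical consumer of the ELF image (a usage sketch, not part of this
 * file): point gdb at an unstripped kernel and at the live core, e.g.
 *
 *      gdb vmlinux /proc/kcore
 *
 * and kernel data structures can then be examined read-only on the
 * running system.
 */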