[crak.git] / src / dump.c
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/binfmts.h>
#include <asm/mman.h>
#include <asm/uaccess.h>
#include <linux/file.h>
#include <linux/sys.h>
#include <asm/page.h>
#include "config.h"
#include "ckpt.h"
#include "ckptlib.h"
#include <linux/mount.h>
#include <linux/swapops.h>
#include <linux/sched.h>
/*
 * Write a packet of data to a file.  Data is accumulated in a static
 * PACKET_SIZE buffer and flushed whenever the buffer fills or last_pkt
 * is set; flag says whether buf is a user-space or a kernel pointer.
 */
int pack_write(struct file *f, char *buf,
	       int len, int last_pkt, int flag)
{
	static char *pack = NULL;
	static long pos = 0;
	int ret, to_copy, wrtn = 0;

	if (!pack) {
		if (!(pack = (char *)kmalloc(PACKET_SIZE, GFP_KERNEL)))
			return -1;
	}

	while (len > 0) {
		to_copy = (len > (PACKET_SIZE - pos)) ? (PACKET_SIZE - pos) : len;

		if (flag == FROM_USER)
			copy_from_user(&pack[pos], buf + wrtn, to_copy);
		else
			ckpt_strncpy(&pack[pos], buf + wrtn, to_copy);

		pos += to_copy;
		len -= to_copy;
		wrtn += to_copy;

		/* Flush when the packet is full or this is the last packet. */
		if ((pos == PACKET_SIZE) || last_pkt) {
			mm_segment_t fs = get_fs();

			set_fs(KERNEL_DS);
			ret = f->f_op->write(f, pack, pos, &f->f_pos);
			set_fs(fs);
			if (ret != pos)
				return ret;

			pos = 0;
			if (last_pkt) {
				kfree(pack);
				pack = NULL;
			}
		}
	}

	/* Last packet with no new data: flush whatever is still buffered. */
	if (last_pkt && (pack != NULL)) {
		if (pos != 0) {
			mm_segment_t fs = get_fs();

			set_fs(KERNEL_DS);
			wrtn = f->f_op->write(f, pack, pos, &f->f_pos);
			set_fs(fs);
		}
		kfree(pack);
		pack = NULL;
		pos = 0;
	}

	return wrtn;
}
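/*
 * Write the checkpoint header: the "CKPT" signature, format version,
 * number of memory segments, the task's credentials (uid/gid family
 * and supplementary group count) and its command name.
 */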
void dump_header_struct(struct file *file, struct task_struct *p, int in_sys_call)
{
	struct header hdr;

	ckpt_strncpy(hdr.signature, "CKPT", 4);
	hdr.major_version = CKPT_MAJOR;
	hdr.minor_version = CKPT_MINOR;
	hdr.num_segments = p->mm->map_count;

	task_lock(p);
	hdr.pid = p->pid;
	hdr.uid = p->uid;
	hdr.euid = p->euid;
	hdr.suid = p->suid;
	hdr.fsuid = p->fsuid;
	hdr.gid = p->gid;
	hdr.egid = p->egid;
	hdr.sgid = p->sgid;
	hdr.fsgid = p->fsgid;
	hdr.ngroups = p->group_info->ngroups;
	hdr.in_sys_call = in_sys_call;
	ckpt_strncpy(hdr.comm, p->comm, 16);
	task_unlock(p);

	pack_write(file, (void *)&hdr, sizeof(struct header), 0, FROM_KERNEL);
}
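/*
 * Save the mm_struct layout fields (code, data, brk, stack, argument
 * and environment boundaries) as a struct memory record.
 */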
void dump_memory_struct(struct file *file, struct task_struct *p)
{
	struct mm_struct *mm = p->mm;
	struct memory mem;

	if (verbose)
		printk("Saving vm structure\n");

	task_lock(p);
	mem.start_code = mm->start_code;
	mem.end_code = mm->end_code;
	mem.start_data = mm->start_data;
	mem.end_data = mm->end_data;
	mem.start_brk = mm->start_brk;
	mem.brk = mm->brk;
	mem.start_stack = mm->start_stack;
	mem.arg_start = mm->arg_start;
	mem.arg_end = mm->arg_end;
	mem.env_start = mm->env_start;
	mem.env_end = mm->env_end;
	task_unlock(p);

	pack_write(file, (void *)&mem, sizeof(struct memory), 0, FROM_KERNEL);
}
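/*
 * Decide whether a VMA's page contents need to be written out: only
 * writable, non-shared areas are dumped.  For the rest, dump_segments()
 * records the path of the backing file, if there is one.
 */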
static inline int should_dump(unsigned long flags)
{
	if (!(flags & VM_WRITE) || (flags & VM_MAYSHARE))
		return 0;
	else
		return 1;
}
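/*
 * Write one struct segments record per VMA (range, protection, flags,
 * file offset, and the backing file's path for areas that will not be
 * dumped).  memleft tracks how much of the current page of header data
 * remains, so the header section can be padded out to a page boundary
 * at the end.
 */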
void dump_segments(struct file *file, struct mm_struct *mm, int memleft)
{
	struct segments seg;
	struct vm_area_struct *vm = mm->mmap;
	char *buffer;
	char *line;

	if (verbose)
		printk("Saving segments\n");
	if (!(buffer = kmalloc(PAGE_SIZE, GFP_KERNEL)))
		return;

	for (; vm; vm = vm->vm_next) {
		seg.vm_start = vm->vm_start;
		seg.vm_end = vm->vm_end;
		seg.prot = vm->vm_page_prot.pgprot;
		seg.flags = vm->vm_flags;
		seg.shared = 0;
		seg.pgoff = vm->vm_pgoff;
		seg.filename[0] = 0;

		if (!should_dump(seg.flags)) {
			if (vm->vm_file) {
				struct path pth;

				pth.mnt = vm->vm_file->f_vfsmnt;
				pth.dentry = vm->vm_file->f_dentry;
				line = d_path(&pth, buffer, PAGE_SIZE);
				buffer[PAGE_SIZE-1] = 0;
				seg.shared = 1;
				ckpt_strncpy(seg.filename, line, CKPT_MAX_FILENAME);
			}
		}

		pack_write(file, (void *)&seg, sizeof(struct segments), 0, FROM_KERNEL);

		if (memleft < sizeof(struct segments))
			memleft = PAGE_SIZE + memleft;
		memleft -= sizeof(struct segments);
	}

	/*
	 * Dump the padding so the header is a multiple of a page size;
	 * that's what memleft is used for.
	 */
	if (memleft > 0) {
		char *padbuf;

		padbuf = (char *)kmalloc(memleft, GFP_KERNEL);
		if (padbuf) {
			/* zero the padding so no stale kernel memory leaks into the image */
			memset(padbuf, 0, memleft);
			pack_write(file, padbuf, memleft, 0, FROM_KERNEL);
			kfree(padbuf);
		}
	}

	kfree(buffer);
}
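/*
 * Debug helpers: decode a VMA's protection bits and mmap flags into
 * readable printk output (used by the #if 0 block in dump_vm_areas()).
 */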
static char *prot_str[] = {"PROT_READ", "PROT_WRITE", "PROT_EXEC"};

static void print_prot(uint32_t prot)
{
	int i = 0;

	prot &= 7;
	for (; i < 3; i++) {
		if (prot & 1)
			printk("%s ", prot_str[i]);
		prot >>= 1;
	}
}

static void print_flags(uint32_t flags)
{
	flags = get_mmap_flags(flags);

	if (flags & MAP_SHARED)
		printk("MAP_SHARED ");
	else
		printk("MAP_PRIVATE ");

	if (flags & MAP_FIXED)
		printk("MAP_FIXED ");
	if (flags & MAP_ANONYMOUS)
		printk("MAP_ANONYMOUS ");

	if (flags & MAP_GROWSDOWN)
		printk("MAP_GROWSDOWN ");
	if (flags & MAP_DENYWRITE)
		printk("MAP_DENYWRITE ");
	if (flags & MAP_EXECUTABLE)
		printk("MAP_EXECUTABLE ");
	if (flags & MAP_LOCKED)
		printk("MAP_LOCKED ");
	if (flags & MAP_POPULATE)
		printk("MAP_POPULATE ");
	if (flags & MAP_NONBLOCK)
		printk("MAP_NONBLOCK ");
}
/*
 * Dump vm area to file.
 */
static void dump_vm_area(struct file *f, struct task_struct *p,
			 struct vm_area_struct *vm)
{
	char *data;
	unsigned long addr = vm->vm_start;
	static int i = 0;
	int j = 0;

	printk(":::Saving the vm_area for the %d time(s)", ++i);
	printk(":::%d page(s)\n", (int)((vm->vm_end - vm->vm_start + PAGE_SIZE - 1) >> PAGE_SHIFT));

	/* we may write to the pgtable */
	down_write(&p->mm->mmap_sem);

	while (addr < vm->vm_end) {
		data = get_kernel_address(p, vm, addr);

		if (data) {
			if ((unsigned long)data & ~PAGE_MASK)
				printk("Warning: address %p not aligned!\n", data);

			if (pack_write(f, (void *)data, PAGE_SIZE, 0, FROM_KERNEL) != PAGE_SIZE)
				printk("Warning: not all dumped\n");
			j++;
		}

		addr += PAGE_SIZE;
	}

	up_write(&p->mm->mmap_sem);
	printk("::::::%d pages saved:::\n", j);
}
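/*
 * Walk the task's VMA list and dump page contents for every area that
 * passes the should_dump()/vm_file test below.
 */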
void dump_vm_areas(struct file *file, struct task_struct *p)
{
	struct vm_area_struct *vm = p->mm->mmap;

	if (verbose)
		printk("Saving vm areas, we have %d vm areas\n", p->mm->map_count);

	for (; vm; vm = vm->vm_next) {
#if 0
		printk("0x%08x - 0x%08x ", (u32)vm->vm_start, (u32)vm->vm_end);
		print_prot(vm->vm_page_prot.pgprot);
		print_flags(vm->vm_flags);
		printk("\n");
#endif
		if (should_dump(vm->vm_flags) && vm->vm_file)
			dump_vm_area(file, p, vm);
	}
}
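/*
 * Save the task's user-mode register frame.  When a task checkpoints
 * itself, the saved ax (the syscall return value) is zeroed -- per the
 * comment below, to avoid looping on restart.  When another task is
 * caught inside a system call, ip is moved back two bytes (the size of
 * the syscall instruction) and ax set to -EINTR so the interrupted
 * call is reissued when the process resumes.
 */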
void dump_registers(struct file *file, struct task_struct *p, struct pt_regs *regs, int in_sys_call)
{
	if (verbose)
		printk("Saving registers\n");

	if (p == current) {
		/* avoid infinite loop! */
		regs->ax = 0;
	} else if (in_sys_call) {
		/* If we are in a system call, we must restart it */
		regs->ip -= 2;
		regs->ax = -EINTR;
	}

	pack_write(file, (void *)regs, sizeof(*regs), 0, FROM_KERNEL);
}
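/*
 * Count the task's open file descriptors by scanning the open_fds
 * bitmap one __NFDBITS-sized word at a time; only descriptors that
 * pass ckpt_icheck_task() are counted.
 */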
int get_file_cnt(struct task_struct *p, struct files_struct *files)
{
	unsigned long set;
	int fds;
	int j = 0;
	int filecnt = 0;

	while (1) {
		fds = j * __NFDBITS;
		if (fds >= files->fdt->max_fds)
			break;
		set = files->fdt->open_fds->fds_bits[j++];
		while (set) {
			if (set & 1) {
				if (ckpt_icheck_task(p, fds))
					filecnt++;
			}
			fds++;
			set >>= 1;
		}
	}

	return filecnt;
}
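/*
 * Dump the open file table.  A per-call cache of (fd, inode) pairs
 * detects descriptors that share an inode; those are written as
 * CKPT_DUP records pointing at the earlier fd.  Descriptors with a
 * path (IS_FD_NAMED) are written as CKPT_FILE records followed by the
 * d_path() string.  Sockets are not supported.  *size returns the
 * total number of bytes written for the entries.
 */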
int dump_open_files(struct file *file, struct task_struct *p, struct files_struct *files, int *size)
{
	struct open_files_hdr open_files_hdr;
	struct open_files open_files;
	struct ckpt_fdcache_struct *fdcache;
	char buffer[PAGE_SIZE];
	char *line;
	int fds, j = 0;
	int fdcache_size = 0;
	int file_cnt = get_file_cnt(p, files);
	unsigned long set;
	int struct_size = 0;

	if (verbose) {
		printk("Saving file table\n");
		printk("max_fds = %d\n", files->fdt->max_fds);
		printk("%d named opened files\n", file_cnt);
	}

	open_files_hdr.number_open_files = file_cnt;
	pack_write(file, (void *)&open_files_hdr, sizeof(struct open_files_hdr), 0, FROM_KERNEL);

	if (!(fdcache = kmalloc(sizeof(struct ckpt_fdcache_struct) * file_cnt, GFP_KERNEL)))
		return -ENOMEM;

	/* Now we dump them */
	for (;;) {
		fds = j * __NFDBITS;
		if (fds >= files->fdt->max_fds)
			break;
		set = files->fdt->open_fds->fds_bits[j++];
		while (set) {
			if (set & 1) {
				struct file *fdes = fcheck_files(files, fds);
				struct dentry *dent = fdes ? fdes->f_dentry : NULL;
				struct inode *inode = dent ? dent->d_inode : NULL;
				int i;

				if (!inode)
					goto next;

				open_files.fd = fds;

				/* check whether this inode has appeared before */
				for (i = 0; i < fdcache_size; i++) {
					if (inode == fdcache[i].inode) {
						/* cache hit */
						if (verbose)
							printk("fd %d is a dup of fd %d\n",
							       (int)fds, fdcache[i].fd);
						open_files.type = CKPT_DUP;
						open_files.u.dup.dupfd = fdcache[i].fd;
						open_files.entry_size = sizeof(struct open_files);
						pack_write(file, (void *)&open_files, sizeof(struct open_files), 0, FROM_KERNEL);
						struct_size += open_files.entry_size;
						goto next;
					}
				}

				/* if not a dup, push it onto the cache */
				fdcache[fdcache_size].fd = fds;
				fdcache[fdcache_size].inode = inode;
				fdcache_size++;

				if (!inode) {
					/* unreachable: NULL inodes were skipped above */
					printk("fd %d has no entry\n", fds);
				} else if (S_ISSOCK(inode->i_mode)) {
					printk("fd %d is socket - unsupported\n", fds);
					kfree(fdcache);
					return -ENOSYS;
				} else if (IS_FD_NAMED(fdes)) { /* *** REGULAR FILE *** */
					struct path pth;

					pth.mnt = mntget(fdes->f_vfsmnt);
					pth.dentry = dget(dent);
					line = d_path(&pth, buffer, PAGE_SIZE);
					buffer[PAGE_SIZE-1] = 0;
					dput(pth.dentry);
					mntput(pth.mnt);

					open_files.type = CKPT_FILE;
					open_files.fd = fds;
					open_files.u.file.file = (unsigned long)fdes;
					open_files.u.file.flags = fdes->f_flags;
					open_files.u.file.mode = fdes->f_mode;
					open_files.u.file.file_pos = fdes->f_pos;
					open_files.entry_size = buffer + PAGE_SIZE - line +
						sizeof(struct open_files);
					pack_write(file, (void *)&open_files, sizeof(struct open_files), 0, FROM_KERNEL);

					if (verbose)
						printk("fd %02d: %04d: %08ld: %s\n", fds, open_files.entry_size,
						       open_files.u.file.file_pos, line);

					pack_write(file, (void *)line, open_files.entry_size - sizeof(struct open_files), 0, FROM_KERNEL);

					struct_size += open_files.entry_size;
				} else {
					printk("Unknown file type, cannot handle\n");
				}
			}
next:
			fds++;
			set >>= 1;
		}
	}

	kfree(fdcache);

	*size = struct_size;
	return 0;
}
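/*
 * Save the current working directory as a length-prefixed d_path()
 * string.  *res returns the number of bytes written.
 */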
void dump_cwd(struct file *file, struct task_struct *p, int *res)
{
	struct path pth;
	char buffer[PAGE_SIZE];
	char *line;
	int size;

	pth.dentry = dget(p->fs->pwd.dentry);
	pth.mnt = mntget(p->fs->pwd.mnt);
	line = d_path(&pth, buffer, PAGE_SIZE);
	buffer[PAGE_SIZE-1] = 0;
	size = buffer + PAGE_SIZE - line;
	dput(pth.dentry);
	mntput(pth.mnt);

	if (verbose)
		printk("saving cwd: %s\n", line);
	pack_write(file, (void *)&size, sizeof(size), 0, FROM_KERNEL);
	pack_write(file, (void *)line, size, 0, FROM_KERNEL);

	*res = size + sizeof(size);
}
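/*
 * Save the task's blocked signal mask and its signal_struct.  Pending
 * signals are not saved; anything pending other than SIGSTOP/SIGCONT
 * only triggers a warning.
 */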
void dump_signal(struct file *file, struct task_struct *p, int *size)
{
	sigset_t blocked;
	struct signal_struct sig;
	unsigned long *signal = (unsigned long *)&p->pending.signal;

	if (verbose)
		printk("Saving signal handlers\n");

	spin_lock_irq(&p->sighand->siglock);

	/* warn about pending signals we do not save; ignore SIGSTOP/SIGCONT */
	if (_NSIG_WORDS == 2 && ((signal[0] & ~0x60000L) || signal[1]))
		printk("pending signals not saved: %08lx %08lx\n",
		       signal[0], signal[1]);

	blocked = p->blocked;
	sig = *(p->signal);

	spin_unlock_irq(&p->sighand->siglock);

	pack_write(file, (void *)&blocked, sizeof(blocked), 0, FROM_KERNEL);
	pack_write(file, (void *)&sig, sizeof(sig), 0, FROM_KERNEL);
	*size = sizeof(blocked) + sizeof(sig);
}
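/*
 * Flush whatever pack_write() still has buffered and release its
 * packet buffer by issuing a zero-length "last packet" write.
 */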
void write_end(struct file *file)
{
	pack_write(file, NULL, 0, 1, FROM_KERNEL); /* last packet */
}
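/*
 * Rough sketch of how these helpers fit together.  The actual call
 * order is decided by the checkpoint entry point elsewhere in CRAK,
 * so treat this as an illustration only, not the authoritative image
 * layout:
 *
 *	dump_header_struct(f, p, in_sys_call);		header + credentials
 *	dump_memory_struct(f, p);			mm_struct layout fields
 *	dump_segments(f, p->mm, memleft);		per-VMA records + padding
 *	dump_vm_areas(f, p);				raw page contents
 *	dump_registers(f, p, regs, in_sys_call);
 *	dump_open_files(f, p, p->files, &sz);
 *	dump_cwd(f, p, &sz);
 *	dump_signal(f, p, &sz);
 *	write_end(f);					flush the packet buffer
 */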