Linux 2.1.131pre2
[davej-history.git] / arch / i386 / kernel / process.c
/*
 *  linux/arch/i386/kernel/process.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */
#define __KERNEL_SYSCALLS__
#include <stdarg.h>

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/malloc.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/config.h>
#include <linux/unistd.h>
#include <linux/delay.h>
#include <linux/smp.h>
#include <linux/reboot.h>
#include <linux/init.h>
#if defined(CONFIG_APM) && defined(CONFIG_APM_POWER_OFF)
#include <linux/apm_bios.h>
#endif

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/ldt.h>
#include <asm/processor.h>
#include <asm/desc.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif

#include "irq.h"
spinlock_t semaphore_wake_lock = SPIN_LOCK_UNLOCKED;

asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

#ifdef CONFIG_APM
extern int apm_do_idle(void);
extern void apm_do_busy(void);
#endif

static int hlt_counter = 0;

#define HARD_IDLE_TIMEOUT (HZ / 3)
void disable_hlt(void)
{
	hlt_counter++;
}

void enable_hlt(void)
{
	hlt_counter--;
}
#ifndef __SMP__

static void hard_idle(void)
{
	while (!current->need_resched) {
		if (boot_cpu_data.hlt_works_ok && !hlt_counter) {
#ifdef CONFIG_APM
			/* If the APM BIOS is not enabled, or there
			   is an error calling the idle routine, we
			   should hlt if possible.  We need to check
			   need_resched again because an interrupt
			   may have occurred in apm_do_idle(). */
			start_bh_atomic();
			if (!apm_do_idle() && !current->need_resched)
				__asm__("hlt");
			end_bh_atomic();
#else
			__asm__("hlt");
#endif
		}
		if (current->need_resched)
			break;
		schedule();
	}
#ifdef CONFIG_APM
	apm_do_busy();
#endif
}
/*
 * The idle loop on a uniprocessor i386..
 */
static int cpu_idle(void *unused)
{
	unsigned long start_idle = jiffies;

	/* endless idle loop with no priority at all */
	for (;;) {
		if (jiffies - start_idle > HARD_IDLE_TIMEOUT)
			hard_idle();
		else {
			if (boot_cpu_data.hlt_works_ok && !hlt_counter && !current->need_resched)
				__asm__("hlt");
		}
		if (current->need_resched)
			start_idle = jiffies;
		current->policy = SCHED_YIELD;
		schedule();
		check_pgt_cache();
	}
}
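/*
 * Note on the two-stage idle above: for the first HARD_IDLE_TIMEOUT
 * (HZ/3) jiffies after going idle we only hlt between reschedules; once
 * the CPU has been idle longer than that without a pending reschedule,
 * hard_idle() takes over and (with CONFIG_APM) also gives the APM BIOS a
 * chance to throttle the processor via apm_do_idle().
 */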
#else

/*
 * This is being executed in task 0 'user space'.
 */
int cpu_idle(void *unused)
{
	/* endless idle loop with no priority at all */
	while (1) {
		if (current_cpu_data.hlt_works_ok && !hlt_counter && !current->need_resched)
			__asm__("hlt");
		current->policy = SCHED_YIELD;
		schedule();
		check_pgt_cache();
	}
}

#endif
asmlinkage int sys_idle(void)
{
	if (current->pid != 0)
		return -EPERM;
	cpu_idle(NULL);
	return 0;
}
/*
 * This routine reboots the machine by asking the keyboard
 * controller to pulse the reset-line low. We try that for a while,
 * and if it doesn't work, we do some other stupid things.
 */

static long no_idt[2] = {0, 0};
static int reboot_mode = 0;
static int reboot_thru_bios = 0;
__initfunc(void reboot_setup(char *str, int *ints))
{
	while (1) {
		switch (*str) {
		case 'w': /* "warm" reboot (no memory testing etc) */
			reboot_mode = 0x1234;
			break;
		case 'c': /* "cold" reboot (with memory testing etc) */
			reboot_mode = 0x0;
			break;
		case 'b': /* "bios" reboot by jumping through the BIOS */
			reboot_thru_bios = 1;
			break;
		case 'h': /* "hard" reboot by toggling RESET and/or crashing the CPU */
			reboot_thru_bios = 0;
			break;
		}
		if ((str = strchr(str, ',')) != NULL)
			str++;
		else
			break;
	}
}
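/*
 * reboot_setup() handles the "reboot=" kernel command-line option
 * (registered elsewhere, presumably in setup.c).  Only the first
 * character of each comma-separated token is examined, so for example
 * "reboot=warm" and "reboot=w,b" both select a warm reboot, and the
 * latter additionally reboots through the BIOS.
 */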
/* The following code and data reboots the machine by switching to real
   mode and jumping to the BIOS reset entry point, as if the CPU has
   really been reset.  The previous version asked the keyboard
   controller to pulse the CPU reset line, which is more thorough, but
   doesn't work with at least one type of 486 motherboard.  It is easy
   to stop this code working; hence the copious comments. */
static unsigned long long
real_mode_gdt_entries [3] =
{
	0x0000000000000000ULL,	/* Null descriptor */
	0x00009a000000ffffULL,	/* 16-bit real-mode 64k code at 0x00000000 */
	0x000092000100ffffULL	/* 16-bit real-mode 64k data at 0x00000100 */
};
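/*
 * Decoding the descriptors above (standard i386 GDT layout, low byte
 * first): limit 15..0 = 0xffff, then base 15..0 / 23..16 / 31..24 in the
 * middle bytes, plus the access byte and the flags nibble.  Entry 1 has
 * access byte 0x9a (present, ring 0, execute/read code) with base 0;
 * entry 2 has 0x92 (present, ring 0, read/write data) with base 0x100.
 * The flags nibble is 0 in both, i.e. 16-bit, byte granularity, which is
 * what real mode expects.
 */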
static struct
{
	unsigned short       size __attribute__ ((packed));
	unsigned long long * base __attribute__ ((packed));
}
real_mode_gdt = { sizeof (real_mode_gdt_entries) - 1, real_mode_gdt_entries },
real_mode_idt = { 0x3ff, 0 };
/* This is 16-bit protected mode code to disable paging and the cache,
   switch to real mode and jump to the BIOS reset code.

   The instruction that switches to real mode by writing to CR0 must be
   followed immediately by a far jump instruction, which sets CS to a
   valid value for real mode, and flushes the prefetch queue to avoid
   running instructions that have already been decoded in protected
   mode.

   Clears all the flags except ET, especially PG (paging), PE
   (protected-mode enable) and TS (task switch for coprocessor state
   save).  Flushes the TLB after paging has been disabled.  Sets CD and
   NW, to disable the cache on a 486, and invalidates the cache.  This
   is more like the state of a 486 after reset.  I don't know if
   something else should be done for other chips.

   More could be done here to set up the registers as if a CPU reset had
   occurred; hopefully real BIOSs don't assume much. */
static unsigned char real_mode_switch [] =
{
	0x66, 0x0f, 0x20, 0xc0,			/*    movl  %cr0,%eax        */
	0x66, 0x83, 0xe0, 0x11,			/*    andl  $0x00000011,%eax */
	0x66, 0x0d, 0x00, 0x00, 0x00, 0x60,	/*    orl   $0x60000000,%eax */
	0x66, 0x0f, 0x22, 0xc0,			/*    movl  %eax,%cr0        */
	0x66, 0x0f, 0x22, 0xd8,			/*    movl  %eax,%cr3        */
	0x66, 0x0f, 0x20, 0xc3,			/*    movl  %cr0,%ebx        */
	0x66, 0x81, 0xe3, 0x00, 0x00, 0x00, 0x60,	/*    andl  $0x60000000,%ebx */
	0x74, 0x02,				/*    jz    f                */
	0x0f, 0x08,				/*    invd                   */
	0x24, 0x10,				/* f: andb  $0x10,al         */
	0x66, 0x0f, 0x22, 0xc0,			/*    movl  %eax,%cr0        */
	0xea, 0x00, 0x00, 0xff, 0xff		/*    ljmp  $0xffff,$0x0000  */
};
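/*
 * The final far jump above lands at 0xffff:0x0000, i.e. physical address
 * 0xFFFF0, the conventional BIOS reset entry point.  The "andb $0x10,%al"
 * before the last CR0 write keeps only ET and clears PE (PG was already
 * cleared by the first CR0 write), so the far jump executes in real mode.
 */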
static inline void kb_wait(void)
{
	int i;

	for (i = 0; i < 0x10000; i++)
		if ((inb_p(0x64) & 0x02) == 0)
			break;
}
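/*
 * kb_wait() spins until bit 1 (input buffer full) of the keyboard
 * controller status port 0x64 clears, i.e. until the controller can
 * accept another command, or until the 0x10000-iteration timeout runs
 * out.  machine_restart() uses it below before writing the 0xfe "pulse
 * reset line" command.
 */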
void machine_restart(char * __unused)
{
#if __SMP__
	/*
	 * turn off the IO-APIC, so we can do a clean reboot
	 */
	init_pic_mode();
#endif

	if (!reboot_thru_bios) {
		/* rebooting needs to touch the page at absolute addr 0 */
		*((unsigned short *)__va(0x472)) = reboot_mode;
		for (;;) {
			int i;
			for (i = 0; i < 100; i++) {
				kb_wait();
				udelay(50);
				outb(0xfe,0x64);	/* pulse reset low */
				udelay(50);
			}
			/* That didn't work - force a triple fault.. */
			__asm__ __volatile__("lidt %0": :"m" (no_idt));
			__asm__ __volatile__("int3");
		}
	}

	cli();
	/* Write zero to CMOS register number 0x0f, which the BIOS POST
	   routine will recognize as telling it to do a proper reboot.  (Well
	   that's what this book in front of me says -- it may only apply to
	   the Phoenix BIOS though, it's not clear).  At the same time,
	   disable NMIs by setting the top bit in the CMOS address register,
	   as we're about to do peculiar things to the CPU.  I'm not sure if
	   `outb_p' is needed instead of just `outb'.  Use it to be on the
	   safe side. */

	outb_p (0x8f, 0x70);
	outb_p (0x00, 0x71);

	/* Remap the kernel at virtual address zero, as well as offset zero
	   from the kernel segment.  This assumes the kernel segment starts at
	   virtual address PAGE_OFFSET. */

	memcpy (swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
		sizeof (swapper_pg_dir [0]) * KERNEL_PGD_PTRS);

	/* Make sure the first page is mapped to the start of physical memory.
	   It is normally not mapped, to trap kernel NULL pointer dereferences. */

	pg0[0] = 7;
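	/* The 7 above is the raw page-table-entry bits PRESENT | RW | USER
	   with a frame address of 0, i.e. the first 4k page identity-mapped
	   read/write. */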
	/*
	 * Use `swapper_pg_dir' as our page directory.  We bother with
	 * `SET_PAGE_DIR' because we might be rebooting, but if we change
	 * the way we set the root page dir in the future, then we won't
	 * break a seldom used feature ;)
	 */
	SET_PAGE_DIR(current,swapper_pg_dir);

	/* Write 0x1234 to absolute memory location 0x472.  The BIOS reads
	   this on booting to tell it to "Bypass memory test (also warm
	   boot)".  This seems like a fairly standard thing that gets set by
	   REBOOT.COM programs, and the previous reset routine did this
	   too. */

	*((unsigned short *)0x472) = reboot_mode;

	/* For the switch to real mode, copy some code to low memory.  It has
	   to be in the first 64k because it is running in 16-bit mode, and it
	   has to have the same physical and virtual address, because it turns
	   off paging.  Copy it near the end of the first page, out of the way
	   of BIOS variables. */

	memcpy ((void *) (0x1000 - sizeof (real_mode_switch)),
		real_mode_switch, sizeof (real_mode_switch));

	/* Set up the IDT for real mode. */

	__asm__ __volatile__ ("lidt %0" : : "m" (real_mode_idt));

	/* Set up a GDT from which we can load segment descriptors for real
	   mode.  The GDT is not used in real mode; it is just needed here to
	   prepare the descriptors. */

	__asm__ __volatile__ ("lgdt %0" : : "m" (real_mode_gdt));

	/* Load the data segment registers, and thus the descriptors ready for
	   real mode.  The base address of each segment is 0x100, 16 times the
	   selector value being loaded here.  This is so that the segment
	   registers don't have to be reloaded after switching to real mode:
	   the values are consistent for real mode operation already. */

	__asm__ __volatile__ ("movl $0x0010,%%eax\n"
				"\tmovl %%ax,%%ds\n"
				"\tmovl %%ax,%%es\n"
				"\tmovl %%ax,%%fs\n"
				"\tmovl %%ax,%%gs\n"
				"\tmovl %%ax,%%ss" : : : "eax");

	/* Jump to the 16-bit code that we copied earlier.  It disables paging
	   and the cache, switches to real mode, and jumps to the BIOS reset
	   entry point. */

	__asm__ __volatile__ ("ljmp $0x0008,%0"
				:
				: "i" ((void *) (0x1000 - sizeof (real_mode_switch))));
}
void machine_halt(void)
{
}
void machine_power_off(void)
{
#if defined(CONFIG_APM) && defined(CONFIG_APM_POWER_OFF)
	apm_power_off();
#endif
}
void show_regs(struct pt_regs * regs)
{
	long cr0 = 0L, cr2 = 0L, cr3 = 0L;

	printk("\n");
	printk("EIP: %04x:[<%08lx>]",0xffff & regs->xcs,regs->eip);
	if (regs->xcs & 3)
		printk(" ESP: %04x:%08lx",0xffff & regs->xss,regs->esp);
	printk(" EFLAGS: %08lx\n",regs->eflags);
	printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
		regs->eax,regs->ebx,regs->ecx,regs->edx);
	printk("ESI: %08lx EDI: %08lx EBP: %08lx",
		regs->esi, regs->edi, regs->ebp);
	printk(" DS: %04x ES: %04x\n",
		0xffff & regs->xds,0xffff & regs->xes);
	__asm__("movl %%cr0, %0": "=r" (cr0));
	__asm__("movl %%cr2, %0": "=r" (cr2));
	__asm__("movl %%cr3, %0": "=r" (cr3));
	printk("CR0: %08lx CR2: %08lx CR3: %08lx\n", cr0, cr2, cr3);
}
/*
 * Allocation and freeing of basic task resources.
 *
 * NOTE! The task struct and the stack go together.
 *
 * The task structure is a two-page thing, and as such
 * not reliable to allocate using the basic page alloc
 * functions. We have a small cache of structures for
 * when the allocations fail..
 *
 * This extra buffer essentially acts to make for less
 * "jitter" in the allocations..
 *
 * On SMP we don't do this right now because:
 *  - we aren't holding any locks when called, and we might
 *    as well just depend on the generic memory management
 *    to do proper locking for us instead of complicating it
 *    here.
 *  - if you use SMP you have a beefy enough machine that
 *    this shouldn't matter..
 */
#ifndef __SMP__
#define EXTRA_TASK_STRUCT	16
static struct task_struct * task_struct_stack[EXTRA_TASK_STRUCT];
static int task_struct_stack_ptr = -1;
#endif
struct task_struct * alloc_task_struct(void)
{
#ifndef EXTRA_TASK_STRUCT
	return (struct task_struct *) __get_free_pages(GFP_KERNEL,1);
#else
	int index;
	struct task_struct *ret;

	index = task_struct_stack_ptr;
	if (index >= EXTRA_TASK_STRUCT/2)
		goto use_cache;
	ret = (struct task_struct *) __get_free_pages(GFP_KERNEL,1);
	if (!ret) {
		index = task_struct_stack_ptr;
		if (index >= 0) {
use_cache:
			ret = task_struct_stack[index];
			task_struct_stack_ptr = index-1;
		}
	}
	return ret;
#endif
}
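/*
 * Note that the cache above is only ever refilled by free_task_struct()
 * below: alloc_task_struct() prefers the buddy allocator until the stack
 * holds at least EXTRA_TASK_STRUCT/2 entries, and otherwise falls back to
 * the cache only when __get_free_pages() fails.
 */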
void free_task_struct(struct task_struct *p)
{
#ifdef EXTRA_TASK_STRUCT
	int index = task_struct_stack_ptr+1;

	if (index < EXTRA_TASK_STRUCT) {
		task_struct_stack[index] = p;
		task_struct_stack_ptr = index;
	} else
#endif
		free_pages((unsigned long) p, 1);
}
void release_segments(struct mm_struct *mm)
{
	/* forget local segments */
	__asm__ __volatile__("movl %w0,%%fs ; movl %w0,%%gs"
		: /* no outputs */
		: "r" (0));
	if (mm->segments) {
		void * ldt = mm->segments;

		/*
		 * Get the LDT entry from init_task.
		 */
		current->tss.ldt = _LDT(0);
		load_ldt(0);

		mm->segments = NULL;
		vfree(ldt);
	}
}
/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	long retval, d0;

	__asm__ __volatile__(
		"movl %%esp,%%esi\n\t"
		"int $0x80\n\t"		/* Linux/i386 system call */
		"cmpl %%esp,%%esi\n\t"	/* child or parent? */
		"je 1f\n\t"		/* parent - jump */
		/* Load the argument into eax, and push it.  That way, it does
		 * not matter whether the called function is compiled with
		 * -mregparm or not.  */
		"movl %4,%%eax\n\t"
		"pushl %%eax\n\t"
		"call *%5\n\t"		/* call fn */
		"movl %3,%0\n\t"	/* exit */
		"int $0x80\n"
		"1:\t"
		:"=&a" (retval), "=&S" (d0)
		:"0" (__NR_clone), "i" (__NR_exit),
		 "r" (arg), "r" (fn),
		 "b" (flags | CLONE_VM)
		: "memory");
	return retval;
}
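/*
 * Illustrative use (not from this file): a driver or subsystem starts a
 * kernel daemon with something like
 *
 *	kernel_thread(my_daemon, NULL, 0);
 *
 * where my_daemon is a hypothetical int (*)(void *) function.  CLONE_VM
 * is always ORed in above, so parent and child share the kernel address
 * space; the child recognizes itself by its fresh stack (the esp
 * comparison) and, when fn returns, immediately invokes __NR_exit.
 */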
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	/* nothing to do ... */
}

void flush_thread(void)
{
	int i;
	struct task_struct *tsk = current;

	for (i=0 ; i<8 ; i++)
		tsk->tss.debugreg[i] = 0;

	/*
	 * Forget coprocessor state..
	 */
	clear_fpu(tsk);
	tsk->used_math = 0;
}
void release_thread(struct task_struct *dead_task)
{
}
/*
 * If new_mm is NULL, we're being called to set up the LDT descriptor
 * for a clone task. Each clone must have a separate entry in the GDT.
 */
void copy_segments(int nr, struct task_struct *p, struct mm_struct *new_mm)
{
	struct mm_struct * old_mm = current->mm;
	void * old_ldt = old_mm->segments, * ldt = old_ldt;

	/* default LDT - use the one from init_task */
	p->tss.ldt = _LDT(0);
	if (old_ldt) {
		if (new_mm) {
			ldt = vmalloc(LDT_ENTRIES*LDT_ENTRY_SIZE);
			new_mm->segments = ldt;
			if (!ldt) {
				printk(KERN_WARNING "ldt allocation failed\n");
				return;
			}
			memcpy(ldt, old_ldt, LDT_ENTRIES*LDT_ENTRY_SIZE);
		}
		p->tss.ldt = _LDT(nr);
		set_ldt_desc(nr, ldt, LDT_ENTRIES);
		return;
	}
}
/*
 * Save a segment.
 */
#define savesegment(seg,value) \
	asm volatile("movl %%" #seg ",%0":"=m" (*(int *)&(value)))
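/*
 * For example, savesegment(fs, p->tss.fs) expands to
 *
 *	asm volatile("movl %%fs,%0" : "=m" (*(int *)&(p->tss.fs)));
 *
 * i.e. the current %fs selector is stored directly into the thread
 * structure without going through a general-purpose register.
 */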
int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
	struct task_struct * p, struct pt_regs * regs)
{
	struct pt_regs * childregs;

	childregs = ((struct pt_regs *) (2*PAGE_SIZE + (unsigned long) p)) - 1;
	*childregs = *regs;
	childregs->eax = 0;
	childregs->esp = esp;
	childregs->eflags = regs->eflags & 0xffffcfff;	/* iopl always 0 for a new process */

	p->tss.esp = (unsigned long) childregs;
	p->tss.esp0 = (unsigned long) (childregs+1);
	p->tss.ss0 = __KERNEL_DS;

	p->tss.tr = _TSS(nr);
	set_tss_desc(nr,&(p->tss));
	p->tss.eip = (unsigned long) ret_from_fork;

	savesegment(fs,p->tss.fs);
	savesegment(gs,p->tss.gs);

	/*
	 * a bitmap offset pointing outside of the TSS limit causes a nicely
	 * controllable SIGSEGV. The first sys_ioperm() call sets up the
	 * bitmap properly.
	 */
	p->tss.bitmap = sizeof(struct thread_struct);

	unlazy_fpu(current);
	p->tss.i387 = current->tss.i387;

	return 0;
}
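/*
 * Layout note for copy_thread(): the task struct and the kernel stack
 * share the same two pages, so the child's pt_regs frame is placed at
 * the very top of that area (2*PAGE_SIZE above p, minus one frame).
 * tss.esp0 points just above the frame, which is where the CPU starts
 * pushing on the next kernel entry, and tss.esp/tss.eip make the child
 * resume in ret_from_fork with eax already forced to 0, the child's
 * fork() return value.
 */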
/*
 * fill in the FPU structure for a core dump.
 */
int dump_fpu (struct pt_regs * regs, struct user_i387_struct* fpu)
{
	int fpvalid;
	struct task_struct *tsk = current;

	fpvalid = tsk->used_math;
	if (fpvalid) {
		unlazy_fpu(tsk);
		memcpy(fpu,&tsk->tss.i387.hard,sizeof(*fpu));
	}

	return fpvalid;
}
/*
 * fill in the user structure for a core dump..
 */
void dump_thread(struct pt_regs * regs, struct user * dump)
{
	int i;

/* changed the size calculations - should hopefully work better. lbt */
	dump->magic = CMAGIC;
	dump->start_code = 0;
	dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
	dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
	dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
	dump->u_dsize -= dump->u_tsize;
	dump->u_ssize = 0;
	for (i = 0; i < 8; i++)
		dump->u_debugreg[i] = current->tss.debugreg[i];

	if (dump->start_stack < TASK_SIZE)
		dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;

	dump->regs.ebx = regs->ebx;
	dump->regs.ecx = regs->ecx;
	dump->regs.edx = regs->edx;
	dump->regs.esi = regs->esi;
	dump->regs.edi = regs->edi;
	dump->regs.ebp = regs->ebp;
	dump->regs.eax = regs->eax;
	dump->regs.ds = regs->xds;
	dump->regs.es = regs->xes;
	savesegment(fs,dump->regs.fs);
	savesegment(gs,dump->regs.gs);
	dump->regs.orig_eax = regs->orig_eax;
	dump->regs.eip = regs->eip;
	dump->regs.cs = regs->xcs;
	dump->regs.eflags = regs->eflags;
	dump->regs.esp = regs->esp;
	dump->regs.ss = regs->xss;

	dump->u_fpvalid = dump_fpu (regs, &dump->i387);
}
/*
 * This special macro can be used to load a debugging register
 */
#define loaddebug(tsk,register) \
		__asm__("movl %0,%%db" #register  \
			: /* no output */ \
			:"r" (tsk->tss.debugreg[register]))
/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPU's, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is a fairly red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 */
void __switch_to(struct task_struct *prev, struct task_struct *next)
{
	/* Do the FPU save and set TS if it wasn't set before.. */
	unlazy_fpu(prev);

	/*
	 * Reload TR, LDT and the page table pointers..
	 *
	 * We need TR for the IO permission bitmask (and
	 * the vm86 bitmasks in case we ever use enhanced
	 * v86 mode properly).
	 *
	 * We may want to get rid of the TR register some
	 * day, and copy the bitmaps around by hand. Oh,
	 * well. In the meantime we have to clear the busy
	 * bit in the TSS entry, ugh.
	 */
	gdt_table[next->tss.tr >> 3].b &= 0xfffffdff;
	asm volatile("ltr %0": :"g" (*(unsigned short *)&next->tss.tr));

	/*
	 * Save away %fs and %gs. No need to save %es and %ds, as
	 * those are always kernel segments while inside the kernel.
	 */
	asm volatile("movl %%fs,%0":"=m" (*(int *)&prev->tss.fs));
	asm volatile("movl %%gs,%0":"=m" (*(int *)&prev->tss.gs));

	/* Re-load LDT if necessary */
	if (next->mm->segments != prev->mm->segments)
		asm volatile("lldt %0": :"g" (*(unsigned short *)&next->tss.ldt));

	/* Re-load page tables */
	{
		unsigned long new_cr3 = next->tss.cr3;
		if (new_cr3 != prev->tss.cr3)
			asm volatile("movl %0,%%cr3": :"r" (new_cr3));
	}

	/*
	 * Restore %fs and %gs.
	 */
	loadsegment(fs,next->tss.fs);
	loadsegment(gs,next->tss.gs);

	/*
	 * Now maybe reload the debug registers
	 */
	if (next->tss.debugreg[7]) {
		loaddebug(next,0);
		loaddebug(next,1);
		loaddebug(next,2);
		loaddebug(next,3);
		loaddebug(next,6);
		loaddebug(next,7);
	}
}
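/*
 * Only debug registers 0-3, 6 and 7 are reloaded above; db4 and db5 are
 * reserved on the i386, so slots 4 and 5 of tss.debugreg stay unused.
 * The whole block is skipped unless the incoming task has a non-zero
 * dr7, i.e. hardware breakpoints armed (typically via ptrace).
 */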
asmlinkage int sys_fork(struct pt_regs regs)
{
	return do_fork(SIGCHLD, regs.esp, &regs);
}
asmlinkage int sys_clone(struct pt_regs regs)
{
	unsigned long clone_flags;
	unsigned long newsp;

	clone_flags = regs.ebx;
	newsp = regs.ecx;
	if (!newsp)
		newsp = regs.esp;
	return do_fork(clone_flags, newsp, &regs);
}
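/*
 * sys_fork(), sys_clone() and sys_execve() take struct pt_regs by value:
 * the system-call entry code (entry.S) leaves the saved user register
 * frame on the kernel stack exactly where the first argument of a C
 * function is expected, so "regs" aliases the real saved registers and
 * &regs can be handed on to do_fork()/do_execve().
 */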
/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(struct pt_regs regs)
{
	int error;
	char * filename;

	lock_kernel();
	filename = getname((char *) regs.ebx);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename, (char **) regs.ecx, (char **) regs.edx, &regs);
	if (error == 0)
		current->flags &= ~PF_DTRACE;
	putname(filename);
out:
	unlock_kernel();
	return error;
}