/*
 *  linux/arch/x86-64/kernel/process.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *      Gareth Hughes <gareth@valinux.com>, May 2000
 *
 *  X86-64 port
 *      Andi Kleen.
 *
 *      CPU hotplug support - ashok.raj@intel.com
 *  $Id: process.c,v 1.38 2002/01/15 10:08:03 ak Exp $
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */
#include <stdarg.h>

#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/module.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/ptrace.h>
#include <linux/utsname.h>
#include <linux/random.h>
#include <linux/kprobes.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/mmu_context.h>
#include <asm/pda.h>
#include <asm/prctl.h>
#include <asm/kdebug.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
asmlinkage extern void ret_from_fork(void);

unsigned long kernel_thread_flags = CLONE_VM | CLONE_UNTRACED;

static atomic_t hlt_counter = ATOMIC_INIT(0);

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

/*
 * Powermanagement idle function, if any..
 */
void (*pm_idle)(void);
static DEFINE_PER_CPU(unsigned int, cpu_idle_state);

void disable_hlt(void)
{
        atomic_inc(&hlt_counter);
}

EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
        atomic_dec(&hlt_counter);
}

EXPORT_SYMBOL(enable_hlt);
/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
        local_irq_enable();

        if (!atomic_read(&hlt_counter)) {
                clear_thread_flag(TIF_POLLING_NRFLAG);
                smp_mb__after_clear_bit();
                while (!need_resched()) {
                        local_irq_disable();
                        if (!need_resched())
                                safe_halt();
                        else
                                local_irq_enable();
                }
                set_thread_flag(TIF_POLLING_NRFLAG);
        } else {
                while (!need_resched())
                        cpu_relax();
        }
}

/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle (void)
{
        local_irq_enable();

        asm volatile(
                "2:"
                "testl %0,%1;"
                "rep; nop;"
                "je 2b;"
                : :
                "i" (_TIF_NEED_RESCHED),
                "m" (current_thread_info()->flags));
}
void cpu_idle_wait(void)
{
        unsigned int cpu, this_cpu = get_cpu();
        cpumask_t map;

        set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
        put_cpu();

        cpus_clear(map);
        for_each_online_cpu(cpu) {
                per_cpu(cpu_idle_state, cpu) = 1;
                cpu_set(cpu, map);
        }

        __get_cpu_var(cpu_idle_state) = 0;

        wmb();
        do {
                ssleep(1);
                for_each_online_cpu(cpu) {
                        if (cpu_isset(cpu, map) &&
                                        !per_cpu(cpu_idle_state, cpu))
                                cpu_clear(cpu, map);
                }
                cpus_and(map, map, cpu_online_map);
        } while (!cpus_empty(map));
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
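
/*
 * Usage sketch (not part of the original source): code that replaces the
 * idle routine is expected to install the new handler and then call
 * cpu_idle_wait() so that no CPU is still executing the old one, roughly:
 *
 *      pm_idle = my_idle_routine;      // hypothetical replacement routine
 *      cpu_idle_wait();                // returns once every online CPU has
 *                                      // gone around its idle loop again
 */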
#ifdef CONFIG_HOTPLUG_CPU
DECLARE_PER_CPU(int, cpu_state);

#include <asm/nmi.h>
/* We don't actually take CPU down, just spin without interrupts. */
static inline void play_dead(void)
{
        idle_task_exit();
        wbinvd();
        mb();
        /* Ack it */
        __get_cpu_var(cpu_state) = CPU_DEAD;

        while (1)
                safe_halt();
}
#else
static inline void play_dead(void)
{
        BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle (void)
{
        set_thread_flag(TIF_POLLING_NRFLAG);

        /* endless idle loop with no priority at all */
        while (1) {
                while (!need_resched()) {
                        void (*idle)(void);

                        if (__get_cpu_var(cpu_idle_state))
                                __get_cpu_var(cpu_idle_state) = 0;

                        rmb();
                        idle = pm_idle;
                        if (!idle)
                                idle = default_idle;
                        if (cpu_is_offline(smp_processor_id()))
                                play_dead();
                        idle();
                }

                preempt_enable_no_resched();
                schedule();
                preempt_disable();
        }
}
/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 */
static void mwait_idle(void)
{
        local_irq_enable();

        while (!need_resched()) {
                __monitor((void *)&current_thread_info()->flags, 0, 0);
                smp_mb();
                if (need_resched())
                        break;
                __mwait(0, 0);
        }
}
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
        static int printed;
        if (cpu_has(c, X86_FEATURE_MWAIT)) {
                /*
                 * Skip, if setup has overridden idle.
                 * One CPU supports mwait => All CPUs support mwait
                 */
                if (!pm_idle) {
                        if (!printed) {
                                printk("using mwait in idle threads.\n");
                                printed = 1;
                        }
                        pm_idle = mwait_idle;
                }
        }
}

static int __init idle_setup (char *str)
{
        if (!strncmp(str, "poll", 4)) {
                printk("using polling idle threads.\n");
                pm_idle = poll_idle;
        }

        boot_option_idle_override = 1;
        return 1;
}

__setup("idle=", idle_setup);
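
/*
 * Usage note (illustrative, not from the original source): passing
 *
 *      idle=poll
 *
 * on the kernel command line makes idle_setup() install poll_idle as
 * pm_idle; since pm_idle is then non-NULL, select_idle_routine() above
 * leaves it in place instead of switching to mwait_idle.
 * boot_option_idle_override records that the user forced an idle method,
 * for other code that might otherwise override the choice.
 */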
/* Prints also some state that isn't saved in the pt_regs */
void __show_regs(struct pt_regs * regs)
{
        unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L, fs, gs, shadowgs;
        unsigned int fsindex, gsindex;
        unsigned int ds, cs, es;

        printk("\n");
        print_modules();
        printk("Pid: %d, comm: %.20s %s %s %.*s\n",
                current->pid, current->comm, print_tainted(),
                system_utsname.release,
                (int)strcspn(system_utsname.version, " "),
                system_utsname.version);
        printk("RIP: %04lx:[<%016lx>] ", regs->cs & 0xffff, regs->rip);
        printk_address(regs->rip);
        printk("\nRSP: %04lx:%016lx EFLAGS: %08lx\n", regs->ss, regs->rsp,
                regs->eflags);
        printk("RAX: %016lx RBX: %016lx RCX: %016lx\n",
               regs->rax, regs->rbx, regs->rcx);
        printk("RDX: %016lx RSI: %016lx RDI: %016lx\n",
               regs->rdx, regs->rsi, regs->rdi);
        printk("RBP: %016lx R08: %016lx R09: %016lx\n",
               regs->rbp, regs->r8, regs->r9);
        printk("R10: %016lx R11: %016lx R12: %016lx\n",
               regs->r10, regs->r11, regs->r12);
        printk("R13: %016lx R14: %016lx R15: %016lx\n",
               regs->r13, regs->r14, regs->r15);

        asm("movl %%ds,%0" : "=r" (ds));
        asm("movl %%cs,%0" : "=r" (cs));
        asm("movl %%es,%0" : "=r" (es));
        asm("movl %%fs,%0" : "=r" (fsindex));
        asm("movl %%gs,%0" : "=r" (gsindex));

        rdmsrl(MSR_FS_BASE, fs);
        rdmsrl(MSR_GS_BASE, gs);
        rdmsrl(MSR_KERNEL_GS_BASE, shadowgs);

        asm("movq %%cr0, %0": "=r" (cr0));
        asm("movq %%cr2, %0": "=r" (cr2));
        asm("movq %%cr3, %0": "=r" (cr3));
        asm("movq %%cr4, %0": "=r" (cr4));

        printk("FS: %016lx(%04x) GS:%016lx(%04x) knlGS:%016lx\n",
               fs, fsindex, gs, gsindex, shadowgs);
        printk("CS: %04x DS: %04x ES: %04x CR0: %016lx\n", cs, ds, es, cr0);
        printk("CR2: %016lx CR3: %016lx CR4: %016lx\n", cr2, cr3, cr4);
}

void show_regs(struct pt_regs *regs)
{
        printk("CPU %d:", smp_processor_id());
        __show_regs(regs);
        show_trace(&regs->rsp);
}
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
        struct task_struct *me = current;
        struct thread_struct *t = &me->thread;

        /*
         * Remove function-return probe instances associated with this task
         * and put them back on the free list. Do not insert an exit probe for
         * this function, it will be disabled by kprobe_flush_task if you do.
         */
        kprobe_flush_task(me);

        if (me->thread.io_bitmap_ptr) {
                struct tss_struct *tss = &per_cpu(init_tss, get_cpu());

                kfree(t->io_bitmap_ptr);
                t->io_bitmap_ptr = NULL;
                /*
                 * Careful, clear this in the TSS too:
                 */
                memset(tss->io_bitmap, 0xff, t->io_bitmap_max);
                t->io_bitmap_max = 0;
                put_cpu();
        }
}

void flush_thread(void)
{
        struct task_struct *tsk = current;
        struct thread_info *t = current_thread_info();

        /*
         * Remove function-return probe instances associated with this task
         * and put them back on the free list. Do not insert an exit probe for
         * this function, it will be disabled by kprobe_flush_task if you do.
         */
        kprobe_flush_task(tsk);

        if (t->flags & _TIF_ABI_PENDING)
                t->flags ^= (_TIF_ABI_PENDING | _TIF_IA32);

        tsk->thread.debugreg0 = 0;
        tsk->thread.debugreg1 = 0;
        tsk->thread.debugreg2 = 0;
        tsk->thread.debugreg3 = 0;
        tsk->thread.debugreg6 = 0;
        tsk->thread.debugreg7 = 0;
        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
        /*
         * Forget coprocessor state..
         */
        clear_fpu(tsk);
        clear_used_math();
}
void release_thread(struct task_struct *dead_task)
{
        if (dead_task->mm) {
                if (dead_task->mm->context.size) {
                        printk("WARNING: dead process %8s still has LDT? <%p/%d>\n",
                                        dead_task->comm,
                                        dead_task->mm->context.ldt,
                                        dead_task->mm->context.size);
                        BUG();
                }
        }
}

static inline void set_32bit_tls(struct task_struct *t, int tls, u32 addr)
{
        struct user_desc ud = {
                .base_addr = addr,
                .limit = 0xfffff,
                .seg_32bit = 1,
                .limit_in_pages = 1,
                .useable = 1,
        };
        struct n_desc_struct *desc = (void *)t->thread.tls_array;
        desc += tls;
        desc->a = LDT_entry_a(&ud);
        desc->b = LDT_entry_b(&ud);
}

static inline u32 read_32bit_tls(struct task_struct *t, int tls)
{
        struct desc_struct *desc = (void *)t->thread.tls_array;
        desc += tls;
        return desc->base0 |
                (((u32)desc->base1) << 16) |
                (((u32)desc->base2) << 24);
}
/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
        unlazy_fpu(tsk);
}

int copy_thread(int nr, unsigned long clone_flags, unsigned long rsp,
                unsigned long unused,
                struct task_struct * p, struct pt_regs * regs)
{
        int err;
        struct pt_regs * childregs;
        struct task_struct *me = current;

        childregs = ((struct pt_regs *)
                        (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
        *childregs = *regs;

        childregs->rax = 0;
        childregs->rsp = rsp;
        if (rsp == ~0UL)
                childregs->rsp = (unsigned long)childregs;

        p->thread.rsp = (unsigned long) childregs;
        p->thread.rsp0 = (unsigned long) (childregs+1);
        p->thread.userrsp = me->thread.userrsp;

        set_ti_thread_flag(p->thread_info, TIF_FORK);

        p->thread.fs = me->thread.fs;
        p->thread.gs = me->thread.gs;

        asm("mov %%gs,%0" : "=m" (p->thread.gsindex));
        asm("mov %%fs,%0" : "=m" (p->thread.fsindex));
        asm("mov %%es,%0" : "=m" (p->thread.es));
        asm("mov %%ds,%0" : "=m" (p->thread.ds));

        if (unlikely(me->thread.io_bitmap_ptr != NULL)) {
                p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
                if (!p->thread.io_bitmap_ptr) {
                        p->thread.io_bitmap_max = 0;
                        return -ENOMEM;
                }
                memcpy(p->thread.io_bitmap_ptr, me->thread.io_bitmap_ptr,
                                IO_BITMAP_BYTES);
        }

        /*
         * Set a new TLS for the child thread?
         */
        if (clone_flags & CLONE_SETTLS) {
#ifdef CONFIG_IA32_EMULATION
                if (test_thread_flag(TIF_IA32))
                        err = ia32_child_tls(p, childregs);
                else
#endif
                        err = do_arch_prctl(p, ARCH_SET_FS, childregs->r8);
                if (err)
                        goto out;
        }
        err = 0;
out:
        if (err && p->thread.io_bitmap_ptr) {
                kfree(p->thread.io_bitmap_ptr);
                p->thread.io_bitmap_max = 0;
        }
        return err;
}
/*
 * This special macro can be used to load a debugging register
 */
#define loaddebug(thread,r) set_debug(thread->debugreg ## r, r)

/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * This could still be optimized:
 * - fold all the options into a flag word and test it with a single test.
 * - could test fs/gs bitsliced
 */
struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
        struct thread_struct *prev = &prev_p->thread,
                                 *next = &next_p->thread;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(init_tss, cpu);

        unlazy_fpu(prev_p);

        /*
         * Reload esp0, LDT and the page table pointer:
         */
        tss->rsp0 = next->rsp0;

        /*
         * Switch DS and ES.
         * This won't pick up thread selector changes, but I guess that is ok.
         */
        asm volatile("mov %%es,%0" : "=m" (prev->es));
        if (unlikely(next->es | prev->es))
                loadsegment(es, next->es);

        asm volatile ("mov %%ds,%0" : "=m" (prev->ds));
        if (unlikely(next->ds | prev->ds))
                loadsegment(ds, next->ds);

        load_TLS(next, cpu);

        /*
         * Switch FS and GS.
         */
        {
                unsigned fsindex;
                asm volatile("movl %%fs,%0" : "=r" (fsindex));
                /* segment register != 0 always requires a reload.
                   also reload when it has changed.
                   when prev process used 64bit base always reload
                   to avoid an information leak. */
                if (unlikely(fsindex | next->fsindex | prev->fs)) {
                        loadsegment(fs, next->fsindex);
                        /* check if the user used a selector != 0
                         * if yes clear 64bit base, since overloaded base
                         * is always mapped to the Null selector
                         */
                        if (fsindex)
                                prev->fs = 0;
                }
                /* when next process has a 64bit base use it */
                if (next->fs)
                        wrmsrl(MSR_FS_BASE, next->fs);
                prev->fsindex = fsindex;
        }
        {
                unsigned gsindex;
                asm volatile("movl %%gs,%0" : "=r" (gsindex));
                if (unlikely(gsindex | next->gsindex | prev->gs)) {
                        load_gs_index(next->gsindex);
                        if (gsindex)
                                prev->gs = 0;
                }
                if (next->gs)
                        wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
                prev->gsindex = gsindex;
        }

        /*
         * Switch the PDA context.
         */
        prev->userrsp = read_pda(oldrsp);
        write_pda(oldrsp, next->userrsp);
        write_pda(pcurrent, next_p);
        write_pda(kernelstack,
                  (unsigned long)next_p->thread_info + THREAD_SIZE - PDA_STACKOFFSET);

        /*
         * Now maybe reload the debug registers
         */
        if (unlikely(next->debugreg7)) {
                loaddebug(next, 0);
                loaddebug(next, 1);
                loaddebug(next, 2);
                loaddebug(next, 3);
                /* no 4 and 5 */
                loaddebug(next, 6);
                loaddebug(next, 7);
        }

        /*
         * Handle the IO bitmap
         */
        if (unlikely(prev->io_bitmap_ptr || next->io_bitmap_ptr)) {
                if (next->io_bitmap_ptr)
                        /*
                         * Copy the relevant range of the IO bitmap.
                         * Normally this is 128 bytes or less:
                         */
                        memcpy(tss->io_bitmap, next->io_bitmap_ptr,
                                max(prev->io_bitmap_max, next->io_bitmap_max));
                else {
                        /*
                         * Clear any possible leftover bits:
                         */
                        memset(tss->io_bitmap, 0xff, prev->io_bitmap_max);
                }
        }

        return prev_p;
}
/*
 * sys_execve() executes a new program.
 */
asmlinkage
long sys_execve(char __user *name, char __user * __user *argv,
                char __user * __user *envp, struct pt_regs regs)
{
        long error;
        char * filename;

        filename = getname(name);
        error = PTR_ERR(filename);
        if (IS_ERR(filename))
                return error;
        error = do_execve(filename, argv, envp, &regs);
        if (error == 0) {
                task_lock(current);
                current->ptrace &= ~PT_DTRACE;
                task_unlock(current);
        }
        putname(filename);
        return error;
}

void set_personality_64bit(void)
{
        /* inherit personality from parent */

        /* Make sure to be in 64bit mode */
        clear_thread_flag(TIF_IA32);

        /* TBD: overwrites user setup. Should have two bits.
           But 64bit processes have always behaved this way,
           so it's not too bad. The main problem is just that
           32bit children are affected again. */
        current->personality &= ~READ_IMPLIES_EXEC;
}
asmlinkage long sys_fork(struct pt_regs *regs)
{
        return do_fork(SIGCHLD, regs->rsp, regs, 0, NULL, NULL);
}

asmlinkage long
sys_clone(unsigned long clone_flags, unsigned long newsp,
          void __user *parent_tid, void __user *child_tid, struct pt_regs *regs)
{
        if (!newsp)
                newsp = regs->rsp;
        return do_fork(clone_flags, newsp, regs, 0, parent_tid, child_tid);
}

/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage long sys_vfork(struct pt_regs *regs)
{
        return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs->rsp, regs, 0,
                    NULL, NULL);
}
unsigned long get_wchan(struct task_struct *p)
{
        unsigned long stack;
        u64 fp, rip;
        int count = 0;

        if (!p || p == current || p->state == TASK_RUNNING)
                return 0;
        stack = (unsigned long)p->thread_info;
        if (p->thread.rsp < stack || p->thread.rsp > stack+THREAD_SIZE)
                return 0;
        fp = *(u64 *)(p->thread.rsp);
        do {
                if (fp < (unsigned long)stack ||
                    fp > (unsigned long)stack+THREAD_SIZE)
                        return 0;
                rip = *(u64 *)(fp+8);
                if (!in_sched_functions(rip))
                        return rip;
                fp = *(u64 *)fp;
        } while (count++ < 16);
        return 0;
}
long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
{
        int ret = 0;
        int doit = task == current;
        int cpu;

        switch (code) {
        case ARCH_SET_GS:
                if (addr >= TASK_SIZE_OF(task))
                        return -EPERM;
                cpu = get_cpu();
                /* handle small bases via the GDT because that's faster to
                   switch. */
                if (addr <= 0xffffffff) {
                        set_32bit_tls(task, GS_TLS, addr);
                        if (doit) {
                                load_TLS(&task->thread, cpu);
                                load_gs_index(GS_TLS_SEL);
                        }
                        task->thread.gsindex = GS_TLS_SEL;
                        task->thread.gs = 0;
                } else {
                        task->thread.gsindex = 0;
                        task->thread.gs = addr;
                        if (doit) {
                                load_gs_index(0);
                                ret = checking_wrmsrl(MSR_KERNEL_GS_BASE, addr);
                        }
                }
                put_cpu();
                break;
        case ARCH_SET_FS:
                /* Not strictly needed for fs, but do it for symmetry
                   with gs */
                if (addr >= TASK_SIZE_OF(task))
                        return -EPERM;
                cpu = get_cpu();
                /* handle small bases via the GDT because that's faster to
                   switch. */
                if (addr <= 0xffffffff) {
                        set_32bit_tls(task, FS_TLS, addr);
                        if (doit) {
                                load_TLS(&task->thread, cpu);
                                asm volatile("movl %0,%%fs" :: "r"(FS_TLS_SEL));
                        }
                        task->thread.fsindex = FS_TLS_SEL;
                        task->thread.fs = 0;
                } else {
                        task->thread.fsindex = 0;
                        task->thread.fs = addr;
                        if (doit) {
                                /* set the selector to 0 to not confuse
                                   __switch_to */
                                asm volatile("movl %0,%%fs" :: "r" (0));
                                ret = checking_wrmsrl(MSR_FS_BASE, addr);
                        }
                }
                put_cpu();
                break;
        case ARCH_GET_FS: {
                unsigned long base;
                if (task->thread.fsindex == FS_TLS_SEL)
                        base = read_32bit_tls(task, FS_TLS);
                else if (doit)
                        rdmsrl(MSR_FS_BASE, base);
                else
                        base = task->thread.fs;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }
        case ARCH_GET_GS: {
                unsigned long base;
                if (task->thread.gsindex == GS_TLS_SEL)
                        base = read_32bit_tls(task, GS_TLS);
                else if (doit)
                        rdmsrl(MSR_KERNEL_GS_BASE, base);
                else
                        base = task->thread.gs;
                ret = put_user(base, (unsigned long __user *)addr);
                break;
        }

        default:
                ret = -EINVAL;
                break;
        }

        return ret;
}

long sys_arch_prctl(int code, unsigned long addr)
{
        return do_arch_prctl(current, code, addr);
}
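
/*
 * Usage sketch (illustrative, not part of the original source): user space
 * reaches the ARCH_SET_FS/ARCH_GET_FS cases above through the arch_prctl
 * system call, roughly:
 *
 *      #include <asm/prctl.h>
 *      #include <sys/syscall.h>
 *      #include <unistd.h>
 *
 *      unsigned long base = (unsigned long)my_tls_block;  // hypothetical buffer
 *      syscall(SYS_arch_prctl, ARCH_SET_FS, base);        // addr is the new base
 *      unsigned long cur;
 *      syscall(SYS_arch_prctl, ARCH_GET_FS, &cur);        // addr is a user pointer
 *
 * Note the asymmetry handled in do_arch_prctl(): for ARCH_SET_* the addr
 * argument is the new base itself, while for ARCH_GET_* it is a user pointer
 * that the current base is written through with put_user().
 */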
/*
 * Capture the user space registers if the task is not running (in user space)
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
        struct pt_regs *pp, ptregs;

        pp = (struct pt_regs *)(tsk->thread.rsp0);
        --pp;

        ptregs = *pp;
        ptregs.cs &= 0xffff;
        ptregs.ss &= 0xffff;

        elf_core_copy_regs(regs, &ptregs);

        return 1;
}

unsigned long arch_align_stack(unsigned long sp)
{
        if (randomize_va_space)
                sp -= get_random_int() % 8192;
        return sp & ~0xf;
}