/*
 *  Copyright (C) 1995  Linus Torvalds
 *
 *  Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 */

/*
 * This file handles the architecture-dependent parts of process handling..
 */
#include <linux/cpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/elfcore.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/user.h>
#include <linux/a.out.h>
#include <linux/interrupt.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/init.h>
#include <linux/mc146818rtc.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/ptrace.h>
#include <linux/random.h>
#include <linux/personality.h>
#include <linux/tick.h>
#include <linux/percpu.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#ifdef CONFIG_MATH_EMULATION
#include <asm/math_emu.h>
#endif

#include <linux/err.h>

#include <asm/tlbflush.h>
#include <asm/kdebug.h>
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");

static int hlt_counter;

unsigned long boot_option_idle_override = 0;
EXPORT_SYMBOL(boot_option_idle_override);

DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
EXPORT_PER_CPU_SYMBOL(current_task);

DEFINE_PER_CPU(int, cpu_number);
EXPORT_PER_CPU_SYMBOL(cpu_number);
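/*
 * Illustrative sketch (an aside, assuming the x86_read_percpu() accessor
 * rather than anything defined in this file): the per-CPU variables above
 * back the usual lookups of the running task and the CPU number, e.g.
 *
 *	struct task_struct *curr = x86_read_percpu(current_task);
 *	int cpu = x86_read_percpu(cpu_number);
 *
 * x86_write_percpu(current_task, next_p) in __switch_to() below is the
 * matching store side.
 */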
/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *tsk)
{
	return ((unsigned long *)tsk->thread.esp)[3];
}
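/*
 * Descriptive note (an assumption about the layout rather than anything
 * spelled out here): the hard-coded [3] index depends on the exact words
 * that switch_to() leaves on the sleeping task's kernel stack above the
 * saved ->esp, so if switch_to() changes what it pushes, this offset has
 * to follow.
 */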
/*
 * Power-management idle function, if any..
 */
void (*pm_idle)(void);
EXPORT_SYMBOL(pm_idle);

static DEFINE_PER_CPU(unsigned int, cpu_idle_state);
void disable_hlt(void)
{
	hlt_counter++;
}
EXPORT_SYMBOL(disable_hlt);

void enable_hlt(void)
{
	hlt_counter--;
}
EXPORT_SYMBOL(enable_hlt);
/*
 * We use this if we don't have any better
 * idle routine..
 */
void default_idle(void)
{
	if (!hlt_counter && boot_cpu_data.hlt_works_ok) {
		current_thread_info()->status &= ~TS_POLLING;
		/*
		 * TS_POLLING-cleared state must be visible before we
		 * test NEED_RESCHED:
		 */
		smp_mb();

		local_irq_disable();
		if (!need_resched()) {
			ktime_t t0, t1;
			u64 t0n, t1n;

			t0 = ktime_get();
			t0n = ktime_to_ns(t0);
			safe_halt();	/* enables interrupts racelessly */
			local_irq_disable();
			t1 = ktime_get();
			t1n = ktime_to_ns(t1);
			sched_clock_idle_wakeup_event(t1n - t0n);
		}
		local_irq_enable();
		current_thread_info()->status |= TS_POLLING;
	} else {
		/* loop is done by the caller */
		cpu_relax();
	}
}
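/*
 * Descriptive note on the barrier in default_idle() above: the waking CPU
 * looks at TS_POLLING to decide whether setting TIF_NEED_RESCHED is enough
 * or whether a reschedule IPI is required, so the clearing of TS_POLLING
 * must be globally visible before this CPU samples need_resched(),
 * otherwise a wakeup could be lost across the hlt.
 */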
#ifdef CONFIG_APM_MODULE
EXPORT_SYMBOL(default_idle);
#endif
/*
 * On SMP it's slightly faster (but much more power-consuming!)
 * to poll the ->work.need_resched flag instead of waiting for the
 * cross-CPU IPI to arrive. Use this option with caution.
 */
static void poll_idle(void)
{
	cpu_relax();
}
#ifdef CONFIG_HOTPLUG_CPU
/* We don't actually take CPU down, just spin without interrupts. */
static inline void play_dead(void)
{
	/* This must be done before dead CPU ack */
	cpu_exit_clear();
	wbinvd();
	mb();
	/* Ack it */
	__get_cpu_var(cpu_state) = CPU_DEAD;

	/*
	 * With physical CPU hotplug, we should halt the cpu
	 */
	local_irq_disable();
	while (1)
		halt();
}
#else
static inline void play_dead(void)
{
	BUG();
}
#endif /* CONFIG_HOTPLUG_CPU */
/*
 * The idle thread. There's no useful work to be
 * done, so just try to conserve power and have a
 * low exit latency (ie sit in a loop waiting for
 * somebody to say that they'd like to reschedule)
 */
void cpu_idle(void)
{
	int cpu = smp_processor_id();

	current_thread_info()->status |= TS_POLLING;

	/* endless idle loop with no priority at all */
	while (1) {
		tick_nohz_stop_sched_tick();
		while (!need_resched()) {
			void (*idle)(void);

			if (__get_cpu_var(cpu_idle_state))
				__get_cpu_var(cpu_idle_state) = 0;

			check_pgt_cache();
			rmb();
			idle = pm_idle;

			if (!idle)
				idle = default_idle;

			if (cpu_is_offline(cpu))
				play_dead();

			__get_cpu_var(irq_stat).idle_timestamp = jiffies;
			idle();
		}
		tick_nohz_restart_sched_tick();
		preempt_enable_no_resched();
		schedule();
		preempt_disable();
	}
}
static void do_nothing(void *unused)
{
}
void cpu_idle_wait(void)
{
	unsigned int cpu, this_cpu = get_cpu();
	cpumask_t map, tmp = current->cpus_allowed;

	set_cpus_allowed(current, cpumask_of_cpu(this_cpu));
	put_cpu();

	cpus_clear(map);
	for_each_online_cpu(cpu) {
		per_cpu(cpu_idle_state, cpu) = 1;
		cpu_set(cpu, map);
	}

	__get_cpu_var(cpu_idle_state) = 0;

	wmb();
	do {
		ssleep(1);
		for_each_online_cpu(cpu) {
			if (cpu_isset(cpu, map) && !per_cpu(cpu_idle_state, cpu))
				cpu_clear(cpu, map);
		}
		cpus_and(map, map, cpu_online_map);
		/*
		 * We waited 1 sec, if a CPU still did not call idle
		 * it may be because it is in idle and not waking up
		 * because it has nothing to do.
		 * Give all the remaining CPUS a kick.
		 */
		smp_call_function_mask(map, do_nothing, 0, 0);
	} while (!cpus_empty(map));

	set_cpus_allowed(current, tmp);
}
EXPORT_SYMBOL_GPL(cpu_idle_wait);
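/*
 * Typical usage sketch (illustrative; my_new_idle_routine is a
 * hypothetical handler, not defined in this file): after swapping the
 * idle handler, wait until every CPU has dropped out of the old one
 * before, e.g., unloading the module that provided it:
 *
 *	pm_idle = my_new_idle_routine;
 *	cpu_idle_wait();
 */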
/*
 * This uses new MONITOR/MWAIT instructions on P4 processors with PNI,
 * which can obviate IPI to trigger checking of need_resched.
 * We execute MONITOR against need_resched and enter optimized wait state
 * through MWAIT. Whenever someone changes need_resched, we would be woken
 * up from MWAIT (without an IPI).
 *
 * New with Core Duo processors, MWAIT can take some hints based on CPU
 * capability.
 */
void mwait_idle_with_hints(unsigned long ax, unsigned long cx)
{
	if (!need_resched()) {
		__monitor((void *)&current_thread_info()->flags, 0, 0);
		smp_mb();
		if (!need_resched())
			__mwait(ax, cx);
	}
}
/* Default MONITOR/MWAIT with no hints, used for default C1 state */
static void mwait_idle(void)
{
	local_irq_enable();
	mwait_idle_with_hints(0, 0);
}
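/*
 * Illustrative call (the hint encoding here is an assumption about how a
 * C-state driver might use this, not something defined in this file):
 * request a deeper sub-state via the eax hint and treat masked interrupts
 * as break events via ecx bit 0:
 *
 *	mwait_idle_with_hints(0x10, 1);
 */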
void __cpuinit select_idle_routine(const struct cpuinfo_x86 *c)
{
	if (cpu_has(c, X86_FEATURE_MWAIT)) {
		printk("monitor/mwait feature present.\n");
		/*
		 * Skip, if setup has overridden idle.
		 * One CPU supports mwait => All CPUs support mwait
		 */
		if (!pm_idle) {
			printk("using mwait in idle threads.\n");
			pm_idle = mwait_idle;
		}
	}
}
static int __init idle_setup(char *str)
{
	if (!strcmp(str, "poll")) {
		printk("using polling idle threads.\n");
		pm_idle = poll_idle;
#ifdef CONFIG_X86_SMP
		if (smp_num_siblings > 1)
			printk("WARNING: polling idle and HT enabled, performance may degrade.\n");
#endif
	} else if (!strcmp(str, "mwait"))
		force_mwait = 1;
	else
		return -1;

	boot_option_idle_override = 1;
	return 0;
}
early_param("idle", idle_setup);
void __show_registers(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned long sp;
	unsigned short ss, gs;

	if (user_mode_vm(regs)) {
		sp = regs->sp;
		ss = regs->ss & 0xffff;
		savesegment(gs, gs);
	} else {
		sp = (unsigned long) (&regs->sp);
		savesegment(gs, gs);
		ss = __KERNEL_DS;
	}

	printk("\n");
	printk("Pid: %d, comm: %s %s (%s %.*s)\n",
			task_pid_nr(current), current->comm,
			print_tainted(), init_utsname()->release,
			(int)strcspn(init_utsname()->version, " "),
			init_utsname()->version);

	printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
			0xffff & regs->cs, regs->ip, regs->flags,
			smp_processor_id());
	print_symbol("EIP is at %s\n", regs->ip);

	printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
		regs->ax, regs->bx, regs->cx, regs->dx);
	printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
		regs->si, regs->di, regs->bp, sp);
	printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
	       regs->ds & 0xffff, regs->es & 0xffff,
	       regs->fs & 0xffff, gs, ss);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4_safe();
	printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
			cr0, cr2, cr3, cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
			d0, d1, d2, d3);

	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk("DR6: %08lx DR7: %08lx\n",
			d6, d7);
}

void show_regs(struct pt_regs *regs)
{
	__show_registers(regs, 1);
	show_trace(NULL, regs, &regs->sp);
}
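/*
 * Usage note (descriptive): show_regs() passes all = 1 so the control and
 * debug registers are dumped as well; a caller that prints those
 * separately (an oops path that reports CR2 itself, say) is expected to
 * pass all = 0 and get only the general-purpose and segment registers.
 */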
/*
 * This gets run with %bx containing the
 * function to call, and %dx containing
 * the "args".
 */
extern void kernel_thread_helper(void);

/*
 * Create a kernel thread
 */
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
	struct pt_regs regs;

	memset(&regs, 0, sizeof(regs));

	regs.bx = (unsigned long) fn;
	regs.dx = (unsigned long) arg;

	regs.ds = __USER_DS;
	regs.es = __USER_DS;
	regs.fs = __KERNEL_PERCPU;
	regs.orig_ax = -1;
	regs.ip = (unsigned long) kernel_thread_helper;
	regs.cs = __KERNEL_CS | get_kernel_rpl();
	regs.flags = X86_EFLAGS_IF | X86_EFLAGS_SF | X86_EFLAGS_PF | 0x2;

	/* Ok, create the new process.. */
	return do_fork(flags | CLONE_VM | CLONE_UNTRACED, 0, &regs, 0, NULL, NULL);
}
EXPORT_SYMBOL(kernel_thread);
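/*
 * Usage sketch (illustrative; my_thread_fn and data are hypothetical, not
 * defined in this file): spawn a kernel thread that shares the kernel's VM
 * and runs my_thread_fn(data):
 *
 *	static int my_thread_fn(void *data)
 *	{
 *		do_work(data);
 *		return 0;
 *	}
 *
 *	pid = kernel_thread(my_thread_fn, data, CLONE_FS | CLONE_FILES);
 */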
/*
 * Free current thread data structures etc..
 */
void exit_thread(void)
{
	/* The process may have allocated an io port bitmap... nuke it. */
	if (unlikely(test_thread_flag(TIF_IO_BITMAP))) {
		struct task_struct *tsk = current;
		struct thread_struct *t = &tsk->thread;
		int cpu = get_cpu();
		struct tss_struct *tss = &per_cpu(init_tss, cpu);

		kfree(t->io_bitmap_ptr);
		t->io_bitmap_ptr = NULL;
		clear_thread_flag(TIF_IO_BITMAP);
		/*
		 * Careful, clear this in the TSS too:
		 */
		memset(tss->io_bitmap, 0xff, tss->io_bitmap_max);
		t->io_bitmap_max = 0;
		tss->io_bitmap_owner = NULL;
		tss->io_bitmap_max = 0;
		tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
		put_cpu();
	}
}
void flush_thread(void)
{
	struct task_struct *tsk = current;

	memset(tsk->thread.debugreg, 0, sizeof(unsigned long)*8);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	clear_tsk_thread_flag(tsk, TIF_DEBUG);
	/*
	 * Forget coprocessor state..
	 */
	clear_fpu(tsk);
	clear_used_math();
}
void release_thread(struct task_struct *dead_task)
{
	BUG_ON(dead_task->mm);
	release_vm86_irqs(dead_task);
}
/*
 * This gets called before we allocate a new thread and copy
 * the current task into it.
 */
void prepare_to_copy(struct task_struct *tsk)
{
	unlazy_fpu(tsk);
}
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
	unsigned long unused,
	struct task_struct * p, struct pt_regs * regs)
{
	struct pt_regs * childregs;
	struct task_struct *tsk;
	int err;

	childregs = task_pt_regs(p);
	*childregs = *regs;
	childregs->ax = 0;
	childregs->sp = sp;

	p->thread.esp = (unsigned long) childregs;
	p->thread.esp0 = (unsigned long) (childregs+1);

	p->thread.eip = (unsigned long) ret_from_fork;

	savesegment(gs, p->thread.gs);

	tsk = current;
	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
						IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	err = 0;

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS)
		err = do_set_thread_area(p, -1,
			(struct user_desc __user *)childregs->si, 0);

	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}
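/*
 * Descriptive note: the child gets a copy of the parent's pt_regs with
 * ->ax forced to 0 and ->sp set to the new stack pointer, which is what
 * makes fork()/clone() return 0 in the child while the parent receives
 * the child's pid from do_fork().
 */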
/*
 * fill in the user structure for a core dump..
 */
void dump_thread(struct pt_regs * regs, struct user * dump)
{
	int i;

	/* changed the size calculations - should hopefully work better. lbt */
	dump->magic = CMAGIC;
	dump->start_code = 0;
	dump->start_stack = regs->sp & ~(PAGE_SIZE - 1);
	dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
	dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
	dump->u_dsize -= dump->u_tsize;
	dump->u_ssize = 0;
	for (i = 0; i < 8; i++)
		dump->u_debugreg[i] = current->thread.debugreg[i];

	if (dump->start_stack < TASK_SIZE)
		dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;

	dump->regs.ebx = regs->bx;
	dump->regs.ecx = regs->cx;
	dump->regs.edx = regs->dx;
	dump->regs.esi = regs->si;
	dump->regs.edi = regs->di;
	dump->regs.ebp = regs->bp;
	dump->regs.eax = regs->ax;
	dump->regs.ds = regs->ds;
	dump->regs.es = regs->es;
	dump->regs.fs = regs->fs;
	savesegment(gs, dump->regs.gs);
	dump->regs.orig_eax = regs->orig_ax;
	dump->regs.eip = regs->ip;
	dump->regs.cs = regs->cs;
	dump->regs.eflags = regs->flags;
	dump->regs.esp = regs->sp;
	dump->regs.ss = regs->ss;

	dump->u_fpvalid = dump_fpu (regs, &dump->i387);
}
EXPORT_SYMBOL(dump_thread);
/*
 * Capture the user space registers if the task is not running (in user space)
 */
int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
{
	struct pt_regs ptregs = *task_pt_regs(tsk);
	ptregs.cs &= 0xffff;
	ptregs.ds &= 0xffff;
	ptregs.es &= 0xffff;
	ptregs.ss &= 0xffff;

	elf_core_copy_regs(regs, &ptregs);

	return 1;
}
#ifdef CONFIG_SECCOMP
void hard_disable_TSC(void)
{
	write_cr4(read_cr4() | X86_CR4_TSD);
}

void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}

void hard_enable_TSC(void)
{
	write_cr4(read_cr4() & ~X86_CR4_TSD);
}
#endif /* CONFIG_SECCOMP */
static noinline void
__switch_to_xtra(struct task_struct *prev_p, struct task_struct *next_p,
		 struct tss_struct *tss)
{
	struct thread_struct *prev, *next;

	prev = &prev_p->thread;
	next = &next_p->thread;

	if (next->debugctlmsr != prev->debugctlmsr)
		wrmsr(MSR_IA32_DEBUGCTLMSR, next->debugctlmsr, 0);

	if (test_tsk_thread_flag(next_p, TIF_DEBUG)) {
		set_debugreg(next->debugreg[0], 0);
		set_debugreg(next->debugreg[1], 1);
		set_debugreg(next->debugreg[2], 2);
		set_debugreg(next->debugreg[3], 3);
		/* no 4 and 5 */
		set_debugreg(next->debugreg[6], 6);
		set_debugreg(next->debugreg[7], 7);
	}

#ifdef CONFIG_SECCOMP
	if (test_tsk_thread_flag(prev_p, TIF_NOTSC) ^
	    test_tsk_thread_flag(next_p, TIF_NOTSC)) {
		/* prev and next are different */
		if (test_tsk_thread_flag(next_p, TIF_NOTSC))
			hard_disable_TSC();
		else
			hard_enable_TSC();
	}
#endif

	if (!test_tsk_thread_flag(next_p, TIF_IO_BITMAP)) {
		/*
		 * Disable the bitmap via an invalid offset. We still cache
		 * the previous bitmap owner and the IO bitmap contents:
		 */
		tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET;
		return;
	}

	if (likely(next == tss->io_bitmap_owner)) {
		/*
		 * Previous owner of the bitmap (hence the bitmap content)
		 * matches the next task, so we don't have to do anything
		 * but set a valid offset in the TSS:
		 */
		tss->x86_tss.io_bitmap_base = IO_BITMAP_OFFSET;
		return;
	}

	/*
	 * Lazy TSS I/O bitmap copy. We set an invalid offset here and
	 * let the task get a GPF in case an I/O instruction is
	 * performed. The GPF handler verifies that the faulting task
	 * has a valid I/O bitmap and, if so, does the real copy and
	 * restarts the instruction. This saves us redundant copies
	 * when the currently switched task does not perform any I/O
	 * during its timeslice.
	 */
	tss->x86_tss.io_bitmap_base = INVALID_IO_BITMAP_OFFSET_LAZY;
}
/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPU's, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is a fairly red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %ax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
				 *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);

	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

	__unlazy_fpu(prev_p);

	/* we're going to use this soon, after a few expensive things */
	if (next_p->fpu_counter > 5)
		prefetch(&next->i387.fxsave);

	/*
	 * Reload esp0.
	 */
	load_esp0(tss, next);

	/*
	 * Save away %gs. No need to save %fs, as it was saved on the
	 * stack on entry. No need to save %es and %ds, as those are
	 * always kernel segments while inside the kernel. Doing this
	 * before setting the new TLS descriptors avoids the situation
	 * where we temporarily have non-reloadable segments in %fs
	 * and %gs. This could be an issue if the NMI handler ever
	 * used %fs or %gs (it does not today), or if the kernel is
	 * running inside of a hypervisor layer.
	 */
	savesegment(gs, prev->gs);

	/*
	 * Load the per-thread Thread-Local Storage descriptor.
	 */
	load_TLS(next, cpu);

	/*
	 * Restore IOPL if needed. In normal use, the flags restore
	 * in the switch assembly will handle this. But if the kernel
	 * is running virtualized at a non-zero CPL, the popf will
	 * not restore flags, so it must be done in a separate step.
	 */
	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
		set_iopl_mask(next->iopl);

	/*
	 * Now maybe handle debug registers and/or IO bitmaps
	 */
	if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
		__switch_to_xtra(prev_p, next_p, tss);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_leave_lazy_cpu_mode();

	/* If the task has used fpu the last 5 timeslices, just do a full
	 * restore of the math state immediately to avoid the trap; the
	 * chances of needing FPU soon are obviously high now
	 */
	if (next_p->fpu_counter > 5)
		math_state_restore();

	/*
	 * Restore %gs if needed (which is common)
	 */
	if (prev->gs | next->gs)
		loadsegment(gs, next->gs);

	x86_write_percpu(current_task, next_p);

	return prev_p;
}
asmlinkage int sys_fork(struct pt_regs regs)
{
	return do_fork(SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
}
asmlinkage int sys_clone(struct pt_regs regs)
{
	unsigned long clone_flags;
	unsigned long newsp;
	int __user *parent_tidptr, *child_tidptr;

	clone_flags = regs.bx;
	newsp = regs.cx;
	parent_tidptr = (int __user *)regs.dx;
	child_tidptr = (int __user *)regs.di;
	if (!newsp)
		newsp = regs.sp;
	return do_fork(clone_flags, newsp, &regs, 0, parent_tidptr, child_tidptr);
}
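/*
 * Descriptive note: this mirrors the i386 clone() register convention -
 * %ebx carries the clone flags, %ecx the new stack pointer, %edx and %edi
 * the parent/child TID pointers, and (as copy_thread() above shows via
 * childregs->si) %esi points at the user_desc used for CLONE_SETTLS.
 */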
/*
 * This is trivial, and on the face of it looks like it
 * could equally well be done in user mode.
 *
 * Not so, for quite unobvious reasons - register pressure.
 * In user mode vfork() cannot have a stack frame, and if
 * done by calling the "clone()" system call directly, you
 * do not have enough call-clobbered registers to hold all
 * the information you need.
 */
asmlinkage int sys_vfork(struct pt_regs regs)
{
	return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, regs.sp, &regs, 0, NULL, NULL);
}
/*
 * sys_execve() executes a new program.
 */
asmlinkage int sys_execve(struct pt_regs regs)
{
	int error;
	char * filename;

	filename = getname((char __user *) regs.bx);
	error = PTR_ERR(filename);
	if (IS_ERR(filename))
		goto out;
	error = do_execve(filename,
			(char __user * __user *) regs.cx,
			(char __user * __user *) regs.dx,
			&regs);
	if (error == 0) {
		/* Make sure we don't return using sysenter.. */
		set_thread_flag(TIF_IRET);
	}
	putname(filename);
out:
	return error;
}
#define top_esp		(THREAD_SIZE - sizeof(unsigned long))
#define top_ebp		(THREAD_SIZE - 2*sizeof(unsigned long))

unsigned long get_wchan(struct task_struct *p)
{
	unsigned long bp, sp, ip;
	unsigned long stack_page;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;
	stack_page = (unsigned long)task_stack_page(p);
	sp = p->thread.esp;
	if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
		return 0;
	/* include/asm-i386/system.h:switch_to() pushes bp last. */
	bp = *(unsigned long *) sp;
	do {
		if (bp < stack_page || bp > top_ebp+stack_page)
			return 0;
		ip = *(unsigned long *) (bp+4);
		if (!in_sched_functions(ip))
			return ip;
		bp = *(unsigned long *) bp;
	} while (count++ < 16);
	return 0;
}
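/*
 * Descriptive note on the walk above: each stack frame starts with the
 * saved %ebp followed by the return address, so *(bp) is the caller's
 * frame pointer and *(bp + 4) the caller's return address; the loop
 * reports the first return address outside the scheduler functions as
 * the task's wait channel.
 */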
unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() % 8192;
	return sp & ~0xf;
}
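/*
 * Descriptive note: the randomization above shifts the stack down by up
 * to 8 KiB, and the final mask keeps the result 16-byte aligned.
 */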
unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	unsigned long range_end = mm->brk + 0x02000000;
	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
}