/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2018 Joyent, Inc.
 */

/*
 * Process switching routines.
 */

#include "assym.h"

#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/stack.h>
#include <sys/segments.h>
#include <sys/psw.h>

/*
 * resume(thread_id_t t);
 *
 * a thread can only run on one processor at a time. there
 * exists a window on MPs where the current thread on one
 * processor is capable of being dispatched by another processor.
 * some overlap between outgoing and incoming threads can happen
 * when they are the same thread. in this case where the threads
 * are the same, resume() on one processor will spin on the incoming
 * thread until resume() on the other processor has finished with
 * the outgoing thread.
 *
 * The MMU context changes when the resuming thread resides in a different
 * process.  Kernel threads are known by resume to reside in process 0.
 * The MMU context, therefore, only changes when resuming a thread in
 * a process different from curproc.
 *
 * resume_from_intr() is called when the thread being resumed was not
 * passivated by resume (e.g. was interrupted).  This means that the
 * resume lock is already held and that a restore context is not needed.
 * Also, the MMU context is not changed on the resume in this case.
 *
 * resume_from_zombie() is the same as resume except the calling thread
 * is a zombie and must be put on the deathrow list after the CPU is
 * off the stack.
 */

#if LWP_PCB_FPU != 0
#error LWP_PCB_FPU MUST be defined as 0 for code in swtch.s to work
#endif	/* LWP_PCB_FPU != 0 */

#if defined(__amd64)

/*
 * Save non-volatile regs other than %rsp (%rbx, %rbp, and %r12 - %r15)
 *
 * The stack frame must be created before the save of %rsp so that tracebacks
 * of swtch()ed-out processes show the process as having last called swtch().
 */
#define	SAVE_REGS(thread_t, retaddr)			\
	movq	%rbp, T_RBP(thread_t);			\
	movq	%rbx, T_RBX(thread_t);			\
	movq	%r12, T_R12(thread_t);			\
	movq	%r13, T_R13(thread_t);			\
	movq	%r14, T_R14(thread_t);			\
	movq	%r15, T_R15(thread_t);			\
	pushq	%rbp;					\
	movq	%rsp, %rbp;				\
	movq	%rsp, T_SP(thread_t);			\
	movq	retaddr, T_PC(thread_t);		\
	movq	%rdi, %r12;				\
	call	__dtrace_probe___sched_off__cpu

/*
 * Restore non-volatile regs other than %rsp (%rbx, %rbp, and %r12 - %r15)
 *
 * We load up %rsp from the label_t as part of the context switch, so
 * we don't repeat that here.
 *
 * We don't do a 'leave,' because reloading %rsp/%rbp from the label_t
 * already has the effect of putting the stack back the way it was when
 * we came in.
 */
#define	RESTORE_REGS(scratch_reg)			\
	movq	%gs:CPU_THREAD, scratch_reg;		\
	movq	T_RBP(scratch_reg), %rbp;		\
	movq	T_RBX(scratch_reg), %rbx;		\
	movq	T_R12(scratch_reg), %r12;		\
	movq	T_R13(scratch_reg), %r13;		\
	movq	T_R14(scratch_reg), %r14;		\
	movq	T_R15(scratch_reg), %r15

/*
 * Get pointer to a thread's hat structure
 */
#define	GET_THREAD_HATP(hatp, thread_t, scratch_reg)	\
	movq	T_PROCP(thread_t), hatp;		\
	movq	P_AS(hatp), scratch_reg;		\
	movq	A_HAT(scratch_reg), hatp
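
/*
 * TSC_READ() calls tsc_read(), which returns the current time-stamp counter
 * value in %rax, and saves that value in %r14 for the compare-and-swap done
 * by STORE_INTR_START() below.
 */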
#define	TSC_READ()					\
	call	tsc_read;				\
	movq	%rax, %r14;

/*
 * If we are resuming an interrupt thread, store a timestamp in the thread
 * structure.  If an interrupt occurs between tsc_read() and its subsequent
 * store, the timestamp will be stale by the time it is stored.  We can detect
 * this by doing a compare-and-swap on the thread's timestamp, since any
 * interrupt occurring in this window will put a new timestamp in the thread's
 * t_intr_start field.
 */
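/*
 * (cmpxchgq compares %rax with the memory operand and, only if they are
 * equal, stores %r14 there; otherwise the current value is loaded into
 * %rax and the loop below retries.)
 */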
#define	STORE_INTR_START(thread_t)			\
	testw	$T_INTR_THREAD, T_FLAGS(thread_t);	\
	jz	1f;					\
0:							\
	TSC_READ();					\
	movq	T_INTR_START(thread_t), %rax;		\
	cmpxchgq %r14, T_INTR_START(thread_t);		\
	jnz	0b;					\
1:

#elif defined (__i386)

/*
 * Save non-volatile registers (%ebp, %esi, %edi and %ebx)
 *
 * The stack frame must be created before the save of %esp so that tracebacks
 * of swtch()ed-out processes show the process as having last called swtch().
 */
#define	SAVE_REGS(thread_t, retaddr)			\
	movl	%ebp, T_EBP(thread_t);			\
	movl	%ebx, T_EBX(thread_t);			\
	movl	%esi, T_ESI(thread_t);			\
	movl	%edi, T_EDI(thread_t);			\
	pushl	%ebp;					\
	movl	%esp, %ebp;				\
	movl	%esp, T_SP(thread_t);			\
	movl	retaddr, T_PC(thread_t);		\
	movl	8(%ebp), %edi;				\
	pushl	%edi;					\
	call	__dtrace_probe___sched_off__cpu;	\
	addl	$CLONGSIZE, %esp

/*
 * Restore non-volatile registers (%ebp, %esi, %edi and %ebx)
 *
 * We don't do a 'leave,' because reloading %esp/%ebp from the label_t
 * already has the effect of putting the stack back the way it was when
 * we came in.
 */
#define	RESTORE_REGS(scratch_reg)			\
	movl	%gs:CPU_THREAD, scratch_reg;		\
	movl	T_EBP(scratch_reg), %ebp;		\
	movl	T_EBX(scratch_reg), %ebx;		\
	movl	T_ESI(scratch_reg), %esi;		\
	movl	T_EDI(scratch_reg), %edi

/*
 * Get pointer to a thread's hat structure
 */
#define	GET_THREAD_HATP(hatp, thread_t, scratch_reg)	\
	movl	T_PROCP(thread_t), hatp;		\
	movl	P_AS(hatp), scratch_reg;		\
	movl	A_HAT(scratch_reg), hatp

/*
 * If we are resuming an interrupt thread, store a timestamp in the thread
 * structure.  If an interrupt occurs between tsc_read() and its subsequent
 * store, the timestamp will be stale by the time it is stored.  We can detect
 * this by doing a compare-and-swap on the thread's timestamp, since any
 * interrupt occurring in this window will put a new timestamp in the thread's
 * t_intr_start field.
 */
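/*
 * (cmpxchg8b compares %edx:%eax with the 8-byte memory operand and, only if
 * they are equal, stores %ecx:%ebx there; the macro below loops until the
 * store succeeds.)
 */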
#define	STORE_INTR_START(thread_t)			\
	testw	$T_INTR_THREAD, T_FLAGS(thread_t);	\
	jz	1f;					\
	pushl	%ecx;					\
0:							\
	pushl	T_INTR_START(thread_t);			\
	pushl	T_INTR_START+4(thread_t);		\
	call	tsc_read;				\
	movl	%eax, %ebx;				\
	movl	%edx, %ecx;				\
	popl	%edx;					\
	popl	%eax;					\
	cmpxchg8b T_INTR_START(thread_t);		\
	jnz	0b;					\
	popl	%ecx;					\
1:

#endif	/* __amd64 */

#if defined(__amd64)
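
/*
 * kpti_enable is defined in C elsewhere in the kernel; resume() tests it
 * below when deciding what value to program into the TSS rsp0 slot for the
 * incoming thread.
 */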
	.global	kpti_enable

	ENTRY(resume)
	movq	%gs:CPU_THREAD, %rax
	leaq	resume_return(%rip), %r11

	/*
	 * Deal with SMAP here. A thread may be switched out at any point while
	 * it is executing. The thread could be under on_fault() or it could be
	 * pre-empted while performing a copy interruption. If this happens and
	 * we're not in the context of an interrupt which happens to handle
	 * saving and restoring rflags correctly, we may lose our SMAP related
	 * state.
	 *
	 * To handle this, as part of being switched out, we first save whether
	 * or not userland access is allowed ($PS_ACHK in rflags) and store that
	 * in t_useracc on the kthread_t and unconditionally enable SMAP to
	 * protect the system.
	 *
	 * Later, when the thread finishes resuming, we potentially disable smap
	 * if PS_ACHK was present in rflags. See uts/intel/ia32/ml/copy.s for
	 * more information on rflags and SMAP.
	 */
	pushfq
	popq	%rsi
	andq	$PS_ACHK, %rsi
	movq	%rsi, T_USERACC(%rax)
	call	smap_enable

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_return.
	 *
	 * %r12 = t (new thread) when done
	 */
	SAVE_REGS(%rax, %r11)

	LOADCPU(%r15)				/* %r15 = CPU */
	movq	CPU_THREAD(%r15), %r13		/* %r13 = curthread */

	/*
	 * Call savectx if thread has installed context ops.
	 *
	 * Note that if we have floating point context, the save op
	 * (either fpsave_begin or fpxsave_begin) will issue the
	 * async save instruction (fnsave or fxsave respectively)
	 * that we fwait for below.
	 */
	cmpq	$0, T_CTX(%r13)		/* should current thread savectx? */
	je	.nosavectx		/* skip call when zero */

	movq	%r13, %rdi		/* arg = thread pointer */
	call	savectx			/* call ctx ops */
.nosavectx:

	/*
	 * Call savepctx if process has installed context ops.
	 */
	movq	T_PROCP(%r13), %r14	/* %r14 = proc */
	cmpq	$0, P_PCTX(%r14)	/* should current thread savepctx? */
	je	.nosavepctx		/* skip call when zero */

	movq	%r14, %rdi		/* arg = proc pointer */
	call	savepctx		/* call ctx ops */
.nosavepctx:

	/*
	 * Temporarily switch to the idle thread's stack
	 */
	movq	CPU_IDLE_THREAD(%r15), %rax	/* idle thread pointer */

	/*
	 * Set the idle thread as the current thread
	 */
	movq	T_SP(%rax), %rsp	/* It is safe to set rsp */
	movq	%rax, CPU_THREAD(%r15)

	/*
	 * Switch in the hat context for the new thread
	 */
	GET_THREAD_HATP(%rdi, %r12, %r11)
	call	hat_switch

	/*
	 * Clear and unlock previous thread's t_lock
	 * to allow it to be dispatched by another processor.
	 */
	movb	$0, T_LOCK(%r13)

	/*
	 * IMPORTANT: Registers at this point must be:
	 *	%r12 = new thread
	 *
	 * Here we are in the idle thread, have dropped the old thread.
	 */
	ALTENTRY(_resume_from_idle)
	/*
	 * spin until dispatched thread's mutex has
	 * been unlocked. this mutex is unlocked when
	 * it becomes safe for the thread to run.
	 */
.lock_thread_mutex:
	lock
	btsl	$0, T_LOCK(%r12)	/* attempt to lock new thread's mutex */
	jnc	.thread_mutex_locked	/* got it */

.spin_thread_mutex:
	pause
	cmpb	$0, T_LOCK(%r12)	/* check mutex status */
	jz	.lock_thread_mutex	/* clear, retry lock */
	jmp	.spin_thread_mutex	/* still locked, spin... */

.thread_mutex_locked:
	/*
	 * Fix CPU structure to indicate new running thread.
	 * Set pointer in new thread to the CPU structure.
	 */
	LOADCPU(%r13)			/* load current CPU pointer */
	cmpq	%r13, T_CPU(%r12)
	je	.setup_cpu

	/* cp->cpu_stats.sys.cpumigrate++ */
	incq	CPU_STATS_SYS_CPUMIGRATE(%r13)
	movq	%r13, T_CPU(%r12)	/* set new thread's CPU pointer */

.setup_cpu:
	/*
	 * Setup rsp0 (kernel stack) in TSS to curthread's saved regs
	 * structure.  If this thread doesn't have a regs structure above
	 * the stack -- that is, if lwp_stk_init() was never called for the
	 * thread -- this will set rsp0 to the wrong value, but it's harmless
	 * as it's a kernel thread, and it won't actually attempt to implicitly
	 * use the rsp0 via a privilege change.
	 *
	 * Note that when we have KPTI enabled on amd64, we never use this
	 * value at all (since all the interrupts have an IST set).
	 */
	movq	CPU_TSS(%r13), %r14
#if !defined(__xpv)
	cmpq	$1, kpti_enable
	jne	1f
	leaq	CPU_KPTI_TR_RSP(%r13), %rax
	jmp	2f
1:
	movq	T_STACK(%r12), %rax
	addq	$REGSIZE+MINFRAME, %rax	/* to the bottom of thread stack */
2:
	movq	%rax, TSS_RSP0(%r14)
#else
	movq	T_STACK(%r12), %rax
	addq	$REGSIZE+MINFRAME, %rax	/* to the bottom of thread stack */
	movl	$KDS_SEL, %edi
	movq	%rax, %rsi
	call	HYPERVISOR_stack_switch
#endif	/* __xpv */

	movq	%r12, CPU_THREAD(%r13)	/* set CPU's thread pointer */
	mfence				/* synchronize with mutex_exit() */
	xorl	%ebp, %ebp		/* make $<threadlist behave better */
	movq	T_LWP(%r12), %rax	/* set associated lwp to */
	movq	%rax, CPU_LWP(%r13)	/* CPU's lwp ptr */

	movq	T_SP(%r12), %rsp	/* switch to resuming thread's stack */
	movq	T_PC(%r12), %r13	/* saved return addr */

	/*
	 * Call restorectx if context ops have been installed.
	 */
	cmpq	$0, T_CTX(%r12)		/* should resumed thread restorectx? */
	jz	.norestorectx		/* skip call when zero */
	movq	%r12, %rdi		/* arg = thread pointer */
	call	restorectx		/* call ctx ops */
.norestorectx:

	/*
	 * Call restorepctx if context ops have been installed for the proc.
	 */
	movq	T_PROCP(%r12), %rcx
	cmpq	$0, P_PCTX(%rcx)
	jz	.norestorepctx
	movq	%rcx, %rdi
	call	restorepctx
.norestorepctx:

	STORE_INTR_START(%r12)

	/*
	 * If we came into swtch with the ability to access userland pages, go
	 * ahead and restore that fact by disabling SMAP.  Clear the indicator
	 * flag out of paranoia.
	 */
	movq	T_USERACC(%r12), %rax	/* should we disable smap? */
	cmpq	$0, %rax		/* skip call when zero */
	jz	.nosmap
	xorq	%rax, %rax
	movq	%rax, T_USERACC(%r12)
	call	smap_disable
.nosmap:

	/*
	 * Restore non-volatile registers, then have spl0 return to the
	 * resuming thread's PC after first setting the priority as low as
	 * possible and blocking all interrupt threads that may be active.
	 */
	movq	%r13, %rax	/* save return address */
	RESTORE_REGS(%r11)
	pushq	%rax		/* push return address for spl0() */
	call	__dtrace_probe___sched_on__cpu
	jmp	spl0

resume_return:
	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addq	$CLONGSIZE, %rsp
	ret
	SET_SIZE(_resume_from_idle)
	SET_SIZE(resume)

#elif defined (__i386)

	ENTRY(resume)
	movl	%gs:CPU_THREAD, %eax
	movl	$resume_return, %ecx

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_return.
	 *
	 * %edi = t (new thread) when done.
	 */
	SAVE_REGS(%eax, %ecx)

	LOADCPU(%ebx)			/* %ebx = CPU */
	movl	CPU_THREAD(%ebx), %esi	/* %esi = curthread */

#ifdef DEBUG
	call	assert_ints_enabled	/* panics if we are cli'd */
#endif
	/*
	 * Call savectx if thread has installed context ops.
	 *
	 * Note that if we have floating point context, the save op
	 * (either fpsave_begin or fpxsave_begin) will issue the
	 * async save instruction (fnsave or fxsave respectively)
	 * that we fwait for below.
	 */
	movl	T_CTX(%esi), %eax	/* should current thread savectx? */
	testl	%eax, %eax
	jz	.nosavectx		/* skip call when zero */
	pushl	%esi			/* arg = thread pointer */
	call	savectx			/* call ctx ops */
	addl	$4, %esp		/* restore stack pointer */
.nosavectx:

	/*
	 * Call savepctx if process has installed context ops.
	 */
	movl	T_PROCP(%esi), %eax	/* %eax = proc */
	cmpl	$0, P_PCTX(%eax)	/* should current thread savepctx? */
	je	.nosavepctx		/* skip call when zero */
	pushl	%eax			/* arg = proc pointer */
	call	savepctx		/* call ctx ops */
	addl	$4, %esp
.nosavepctx:

	/*
	 * Temporarily switch to the idle thread's stack
	 */
	movl	CPU_IDLE_THREAD(%ebx), %eax	/* idle thread pointer */

	/*
	 * Set the idle thread as the current thread
	 */
	movl	T_SP(%eax), %esp	/* It is safe to set esp */
	movl	%eax, CPU_THREAD(%ebx)

	/* switch in the hat context for the new thread */
	GET_THREAD_HATP(%ecx, %edi, %ecx)
	pushl	%ecx
	call	hat_switch
	addl	$4, %esp

	/*
	 * Clear and unlock previous thread's t_lock
	 * to allow it to be dispatched by another processor.
	 */
	movb	$0, T_LOCK(%esi)

	/*
	 * IMPORTANT: Registers at this point must be:
	 *	%edi = new thread
	 *
	 * Here we are in the idle thread, have dropped the old thread.
	 */
	ALTENTRY(_resume_from_idle)
	/*
	 * spin until dispatched thread's mutex has
	 * been unlocked. this mutex is unlocked when
	 * it becomes safe for the thread to run.
	 */
.L4:
	lock
	btsl	$0, T_LOCK(%edi)	/* lock new thread's mutex */
	jc	.L4_2			/* lock did not succeed */

	/*
	 * Fix CPU structure to indicate new running thread.
	 * Set pointer in new thread to the CPU structure.
	 */
	LOADCPU(%esi)			/* load current CPU pointer */
	movl	T_STACK(%edi), %eax	/* here to use v pipeline of */
					/* Pentium. Used few lines below */
	cmpl	%esi, T_CPU(%edi)
	jne	.L5_2
.L5_1:
	/*
	 * Setup esp0 (kernel stack) in TSS to curthread's stack.
	 * (Note: Since we don't have saved 'regs' structure for all
	 * the threads we can't easily determine if we need to
	 * change esp0. So, we simply change the esp0 to bottom
	 * of the thread stack and it will work for all cases.)
	 */
	movl	CPU_TSS(%esi), %ecx
	addl	$REGSIZE+MINFRAME, %eax	/* to the bottom of thread stack */
#if !defined(__xpv)
	movl	%eax, TSS_ESP0(%ecx)
#else
	pushl	%eax
	pushl	$KDS_SEL
	call	HYPERVISOR_stack_switch
	addl	$8, %esp
#endif	/* __xpv */

	movl	%edi, CPU_THREAD(%esi)	/* set CPU's thread pointer */
	mfence				/* synchronize with mutex_exit() */
	xorl	%ebp, %ebp		/* make $<threadlist behave better */
	movl	T_LWP(%edi), %eax	/* set associated lwp to */
	movl	%eax, CPU_LWP(%esi)	/* CPU's lwp ptr */

	movl	T_SP(%edi), %esp	/* switch to resuming thread's stack */
	movl	T_PC(%edi), %esi	/* saved return addr */

	/*
	 * Call restorectx if context ops have been installed.
	 */
	movl	T_CTX(%edi), %eax	/* should resumed thread restorectx? */
	testl	%eax, %eax
	jz	.norestorectx		/* skip call when zero */
	pushl	%edi			/* arg = thread pointer */
	call	restorectx		/* call ctx ops */
	addl	$4, %esp		/* restore stack pointer */
.norestorectx:

	/*
	 * Call restorepctx if context ops have been installed for the proc.
	 */
	movl	T_PROCP(%edi), %eax
	cmpl	$0, P_PCTX(%eax)
	je	.norestorepctx
	pushl	%eax			/* arg = proc pointer */
	call	restorepctx
	addl	$4, %esp		/* restore stack pointer */
.norestorepctx:

	STORE_INTR_START(%edi)

	/*
	 * Restore non-volatile registers, then have spl0 return to the
	 * resuming thread's PC after first setting the priority as low as
	 * possible and blocking all interrupt threads that may be active.
	 */
	movl	%esi, %eax	/* save return address */
	RESTORE_REGS(%ecx)
	pushl	%eax		/* push return address for spl0() */
	call	__dtrace_probe___sched_on__cpu
	jmp	spl0

resume_return:
	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addl	$CLONGSIZE, %esp
	ret
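
	/*
	 * Out-of-line spin loop: wait for the dispatched thread's t_lock to
	 * clear, then jump back to .L4 and retry the locked btsl.
	 */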
.L4_2:
	pause
	cmpb	$0, T_LOCK(%edi)
	je	.L4
	jmp	.L4_2

.L5_2:
	/* cp->cpu_stats.sys.cpumigrate++ */
	addl	$1, CPU_STATS_SYS_CPUMIGRATE(%esi)
	adcl	$0, CPU_STATS_SYS_CPUMIGRATE+4(%esi)
	movl	%esi, T_CPU(%edi)	/* set new thread's CPU pointer */
	jmp	.L5_1

	SET_SIZE(_resume_from_idle)
	SET_SIZE(resume)

#endif	/* __amd64 */

#if defined(__amd64)

	ENTRY(resume_from_zombie)
	movq	%gs:CPU_THREAD, %rax
	leaq	resume_from_zombie_return(%rip), %r11

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_from_zombie_return.
	 *
	 * %r12 = t (new thread) when done
	 */
	SAVE_REGS(%rax, %r11)

	movq	%gs:CPU_THREAD, %r13	/* %r13 = curthread */

	/* clean up the fp unit. It might be left enabled */
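	/*
	 * (Setting CR0.TS here marks the FPU as not in use, so a later FPU
	 * instruction traps (#NM) instead of silently operating on the
	 * zombie's stale state.)
	 */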

#if defined(__xpv)		/* XXPV XXtclayton */
	/*
	 * Remove this after bringup.
	 * (Too many #gp's for an instrumented hypervisor.)
	 */
	STTS(%rax)
#else
	movq	%cr0, %rax
	testq	$CR0_TS, %rax
	jnz	.zfpu_disabled		/* if TS already set, nothing to do */
	fninit				/* init fpu & discard pending error */
	orq	$CR0_TS, %rax
	movq	%rax, %cr0
.zfpu_disabled:

#endif	/* __xpv */

	/*
	 * Temporarily switch to the idle thread's stack so that the zombie
	 * thread's stack can be reclaimed by the reaper.
	 */
	movq	%gs:CPU_IDLE_THREAD, %rax /* idle thread pointer */
	movq	T_SP(%rax), %rsp	/* get onto idle thread stack */

	/*
	 * Sigh. If the idle thread has never run thread_start()
	 * then t_sp is mis-aligned by thread_load().
	 */
	andq	$_BITNOT(STACK_ALIGN-1), %rsp

	/*
	 * Set the idle thread as the current thread.
	 */
	movq	%rax, %gs:CPU_THREAD

	/* switch in the hat context for the new thread */
	GET_THREAD_HATP(%rdi, %r12, %r11)
	call	hat_switch

	/*
	 * Put the zombie on death-row.
	 */
	movq	%r13, %rdi
	call	reapq_add

	jmp	_resume_from_idle	/* finish job of resume */

resume_from_zombie_return:
	RESTORE_REGS(%r11)		/* restore non-volatile registers */
	call	__dtrace_probe___sched_on__cpu

	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addq	$CLONGSIZE, %rsp
	ret
	SET_SIZE(resume_from_zombie)

#elif defined (__i386)

	ENTRY(resume_from_zombie)
	movl	%gs:CPU_THREAD, %eax
	movl	$resume_from_zombie_return, %ecx

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_from_zombie_return.
	 *
	 * %edi = t (new thread) when done.
	 */
	SAVE_REGS(%eax, %ecx)

#ifdef DEBUG
	call	assert_ints_enabled	/* panics if we are cli'd */
#endif
	movl	%gs:CPU_THREAD, %esi	/* %esi = curthread */

	/* clean up the fp unit. It might be left enabled */
	movl	%cr0, %eax
	testl	$CR0_TS, %eax
	jnz	.zfpu_disabled		/* if TS already set, nothing to do */
	fninit				/* init fpu & discard pending error */
	orl	$CR0_TS, %eax
	movl	%eax, %cr0
.zfpu_disabled:

	/*
	 * Temporarily switch to the idle thread's stack so that the zombie
	 * thread's stack can be reclaimed by the reaper.
	 */
	movl	%gs:CPU_IDLE_THREAD, %eax /* idle thread pointer */
	movl	T_SP(%eax), %esp	/* get onto idle thread stack */

	/*
	 * Set the idle thread as the current thread.
	 */
	movl	%eax, %gs:CPU_THREAD

	/*
	 * switch in the hat context for the new thread
	 */
	GET_THREAD_HATP(%ecx, %edi, %ecx)
	pushl	%ecx
	call	hat_switch
	addl	$4, %esp

	/*
	 * Put the zombie on death-row.
	 */
	pushl	%esi
	call	reapq_add
	addl	$4, %esp
	jmp	_resume_from_idle	/* finish job of resume */

resume_from_zombie_return:
	RESTORE_REGS(%ecx)		/* restore non-volatile registers */
	call	__dtrace_probe___sched_on__cpu

	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addl	$CLONGSIZE, %esp
	ret
	SET_SIZE(resume_from_zombie)

#endif	/* __amd64 */

#if defined(__amd64)
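
/*
 * resume_from_intr() resumes a thread that was not switched out by resume():
 * its t_lock is already held, and no savectx/restorectx or hat_switch is
 * needed here (see the block comment at the top of this file).
 */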

	ENTRY(resume_from_intr)
	movq	%gs:CPU_THREAD, %rax
	leaq	resume_from_intr_return(%rip), %r11

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_from_intr_return.
	 *
	 * %r12 = t (new thread) when done
	 */
	SAVE_REGS(%rax, %r11)

	movq	%gs:CPU_THREAD, %r13	/* %r13 = curthread */
	movq	%r12, %gs:CPU_THREAD	/* set CPU's thread pointer */
	mfence				/* synchronize with mutex_exit() */
	movq	T_SP(%r12), %rsp	/* restore resuming thread's sp */
	xorl	%ebp, %ebp		/* make $<threadlist behave better */

	/*
	 * Unlock the outgoing thread's mutex so it can be dispatched
	 * by another processor.
	 */
	xorl	%eax, %eax
	xchgb	%al, T_LOCK(%r13)

	STORE_INTR_START(%r12)

	/*
	 * Restore non-volatile registers, then have spl0 return to the
	 * resuming thread's PC after first setting the priority as low as
	 * possible and blocking all interrupt threads that may be active.
	 */
	movq	T_PC(%r12), %rax	/* saved return addr */
	RESTORE_REGS(%r11)
	pushq	%rax			/* push return address for spl0() */
	call	__dtrace_probe___sched_on__cpu
	jmp	spl0

resume_from_intr_return:
	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addq	$CLONGSIZE, %rsp
	ret
	SET_SIZE(resume_from_intr)

#elif defined (__i386)

	ENTRY(resume_from_intr)
	movl	%gs:CPU_THREAD, %eax
	movl	$resume_from_intr_return, %ecx

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_from_intr_return.
	 *
	 * %edi = t (new thread) when done.
	 */
	SAVE_REGS(%eax, %ecx)

#ifdef DEBUG
	call	assert_ints_enabled	/* panics if we are cli'd */
#endif
	movl	%gs:CPU_THREAD, %esi	/* %esi = curthread */
	movl	%edi, %gs:CPU_THREAD	/* set CPU's thread pointer */
	mfence				/* synchronize with mutex_exit() */
	movl	T_SP(%edi), %esp	/* restore resuming thread's sp */
	xorl	%ebp, %ebp		/* make $<threadlist behave better */

	/*
	 * Unlock the outgoing thread's mutex so it can be dispatched
	 * by another processor.
	 */
	xorl	%eax, %eax
	xchgb	%al, T_LOCK(%esi)

	STORE_INTR_START(%edi)

	/*
	 * Restore non-volatile registers, then have spl0 return to the
	 * resuming thread's PC after first setting the priority as low as
	 * possible and blocking all interrupt threads that may be active.
	 */
	movl	T_PC(%edi), %eax	/* saved return addr */
	RESTORE_REGS(%ecx)
	pushl	%eax			/* push return address for spl0() */
	call	__dtrace_probe___sched_on__cpu
	jmp	spl0

resume_from_intr_return:
	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addl	$CLONGSIZE, %esp
	ret
	SET_SIZE(resume_from_intr)

#endif	/* __amd64 */
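
/*
 * thread_start() is the first code run by a newly created thread: the
 * thread's stack is primed (by thread_load()) so that resume() returns here
 * with the start routine, its argument, and the argument length on the
 * stack.  Pop them, call start(arg, len), and exit the thread if the start
 * routine ever returns.
 */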

#if defined(__amd64)

	ENTRY(thread_start)
	popq	%rax		/* start() */
	popq	%rdi		/* arg */
	popq	%rsi		/* len */
	movq	%rsp, %rbp
	call	*%rax
	call	thread_exit	/* destroy thread if it returns. */
	/*NOTREACHED*/
	SET_SIZE(thread_start)

#elif defined(__i386)

	ENTRY(thread_start)
	popl	%eax		/* start() */
	movl	%esp, %ebp
	addl	$8, %ebp	/* set up frame pointer above arg and len */
	call	*%eax		/* call start(); arg and len are on the stack */
	addl	$8, %esp	/* pop arg and len */
	call	thread_exit	/* destroy thread if it returns. */
	/*NOTREACHED*/
	SET_SIZE(thread_start)

#endif	/* __i386 */