/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Copyright (c) 2018 Joyent, Inc.
 */

/*
 * Process switching routines.
 */
#include <sys/asm_linkage.h>
#include <sys/asm_misc.h>
#include <sys/regset.h>
#include <sys/privregs.h>
#include <sys/stack.h>
#include <sys/segments.h>

#include "assym.h"
/*
 * resume(thread_id_t t);
 *
 * A thread can only run on one processor at a time. There
 * exists a window on MP systems where the current thread on one
 * processor is capable of being dispatched by another processor.
 * Some overlap between outgoing and incoming threads can happen
 * when they are the same thread. In this case where the threads
 * are the same, resume() on one processor will spin on the incoming
 * thread until resume() on the other processor has finished with
 * the outgoing thread.
 *
 * The MMU context changes when the resuming thread resides in a different
 * process. Kernel threads are known by resume to reside in process 0.
 * The MMU context, therefore, only changes when resuming a thread in
 * a process different from curproc.
 *
 * resume_from_intr() is called when the thread being resumed was not
 * passivated by resume (e.g. was interrupted). This means that the
 * resume lock is already held and that a restore context is not needed.
 * Also, the MMU context is not changed on the resume in this case.
 *
 * resume_from_zombie() is the same as resume except the calling thread
 * is a zombie and must be put on the deathrow list after the CPU is
 * off the stack.
 */
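/*
 * In rough C terms, the handoff below does the following (an illustrative
 * sketch only -- the helper names are hypothetical, and the real work must
 * be done in assembly so the stack and registers can be swapped):
 *
 *	void
 *	resume_sketch(kthread_t *t)
 *	{
 *		kthread_t *old = curthread;
 *
 *		save_nonvolatile_regs(old);		(SAVE_REGS)
 *		CPU->cpu_thread = CPU->cpu_idle_thread;	(park on idle stack)
 *		hat_switch(t->t_procp->p_as->a_hat);	(MMU context)
 *		old->t_lock = 0;	(old thread may now be dispatched)
 *		while (!try_lock(&t->t_lock))
 *			;		(spin; t may equal old, see above)
 *		CPU->cpu_thread = t;
 *		restore_nonvolatile_regs(t);		(RESTORE_REGS)
 *	}
 */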
#if LWP_PCB_FPU != 0
#error LWP_PCB_FPU MUST be defined as 0 for code in swtch.s to work
#endif	/* LWP_PCB_FPU != 0 */
/*
 * Save non-volatile regs other than %rsp (%rbx, %rbp, and %r12 - %r15)
 *
 * The stack frame must be created before the save of %rsp so that tracebacks
 * of swtch()ed-out processes show the process as having last called swtch().
 */
#define	SAVE_REGS(thread_t, retaddr)			\
	movq	%rbp, T_RBP(thread_t);			\
	movq	%rbx, T_RBX(thread_t);			\
	movq	%r12, T_R12(thread_t);			\
	movq	%r13, T_R13(thread_t);			\
	movq	%r14, T_R14(thread_t);			\
	movq	%r15, T_R15(thread_t);			\
	movq	%rsp, T_SP(thread_t);			\
	movq	retaddr, T_PC(thread_t);		\
	movq	%rdi, %r12;	/* %r12 = t (new thread) */ \
	call	__dtrace_probe___sched_off__cpu
/*
 * Restore non-volatile regs other than %rsp (%rbx, %rbp, and %r12 - %r15)
 *
 * We load up %rsp from the label_t as part of the context switch, so
 * we don't repeat that here.
 *
 * We don't do a 'leave,' because reloading %rsp/%rbp from the label_t
 * already has the effect of putting the stack back the way it was when
 * we came in.
 */
#define	RESTORE_REGS(scratch_reg)			\
	movq	%gs:CPU_THREAD, scratch_reg;		\
	movq	T_RBP(scratch_reg), %rbp;		\
	movq	T_RBX(scratch_reg), %rbx;		\
	movq	T_R12(scratch_reg), %r12;		\
	movq	T_R13(scratch_reg), %r13;		\
	movq	T_R14(scratch_reg), %r14;		\
	movq	T_R15(scratch_reg), %r15
/*
 * Get pointer to a thread's hat structure
 */
#define	GET_THREAD_HATP(hatp, thread_t, scratch_reg)	\
	movq	T_PROCP(thread_t), hatp;		\
	movq	P_AS(hatp), scratch_reg;		\
	movq	A_HAT(scratch_reg), hatp
/*
 * Read the TSC into %r14 (tsc_read() returns the timestamp in %rax).
 */
#define	TSC_READ()			\
	call	tsc_read;		\
	movq	%rax, %r14;

/*
 * If we are resuming an interrupt thread, store a timestamp in the thread
 * structure. If an interrupt occurs between tsc_read() and its subsequent
 * store, the timestamp will be stale by the time it is stored. We can detect
 * this by doing a compare-and-swap on the thread's timestamp, since any
 * interrupt occurring in this window will put a new timestamp in the thread's
 * t_intr_start field.
 */
#define	STORE_INTR_START(thread_t)			\
	testw	$T_INTR_THREAD, T_FLAGS(thread_t);	\
	jz	1f;					\
0:							\
	TSC_READ();					\
	movq	T_INTR_START(thread_t), %rax;		\
	cmpxchgq %r14, T_INTR_START(thread_t);		\
	jnz	0b;					\
1:
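/*
 * In effect, the macro above is the following C loop (an illustrative
 * sketch only; atomic_cas_64() stands in for the cmpxchgq):
 *
 *	if (t->t_flags & T_INTR_THREAD) {
 *		uint64_t old, new;
 *		do {
 *			new = (uint64_t)tsc_read();
 *			old = (uint64_t)t->t_intr_start;
 *		} while (atomic_cas_64((uint64_t *)&t->t_intr_start,
 *		    old, new) != old);
 *	}
 */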
#elif defined (__i386)
/*
 * Save non-volatile registers (%ebp, %esi, %edi and %ebx)
 *
 * The stack frame must be created before the save of %esp so that tracebacks
 * of swtch()ed-out processes show the process as having last called swtch().
 */
#define	SAVE_REGS(thread_t, retaddr)			\
	movl	%ebp, T_EBP(thread_t);			\
	movl	%ebx, T_EBX(thread_t);			\
	movl	%esi, T_ESI(thread_t);			\
	movl	%edi, T_EDI(thread_t);			\
	movl	%esp, T_SP(thread_t);			\
	movl	retaddr, T_PC(thread_t);		\
	movl	8(%ebp), %edi;	/* %edi = t (new thread) */ \
	pushl	%edi;		/* arg for DTrace probe */ \
	call	__dtrace_probe___sched_off__cpu;	\
	addl	$CLONGSIZE, %esp
/*
 * Restore non-volatile registers (%ebp, %esi, %edi and %ebx)
 *
 * We don't do a 'leave,' because reloading %esp/%ebp from the label_t
 * already has the effect of putting the stack back the way it was when
 * we came in.
 */
#define	RESTORE_REGS(scratch_reg)			\
	movl	%gs:CPU_THREAD, scratch_reg;		\
	movl	T_EBP(scratch_reg), %ebp;		\
	movl	T_EBX(scratch_reg), %ebx;		\
	movl	T_ESI(scratch_reg), %esi;		\
	movl	T_EDI(scratch_reg), %edi
/*
 * Get pointer to a thread's hat structure
 */
#define	GET_THREAD_HATP(hatp, thread_t, scratch_reg)	\
	movl	T_PROCP(thread_t), hatp;		\
	movl	P_AS(hatp), scratch_reg;		\
	movl	A_HAT(scratch_reg), hatp
/*
 * If we are resuming an interrupt thread, store a timestamp in the thread
 * structure. If an interrupt occurs between tsc_read() and its subsequent
 * store, the timestamp will be stale by the time it is stored. We can detect
 * this by doing a compare-and-swap on the thread's timestamp, since any
 * interrupt occurring in this window will put a new timestamp in the thread's
 * t_intr_start field.
 */
#define	STORE_INTR_START(thread_t)			\
	testw	$T_INTR_THREAD, T_FLAGS(thread_t);	\
	jz	1f;					\
	pushl	%ecx;					\
0:							\
	pushl	T_INTR_START(thread_t);			\
	pushl	T_INTR_START+4(thread_t);		\
	call	tsc_read;				\
	movl	%eax, %ebx;	/* %ecx:%ebx = new timestamp */ \
	movl	%edx, %ecx;				\
	popl	%edx;		/* %edx:%eax = old timestamp */ \
	popl	%eax;					\
	cmpxchg8b T_INTR_START(thread_t);		\
	jnz	0b;					\
	popl	%ecx;					\
1:
	ENTRY(resume)
	movq	%gs:CPU_THREAD, %rax
	leaq	resume_return(%rip), %r11
	/*
	 * Deal with SMAP here. A thread may be switched out at any point while
	 * it is executing. The thread could be under on_fault() or it could be
	 * pre-empted while performing a copy interruption. If this happens and
	 * we're not in the context of an interrupt which happens to handle
	 * saving and restoring rflags correctly, we may lose our SMAP related
	 * state.
	 *
	 * To handle this, as part of being switched out, we first save whether
	 * or not userland access is allowed ($PS_ACHK in rflags) and store that
	 * in t_useracc on the kthread_t and unconditionally enable SMAP to
	 * protect the system.
	 *
	 * Later, when the thread finishes resuming, we potentially disable smap
	 * if PS_ACHK was present in rflags. See uts/intel/ia32/ml/copy.s for
	 * more information on rflags and SMAP.
	 */
	pushfq
	popq	%rsi
	andq	$PS_ACHK, %rsi		/* was userland access allowed? */
	movq	%rsi, T_USERACC(%rax)	/* remember it in t_useracc */
	call	smap_enable		/* force SMAP on while off-CPU */
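	/*
	 * In C terms (sketch): t->t_useracc = rflags & PS_ACHK, followed by
	 * smap_enable() so SMAP is unconditionally on while we are off-CPU.
	 */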
	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_return.
	 *
	 * %r12 = t (new thread) when done
	 */
	SAVE_REGS(%rax, %r11)
	LOADCPU(%r15)				/* %r15 = CPU */
	movq	CPU_THREAD(%r15), %r13		/* %r13 = curthread */
	/*
	 * Call savectx if thread has installed context ops.
	 *
	 * Note that if we have floating point context, the save op
	 * (either fpsave_begin or fpxsave_begin) will issue the
	 * async save instruction (fnsave or fxsave respectively)
	 * that we fwait for below.
	 */
	cmpq	$0, T_CTX(%r13)		/* should current thread savectx? */
	je	.nosavectx		/* skip call when zero */

	movq	%r13, %rdi		/* arg = thread pointer */
	call	savectx			/* call ctx ops */
.nosavectx:
	/*
	 * Call savepctx if process has installed context ops.
	 */
	movq	T_PROCP(%r13), %r14	/* %r14 = proc */
	cmpq	$0, P_PCTX(%r14)	/* should current thread savepctx? */
	je	.nosavepctx		/* skip call when zero */

	movq	%r14, %rdi		/* arg = proc pointer */
	call	savepctx		/* call ctx ops */
.nosavepctx:
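	/*
	 * The two blocks above are simply (C sketch):
	 *
	 *	if (curthread->t_ctx != NULL)
	 *		savectx(curthread);
	 *	if (ttoproc(curthread)->p_pctx != NULL)
	 *		savepctx(ttoproc(curthread));
	 */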
	/*
	 * Temporarily switch to the idle thread's stack
	 */
	movq	CPU_IDLE_THREAD(%r15), %rax	/* idle thread pointer */

	/*
	 * Set the idle thread as the current thread
	 */
	movq	T_SP(%rax), %rsp	/* It is safe to set rsp */
	movq	%rax, CPU_THREAD(%r15)
	/*
	 * Switch in the hat context for the new thread
	 */
	GET_THREAD_HATP(%rdi, %r12, %r11)
	call	hat_switch
	/*
	 * Clear and unlock previous thread's t_lock
	 * to allow it to be dispatched by another processor.
	 */
	movb	$0, T_LOCK(%r13)
	/*
	 * IMPORTANT: Registers at this point must be:
	 *	%r12 = new thread
	 *
	 * Here we are in the idle thread, have dropped the old thread.
	 */
	ALTENTRY(_resume_from_idle)
	/*
	 * spin until dispatched thread's mutex has
	 * been unlocked. this mutex is unlocked when
	 * it becomes safe for the thread to run.
	 */
.lock_thread_mutex:
	lock
	btsl	$0, T_LOCK(%r12)	/* attempt to lock new thread's mutex */
	jnc	.thread_mutex_locked	/* got it */

.spin_thread_mutex:
	pause
	cmpb	$0, T_LOCK(%r12)	/* check mutex status */
	jz	.lock_thread_mutex	/* clear, retry lock */
	jmp	.spin_thread_mutex	/* still locked, spin... */

.thread_mutex_locked:
	/*
	 * Fix CPU structure to indicate new running thread.
	 * Set pointer in new thread to the CPU structure.
	 */
	LOADCPU(%r13)			/* load current CPU pointer */
	cmpq	%r13, T_CPU(%r12)
	je	.setup_cpu

	/* cp->cpu_stats.sys.cpumigrate++ */
	incq	CPU_STATS_SYS_CPUMIGRATE(%r13)
	movq	%r13, T_CPU(%r12)	/* set new thread's CPU pointer */

.setup_cpu:
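	/*
	 * Equivalent C (sketch):
	 *
	 *	if (t->t_cpu != cp) {
	 *		cp->cpu_stats.sys.cpumigrate++;
	 *		t->t_cpu = cp;
	 *	}
	 */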
	/*
	 * Setup rsp0 (kernel stack) in TSS to curthread's saved regs
	 * structure. If this thread doesn't have a regs structure above
	 * the stack -- that is, if lwp_stk_init() was never called for the
	 * thread -- this will set rsp0 to the wrong value, but it's harmless
	 * as it's a kernel thread, and it won't actually attempt to implicitly
	 * use the rsp0 via a privilege change.
	 *
	 * Note that when we have KPTI enabled on amd64, we never use this
	 * value at all (since all the interrupts have an IST set).
	 */
	movq	CPU_TSS(%r13), %r14
#if !defined(__xpv)
	cmpq	$1, kpti_enable
	jne	1f
	leaq	CPU_KPTI_TR_RSP(%r13), %rax
	jmp	2f
1:
	movq	T_STACK(%r12), %rax
	addq	$REGSIZE+MINFRAME, %rax	/* to the bottom of thread stack */
2:
	movq	%rax, TSS_RSP0(%r14)
#else
	movq	T_STACK(%r12), %rax
	addq	$REGSIZE+MINFRAME, %rax	/* to the bottom of thread stack */
	movl	$KDS_SEL, %edi		/* arg0: kernel stack segment */
	movq	%rax, %rsi		/* arg1: new stack pointer */
	call	HYPERVISOR_stack_switch
#endif	/* __xpv */
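	/*
	 * In C terms, the non-xpv, non-KPTI case above is (sketch):
	 *
	 *	cp->cpu_tss->tss_rsp0 =
	 *	    (uintptr_t)t->t_stk + REGSIZE + MINFRAME;
	 */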
	movq	%r12, CPU_THREAD(%r13)	/* set CPU's thread pointer */
	mfence				/* synchronize with mutex_exit() */
	xorl	%ebp, %ebp		/* make $<threadlist behave better */
	movq	T_LWP(%r12), %rax	/* set associated lwp to */
	movq	%rax, CPU_LWP(%r13)	/* CPU's lwp ptr */

	movq	T_SP(%r12), %rsp	/* switch to new thread's stack */
	movq	T_PC(%r12), %r13	/* saved return addr */
	/*
	 * Call restorectx if context ops have been installed.
	 */
	cmpq	$0, T_CTX(%r12)		/* should resumed thread restorectx? */
	jz	.norestorectx		/* skip call when zero */

	movq	%r12, %rdi		/* arg = thread pointer */
	call	restorectx		/* call ctx ops */
.norestorectx:
	/*
	 * Call restorepctx if context ops have been installed for the proc.
	 */
	movq	T_PROCP(%r12), %rcx
	cmpq	$0, P_PCTX(%rcx)
	jz	.norestorepctx		/* skip call when zero */

	movq	%rcx, %rdi		/* arg = proc pointer */
	call	restorepctx		/* call ctx ops */
.norestorepctx:

	STORE_INTR_START(%r12)
	/*
	 * If we came into swtch with the ability to access userland pages, go
	 * ahead and restore that fact by disabling SMAP. Clear the indicator
	 * flag out of paranoia.
	 */
	movq	T_USERACC(%r12), %rax	/* should we disable smap? */
	cmpq	$0, %rax		/* skip call when zero */
	jz	.nosmap

	xorq	%rax, %rax
	movq	%rax, T_USERACC(%r12)	/* clear the indicator flag */
	call	smap_disable
.nosmap:
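	/*
	 * C sketch: if (t->t_useracc) { t->t_useracc = 0; smap_disable(); }
	 */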
	/*
	 * Restore non-volatile registers, then have spl0 return to the
	 * resuming thread's PC after first setting the priority as low as
	 * possible and blocking all interrupt threads that may be active.
	 */
	movq	%r13, %rax		/* save return address */
	RESTORE_REGS(%r11)
	pushq	%rax			/* push return address for spl0() */
	call	__dtrace_probe___sched_on__cpu

	jmp	spl0

resume_return:
	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addq	$CLONGSIZE, %rsp
	ret
	SET_SIZE(_resume_from_idle)
	SET_SIZE(resume)
#elif defined (__i386)
	ENTRY(resume)
	movl	%gs:CPU_THREAD, %eax
	movl	$resume_return, %ecx

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_return.
	 *
	 * %edi = t (new thread) when done.
	 */
	SAVE_REGS(%eax, %ecx)
	LOADCPU(%ebx)			/* %ebx = CPU */
	movl	CPU_THREAD(%ebx), %esi	/* %esi = curthread */

#ifdef DEBUG
	call	assert_ints_enabled	/* panics if we are cli'd */
#endif
	/*
	 * Call savectx if thread has installed context ops.
	 *
	 * Note that if we have floating point context, the save op
	 * (either fpsave_begin or fpxsave_begin) will issue the
	 * async save instruction (fnsave or fxsave respectively)
	 * that we fwait for below.
	 */
	movl	T_CTX(%esi), %eax	/* should current thread savectx? */
	testl	%eax, %eax
	jz	.nosavectx		/* skip call when zero */

	pushl	%esi			/* arg = thread pointer */
	call	savectx			/* call ctx ops */
	addl	$4, %esp		/* restore stack pointer */
.nosavectx:
	/*
	 * Call savepctx if process has installed context ops.
	 */
	movl	T_PROCP(%esi), %eax	/* %eax = proc */
	cmpl	$0, P_PCTX(%eax)	/* should current thread savepctx? */
	je	.nosavepctx		/* skip call when zero */

	pushl	%eax			/* arg = proc pointer */
	call	savepctx		/* call ctx ops */
	addl	$4, %esp		/* restore stack pointer */
.nosavepctx:
	/*
	 * Temporarily switch to the idle thread's stack
	 */
	movl	CPU_IDLE_THREAD(%ebx), %eax	/* idle thread pointer */

	/*
	 * Set the idle thread as the current thread
	 */
	movl	T_SP(%eax), %esp	/* It is safe to set esp */
	movl	%eax, CPU_THREAD(%ebx)
	/* switch in the hat context for the new thread */
	GET_THREAD_HATP(%ecx, %edi, %ecx)
	pushl	%ecx
	call	hat_switch
	addl	$4, %esp
	/*
	 * Clear and unlock previous thread's t_lock
	 * to allow it to be dispatched by another processor.
	 */
	movb	$0, T_LOCK(%esi)
	/*
	 * IMPORTANT: Registers at this point must be:
	 *	%edi = new thread
	 *
	 * Here we are in the idle thread, have dropped the old thread.
	 */
	ALTENTRY(_resume_from_idle)
	/*
	 * spin until dispatched thread's mutex has
	 * been unlocked. this mutex is unlocked when
	 * it becomes safe for the thread to run.
	 */
.L4:
	lock
	btsl	$0, T_LOCK(%edi)	/* lock new thread's mutex */
	jc	.L4_2			/* lock did not succeed */
	/*
	 * Fix CPU structure to indicate new running thread.
	 * Set pointer in new thread to the CPU structure.
	 */
	LOADCPU(%esi)			/* load current CPU pointer */
	movl	T_STACK(%edi), %eax	/* here to use v pipeline of */
					/* Pentium. Used few lines below */
	cmpl	%esi, T_CPU(%edi)
	jne	.L5_2			/* thread is migrating, fix t_cpu */
.L5_1:
	/*
	 * Setup esp0 (kernel stack) in TSS to curthread's stack.
	 * (Note: Since we don't have saved 'regs' structure for all
	 * the threads we can't easily determine if we need to
	 * change esp0. So, we simply change the esp0 to bottom
	 * of the thread stack and it will work for all cases.)
	 */
	movl	CPU_TSS(%esi), %ecx
	addl	$REGSIZE+MINFRAME, %eax	/* to the bottom of thread stack */
#if !defined(__xpv)
	movl	%eax, TSS_ESP0(%ecx)
#else
	pushl	%eax			/* arg1: new stack pointer */
	pushl	$KDS_SEL		/* arg0: kernel stack segment */
	call	HYPERVISOR_stack_switch
	addl	$8, %esp
#endif	/* __xpv */
554 call HYPERVISOR_stack_switch
558 movl
%edi
, CPU_THREAD
(%esi
) /* set CPU's thread pointer */
559 mfence
/* synchronize with mutex_exit() */
560 xorl
%ebp
, %ebp
/* make $<threadlist behave better */
561 movl T_LWP
(%edi
), %eax
/* set associated lwp to */
562 movl
%eax
, CPU_LWP
(%esi
) /* CPU's lwp ptr */
564 movl T_SP
(%edi
), %esp
/* switch to outgoing thread's stack */
565 movl T_PC
(%edi
), %esi
/* saved return addr */
	/*
	 * Call restorectx if context ops have been installed.
	 */
	movl	T_CTX(%edi), %eax	/* should resumed thread restorectx? */
	testl	%eax, %eax
	jz	.norestorectx		/* skip call when zero */

	pushl	%edi			/* arg = thread pointer */
	call	restorectx		/* call ctx ops */
	addl	$4, %esp		/* restore stack pointer */
.norestorectx:
	/*
	 * Call restorepctx if context ops have been installed for the proc.
	 */
	movl	T_PROCP(%edi), %eax
	cmpl	$0, P_PCTX(%eax)
	je	.norestorepctx		/* skip call when zero */

	pushl	%eax			/* arg = proc pointer */
	call	restorepctx		/* call ctx ops */
	addl	$4, %esp		/* restore stack pointer */
.norestorepctx:
	STORE_INTR_START(%edi)
	/*
	 * Restore non-volatile registers, then have spl0 return to the
	 * resuming thread's PC after first setting the priority as low as
	 * possible and blocking all interrupt threads that may be active.
	 */
	movl	%esi, %eax		/* save return address */
	RESTORE_REGS(%ecx)
	pushl	%eax			/* push return address for spl0() */
	call	__dtrace_probe___sched_on__cpu

	jmp	spl0

resume_return:
	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addl	$CLONGSIZE, %esp
	ret
.L4_2:
	pause
	cmpb	$0, T_LOCK(%edi)	/* wait for the mutex to clear */
	jz	.L4			/* clear, retry lock */
	jmp	.L4_2			/* still locked, spin... */

.L5_2:
	/* cp->cpu_stats.sys.cpumigrate++ */
	addl	$1, CPU_STATS_SYS_CPUMIGRATE(%esi)
	adcl	$0, CPU_STATS_SYS_CPUMIGRATE+4(%esi)
	movl	%esi, T_CPU(%edi)	/* set new thread's CPU pointer */
	jmp	.L5_1

	SET_SIZE(_resume_from_idle)
	SET_SIZE(resume)
	ENTRY(resume_from_zombie)
	movq	%gs:CPU_THREAD, %rax
	leaq	resume_from_zombie_return(%rip), %r11

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_from_zombie_return.
	 *
	 * %r12 = t (new thread) when done
	 */
	SAVE_REGS(%rax, %r11)

	movq	%gs:CPU_THREAD, %r13	/* %r13 = curthread */
	/* clean up the fp unit. It might be left enabled */

#if defined(__xpv)		/* XXPV XXtclayton */
	/*
	 * Remove this after bringup.
	 * (Too many #gp's for an instrumented hypervisor.)
	 */
	STTS(%rax)
#else
	movq	%cr0, %rax
	testq	$CR0_TS, %rax
	jnz	.zfpu_disabled		/* if TS already set, nothing to do */
	fninit				/* init fpu & discard pending error */
	orq	$CR0_TS, %rax
	movq	%rax, %cr0
.zfpu_disabled:
#endif	/* __xpv */
	/*
	 * Temporarily switch to the idle thread's stack so that the zombie
	 * thread's stack can be reclaimed by the reaper.
	 */
	movq	%gs:CPU_IDLE_THREAD, %rax	/* idle thread pointer */
	movq	T_SP(%rax), %rsp	/* get onto idle thread stack */

	/*
	 * Sigh. If the idle thread has never run thread_start()
	 * then t_sp is mis-aligned by thread_load().
	 */
	andq	$_BITNOT(STACK_ALIGN-1), %rsp
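	/*
	 * i.e. rsp &= ~(STACK_ALIGN - 1), forcing STACK_ALIGN alignment.
	 */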
	/*
	 * Set the idle thread as the current thread.
	 */
	movq	%rax, %gs:CPU_THREAD

	/* switch in the hat context for the new thread */
	GET_THREAD_HATP(%rdi, %r12, %r11)
	call	hat_switch
	/*
	 * Put the zombie on death-row.
	 */
	movq	%r13, %rdi		/* arg = zombie thread */
	call	reapq_add

	jmp	_resume_from_idle	/* finish job of resume */
resume_from_zombie_return:
	RESTORE_REGS(%r11)		/* restore non-volatile registers */
	call	__dtrace_probe___sched_on__cpu

	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addq	$CLONGSIZE, %rsp
	ret
	SET_SIZE(resume_from_zombie)
#elif defined (__i386)
	ENTRY(resume_from_zombie)
	movl	%gs:CPU_THREAD, %eax
	movl	$resume_from_zombie_return, %ecx

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_from_zombie_return.
	 *
	 * %edi = t (new thread) when done.
	 */
	SAVE_REGS(%eax, %ecx)

#ifdef DEBUG
	call	assert_ints_enabled	/* panics if we are cli'd */
#endif
	movl	%gs:CPU_THREAD, %esi	/* %esi = curthread */
	/* clean up the fp unit. It might be left enabled */

	movl	%cr0, %eax
	testl	$CR0_TS, %eax
	jnz	.zfpu_disabled		/* if TS already set, nothing to do */
	fninit				/* init fpu & discard pending error */
	orl	$CR0_TS, %eax
	movl	%eax, %cr0
.zfpu_disabled:
	/*
	 * Temporarily switch to the idle thread's stack so that the zombie
	 * thread's stack can be reclaimed by the reaper.
	 */
	movl	%gs:CPU_IDLE_THREAD, %eax	/* idle thread pointer */
	movl	T_SP(%eax), %esp	/* get onto idle thread stack */

	/*
	 * Set the idle thread as the current thread.
	 */
	movl	%eax, %gs:CPU_THREAD

	/*
	 * switch in the hat context for the new thread
	 */
	GET_THREAD_HATP(%ecx, %edi, %ecx)
	pushl	%ecx
	call	hat_switch
	addl	$4, %esp
	/*
	 * Put the zombie on death-row.
	 */
	pushl	%esi			/* arg = zombie thread */
	call	reapq_add
	addl	$4, %esp

	jmp	_resume_from_idle	/* finish job of resume */
resume_from_zombie_return:
	RESTORE_REGS(%ecx)		/* restore non-volatile registers */
	call	__dtrace_probe___sched_on__cpu

	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addl	$CLONGSIZE, %esp
	ret
	SET_SIZE(resume_from_zombie)
	ENTRY(resume_from_intr)
	movq	%gs:CPU_THREAD, %rax
	leaq	resume_from_intr_return(%rip), %r11

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_from_intr_return.
	 *
	 * %r12 = t (new thread) when done
	 */
	SAVE_REGS(%rax, %r11)

	movq	%gs:CPU_THREAD, %r13	/* %r13 = curthread */
	movq	%r12, %gs:CPU_THREAD	/* set CPU's thread pointer */
	mfence				/* synchronize with mutex_exit() */
	movq	T_SP(%r12), %rsp	/* restore resuming thread's sp */
	xorl	%ebp, %ebp		/* make $<threadlist behave better */
	/*
	 * Unlock the outgoing thread's mutex so it can be
	 * dispatched by another processor.
	 */
	xorl	%eax, %eax
	xchgb	%al, T_LOCK(%r13)
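	/*
	 * The xchgb is implicitly locked, so it also acts as a full memory
	 * barrier: all of this thread's prior stores are visible before
	 * T_LOCK is seen as clear by another processor.
	 */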
	STORE_INTR_START(%r12)
	/*
	 * Restore non-volatile registers, then have spl0 return to the
	 * resuming thread's PC after first setting the priority as low as
	 * possible and blocking all interrupt threads that may be active.
	 */
	movq	T_PC(%r12), %rax	/* saved return addr */
	RESTORE_REGS(%r11)
	pushq	%rax			/* push return address for spl0() */
	call	__dtrace_probe___sched_on__cpu

	jmp	spl0

resume_from_intr_return:
	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addq	$CLONGSIZE, %rsp
	ret
	SET_SIZE(resume_from_intr)
#elif defined (__i386)
	ENTRY(resume_from_intr)
	movl	%gs:CPU_THREAD, %eax
	movl	$resume_from_intr_return, %ecx

	/*
	 * Save non-volatile registers, and set return address for current
	 * thread to resume_from_intr_return.
	 *
	 * %edi = t (new thread) when done.
	 */
	SAVE_REGS(%eax, %ecx)

#ifdef DEBUG
	call	assert_ints_enabled	/* panics if we are cli'd */
#endif
	movl	%gs:CPU_THREAD, %esi	/* %esi = curthread */
	movl	%edi, %gs:CPU_THREAD	/* set CPU's thread pointer */
	mfence				/* synchronize with mutex_exit() */
	movl	T_SP(%edi), %esp	/* restore resuming thread's sp */
	xorl	%ebp, %ebp		/* make $<threadlist behave better */
	/*
	 * Unlock the outgoing thread's mutex so it can be
	 * dispatched by another processor.
	 */
	xorl	%eax, %eax
	xchgb	%al, T_LOCK(%esi)
	STORE_INTR_START(%edi)
	/*
	 * Restore non-volatile registers, then have spl0 return to the
	 * resuming thread's PC after first setting the priority as low as
	 * possible and blocking all interrupt threads that may be active.
	 */
	movl	T_PC(%edi), %eax	/* saved return addr */
	RESTORE_REGS(%ecx)
	pushl	%eax			/* push return address for spl0() */
	call	__dtrace_probe___sched_on__cpu

	jmp	spl0

resume_from_intr_return:
	/*
	 * Remove stack frame created in SAVE_REGS()
	 */
	addl	$CLONGSIZE, %esp
	ret
	SET_SIZE(resume_from_intr)
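/*
 * thread_start() is where a newly created thread begins life: thread_load()
 * sets the new thread's saved PC here and arranges its stack to hold start(),
 * arg and len, so in C terms the effect is simply (sketch):
 *
 *	(*start)(arg, len);
 *	thread_exit();
 */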
	ENTRY(thread_start)
	popq	%rax		/* start() */
	popq	%rdi		/* arg */
	popq	%rsi		/* len */
	movq	%rsp, %rbp
	call	*%rax		/* call start(arg, len) */
	call	thread_exit	/* destroy thread if it returns. */
	/*NOTREACHED*/
	SET_SIZE(thread_start)
#elif defined(__i386)

	ENTRY(thread_start)
	popl	%eax		/* start() */
	movl	%esp, %ebp	/* arg and len are already on the stack */
	call	*%eax		/* call start(arg, len) */
	addl	$8, %esp
	call	thread_exit	/* destroy thread if it returns. */
	/*NOTREACHED*/
	SET_SIZE(thread_start)