1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
7 // This file contains the implementation of Go select statements.
10 "runtime/internal/sys"
14 // For gccgo, use go:linkname to rename compiler-called functions to
15 // themselves, so that the compiler will export them.
17 //go:linkname newselect runtime.newselect
18 //go:linkname selectdefault runtime.selectdefault
19 //go:linkname selectsend runtime.selectsend
20 //go:linkname selectrecv runtime.selectrecv
21 //go:linkname selectgo runtime.selectgo
// debugSelect enables verbose print tracing of select operations
// (guards the print calls scattered through this file).
const debugSelect = false
// Select statement header.
// Changes here must also be made in src/cmd/internal/gc/select.go's selecttype.
// The scase array is declared with length 1 but is allocated with room for
// tcase entries followed by the lockorder and pollorder arrays (see selectsize).
type hselect struct {
	tcase     uint16   // total count of scase[]
	ncase     uint16   // currently filled scase[]
	pollorder *uint16  // case poll order
	lockorder *uint16  // channel lock order
	scase     [1]scase // one per case (in order of appearance)
}
// Select case descriptor.
// Changes here must also be made in src/cmd/internal/gc/select.go's selecttype.
// NOTE(review): the c, kind, and releasetime fields were reconstructed from
// their uses elsewhere in this file (cas.c, cas.kind, releasetime) — confirm
// field order against the compiler's selecttype before relying on layout.
type scase struct {
	elem        unsafe.Pointer // data element
	c           *hchan         // chan
	pc          uintptr        // return pc (for race detector / msan)
	kind        uint16         // caseNil, caseRecv, caseSend, or caseDefault
	receivedp   *bool          // pointer to received bool, if any
	releasetime int64          // timestamp for blocking profile; -1 requests one
}
// PCs of the channel send/receive entry points, reported to the race
// detector as the "caller" of select-driven channel operations.
var (
	chansendpc = funcPC(chansend)
	chanrecvpc = funcPC(chanrecv)
)
61 func selectsize(size
uintptr) uintptr {
62 selsize
:= unsafe
.Sizeof(hselect
{}) +
63 (size
-1)*unsafe
.Sizeof(hselect
{}.scase
[0]) +
64 size
*unsafe
.Sizeof(*hselect
{}.lockorder
) +
65 size
*unsafe
.Sizeof(*hselect
{}.pollorder
)
66 return round(selsize
, sys
.Int64Align
)
69 func newselect(sel
*hselect
, selsize
int64, size
int32) {
70 if selsize
!= int64(selectsize(uintptr(size
))) {
71 print("runtime: bad select size ", selsize
, ", want ", selectsize(uintptr(size
)), "\n")
72 throw("bad select size")
74 if size
!= int32(uint16(size
)) {
75 throw("select size too large")
77 sel
.tcase
= uint16(size
)
79 sel
.lockorder
= (*uint16)(add(unsafe
.Pointer(&sel
.scase
), uintptr(size
)*unsafe
.Sizeof(hselect
{}.scase
[0])))
80 sel
.pollorder
= (*uint16)(add(unsafe
.Pointer(sel
.lockorder
), uintptr(size
)*unsafe
.Sizeof(*hselect
{}.lockorder
)))
82 // For gccgo the temporary variable will not have been zeroed.
83 memclrNoHeapPointers(unsafe
.Pointer(&sel
.scase
), uintptr(size
)*unsafe
.Sizeof(hselect
{}.scase
[0])+uintptr(size
)*unsafe
.Sizeof(*hselect
{}.lockorder
)+uintptr(size
)*unsafe
.Sizeof(*hselect
{}.pollorder
))
86 print("newselect s=", sel
, " size=", size
, "\n")
90 func selectsend(sel
*hselect
, c
*hchan
, elem unsafe
.Pointer
) {
91 pc
:= getcallerpc(unsafe
.Pointer(&sel
))
94 throw("selectsend: too many cases")
100 cas
:= (*scase
)(add(unsafe
.Pointer(&sel
.scase
), uintptr(i
)*unsafe
.Sizeof(sel
.scase
[0])))
107 print("selectsend s=", sel
, " pc=", hex(cas
.pc
), " chan=", cas
.c
, "\n")
111 func selectrecv(sel
*hselect
, c
*hchan
, elem unsafe
.Pointer
, received
*bool) {
112 pc
:= getcallerpc(unsafe
.Pointer(&sel
))
115 throw("selectrecv: too many cases")
121 cas
:= (*scase
)(add(unsafe
.Pointer(&sel
.scase
), uintptr(i
)*unsafe
.Sizeof(sel
.scase
[0])))
126 cas
.receivedp
= received
129 print("selectrecv s=", sel
, " pc=", hex(cas
.pc
), " chan=", cas
.c
, "\n")
133 func selectdefault(sel
*hselect
) {
134 pc
:= getcallerpc(unsafe
.Pointer(&sel
))
137 throw("selectdefault: too many cases")
140 cas
:= (*scase
)(add(unsafe
.Pointer(&sel
.scase
), uintptr(i
)*unsafe
.Sizeof(sel
.scase
[0])))
143 cas
.kind
= caseDefault
146 print("selectdefault s=", sel
, " pc=", hex(cas
.pc
), "\n")
150 func sellock(scases
[]scase
, lockorder
[]uint16) {
152 for _
, o
:= range lockorder
{
154 if c0
!= nil && c0
!= c
{
161 func selunlock(scases
[]scase
, lockorder
[]uint16) {
162 // We must be very careful here to not touch sel after we have unlocked
163 // the last lock, because sel can be freed right after the last unlock.
164 // Consider the following situation.
165 // First M calls runtime·park() in runtime·selectgo() passing the sel.
166 // Once runtime·park() has unlocked the last lock, another M makes
167 // the G that calls select runnable again and schedules it for execution.
168 // When the G runs on another M, it locks all the locks and frees sel.
169 // Now if the first M touches sel, it will access freed memory.
170 for i
:= len(scases
) - 1; i
>= 0; i
-- {
171 c
:= scases
[lockorder
[i
]].c
175 if i
> 0 && c
== scases
[lockorder
[i
-1]].c
{
176 continue // will unlock it on the next iteration
182 func selparkcommit(gp
*g
, _ unsafe
.Pointer
) bool {
183 // This must not access gp's stack (see gopark). In
184 // particular, it must not access the *hselect. That's okay,
185 // because by the time this is called, gp.waiting has all
186 // channels in lock order.
188 for sg
:= gp
.waiting
; sg
!= nil; sg
= sg
.waitlink
{
189 if sg
.c
!= lastc
&& lastc
!= nil {
190 // As soon as we unlock the channel, fields in
191 // any sudog with that channel may change,
192 // including c and waitlink. Since multiple
193 // sudogs may have the same channel, we unlock
194 // only after we've passed the last instance
207 gopark(nil, nil, "select (no cases)", traceEvGoStop
, 1) // forever
210 // selectgo implements the select statement.
212 // *sel is on the current goroutine's stack (regardless of any
213 // escaping in selectgo).
215 // selectgo returns the index of the chosen scase, which matches the
216 // ordinal position of its respective select{recv,send,default} call.
217 func selectgo(sel
*hselect
) int {
219 print("select: sel=", sel
, "\n")
221 if sel
.ncase
!= sel
.tcase
{
222 throw("selectgo: case count mismatch")
225 scaseslice
:= slice
{unsafe
.Pointer(&sel
.scase
), int(sel
.ncase
), int(sel
.ncase
)}
226 scases
:= *(*[]scase
)(unsafe
.Pointer(&scaseslice
))
229 if blockprofilerate
> 0 {
231 for i
:= 0; i
< int(sel
.ncase
); i
++ {
232 scases
[i
].releasetime
= -1
236 // The compiler rewrites selects that statically have
237 // only 0 or 1 cases plus default into simpler constructs.
238 // The only way we can end up with such small sel.ncase
239 // values here is for a larger select in which most channels
240 // have been nilled out. The general code handles those
241 // cases correctly, and they are rare enough not to bother
242 // optimizing (and needing to test).
244 // generate permuted order
245 pollslice
:= slice
{unsafe
.Pointer(sel
.pollorder
), int(sel
.ncase
), int(sel
.ncase
)}
246 pollorder
:= *(*[]uint16)(unsafe
.Pointer(&pollslice
))
247 for i
:= 1; i
< int(sel
.ncase
); i
++ {
248 j
:= fastrandn(uint32(i
+ 1))
249 pollorder
[i
] = pollorder
[j
]
250 pollorder
[j
] = uint16(i
)
253 // sort the cases by Hchan address to get the locking order.
254 // simple heap sort, to guarantee n log n time and constant stack footprint.
255 lockslice
:= slice
{unsafe
.Pointer(sel
.lockorder
), int(sel
.ncase
), int(sel
.ncase
)}
256 lockorder
:= *(*[]uint16)(unsafe
.Pointer(&lockslice
))
257 for i
:= 0; i
< int(sel
.ncase
); i
++ {
259 // Start with the pollorder to permute cases on the same channel.
260 c
:= scases
[pollorder
[i
]].c
261 for j
> 0 && scases
[lockorder
[(j
-1)/2]].c
.sortkey() < c
.sortkey() {
263 lockorder
[j
] = lockorder
[k
]
266 lockorder
[j
] = pollorder
[i
]
268 for i
:= int(sel
.ncase
) - 1; i
>= 0; i
-- {
271 lockorder
[i
] = lockorder
[0]
278 if k
+1 < i
&& scases
[lockorder
[k
]].c
.sortkey() < scases
[lockorder
[k
+1]].c
.sortkey() {
281 if c
.sortkey() < scases
[lockorder
[k
]].c
.sortkey() {
282 lockorder
[j
] = lockorder
[k
]
291 for i := 0; i+1 < int(sel.ncase); i++ {
292 if scases[lockorder[i]].c.sortkey() > scases[lockorder[i+1]].c.sortkey() {
293 print("i=", i, " x=", lockorder[i], " y=", lockorder[i+1], "\n")
294 throw("select: broken sort")
299 // lock all the channels involved in the select
300 sellock(scases
, lockorder
)
315 // pass 1 - look for something already waiting
320 for i
:= 0; i
< int(sel
.ncase
); i
++ {
321 casi
= int(pollorder
[i
])
330 sg
= c
.sendq
.dequeue()
343 racereadpc(unsafe
.Pointer(c
), cas
.pc
, chansendpc
)
348 sg
= c
.recvq
.dequeue()
352 if c
.qcount
< c
.dataqsiz
{
363 selunlock(scases
, lockorder
)
369 // pass 2 - enqueue on all chans
372 if gp
.waiting
!= nil {
373 throw("gp.waiting != nil")
376 for _
, casei
:= range lockorder
{
379 if cas
.kind
== caseNil
{
385 // Note: selectdone is adjusted for stack copies in stack1.go:adjustsudogs
386 sg
.selectdone
= (*uint32)(noescape(unsafe
.Pointer(&done
)))
387 // No stack splits between assigning elem and enqueuing
388 // sg on gp.waiting where copystack can find it.
395 // Construct waiting list in lock order.
408 // wait for someone to wake us up
410 gopark(selparkcommit
, nil, "select", traceEvGoBlockSelect
, 1)
412 // While we were asleep, some goroutine came along and completed
413 // one of the cases in the select and woke us up (called ready).
414 // As part of that process, the goroutine did a cas on done above
415 // (aka *sg.selectdone for all queued sg) to win the right to
416 // complete the select. Now done = 1.
418 // If we copy (grow) our own stack, we will update the
419 // selectdone pointers inside the gp.waiting sudog list to point
420 // at the new stack. Another goroutine attempting to
421 // complete one of our (still linked in) select cases might
422 // see the new selectdone pointer (pointing at the new stack)
423 // before the new stack has real data; if the new stack has done = 0
424 // (before the old values are copied over), the goroutine might
425 // do a cas via sg.selectdone and incorrectly believe that it has
426 // won the right to complete the select, executing a second
427 // communication and attempting to wake us (call ready) again.
429 // Then things break.
431 // The best break is that the goroutine doing ready sees the
432 // _Gcopystack status and throws, as in #17007.
433 // A worse break would be for us to continue on, start running real code,
434 // block in a semaphore acquisition (sema.go), and have the other
435 // goroutine wake us up without having really acquired the semaphore.
436 // That would result in the goroutine spuriously running and then
437 // queue up another spurious wakeup when the semaphore really is ready.
438 // In general the situation can cascade until something notices the
439 // problem and causes a crash.
441 // A stack shrink does not have this problem, because it locks
442 // all the channels that are involved first, blocking out the
443 // possibility of a cas on selectdone.
445 // A stack growth before gopark above does not have this
446 // problem, because we hold those channel locks (released by
449 // A stack growth after sellock below does not have this
450 // problem, because again we hold those channel locks.
452 // The only problem is a stack growth during sellock.
453 // To keep that from happening, run sellock on the system stack.
455 // It might be that we could avoid this if copystack copied the
456 // stack before calling adjustsudogs. In that case,
457 // syncadjustsudogs would need to recopy the tiny part that
458 // it copies today, resulting in a little bit of extra copying.
460 // An even better fix, not for the week before a release candidate,
461 // would be to put space in every sudog and make selectdone
462 // point at (say) the space in the first sudog.
465 sellock(scases
, lockorder
)
468 sg
= (*sudog
)(gp
.param
)
471 // pass 3 - dequeue from unsuccessful chans
472 // otherwise they stack up on quiet channels
473 // record the successful case, if any.
474 // We singly-linked up the SudoGs in lock order.
478 // Clear all elem before unlinking from gp.waiting.
479 for sg1
:= gp
.waiting
; sg1
!= nil; sg1
= sg1
.waitlink
{
486 for _
, casei
:= range lockorder
{
488 if k
.kind
== caseNil
{
491 if sglist
.releasetime
> 0 {
492 k
.releasetime
= sglist
.releasetime
495 // sg has already been dequeued by the G that woke us up.
500 if k
.kind
== caseSend
{
501 c
.sendq
.dequeueSudoG(sglist
)
503 c
.recvq
.dequeueSudoG(sglist
)
506 sgnext
= sglist
.waitlink
507 sglist
.waitlink
= nil
513 // We can wake up with gp.param == nil (so cas == nil)
514 // when a channel involved in the select has been closed.
515 // It is easiest to loop and re-run the operation;
516 // we'll see that it's now closed.
517 // Maybe some day we can signal the close explicitly,
518 // but we'd have to distinguish close-on-reader from close-on-writer.
519 // It's easiest not to duplicate the code and just recheck above.
520 // We know that something closed, and things never un-close,
521 // so we won't block again.
528 print("wait-return: sel=", sel
, " c=", c
, " cas=", cas
, " kind=", cas
.kind
, "\n")
531 if cas
.kind
== caseRecv
{
532 if cas
.receivedp
!= nil {
533 *cas
.receivedp
= true
538 if cas
.kind
== caseRecv
&& cas
.elem
!= nil {
539 raceWriteObjectPC(c
.elemtype
, cas
.elem
, cas
.pc
, chanrecvpc
)
540 } else if cas
.kind
== caseSend
{
541 raceReadObjectPC(c
.elemtype
, cas
.elem
, cas
.pc
, chansendpc
)
545 if cas
.kind
== caseRecv
&& cas
.elem
!= nil {
546 msanwrite(cas
.elem
, c
.elemtype
.size
)
547 } else if cas
.kind
== caseSend
{
548 msanread(cas
.elem
, c
.elemtype
.size
)
552 selunlock(scases
, lockorder
)
556 // can receive from buffer
559 raceWriteObjectPC(c
.elemtype
, cas
.elem
, cas
.pc
, chanrecvpc
)
561 raceacquire(chanbuf(c
, c
.recvx
))
562 racerelease(chanbuf(c
, c
.recvx
))
564 if msanenabled
&& cas
.elem
!= nil {
565 msanwrite(cas
.elem
, c
.elemtype
.size
)
567 if cas
.receivedp
!= nil {
568 *cas
.receivedp
= true
570 qp
= chanbuf(c
, c
.recvx
)
572 typedmemmove(c
.elemtype
, cas
.elem
, qp
)
574 typedmemclr(c
.elemtype
, qp
)
576 if c
.recvx
== c
.dataqsiz
{
580 selunlock(scases
, lockorder
)
584 // can send to buffer
586 raceacquire(chanbuf(c
, c
.sendx
))
587 racerelease(chanbuf(c
, c
.sendx
))
588 raceReadObjectPC(c
.elemtype
, cas
.elem
, cas
.pc
, chansendpc
)
591 msanread(cas
.elem
, c
.elemtype
.size
)
593 typedmemmove(c
.elemtype
, chanbuf(c
, c
.sendx
), cas
.elem
)
595 if c
.sendx
== c
.dataqsiz
{
599 selunlock(scases
, lockorder
)
603 // can receive from sleeping sender (sg)
604 recv(c
, sg
, cas
.elem
, func() { selunlock(scases
, lockorder
) }, 2)
606 print("syncrecv: sel=", sel
, " c=", c
, "\n")
608 if cas
.receivedp
!= nil {
609 *cas
.receivedp
= true
614 // read at end of closed channel
615 selunlock(scases
, lockorder
)
616 if cas
.receivedp
!= nil {
617 *cas
.receivedp
= false
620 typedmemclr(c
.elemtype
, cas
.elem
)
623 raceacquire(unsafe
.Pointer(c
))
628 // can send to a sleeping receiver (sg)
630 raceReadObjectPC(c
.elemtype
, cas
.elem
, cas
.pc
, chansendpc
)
633 msanread(cas
.elem
, c
.elemtype
.size
)
635 send(c
, sg
, cas
.elem
, func() { selunlock(scases
, lockorder
) }, 2)
637 print("syncsend: sel=", sel
, " c=", c
, "\n")
642 if cas
.releasetime
> 0 {
643 blockevent(cas
.releasetime
-t0
, 1)
648 // send on closed channel
649 selunlock(scases
, lockorder
)
650 panic(plainError("send on closed channel"))
653 func (c
*hchan
) sortkey() uintptr {
654 // TODO(khr): if we have a moving garbage collector, we'll need to
655 // change this function.
656 return uintptr(unsafe
.Pointer(c
))
// A runtimeSelect is a single case passed to rselect.
// This must match ../reflect/value.go:/runtimeSelect
// NOTE(review): the dir and ch fields were reconstructed from their uses in
// reflect_rselect (rc.dir switch, rc.ch argument) — confirm field order
// against reflect/value.go before relying on layout.
type runtimeSelect struct {
	dir selectDir      // selectSend, selectRecv, or selectDefault
	typ unsafe.Pointer // channel type (not used here)
	ch  *hchan         // channel
	val unsafe.Pointer // ptr to data (SendDir) or ptr to receive buffer (RecvDir)
}

// These values must match ../reflect/value.go:/SelectDir.
type selectDir int

const (
	_             selectDir = iota
	selectSend              // case Chan <- Send
	selectRecv              // case <-Chan:
	selectDefault           // default
)
678 //go:linkname reflect_rselect reflect.rselect
679 func reflect_rselect(cases
[]runtimeSelect
) (chosen
int, recvOK
bool) {
680 // flagNoScan is safe here, because all objects are also referenced from cases.
681 size
:= selectsize(uintptr(len(cases
)))
682 sel
:= (*hselect
)(mallocgc(size
, nil, true))
683 newselect(sel
, int64(size
), int32(len(cases
)))
685 for i
:= range cases
{
691 selectsend(sel
, rc
.ch
, rc
.val
)
693 selectrecv(sel
, rc
.ch
, rc
.val
, r
)
697 chosen
= selectgo(sel
)
702 func (q
*waitq
) dequeueSudoG(sgp
*sudog
) {
728 // x==y==nil. Either sgp is the only element in the queue,
729 // or it has already been removed. Use q.first to disambiguate.