1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
7 // This file contains the implementation of Go select statements.
10 "runtime/internal/sys"
14 // For gccgo, use go:linkname to rename compiler-called functions to
15 // themselves, so that the compiler will export them.
17 //go:linkname newselect runtime.newselect
18 //go:linkname selectdefault runtime.selectdefault
19 //go:linkname selectsend runtime.selectsend
20 //go:linkname selectrecv runtime.selectrecv
21 //go:linkname selectgo runtime.selectgo
// debugSelect enables verbose tracing prints in the select implementation.
const debugSelect = false
33 // Select statement header.
35 // Changes here must also be made in src/cmd/internal/gc/select.go's selecttype.
37 tcase
uint16 // total count of scase[]
38 ncase
uint16 // currently filled scase[]
39 pollorder
*uint16 // case poll order
40 lockorder
*uint16 // channel lock order
41 scase
[1]scase
// one per case (in order of appearance)
44 // Select case descriptor.
46 // Changes here must also be made in src/cmd/internal/gc/select.go's selecttype.
48 elem unsafe
.Pointer
// data element
50 pc
uintptr // return pc (for race detector / msan)
52 receivedp
*bool // pointer to received bool, if any
57 chansendpc
= funcPC(chansend
)
58 chanrecvpc
= funcPC(chanrecv
)
61 func selectsize(size
uintptr) uintptr {
62 selsize
:= unsafe
.Sizeof(hselect
{}) +
63 (size
-1)*unsafe
.Sizeof(hselect
{}.scase
[0]) +
64 size
*unsafe
.Sizeof(*hselect
{}.lockorder
) +
65 size
*unsafe
.Sizeof(*hselect
{}.pollorder
)
66 return round(selsize
, sys
.Int64Align
)
69 func newselect(sel
*hselect
, selsize
int64, size
int32) {
70 if selsize
!= int64(selectsize(uintptr(size
))) {
71 print("runtime: bad select size ", selsize
, ", want ", selectsize(uintptr(size
)), "\n")
72 throw("bad select size")
74 if size
!= int32(uint16(size
)) {
75 throw("select size too large")
77 sel
.tcase
= uint16(size
)
79 sel
.lockorder
= (*uint16)(add(unsafe
.Pointer(&sel
.scase
), uintptr(size
)*unsafe
.Sizeof(hselect
{}.scase
[0])))
80 sel
.pollorder
= (*uint16)(add(unsafe
.Pointer(sel
.lockorder
), uintptr(size
)*unsafe
.Sizeof(*hselect
{}.lockorder
)))
82 // For gccgo the temporary variable will not have been zeroed.
83 memclrNoHeapPointers(unsafe
.Pointer(&sel
.scase
), uintptr(size
)*unsafe
.Sizeof(hselect
{}.scase
[0])+uintptr(size
)*unsafe
.Sizeof(*hselect
{}.lockorder
)+uintptr(size
)*unsafe
.Sizeof(*hselect
{}.pollorder
))
86 print("newselect s=", sel
, " size=", size
, "\n")
90 func selectsend(sel
*hselect
, c
*hchan
, elem unsafe
.Pointer
) {
94 throw("selectsend: too many cases")
100 cas
:= (*scase
)(add(unsafe
.Pointer(&sel
.scase
), uintptr(i
)*unsafe
.Sizeof(sel
.scase
[0])))
107 print("selectsend s=", sel
, " pc=", hex(cas
.pc
), " chan=", cas
.c
, "\n")
111 func selectrecv(sel
*hselect
, c
*hchan
, elem unsafe
.Pointer
, received
*bool) {
115 throw("selectrecv: too many cases")
121 cas
:= (*scase
)(add(unsafe
.Pointer(&sel
.scase
), uintptr(i
)*unsafe
.Sizeof(sel
.scase
[0])))
126 cas
.receivedp
= received
129 print("selectrecv s=", sel
, " pc=", hex(cas
.pc
), " chan=", cas
.c
, "\n")
133 func selectdefault(sel
*hselect
) {
137 throw("selectdefault: too many cases")
140 cas
:= (*scase
)(add(unsafe
.Pointer(&sel
.scase
), uintptr(i
)*unsafe
.Sizeof(sel
.scase
[0])))
143 cas
.kind
= caseDefault
146 print("selectdefault s=", sel
, " pc=", hex(cas
.pc
), "\n")
150 func sellock(scases
[]scase
, lockorder
[]uint16) {
152 for _
, o
:= range lockorder
{
154 if c0
!= nil && c0
!= c
{
161 func selunlock(scases
[]scase
, lockorder
[]uint16) {
162 // We must be very careful here to not touch sel after we have unlocked
163 // the last lock, because sel can be freed right after the last unlock.
164 // Consider the following situation.
165 // First M calls runtime·park() in runtime·selectgo() passing the sel.
166 // Once runtime·park() has unlocked the last lock, another M makes
167 // the G that calls select runnable again and schedules it for execution.
168 // When the G runs on another M, it locks all the locks and frees sel.
169 // Now if the first M touches sel, it will access freed memory.
170 for i
:= len(scases
) - 1; i
>= 0; i
-- {
171 c
:= scases
[lockorder
[i
]].c
175 if i
> 0 && c
== scases
[lockorder
[i
-1]].c
{
176 continue // will unlock it on the next iteration
182 func selparkcommit(gp
*g
, _ unsafe
.Pointer
) bool {
183 // This must not access gp's stack (see gopark). In
184 // particular, it must not access the *hselect. That's okay,
185 // because by the time this is called, gp.waiting has all
186 // channels in lock order.
188 for sg
:= gp
.waiting
; sg
!= nil; sg
= sg
.waitlink
{
189 if sg
.c
!= lastc
&& lastc
!= nil {
190 // As soon as we unlock the channel, fields in
191 // any sudog with that channel may change,
192 // including c and waitlink. Since multiple
193 // sudogs may have the same channel, we unlock
194 // only after we've passed the last instance
207 gopark(nil, nil, "select (no cases)", traceEvGoStop
, 1) // forever
210 // selectgo implements the select statement.
212 // *sel is on the current goroutine's stack (regardless of any
213 // escaping in selectgo).
215 // selectgo returns the index of the chosen scase, which matches the
216 // ordinal position of its respective select{recv,send,default} call.
217 func selectgo(sel
*hselect
) int {
219 print("select: sel=", sel
, "\n")
221 if sel
.ncase
!= sel
.tcase
{
222 throw("selectgo: case count mismatch")
225 scaseslice
:= slice
{unsafe
.Pointer(&sel
.scase
), int(sel
.ncase
), int(sel
.ncase
)}
226 scases
:= *(*[]scase
)(unsafe
.Pointer(&scaseslice
))
229 if blockprofilerate
> 0 {
231 for i
:= 0; i
< int(sel
.ncase
); i
++ {
232 scases
[i
].releasetime
= -1
236 // The compiler rewrites selects that statically have
237 // only 0 or 1 cases plus default into simpler constructs.
238 // The only way we can end up with such small sel.ncase
239 // values here is for a larger select in which most channels
240 // have been nilled out. The general code handles those
241 // cases correctly, and they are rare enough not to bother
242 // optimizing (and needing to test).
244 // generate permuted order
245 pollslice
:= slice
{unsafe
.Pointer(sel
.pollorder
), int(sel
.ncase
), int(sel
.ncase
)}
246 pollorder
:= *(*[]uint16)(unsafe
.Pointer(&pollslice
))
247 for i
:= 1; i
< int(sel
.ncase
); i
++ {
248 j
:= fastrandn(uint32(i
+ 1))
249 pollorder
[i
] = pollorder
[j
]
250 pollorder
[j
] = uint16(i
)
253 // sort the cases by Hchan address to get the locking order.
254 // simple heap sort, to guarantee n log n time and constant stack footprint.
255 lockslice
:= slice
{unsafe
.Pointer(sel
.lockorder
), int(sel
.ncase
), int(sel
.ncase
)}
256 lockorder
:= *(*[]uint16)(unsafe
.Pointer(&lockslice
))
257 for i
:= 0; i
< int(sel
.ncase
); i
++ {
259 // Start with the pollorder to permute cases on the same channel.
260 c
:= scases
[pollorder
[i
]].c
261 for j
> 0 && scases
[lockorder
[(j
-1)/2]].c
.sortkey() < c
.sortkey() {
263 lockorder
[j
] = lockorder
[k
]
266 lockorder
[j
] = pollorder
[i
]
268 for i
:= int(sel
.ncase
) - 1; i
>= 0; i
-- {
271 lockorder
[i
] = lockorder
[0]
278 if k
+1 < i
&& scases
[lockorder
[k
]].c
.sortkey() < scases
[lockorder
[k
+1]].c
.sortkey() {
281 if c
.sortkey() < scases
[lockorder
[k
]].c
.sortkey() {
282 lockorder
[j
] = lockorder
[k
]
291 for i := 0; i+1 < int(sel.ncase); i++ {
292 if scases[lockorder[i]].c.sortkey() > scases[lockorder[i+1]].c.sortkey() {
293 print("i=", i, " x=", lockorder[i], " y=", lockorder[i+1], "\n")
294 throw("select: broken sort")
299 // lock all the channels involved in the select
300 sellock(scases
, lockorder
)
314 // pass 1 - look for something already waiting
319 for i
:= 0; i
< int(sel
.ncase
); i
++ {
320 casi
= int(pollorder
[i
])
329 sg
= c
.sendq
.dequeue()
342 racereadpc(unsafe
.Pointer(c
), cas
.pc
, chansendpc
)
347 sg
= c
.recvq
.dequeue()
351 if c
.qcount
< c
.dataqsiz
{
362 selunlock(scases
, lockorder
)
368 // pass 2 - enqueue on all chans
370 if gp
.waiting
!= nil {
371 throw("gp.waiting != nil")
374 for _
, casei
:= range lockorder
{
377 if cas
.kind
== caseNil
{
384 // No stack splits between assigning elem and enqueuing
385 // sg on gp.waiting where copystack can find it.
392 // Construct waiting list in lock order.
405 // wait for someone to wake us up
407 gopark(selparkcommit
, nil, "select", traceEvGoBlockSelect
, 1)
409 sellock(scases
, lockorder
)
412 sg
= (*sudog
)(gp
.param
)
415 // pass 3 - dequeue from unsuccessful chans
416 // otherwise they stack up on quiet channels
417 // record the successful case, if any.
418 // We singly-linked up the SudoGs in lock order.
422 // Clear all elem before unlinking from gp.waiting.
423 for sg1
:= gp
.waiting
; sg1
!= nil; sg1
= sg1
.waitlink
{
430 for _
, casei
:= range lockorder
{
432 if k
.kind
== caseNil
{
435 if sglist
.releasetime
> 0 {
436 k
.releasetime
= sglist
.releasetime
439 // sg has already been dequeued by the G that woke us up.
444 if k
.kind
== caseSend
{
445 c
.sendq
.dequeueSudoG(sglist
)
447 c
.recvq
.dequeueSudoG(sglist
)
450 sgnext
= sglist
.waitlink
451 sglist
.waitlink
= nil
457 // We can wake up with gp.param == nil (so cas == nil)
458 // when a channel involved in the select has been closed.
459 // It is easiest to loop and re-run the operation;
460 // we'll see that it's now closed.
461 // Maybe some day we can signal the close explicitly,
462 // but we'd have to distinguish close-on-reader from close-on-writer.
463 // It's easiest not to duplicate the code and just recheck above.
464 // We know that something closed, and things never un-close,
465 // so we won't block again.
472 print("wait-return: sel=", sel
, " c=", c
, " cas=", cas
, " kind=", cas
.kind
, "\n")
475 if cas
.kind
== caseRecv
&& cas
.receivedp
!= nil {
476 *cas
.receivedp
= true
480 if cas
.kind
== caseRecv
&& cas
.elem
!= nil {
481 raceWriteObjectPC(c
.elemtype
, cas
.elem
, cas
.pc
, chanrecvpc
)
482 } else if cas
.kind
== caseSend
{
483 raceReadObjectPC(c
.elemtype
, cas
.elem
, cas
.pc
, chansendpc
)
487 if cas
.kind
== caseRecv
&& cas
.elem
!= nil {
488 msanwrite(cas
.elem
, c
.elemtype
.size
)
489 } else if cas
.kind
== caseSend
{
490 msanread(cas
.elem
, c
.elemtype
.size
)
494 selunlock(scases
, lockorder
)
498 // can receive from buffer
501 raceWriteObjectPC(c
.elemtype
, cas
.elem
, cas
.pc
, chanrecvpc
)
503 raceacquire(chanbuf(c
, c
.recvx
))
504 racerelease(chanbuf(c
, c
.recvx
))
506 if msanenabled
&& cas
.elem
!= nil {
507 msanwrite(cas
.elem
, c
.elemtype
.size
)
509 if cas
.receivedp
!= nil {
510 *cas
.receivedp
= true
512 qp
= chanbuf(c
, c
.recvx
)
514 typedmemmove(c
.elemtype
, cas
.elem
, qp
)
516 typedmemclr(c
.elemtype
, qp
)
518 if c
.recvx
== c
.dataqsiz
{
522 selunlock(scases
, lockorder
)
526 // can send to buffer
528 raceacquire(chanbuf(c
, c
.sendx
))
529 racerelease(chanbuf(c
, c
.sendx
))
530 raceReadObjectPC(c
.elemtype
, cas
.elem
, cas
.pc
, chansendpc
)
533 msanread(cas
.elem
, c
.elemtype
.size
)
535 typedmemmove(c
.elemtype
, chanbuf(c
, c
.sendx
), cas
.elem
)
537 if c
.sendx
== c
.dataqsiz
{
541 selunlock(scases
, lockorder
)
545 // can receive from sleeping sender (sg)
546 recv(c
, sg
, cas
.elem
, func() { selunlock(scases
, lockorder
) }, 2)
548 print("syncrecv: sel=", sel
, " c=", c
, "\n")
550 if cas
.receivedp
!= nil {
551 *cas
.receivedp
= true
556 // read at end of closed channel
557 selunlock(scases
, lockorder
)
558 if cas
.receivedp
!= nil {
559 *cas
.receivedp
= false
562 typedmemclr(c
.elemtype
, cas
.elem
)
565 raceacquire(unsafe
.Pointer(c
))
570 // can send to a sleeping receiver (sg)
572 raceReadObjectPC(c
.elemtype
, cas
.elem
, cas
.pc
, chansendpc
)
575 msanread(cas
.elem
, c
.elemtype
.size
)
577 send(c
, sg
, cas
.elem
, func() { selunlock(scases
, lockorder
) }, 2)
579 print("syncsend: sel=", sel
, " c=", c
, "\n")
584 if cas
.releasetime
> 0 {
585 blockevent(cas
.releasetime
-t0
, 1)
590 // send on closed channel
591 selunlock(scases
, lockorder
)
592 panic(plainError("send on closed channel"))
595 func (c
*hchan
) sortkey() uintptr {
596 // TODO(khr): if we have a moving garbage collector, we'll need to
597 // change this function.
598 return uintptr(unsafe
.Pointer(c
))
601 // A runtimeSelect is a single case passed to rselect.
602 // This must match ../reflect/value.go:/runtimeSelect
603 type runtimeSelect
struct {
605 typ unsafe
.Pointer
// channel type (not used here)
607 val unsafe
.Pointer
// ptr to data (SendDir) or ptr to receive buffer (RecvDir)
610 // These values must match ../reflect/value.go:/SelectDir.
615 selectSend
// case Chan <- Send
616 selectRecv
// case <-Chan:
617 selectDefault
// default
620 //go:linkname reflect_rselect reflect.rselect
621 func reflect_rselect(cases
[]runtimeSelect
) (chosen
int, recvOK
bool) {
622 // flagNoScan is safe here, because all objects are also referenced from cases.
623 size
:= selectsize(uintptr(len(cases
)))
624 sel
:= (*hselect
)(mallocgc(size
, nil, true))
625 newselect(sel
, int64(size
), int32(len(cases
)))
627 for i
:= range cases
{
633 selectsend(sel
, rc
.ch
, rc
.val
)
635 selectrecv(sel
, rc
.ch
, rc
.val
, r
)
639 chosen
= selectgo(sel
)
644 func (q
*waitq
) dequeueSudoG(sgp
*sudog
) {
670 // x==y==nil. Either sgp is the only element in the queue,
671 // or it has already been removed. Use q.first to disambiguate.