1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
7 // This file contains the implementation of Go select statements.
13 // For gccgo, use go:linkname to rename compiler-called functions to
14 // themselves, so that the compiler will export them.
16 //go:linkname selectgo runtime.selectgo
// debugSelect enables verbose print tracing of select execution when true.
18 const debugSelect
= false
22 // Changes here must also be made in src/cmd/compile/internal/gc/select.go's walkselect.
30 // Select case descriptor.
32 // Changes here must also be made in src/cmd/internal/gc/select.go's scasetype.
// NOTE(review): only the elem field of the scase struct is visible in this
// excerpt; the struct header and its other fields (referenced below as .c,
// .kind, .releasetime) are missing here — confirm against the full file.
35 elem unsafe
.Pointer
// data element
// sellock locks every channel involved in the select, iterating in
// lockorder so all goroutines acquire channel locks in a consistent order.
40 func sellock(scases
[]scase
, lockorder
[]uint16) {
42 for _
, o
:= range lockorder
{
// NOTE(review): c0/c presumably skip re-locking a channel identical to the
// previously locked one (duplicates are adjacent in lockorder); the
// assignments establishing c0 and c are missing from this excerpt — confirm.
44 if c0
!= nil && c0
!= c
{
// selunlock unlocks the select's channels in the reverse of lockorder,
// releasing each distinct channel exactly once (duplicate entries for the
// same channel sit adjacent in lockorder and are skipped).
51 func selunlock(scases
[]scase
, lockorder
[]uint16) {
52 // We must be very careful here to not touch sel after we have unlocked
53 // the last lock, because sel can be freed right after the last unlock.
54 // Consider the following situation.
55 // First M calls runtime·park() in runtime·selectgo() passing the sel.
56 // Once runtime·park() has unlocked the last lock, another M makes
57 // the G that calls select runnable again and schedules it for execution.
58 // When the G runs on another M, it locks all the locks and frees sel.
59 // Now if the first M touches sel, it will access freed memory.
60 for i
:= len(scases
) - 1; i
>= 0; i
-- {
61 c
:= scases
[lockorder
[i
]].c
// Skip duplicates: only the last (lowest-index) occurrence of a channel
// in lockorder actually performs the unlock.
65 if i
> 0 && c
== scases
[lockorder
[i
-1]].c
{
66 continue // will unlock it on the next iteration
// selparkcommit is the unlock function passed to gopark when a select
// blocks: it walks gp.waiting (already sorted into lock order) and unlocks
// each distinct channel only after the last sudog referring to it has been
// visited.
72 func selparkcommit(gp
*g
, _ unsafe
.Pointer
) bool {
73 // This must not access gp's stack (see gopark). In
74 // particular, it must not access the *hselect. That's okay,
75 // because by the time this is called, gp.waiting has all
76 // channels in lock order.
78 for sg
:= gp
.waiting
; sg
!= nil; sg
= sg
.waitlink
{
// NOTE(review): lastc tracks the previously seen channel so a channel shared
// by several consecutive sudogs is unlocked once; its declaration and the
// unlock call itself are missing from this excerpt — confirm.
79 if sg
.c
!= lastc
&& lastc
!= nil {
80 // As soon as we unlock the channel, fields in
81 // any sudog with that channel may change,
82 // including c and waitlink. Since multiple
83 // sudogs may have the same channel, we unlock
84 // only after we've passed the last instance
// NOTE(review): this gopark call appears to be the body of block() — a
// select with zero cases parks the goroutine forever; the enclosing func
// declaration line is missing from this excerpt.
97 gopark(nil, nil, waitReasonSelectNoCases
, traceEvGoStop
, 1) // forever
100 // selectgo implements the select statement.
102 // cas0 points to an array of type [ncases]scase, and order0 points to
103 // an array of type [2*ncases]uint16. Both reside on the goroutine's
104 // stack (regardless of any escaping in selectgo).
106 // selectgo returns the index of the chosen scase, which matches the
107 // ordinal position of its respective select{recv,send,default} call.
108 // Also, if the chosen scase was a receive operation, it returns whether
109 // a value was received.
110 func selectgo(cas0
*scase
, order0
*uint16, ncases
int) (int, bool) {
// NOTE(review): debug tracing — presumably guarded by debugSelect in the
// full file; the guard line is missing from this excerpt.
112 print("select: cas0=", cas0
, "\n")
// View the caller-supplied arrays as slices; length and capacity are both
// clamped to ncases so no append can grow past the stack arrays.
115 cas1
:= (*[1 << 16]scase
)(unsafe
.Pointer(cas0
))
116 order1
:= (*[1 << 17]uint16)(unsafe
.Pointer(order0
))
118 scases
:= cas1
[:ncases
:ncases
]
119 pollorder
:= order1
[:ncases
:ncases
]
120 lockorder
:= order1
[ncases
:][:ncases
:ncases
]
122 // Replace send/receive cases involving nil channels with
123 // caseNil so logic below can assume non-nil channel.
124 for i
:= range scases
{
126 if cas
.c
== nil && cas
.kind
!= caseDefault
{
// When block profiling is on, mark each case's releasetime with -1 to
// request a wakeup timestamp for blockevent reporting.
132 if blockprofilerate
> 0 {
134 for i
:= 0; i
< ncases
; i
++ {
135 scases
[i
].releasetime
= -1
139 // The compiler rewrites selects that statically have
140 // only 0 or 1 cases plus default into simpler constructs.
141 // The only way we can end up with such small sel.ncase
142 // values here is for a larger select in which most channels
143 // have been nilled out. The general code handles those
144 // cases correctly, and they are rare enough not to bother
145 // optimizing (and needing to test).
147 // needed for gccgo, which doesn't zero pollorder
152 // generate permuted order
// (inside-out Fisher–Yates-style shuffle driven by fastrandn)
153 for i
:= 1; i
< ncases
; i
++ {
154 j
:= fastrandn(uint32(i
+ 1))
155 pollorder
[i
] = pollorder
[j
]
156 pollorder
[j
] = uint16(i
)
159 // sort the cases by Hchan address to get the locking order.
160 // simple heap sort, to guarantee n log n time and constant stack footprint.
// Phase 1: sift-up insertion building a max-heap keyed by channel sortkey.
161 for i
:= 0; i
< ncases
; i
++ {
163 // Start with the pollorder to permute cases on the same channel.
164 c
:= scases
[pollorder
[i
]].c
165 for j
> 0 && scases
[lockorder
[(j
-1)/2]].c
.sortkey() < c
.sortkey() {
167 lockorder
[j
] = lockorder
[k
]
170 lockorder
[j
] = pollorder
[i
]
// Phase 2: repeatedly pop the max and sift-down to produce sorted order.
172 for i
:= ncases
- 1; i
>= 0; i
-- {
175 lockorder
[i
] = lockorder
[0]
182 if k
+1 < i
&& scases
[lockorder
[k
]].c
.sortkey() < scases
[lockorder
[k
+1]].c
.sortkey() {
185 if c
.sortkey() < scases
[lockorder
[k
]].c
.sortkey() {
186 lockorder
[j
] = lockorder
[k
]
// Sanity pass verifying lockorder is sorted — presumably only run under
// debugSelect; the guard line is missing from this excerpt.
196 for i
:= 0; i
+1 < ncases
; i
++ {
197 if scases
[lockorder
[i
]].c
.sortkey() > scases
[lockorder
[i
+1]].c
.sortkey() {
198 print("i=", i
, " x=", lockorder
[i
], " y=", lockorder
[i
+1], "\n")
199 throw("select: broken sort")
204 // lock all the channels involved in the select
205 sellock(scases
, lockorder
)
219 // pass 1 - look for something already waiting
225 for i
:= 0; i
< ncases
; i
++ {
226 casi
= int(pollorder
[i
])
235 sg
= c
.sendq
.dequeue()
250 sg
= c
.recvq
.dequeue()
254 if c
.qcount
< c
.dataqsiz
{
265 selunlock(scases
, lockorder
)
271 // pass 2 - enqueue on all chans
273 if gp
.waiting
!= nil {
274 throw("gp.waiting != nil")
277 for _
, casei
:= range lockorder
{
280 if cas
.kind
== caseNil
{
287 // No stack splits between assigning elem and enqueuing
288 // sg on gp.waiting where copystack can find it.
295 // Construct waiting list in lock order.
308 // wait for someone to wake us up
// selparkcommit releases the channel locks once the G is safely parked.
310 gopark(selparkcommit
, nil, waitReasonSelect
, traceEvGoBlockSelect
, 1)
// Re-acquire all locks after waking before inspecting shared state.
312 sellock(scases
, lockorder
)
315 sg
= (*sudog
)(gp
.param
)
318 // pass 3 - dequeue from unsuccessful chans
319 // otherwise they stack up on quiet channels
320 // record the successful case, if any.
321 // We singly-linked up the SudoGs in lock order.
325 // Clear all elem before unlinking from gp.waiting.
326 for sg1
:= gp
.waiting
; sg1
!= nil; sg1
= sg1
.waitlink
{
333 for _
, casei
:= range lockorder
{
335 if k
.kind
== caseNil
{
338 if sglist
.releasetime
> 0 {
339 k
.releasetime
= sglist
.releasetime
342 // sg has already been dequeued by the G that woke us up.
347 if k
.kind
== caseSend
{
348 c
.sendq
.dequeueSudoG(sglist
)
350 c
.recvq
.dequeueSudoG(sglist
)
353 sgnext
= sglist
.waitlink
354 sglist
.waitlink
= nil
360 // We can wake up with gp.param == nil (so cas == nil)
361 // when a channel involved in the select has been closed.
362 // It is easiest to loop and re-run the operation;
363 // we'll see that it's now closed.
364 // Maybe some day we can signal the close explicitly,
365 // but we'd have to distinguish close-on-reader from close-on-writer.
366 // It's easiest not to duplicate the code and just recheck above.
367 // We know that something closed, and things never un-close,
368 // so we won't block again.
// NOTE(review): debug tracing — presumably guarded by debugSelect in the
// full file.
375 print("wait-return: cas0=", cas0
, " c=", c
, " cas=", cas
, " kind=", cas
.kind
, "\n")
378 if cas
.kind
== caseRecv
{
382 selunlock(scases
, lockorder
)
386 // can receive from buffer
388 qp
= chanbuf(c
, c
.recvx
)
390 typedmemmove(c
.elemtype
, cas
.elem
, qp
)
392 typedmemclr(c
.elemtype
, qp
)
// Wrap the receive index when it reaches the end of the circular buffer.
394 if c
.recvx
== c
.dataqsiz
{
398 selunlock(scases
, lockorder
)
402 // can send to buffer
403 typedmemmove(c
.elemtype
, chanbuf(c
, c
.sendx
), cas
.elem
)
405 if c
.sendx
== c
.dataqsiz
{
409 selunlock(scases
, lockorder
)
413 // can receive from sleeping sender (sg)
// recv is handed a closure that releases all select locks once the
// handoff completes.
414 recv(c
, sg
, cas
.elem
, func() { selunlock(scases
, lockorder
) }, 2)
416 print("syncrecv: cas0=", cas0
, " c=", c
, "\n")
422 // read at end of closed channel
423 selunlock(scases
, lockorder
)
426 typedmemclr(c
.elemtype
, cas
.elem
)
429 raceacquire(unsafe
.Pointer(c
))
434 // can send to a sleeping receiver (sg)
435 send(c
, sg
, cas
.elem
, func() { selunlock(scases
, lockorder
) }, 2)
437 print("syncsend: cas0=", cas0
, " c=", c
, "\n")
// Report how long this select blocked, when block profiling requested it.
442 if cas
.releasetime
> 0 {
443 blockevent(cas
.releasetime
-t0
, 1)
446 // Check preemption, since unlike gc we don't check on every call.
447 // A test case for this one is BenchmarkPingPongHog in proc_test.go.
448 if dfl
!= nil && getg().preempt
{
455 // send on closed channel
456 selunlock(scases
, lockorder
)
457 panic(plainError("send on closed channel"))
460 func (c
*hchan
) sortkey() uintptr {
461 // TODO(khr): if we have a moving garbage collector, we'll need to
462 // change this function.
463 return uintptr(unsafe
.Pointer(c
))
466 // A runtimeSelect is a single case passed to rselect.
467 // This must match ../reflect/value.go:/runtimeSelect
468 type runtimeSelect
struct {
// NOTE(review): additional fields (the direction field read as rc.dir and
// the channel field read as rc.ch in reflect_rselect below) are missing
// from this excerpt — confirm against the full file.
470 typ unsafe
.Pointer
// channel type (not used here)
472 val unsafe
.Pointer
// ptr to data (SendDir) or ptr to receive buffer (RecvDir)
475 // These values must match ../reflect/value.go:/SelectDir.
// NOTE(review): the type declaration and const-block framing for these
// selectDir values are missing from this excerpt.
480 selectSend
// case Chan <- Send
481 selectRecv
// case <-Chan:
482 selectDefault
// default
485 //go:linkname reflect_rselect reflect.rselect
// reflect_rselect is the entry point used by package reflect (via
// go:linkname) to run a select: it converts the reflect-level cases into
// scase descriptors, allocates the order scratch space, and calls selectgo.
486 func reflect_rselect(cases
[]runtimeSelect
) (int, bool) {
// selectgo needs ncases scases plus 2*ncases uint16s of scratch
// (pollorder and lockorder).
490 sel
:= make([]scase
, len(cases
))
491 order
:= make([]uint16, 2*len(cases
))
492 for i
:= range cases
{
// NOTE(review): the switch on the case's direction choosing among the
// three assignments below is missing from this excerpt — confirm.
496 sel
[i
] = scase
{kind
: caseDefault
}
498 sel
[i
] = scase
{kind
: caseSend
, c
: rc
.ch
, elem
: rc
.val
}
500 sel
[i
] = scase
{kind
: caseRecv
, c
: rc
.ch
, elem
: rc
.val
}
504 return selectgo(&sel
[0], &order
[0], len(cases
))
507 func (q
*waitq
) dequeueSudoG(sgp
*sudog
) {
533 // x==y==nil. Either sgp is the only element in the queue,
534 // or it has already been removed. Use q.first to disambiguate.