libgo: update to go1.9
libgo/go/runtime/select.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go select statements.

import (
	"runtime/internal/sys"
	"unsafe"
)

// For gccgo, use go:linkname to rename compiler-called functions to
// themselves, so that the compiler will export them.
//
//go:linkname newselect runtime.newselect
//go:linkname selectdefault runtime.selectdefault
//go:linkname selectsend runtime.selectsend
//go:linkname selectrecv runtime.selectrecv
//go:linkname selectgo runtime.selectgo
const debugSelect = false

const (
	// scase.kind
	caseNil = iota
	caseRecv
	caseSend
	caseDefault
)

// Select statement header.
// Known to compiler.
// Changes here must also be made in src/cmd/internal/gc/select.go's selecttype.
type hselect struct {
	tcase     uint16   // total count of scase[]
	ncase     uint16   // currently filled scase[]
	pollorder *uint16  // case poll order
	lockorder *uint16  // channel lock order
	scase     [1]scase // one per case (in order of appearance)
}
// Select case descriptor.
// Known to compiler.
// Changes here must also be made in src/cmd/internal/gc/select.go's selecttype.
type scase struct {
	elem        unsafe.Pointer // data element
	c           *hchan         // chan
	pc          uintptr        // return pc (for race detector / msan)
	kind        uint16
	receivedp   *bool // pointer to received bool, if any
	releasetime int64
}

var (
	chansendpc = funcPC(chansend)
	chanrecvpc = funcPC(chanrecv)
)
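
// selectsize returns the number of bytes needed for an hselect with
// size cases: the hselect header (which already embeds one scase),
// size-1 further scase slots, and then the lockorder and pollorder
// arrays of size uint16 entries each, rounded up to int64 alignment.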
func selectsize(size uintptr) uintptr {
	selsize := unsafe.Sizeof(hselect{}) +
		(size-1)*unsafe.Sizeof(hselect{}.scase[0]) +
		size*unsafe.Sizeof(*hselect{}.lockorder) +
		size*unsafe.Sizeof(*hselect{}.pollorder)
	return round(selsize, sys.Int64Align)
}

func newselect(sel *hselect, selsize int64, size int32) {
	if selsize != int64(selectsize(uintptr(size))) {
		print("runtime: bad select size ", selsize, ", want ", selectsize(uintptr(size)), "\n")
		throw("bad select size")
	}
	if size != int32(uint16(size)) {
		throw("select size too large")
	}
	sel.tcase = uint16(size)
	sel.ncase = 0
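	// lockorder and pollorder are carved out of the tail of the same
	// allocation, immediately after the last scase slot (this is the
	// layout computed by selectsize).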
	sel.lockorder = (*uint16)(add(unsafe.Pointer(&sel.scase), uintptr(size)*unsafe.Sizeof(hselect{}.scase[0])))
	sel.pollorder = (*uint16)(add(unsafe.Pointer(sel.lockorder), uintptr(size)*unsafe.Sizeof(*hselect{}.lockorder)))

	// For gccgo the temporary variable will not have been zeroed.
	memclrNoHeapPointers(unsafe.Pointer(&sel.scase), uintptr(size)*unsafe.Sizeof(hselect{}.scase[0])+uintptr(size)*unsafe.Sizeof(*hselect{}.lockorder)+uintptr(size)*unsafe.Sizeof(*hselect{}.pollorder))

	if debugSelect {
		print("newselect s=", sel, " size=", size, "\n")
	}
}

func selectsend(sel *hselect, c *hchan, elem unsafe.Pointer) {
	pc := getcallerpc(unsafe.Pointer(&sel))
	i := sel.ncase
	if i >= sel.tcase {
		throw("selectsend: too many cases")
	}
	sel.ncase = i + 1
	if c == nil {
		return
	}
	cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
	cas.pc = pc
	cas.c = c
	cas.kind = caseSend
	cas.elem = elem

	if debugSelect {
		print("selectsend s=", sel, " pc=", hex(cas.pc), " chan=", cas.c, "\n")
	}
}

func selectrecv(sel *hselect, c *hchan, elem unsafe.Pointer, received *bool) {
	pc := getcallerpc(unsafe.Pointer(&sel))
	i := sel.ncase
	if i >= sel.tcase {
		throw("selectrecv: too many cases")
	}
	sel.ncase = i + 1
	if c == nil {
		return
	}
	cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
	cas.pc = pc
	cas.c = c
	cas.kind = caseRecv
	cas.elem = elem
	cas.receivedp = received

	if debugSelect {
		print("selectrecv s=", sel, " pc=", hex(cas.pc), " chan=", cas.c, "\n")
	}
}

func selectdefault(sel *hselect) {
	pc := getcallerpc(unsafe.Pointer(&sel))
	i := sel.ncase
	if i >= sel.tcase {
		throw("selectdefault: too many cases")
	}
	sel.ncase = i + 1
	cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
	cas.pc = pc
	cas.c = nil
	cas.kind = caseDefault

	if debugSelect {
		print("selectdefault s=", sel, " pc=", hex(cas.pc), "\n")
	}
}

func sellock(scases []scase, lockorder []uint16) {
	var c *hchan
	for _, o := range lockorder {
		c0 := scases[o].c
		if c0 != nil && c0 != c {
			c = c0
			lock(&c.lock)
		}
	}
}

func selunlock(scases []scase, lockorder []uint16) {
	// We must be very careful here to not touch sel after we have unlocked
	// the last lock, because sel can be freed right after the last unlock.
	// Consider the following situation.
	// First M calls runtime·park() in runtime·selectgo() passing the sel.
	// Once runtime·park() has unlocked the last lock, another M makes
	// the G that calls select runnable again and schedules it for execution.
	// When the G runs on another M, it locks all the locks and frees sel.
	// Now if the first M touches sel, it will access freed memory.
	for i := len(scases) - 1; i >= 0; i-- {
		c := scases[lockorder[i]].c
		if c == nil {
			break
		}
		if i > 0 && c == scases[lockorder[i-1]].c {
			continue // will unlock it on the next iteration
		}
		unlock(&c.lock)
	}
}

func selparkcommit(gp *g, _ unsafe.Pointer) bool {
	// This must not access gp's stack (see gopark). In
	// particular, it must not access the *hselect. That's okay,
	// because by the time this is called, gp.waiting has all
	// channels in lock order.
	var lastc *hchan
	for sg := gp.waiting; sg != nil; sg = sg.waitlink {
		if sg.c != lastc && lastc != nil {
			// As soon as we unlock the channel, fields in
			// any sudog with that channel may change,
			// including c and waitlink. Since multiple
			// sudogs may have the same channel, we unlock
			// only after we've passed the last instance
			// of a channel.
			unlock(&lastc.lock)
		}
		lastc = sg.c
	}
	if lastc != nil {
		unlock(&lastc.lock)
	}
	return true
}
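
// block is called for a select statement with no cases; it parks the
// goroutine with no way to wake it, so it blocks forever.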
func block() {
	gopark(nil, nil, "select (no cases)", traceEvGoStop, 1) // forever
}

// selectgo implements the select statement.
//
// *sel is on the current goroutine's stack (regardless of any
// escaping in selectgo).
//
// selectgo returns the index of the chosen scase, which matches the
// ordinal position of its respective select{recv,send,default} call.
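//
// As a rough sketch (the compiler's actual lowering differs in
// detail), a statement such as
//
//	select {
//	case c1 <- v:
//	case x = <-c2:
//	default:
//	}
//
// is compiled into calls of roughly this shape:
//
//	newselect(sel, int64(selectsize(3)), 3)
//	selectsend(sel, c1, unsafe.Pointer(&v))
//	selectrecv(sel, c2, unsafe.Pointer(&x), nil)
//	selectdefault(sel)
//	switch selectgo(sel) {
//	case 0: // send to c1 happened
//	case 1: // recv from c2 happened
//	case 2: // default taken
//	}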
func selectgo(sel *hselect) int {
	if debugSelect {
		print("select: sel=", sel, "\n")
	}
	if sel.ncase != sel.tcase {
		throw("selectgo: case count mismatch")
	}

	scaseslice := slice{unsafe.Pointer(&sel.scase), int(sel.ncase), int(sel.ncase)}
	scases := *(*[]scase)(unsafe.Pointer(&scaseslice))

	var t0 int64
	if blockprofilerate > 0 {
		t0 = cputicks()
		for i := 0; i < int(sel.ncase); i++ {
			scases[i].releasetime = -1
		}
	}

	// The compiler rewrites selects that statically have
	// only 0 or 1 cases plus default into simpler constructs.
	// The only way we can end up with such small sel.ncase
	// values here is for a larger select in which most channels
	// have been nilled out. The general code handles those
	// cases correctly, and they are rare enough not to bother
	// optimizing (and needing to test).

	// generate permuted order
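	// This is an "inside-out" Fisher-Yates shuffle: placing each new
	// element i at a uniformly random index j <= i leaves pollorder a
	// uniformly random permutation of 0..ncase-1.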
	pollslice := slice{unsafe.Pointer(sel.pollorder), int(sel.ncase), int(sel.ncase)}
	pollorder := *(*[]uint16)(unsafe.Pointer(&pollslice))
	for i := 1; i < int(sel.ncase); i++ {
		j := fastrandn(uint32(i + 1))
		pollorder[i] = pollorder[j]
		pollorder[j] = uint16(i)
	}

	// sort the cases by Hchan address to get the locking order.
	// simple heap sort, to guarantee n log n time and constant stack footprint.
	lockslice := slice{unsafe.Pointer(sel.lockorder), int(sel.ncase), int(sel.ncase)}
	lockorder := *(*[]uint16)(unsafe.Pointer(&lockslice))
	for i := 0; i < int(sel.ncase); i++ {
		j := i
		// Start with the pollorder to permute cases on the same channel.
		c := scases[pollorder[i]].c
		for j > 0 && scases[lockorder[(j-1)/2]].c.sortkey() < c.sortkey() {
			k := (j - 1) / 2
			lockorder[j] = lockorder[k]
			j = k
		}
		lockorder[j] = pollorder[i]
	}
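	// Pop the maximum element off the heap repeatedly, sifting the
	// displaced entry back down, which leaves lockorder sorted in
	// ascending channel-address order.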
	for i := int(sel.ncase) - 1; i >= 0; i-- {
		o := lockorder[i]
		c := scases[o].c
		lockorder[i] = lockorder[0]
		j := 0
		for {
			k := j*2 + 1
			if k >= i {
				break
			}
			if k+1 < i && scases[lockorder[k]].c.sortkey() < scases[lockorder[k+1]].c.sortkey() {
				k++
			}
			if c.sortkey() < scases[lockorder[k]].c.sortkey() {
				lockorder[j] = lockorder[k]
				j = k
				continue
			}
			break
		}
		lockorder[j] = o
	}
	/*
		for i := 0; i+1 < int(sel.ncase); i++ {
			if scases[lockorder[i]].c.sortkey() > scases[lockorder[i+1]].c.sortkey() {
				print("i=", i, " x=", lockorder[i], " y=", lockorder[i+1], "\n")
				throw("select: broken sort")
			}
		}
	*/

	// lock all the channels involved in the select
	sellock(scases, lockorder)

	var (
		gp     *g
		done   uint32
		sg     *sudog
		c      *hchan
		k      *scase
		sglist *sudog
		sgnext *sudog
		qp     unsafe.Pointer
		nextp  **sudog
	)
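
	// Pass 1 below polls every case for a ready partner, buffer space,
	// or a closed channel; pass 2 enqueues this goroutine on every
	// channel and sleeps; pass 3, after wakeup, dequeues from the
	// channels whose cases did not fire.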
loop:
	// pass 1 - look for something already waiting
	var dfli int
	var dfl *scase
	var casi int
	var cas *scase
	for i := 0; i < int(sel.ncase); i++ {
		casi = int(pollorder[i])
		cas = &scases[casi]
		c = cas.c

		switch cas.kind {
		case caseNil:
			continue

		case caseRecv:
			sg = c.sendq.dequeue()
			if sg != nil {
				goto recv
			}
			if c.qcount > 0 {
				goto bufrecv
			}
			if c.closed != 0 {
				goto rclose
			}

		case caseSend:
			if raceenabled {
				racereadpc(unsafe.Pointer(c), cas.pc, chansendpc)
			}
			if c.closed != 0 {
				goto sclose
			}
			sg = c.recvq.dequeue()
			if sg != nil {
				goto send
			}
			if c.qcount < c.dataqsiz {
				goto bufsend
			}

		case caseDefault:
			dfli = casi
			dfl = cas
		}
	}

	if dfl != nil {
		selunlock(scases, lockorder)
		casi = dfli
		cas = dfl
		goto retc
	}

	// pass 2 - enqueue on all chans
	gp = getg()
	done = 0
	if gp.waiting != nil {
		throw("gp.waiting != nil")
	}
	nextp = &gp.waiting
	for _, casei := range lockorder {
		casi = int(casei)
		cas = &scases[casi]
		if cas.kind == caseNil {
			continue
		}
		c = cas.c
		sg := acquireSudog()
		sg.g = gp
		// Note: selectdone is adjusted for stack copies in stack1.go:adjustsudogs
		sg.selectdone = (*uint32)(noescape(unsafe.Pointer(&done)))
		// No stack splits between assigning elem and enqueuing
		// sg on gp.waiting where copystack can find it.
		sg.elem = cas.elem
		sg.releasetime = 0
		if t0 != 0 {
			sg.releasetime = -1
		}
		sg.c = c
		// Construct waiting list in lock order.
		*nextp = sg
		nextp = &sg.waitlink

		switch cas.kind {
		case caseRecv:
			c.recvq.enqueue(sg)

		case caseSend:
			c.sendq.enqueue(sg)
		}
	}

	// wait for someone to wake us up
	gp.param = nil
	gopark(selparkcommit, nil, "select", traceEvGoBlockSelect, 1)

	// While we were asleep, some goroutine came along and completed
	// one of the cases in the select and woke us up (called ready).
	// As part of that process, the goroutine did a cas on done above
	// (aka *sg.selectdone for all queued sg) to win the right to
	// complete the select. Now done = 1.
	//
	// If we copy (grow) our own stack, we will update the
	// selectdone pointers inside the gp.waiting sudog list to point
	// at the new stack. Another goroutine attempting to
	// complete one of our (still linked in) select cases might
	// see the new selectdone pointer (pointing at the new stack)
	// before the new stack has real data; if the new stack has done = 0
	// (before the old values are copied over), the goroutine might
	// do a cas via sg.selectdone and incorrectly believe that it has
	// won the right to complete the select, executing a second
	// communication and attempting to wake us (call ready) again.
	//
	// Then things break.
	//
	// The best break is that the goroutine doing ready sees the
	// _Gcopystack status and throws, as in #17007.
	// A worse break would be for us to continue on, start running real code,
	// block in a semaphore acquisition (sema.go), and have the other
	// goroutine wake us up without having really acquired the semaphore.
	// That would result in the goroutine spuriously running and then
	// queue up another spurious wakeup when the semaphore really is ready.
	// In general the situation can cascade until something notices the
	// problem and causes a crash.
	//
	// A stack shrink does not have this problem, because it locks
	// all the channels that are involved first, blocking out the
	// possibility of a cas on selectdone.
	//
	// A stack growth before gopark above does not have this
	// problem, because we hold those channel locks (released by
	// selparkcommit).
	//
	// A stack growth after sellock below does not have this
	// problem, because again we hold those channel locks.
	//
	// The only problem is a stack growth during sellock.
	// To keep that from happening, run sellock on the system stack.
	//
	// It might be that we could avoid this if copystack copied the
	// stack before calling adjustsudogs. In that case,
	// syncadjustsudogs would need to recopy the tiny part that
	// it copies today, resulting in a little bit of extra copying.
	//
	// An even better fix, not for the week before a release candidate,
	// would be to put space in every sudog and make selectdone
	// point at (say) the space in the first sudog.

	systemstack(func() {
		sellock(scases, lockorder)
	})

	sg = (*sudog)(gp.param)
	gp.param = nil

	// pass 3 - dequeue from unsuccessful chans
	// otherwise they stack up on quiet channels
	// record the successful case, if any.
	// We singly-linked up the SudoGs in lock order.
	casi = -1
	cas = nil
	sglist = gp.waiting
	// Clear all elem before unlinking from gp.waiting.
	for sg1 := gp.waiting; sg1 != nil; sg1 = sg1.waitlink {
		sg1.selectdone = nil
		sg1.elem = nil
		sg1.c = nil
	}
	gp.waiting = nil

	for _, casei := range lockorder {
		k = &scases[casei]
		if k.kind == caseNil {
			continue
		}
		if sglist.releasetime > 0 {
			k.releasetime = sglist.releasetime
		}
		if sg == sglist {
			// sg has already been dequeued by the G that woke us up.
			casi = int(casei)
			cas = k
		} else {
			c = k.c
			if k.kind == caseSend {
				c.sendq.dequeueSudoG(sglist)
			} else {
				c.recvq.dequeueSudoG(sglist)
			}
		}
		sgnext = sglist.waitlink
		sglist.waitlink = nil
		releaseSudog(sglist)
		sglist = sgnext
	}

	if cas == nil {
		// We can wake up with gp.param == nil (so cas == nil)
		// when a channel involved in the select has been closed.
		// It is easiest to loop and re-run the operation;
		// we'll see that it's now closed.
		// Maybe some day we can signal the close explicitly,
		// but we'd have to distinguish close-on-reader from close-on-writer.
		// It's easiest not to duplicate the code and just recheck above.
		// We know that something closed, and things never un-close,
		// so we won't block again.
		goto loop
	}

	c = cas.c

	if debugSelect {
		print("wait-return: sel=", sel, " c=", c, " cas=", cas, " kind=", cas.kind, "\n")
	}

	if cas.kind == caseRecv {
		if cas.receivedp != nil {
			*cas.receivedp = true
		}
	}

	if raceenabled {
		if cas.kind == caseRecv && cas.elem != nil {
			raceWriteObjectPC(c.elemtype, cas.elem, cas.pc, chanrecvpc)
		} else if cas.kind == caseSend {
			raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
		}
	}
	if msanenabled {
		if cas.kind == caseRecv && cas.elem != nil {
			msanwrite(cas.elem, c.elemtype.size)
		} else if cas.kind == caseSend {
			msanread(cas.elem, c.elemtype.size)
		}
	}

	selunlock(scases, lockorder)
	goto retc

bufrecv:
	// can receive from buffer
	if raceenabled {
		if cas.elem != nil {
			raceWriteObjectPC(c.elemtype, cas.elem, cas.pc, chanrecvpc)
		}
		raceacquire(chanbuf(c, c.recvx))
		racerelease(chanbuf(c, c.recvx))
	}
	if msanenabled && cas.elem != nil {
		msanwrite(cas.elem, c.elemtype.size)
	}
	if cas.receivedp != nil {
		*cas.receivedp = true
	}
	qp = chanbuf(c, c.recvx)
	if cas.elem != nil {
		typedmemmove(c.elemtype, cas.elem, qp)
	}
	typedmemclr(c.elemtype, qp)
	c.recvx++
	if c.recvx == c.dataqsiz {
		c.recvx = 0
	}
	c.qcount--
	selunlock(scases, lockorder)
	goto retc

bufsend:
	// can send to buffer
	if raceenabled {
		raceacquire(chanbuf(c, c.sendx))
		racerelease(chanbuf(c, c.sendx))
		raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
	}
	if msanenabled {
		msanread(cas.elem, c.elemtype.size)
	}
	typedmemmove(c.elemtype, chanbuf(c, c.sendx), cas.elem)
	c.sendx++
	if c.sendx == c.dataqsiz {
		c.sendx = 0
	}
	c.qcount++
	selunlock(scases, lockorder)
	goto retc

recv:
	// can receive from sleeping sender (sg)
	recv(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2)
	if debugSelect {
		print("syncrecv: sel=", sel, " c=", c, "\n")
	}
	if cas.receivedp != nil {
		*cas.receivedp = true
	}
	goto retc

rclose:
	// read at end of closed channel
	selunlock(scases, lockorder)
	if cas.receivedp != nil {
		*cas.receivedp = false
	}
	if cas.elem != nil {
		typedmemclr(c.elemtype, cas.elem)
	}
	if raceenabled {
		raceacquire(unsafe.Pointer(c))
	}
	goto retc

send:
	// can send to a sleeping receiver (sg)
	if raceenabled {
		raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
	}
	if msanenabled {
		msanread(cas.elem, c.elemtype.size)
	}
	send(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2)
	if debugSelect {
		print("syncsend: sel=", sel, " c=", c, "\n")
	}
	goto retc

retc:
	if cas.releasetime > 0 {
		blockevent(cas.releasetime-t0, 1)
	}
	return casi

sclose:
	// send on closed channel
	selunlock(scases, lockorder)
	panic(plainError("send on closed channel"))
}

func (c *hchan) sortkey() uintptr {
	// TODO(khr): if we have a moving garbage collector, we'll need to
	// change this function.
	return uintptr(unsafe.Pointer(c))
}

// A runtimeSelect is a single case passed to rselect.
// This must match ../reflect/value.go:/runtimeSelect
type runtimeSelect struct {
	dir selectDir
	typ unsafe.Pointer // channel type (not used here)
	ch  *hchan         // channel
	val unsafe.Pointer // ptr to data (SendDir) or ptr to receive buffer (RecvDir)
}

// These values must match ../reflect/value.go:/SelectDir.
type selectDir int

const (
	_             selectDir = iota
	selectSend                // case Chan <- Send
	selectRecv                // case <-Chan:
	selectDefault             // default
)
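
// reflect_rselect is the runtime entry point for reflect.Select: the
// reflect package describes its cases as a []runtimeSelect and reaches
// this function through the go:linkname directive below.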
//go:linkname reflect_rselect reflect.rselect
func reflect_rselect(cases []runtimeSelect) (chosen int, recvOK bool) {
	// flagNoScan is safe here, because all objects are also referenced from cases.
	size := selectsize(uintptr(len(cases)))
	sel := (*hselect)(mallocgc(size, nil, true))
	newselect(sel, int64(size), int32(len(cases)))
	r := new(bool)
	for i := range cases {
		rc := &cases[i]
		switch rc.dir {
		case selectDefault:
			selectdefault(sel)
		case selectSend:
			selectsend(sel, rc.ch, rc.val)
		case selectRecv:
			selectrecv(sel, rc.ch, rc.val, r)
		}
	}

	chosen = selectgo(sel)
	recvOK = *r
	return
}
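
// dequeueSudoG removes sgp from q, a doubly-linked wait queue. Unlike
// dequeue, it can remove a sudog from the middle of the queue, and it
// is a no-op if sgp has already been removed.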
func (q *waitq) dequeueSudoG(sgp *sudog) {
	x := sgp.prev
	y := sgp.next
	if x != nil {
		if y != nil {
			// middle of queue
			x.next = y
			y.prev = x
			sgp.next = nil
			sgp.prev = nil
			return
		}
		// end of queue
		x.next = nil
		q.last = x
		sgp.prev = nil
		return
	}
	if y != nil {
		// start of queue
		y.prev = nil
		q.first = y
		sgp.next = nil
		return
	}

	// x==y==nil. Either sgp is the only element in the queue,
	// or it has already been removed. Use q.first to disambiguate.
	if q.first == sgp {
		q.first = nil
		q.last = nil
	}
}