2015-05-29 François Dumont fdumont@gcc.gnu.org>
[official-gcc.git] / libgo / go / runtime / select.go
blobf735a71e2f5ae0b9f704565e4ed2f121edd23d56
1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 package runtime
7 // This file contains the implementation of Go select statements.
9 import "unsafe"
const (
	// debugSelect enables verbose tracing prints throughout the
	// select implementation when set to true.
	debugSelect = false
)

var (
	// Caller PCs used to attribute channel operations performed on
	// behalf of select cases when the race detector is enabled.
	chansendpc = funcPC(chansend)
	chanrecvpc = funcPC(chanrecv)
)
20 func selectsize(size uintptr) uintptr {
21 selsize := unsafe.Sizeof(_select{}) +
22 (size-1)*unsafe.Sizeof(_select{}.scase[0]) +
23 size*unsafe.Sizeof(*_select{}.lockorder) +
24 size*unsafe.Sizeof(*_select{}.pollorder)
25 return round(selsize, _Int64Align)
// newselect initializes the select structure sel (of selsize bytes)
// for up to size cases. The lockorder and pollorder arrays live in
// the memory immediately following the inline scase array; see
// selectsize for the exact layout.
func newselect(sel *_select, selsize int64, size int32) {
	if selsize != int64(selectsize(uintptr(size))) {
		print("runtime: bad select size ", selsize, ", want ", selectsize(uintptr(size)), "\n")
		gothrow("bad select size")
	}
	sel.tcase = uint16(size)
	sel.ncase = 0
	// lockorder starts right after the size scase slots.
	sel.lockorder = (**hchan)(add(unsafe.Pointer(&sel.scase), uintptr(size)*unsafe.Sizeof(_select{}.scase[0])))
	// pollorder starts right after the size lockorder slots.
	sel.pollorder = (*uint16)(add(unsafe.Pointer(sel.lockorder), uintptr(size)*unsafe.Sizeof(*_select{}.lockorder)))

	if debugSelect {
		print("newselect s=", sel, " size=", size, "\n")
	}
}
// selectsend registers a send case (c <- elem) with the select sel.
// It captures the caller's pc and the offset from the first argument
// to the result slot so selectgo can later mark the chosen case.
//go:nosplit
func selectsend(sel *_select, c *hchan, elem unsafe.Pointer) (selected bool) {
	// nil cases do not compete
	if c != nil {
		// The last argument is the byte offset of &selected from &sel
		// in this frame; selectgo writes true through it for the case
		// that fires.
		selectsendImpl(sel, c, getcallerpc(unsafe.Pointer(&sel)), elem, uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel)))
	}
	return
}
52 // cut in half to give stack a chance to split
53 func selectsendImpl(sel *_select, c *hchan, pc uintptr, elem unsafe.Pointer, so uintptr) {
54 i := sel.ncase
55 if i >= sel.tcase {
56 gothrow("selectsend: too many cases")
58 sel.ncase = i + 1
59 cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
61 cas.pc = pc
62 cas._chan = c
63 cas.so = uint16(so)
64 cas.kind = _CaseSend
65 cas.elem = elem
67 if debugSelect {
68 print("selectsend s=", sel, " pc=", hex(cas.pc), " chan=", cas._chan, " so=", cas.so, "\n")
// selectrecv registers a receive case (<-c into elem) with the select
// sel, without reporting whether a value was received.
//go:nosplit
func selectrecv(sel *_select, c *hchan, elem unsafe.Pointer) (selected bool) {
	// nil cases do not compete
	if c != nil {
		selectrecvImpl(sel, c, getcallerpc(unsafe.Pointer(&sel)), elem, nil, uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel)))
	}
	return
}
// selectrecv2 is like selectrecv but also reports through *received
// whether a value was actually received (false on a closed channel).
//go:nosplit
func selectrecv2(sel *_select, c *hchan, elem unsafe.Pointer, received *bool) (selected bool) {
	// nil cases do not compete
	if c != nil {
		selectrecvImpl(sel, c, getcallerpc(unsafe.Pointer(&sel)), elem, received, uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel)))
	}
	return
}
90 func selectrecvImpl(sel *_select, c *hchan, pc uintptr, elem unsafe.Pointer, received *bool, so uintptr) {
91 i := sel.ncase
92 if i >= sel.tcase {
93 gothrow("selectrecv: too many cases")
95 sel.ncase = i + 1
96 cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
97 cas.pc = pc
98 cas._chan = c
99 cas.so = uint16(so)
100 cas.kind = _CaseRecv
101 cas.elem = elem
102 cas.receivedp = received
104 if debugSelect {
105 print("selectrecv s=", sel, " pc=", hex(cas.pc), " chan=", cas._chan, " so=", cas.so, "\n")
// selectdefault registers the default case with the select sel.
//go:nosplit
func selectdefault(sel *_select) (selected bool) {
	selectdefaultImpl(sel, getcallerpc(unsafe.Pointer(&sel)), uintptr(unsafe.Pointer(&selected))-uintptr(unsafe.Pointer(&sel)))
	return
}
115 func selectdefaultImpl(sel *_select, callerpc uintptr, so uintptr) {
116 i := sel.ncase
117 if i >= sel.tcase {
118 gothrow("selectdefault: too many cases")
120 sel.ncase = i + 1
121 cas := (*scase)(add(unsafe.Pointer(&sel.scase), uintptr(i)*unsafe.Sizeof(sel.scase[0])))
122 cas.pc = callerpc
123 cas._chan = nil
124 cas.so = uint16(so)
125 cas.kind = _CaseDefault
127 if debugSelect {
128 print("selectdefault s=", sel, " pc=", hex(cas.pc), " so=", cas.so, "\n")
132 func sellock(sel *_select) {
133 lockslice := sliceStruct{unsafe.Pointer(sel.lockorder), int(sel.ncase), int(sel.ncase)}
134 lockorder := *(*[]*hchan)(unsafe.Pointer(&lockslice))
135 var c *hchan
136 for _, c0 := range lockorder {
137 if c0 != nil && c0 != c {
138 c = c0
139 lock(&c.lock)
// selunlock unlocks all channels locked by sellock, in reverse lock
// order, skipping the default case's nil entry and duplicates.
func selunlock(sel *_select) {
	// We must be very careful here to not touch sel after we have unlocked
	// the last lock, because sel can be freed right after the last unlock.
	// Consider the following situation.
	// First M calls runtime·park() in runtime·selectgo() passing the sel.
	// Once runtime·park() has unlocked the last lock, another M makes
	// the G that calls select runnable again and schedules it for execution.
	// When the G runs on another M, it locks all the locks and frees sel.
	// Now if the first M touches sel, it will access freed memory.
	n := int(sel.ncase)
	r := 0
	lockslice := sliceStruct{unsafe.Pointer(sel.lockorder), n, n}
	lockorder := *(*[]*hchan)(unsafe.Pointer(&lockslice))
	// skip the default case
	if n > 0 && lockorder[0] == nil {
		r = 1
	}
	for i := n - 1; i >= r; i-- {
		c := lockorder[i]
		if i > 0 && c == lockorder[i-1] {
			continue // will unlock it on the next iteration
		}
		unlock(&c.lock)
	}
}
// selparkcommit is the commit callback passed to gopark by selectgo:
// it releases all channel locks once the goroutine is committed to
// sleeping, allowing other goroutines to complete one of the cases.
func selparkcommit(gp *g, sel *_select) bool {
	selunlock(sel)
	return true
}
// block parks the calling goroutine forever; used to implement a
// select statement with no cases.
func block() {
	gopark(nil, nil, "select (no cases)") // forever
}
// selectgo runs a select and signals the chosen case to the caller.
// overwrites return pc on stack to signal which case of the select
// to run, so cannot appear at the top of a split stack.
//go:nosplit
func selectgo(sel *_select) {
	pc, offset := selectgoImpl(sel)
	// Mark the chosen case's "selected" slot in the caller's frame,
	// located at the recorded offset from the first argument.
	*(*bool)(add(unsafe.Pointer(&sel), uintptr(offset))) = true
	// Resume the caller at the chosen case's recorded pc.
	setcallerpc(unsafe.Pointer(&sel), pc)
}
// selectgoImpl returns scase.pc and scase.so for the select
// case which fired.
func selectgoImpl(sel *_select) (uintptr, uint16) {
	if debugSelect {
		print("select: sel=", sel, "\n")
	}

	scaseslice := sliceStruct{unsafe.Pointer(&sel.scase), int(sel.ncase), int(sel.ncase)}
	scases := *(*[]scase)(unsafe.Pointer(&scaseslice))

	var t0 int64
	if blockprofilerate > 0 {
		t0 = cputicks()
		for i := 0; i < int(sel.ncase); i++ {
			scases[i].releasetime = -1
		}
	}

	// The compiler rewrites selects that statically have
	// only 0 or 1 cases plus default into simpler constructs.
	// The only way we can end up with such small sel.ncase
	// values here is for a larger select in which most channels
	// have been nilled out. The general code handles those
	// cases correctly, and they are rare enough not to bother
	// optimizing (and needing to test).

	// generate permuted order
	pollslice := sliceStruct{unsafe.Pointer(sel.pollorder), int(sel.ncase), int(sel.ncase)}
	pollorder := *(*[]uint16)(unsafe.Pointer(&pollslice))
	for i := 0; i < int(sel.ncase); i++ {
		pollorder[i] = uint16(i)
	}
	// Fisher-Yates shuffle so cases are polled in random order.
	for i := 1; i < int(sel.ncase); i++ {
		o := pollorder[i]
		j := int(fastrand1()) % (i + 1)
		pollorder[i] = pollorder[j]
		pollorder[j] = o
	}

	// sort the cases by Hchan address to get the locking order.
	// simple heap sort, to guarantee n log n time and constant stack footprint.
	lockslice := sliceStruct{unsafe.Pointer(sel.lockorder), int(sel.ncase), int(sel.ncase)}
	lockorder := *(*[]*hchan)(unsafe.Pointer(&lockslice))
	// Phase 1: sift each channel up to build a max-heap in lockorder.
	for i := 0; i < int(sel.ncase); i++ {
		j := i
		c := scases[j]._chan
		for j > 0 && lockorder[(j-1)/2].sortkey() < c.sortkey() {
			k := (j - 1) / 2
			lockorder[j] = lockorder[k]
			j = k
		}
		lockorder[j] = c
	}
	// Phase 2: repeatedly move the max to the end and sift down.
	for i := int(sel.ncase) - 1; i >= 0; i-- {
		c := lockorder[i]
		lockorder[i] = lockorder[0]
		j := 0
		for {
			k := j*2 + 1
			if k >= i {
				break
			}
			if k+1 < i && lockorder[k].sortkey() < lockorder[k+1].sortkey() {
				k++ // pick the larger of the two children
			}
			if c.sortkey() < lockorder[k].sortkey() {
				lockorder[j] = lockorder[k]
				j = k
				continue
			}
			break
		}
		lockorder[j] = c
	}
	// Sanity check: lockorder must now be sorted by channel address.
	for i := 0; i+1 < int(sel.ncase); i++ {
		if lockorder[i].sortkey() > lockorder[i+1].sortkey() {
			print("i=", i, " x=", lockorder[i], " y=", lockorder[i+1], "\n")
			gothrow("select: broken sort")
		}
	}

	// lock all the channels involved in the select
	sellock(sel)

	var (
		gp     *g
		done   uint32
		sg     *sudog
		c      *hchan
		k      *scase
		sglist *sudog
		sgnext *sudog
	)

loop:
	// pass 1 - look for something already waiting
	var dfl *scase
	var cas *scase
	for i := 0; i < int(sel.ncase); i++ {
		cas = &scases[pollorder[i]]
		c = cas._chan

		switch cas.kind {
		case _CaseRecv:
			if c.dataqsiz > 0 {
				if c.qcount > 0 {
					goto asyncrecv
				}
			} else {
				sg = c.sendq.dequeue()
				if sg != nil {
					goto syncrecv
				}
			}
			if c.closed != 0 {
				goto rclose
			}

		case _CaseSend:
			if raceenabled {
				racereadpc(unsafe.Pointer(c), cas.pc, chansendpc)
			}
			if c.closed != 0 {
				goto sclose
			}
			if c.dataqsiz > 0 {
				if c.qcount < c.dataqsiz {
					goto asyncsend
				}
			} else {
				sg = c.recvq.dequeue()
				if sg != nil {
					goto syncsend
				}
			}

		case _CaseDefault:
			dfl = cas
		}
	}

	if dfl != nil {
		selunlock(sel)
		cas = dfl
		goto retc
	}

	// pass 2 - enqueue on all chans
	gp = getg()
	done = 0
	for i := 0; i < int(sel.ncase); i++ {
		cas = &scases[pollorder[i]]
		c = cas._chan
		sg := acquireSudog()
		sg.g = gp
		// Note: selectdone is adjusted for stack copies in stack.c:adjustsudogs
		sg.selectdone = (*uint32)(noescape(unsafe.Pointer(&done)))
		sg.elem = cas.elem
		sg.releasetime = 0
		if t0 != 0 {
			sg.releasetime = -1
		}
		sg.waitlink = gp.waiting
		gp.waiting = sg

		switch cas.kind {
		case _CaseRecv:
			c.recvq.enqueue(sg)

		case _CaseSend:
			c.sendq.enqueue(sg)
		}
	}

	// wait for someone to wake us up
	gp.param = nil
	gopark(unsafe.Pointer(funcPC(selparkcommit)), unsafe.Pointer(sel), "select")

	// someone woke us up
	sellock(sel)
	// gp.param carries the sudog of the case that fired, set by the
	// waking goroutine.
	sg = (*sudog)(gp.param)
	gp.param = nil

	// pass 3 - dequeue from unsuccessful chans
	// otherwise they stack up on quiet channels
	// record the successful case, if any.
	// We singly-linked up the SudoGs in case order, so when
	// iterating through the linked list they are in reverse order.
	cas = nil
	sglist = gp.waiting
	// Clear all selectdone and elem before unlinking from gp.waiting.
	// They must be cleared before being put back into the sudog cache.
	// Clear before unlinking, because if a stack copy happens after the unlink,
	// they will not be updated, they will be left pointing to the old stack,
	// which creates dangling pointers, which may be detected by the
	// garbage collector.
	for sg1 := gp.waiting; sg1 != nil; sg1 = sg1.waitlink {
		sg1.selectdone = nil
		sg1.elem = nil
	}
	gp.waiting = nil
	for i := int(sel.ncase) - 1; i >= 0; i-- {
		k = &scases[pollorder[i]]
		if sglist.releasetime > 0 {
			k.releasetime = sglist.releasetime
		}
		if sg == sglist {
			cas = k
		} else {
			c = k._chan
			if k.kind == _CaseSend {
				c.sendq.dequeueSudoG(sglist)
			} else {
				c.recvq.dequeueSudoG(sglist)
			}
		}
		sgnext = sglist.waitlink
		sglist.waitlink = nil
		releaseSudog(sglist)
		sglist = sgnext
	}

	if cas == nil {
		// Spurious wakeup (no case fired); start over.
		goto loop
	}

	c = cas._chan

	// Direct hand-off only happens on unbuffered channels; buffered
	// channels are completed via the async paths below.
	if c.dataqsiz > 0 {
		gothrow("selectgo: shouldn't happen")
	}

	if debugSelect {
		print("wait-return: sel=", sel, " c=", c, " cas=", cas, " kind=", cas.kind, "\n")
	}

	if cas.kind == _CaseRecv {
		if cas.receivedp != nil {
			*cas.receivedp = true
		}
	}

	if raceenabled {
		if cas.kind == _CaseRecv && cas.elem != nil {
			raceWriteObjectPC(c.elemtype, cas.elem, cas.pc, chanrecvpc)
		} else if cas.kind == _CaseSend {
			raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
		}
	}

	selunlock(sel)
	goto retc

asyncrecv:
	// can receive from buffer
	if raceenabled {
		if cas.elem != nil {
			raceWriteObjectPC(c.elemtype, cas.elem, cas.pc, chanrecvpc)
		}
		raceacquire(chanbuf(c, c.recvx))
		racerelease(chanbuf(c, c.recvx))
	}
	if cas.receivedp != nil {
		*cas.receivedp = true
	}
	if cas.elem != nil {
		memmove(cas.elem, chanbuf(c, c.recvx), uintptr(c.elemsize))
	}
	memclr(chanbuf(c, c.recvx), uintptr(c.elemsize))
	c.recvx++
	if c.recvx == c.dataqsiz {
		c.recvx = 0
	}
	c.qcount--
	// A buffer slot freed up; wake one blocked sender, if any.
	sg = c.sendq.dequeue()
	if sg != nil {
		gp = sg.g
		selunlock(sel)
		if sg.releasetime != 0 {
			sg.releasetime = cputicks()
		}
		goready(gp)
	} else {
		selunlock(sel)
	}
	goto retc

asyncsend:
	// can send to buffer
	if raceenabled {
		raceacquire(chanbuf(c, c.sendx))
		racerelease(chanbuf(c, c.sendx))
		raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
	}
	memmove(chanbuf(c, c.sendx), cas.elem, uintptr(c.elemsize))
	c.sendx++
	if c.sendx == c.dataqsiz {
		c.sendx = 0
	}
	c.qcount++
	// A value became available; wake one blocked receiver, if any.
	sg = c.recvq.dequeue()
	if sg != nil {
		gp = sg.g
		selunlock(sel)
		if sg.releasetime != 0 {
			sg.releasetime = cputicks()
		}
		goready(gp)
	} else {
		selunlock(sel)
	}
	goto retc

syncrecv:
	// can receive from sleeping sender (sg)
	if raceenabled {
		if cas.elem != nil {
			raceWriteObjectPC(c.elemtype, cas.elem, cas.pc, chanrecvpc)
		}
		racesync(c, sg)
	}
	selunlock(sel)
	if debugSelect {
		print("syncrecv: sel=", sel, " c=", c, "\n")
	}
	if cas.receivedp != nil {
		*cas.receivedp = true
	}
	if cas.elem != nil {
		memmove(cas.elem, sg.elem, uintptr(c.elemsize))
	}
	sg.elem = nil
	gp = sg.g
	gp.param = unsafe.Pointer(sg)
	if sg.releasetime != 0 {
		sg.releasetime = cputicks()
	}
	goready(gp)
	goto retc

rclose:
	// read at end of closed channel
	selunlock(sel)
	if cas.receivedp != nil {
		*cas.receivedp = false
	}
	if cas.elem != nil {
		memclr(cas.elem, uintptr(c.elemsize))
	}
	if raceenabled {
		raceacquire(unsafe.Pointer(c))
	}
	goto retc

syncsend:
	// can send to sleeping receiver (sg)
	if raceenabled {
		raceReadObjectPC(c.elemtype, cas.elem, cas.pc, chansendpc)
		racesync(c, sg)
	}
	selunlock(sel)
	if debugSelect {
		print("syncsend: sel=", sel, " c=", c, "\n")
	}
	if sg.elem != nil {
		memmove(sg.elem, cas.elem, uintptr(c.elemsize))
	}
	sg.elem = nil
	gp = sg.g
	gp.param = unsafe.Pointer(sg)
	if sg.releasetime != 0 {
		sg.releasetime = cputicks()
	}
	goready(gp)

retc:
	if cas.releasetime > 0 {
		blockevent(cas.releasetime-t0, 2)
	}
	return cas.pc, cas.so

sclose:
	// send on closed channel
	selunlock(sel)
	panic("send on closed channel")
}
// sortkey returns the key used to order channels when establishing
// the lock order: the channel's address.
func (c *hchan) sortkey() uintptr {
	// TODO(khr): if we have a moving garbage collector, we'll need to
	// change this function.
	return uintptr(unsafe.Pointer(c))
}
// A runtimeSelect is a single case passed to rselect.
// This must match ../reflect/value.go:/runtimeSelect
type runtimeSelect struct {
	dir selectDir      // direction of the case (send, recv, or default)
	typ unsafe.Pointer // channel type (not used here)
	ch  *hchan         // channel
	val unsafe.Pointer // ptr to data (SendDir) or ptr to receive buffer (RecvDir)
}
// selectDir describes the direction of a runtimeSelect case.
// These values must match ../reflect/value.go:/SelectDir.
type selectDir int

const (
	_             selectDir = iota
	selectSend              // case Chan <- Send
	selectRecv              // case <-Chan:
	selectDefault           // default
)
602 func reflect_rselect(cases []runtimeSelect) (chosen int, recvOK bool) {
603 // flagNoScan is safe here, because all objects are also referenced from cases.
604 size := selectsize(uintptr(len(cases)))
605 sel := (*_select)(mallocgc(size, nil, flagNoScan))
606 newselect(sel, int64(size), int32(len(cases)))
607 r := new(bool)
608 for i := range cases {
609 rc := &cases[i]
610 switch rc.dir {
611 case selectDefault:
612 selectdefaultImpl(sel, uintptr(i), 0)
613 case selectSend:
614 if rc.ch == nil {
615 break
617 selectsendImpl(sel, rc.ch, uintptr(i), rc.val, 0)
618 case selectRecv:
619 if rc.ch == nil {
620 break
622 selectrecvImpl(sel, rc.ch, uintptr(i), rc.val, r, 0)
626 pc, _ := selectgoImpl(sel)
627 chosen = int(pc)
628 recvOK = *r
629 return
632 func (q *waitq) dequeueSudoG(s *sudog) {
633 var prevsgp *sudog
634 l := &q.first
635 for {
636 sgp := *l
637 if sgp == nil {
638 return
640 if sgp == s {
641 *l = sgp.next
642 if q.last == sgp {
643 q.last = prevsgp
645 s.next = nil
646 return
648 l = &sgp.next
649 prevsgp = sgp