libgo/go/runtime/chan.go

// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

// This file contains the implementation of Go channels.

// Invariants:
//  At least one of c.sendq and c.recvq is empty,
//  except for the case of an unbuffered channel with a single goroutine
//  blocked on it for both sending and receiving using a select statement,
//  in which case the length of c.sendq and c.recvq is limited only by the
//  size of the select statement.
//
// For buffered channels, also:
//  c.qcount > 0 implies that c.recvq is empty.
//  c.qcount < c.dataqsiz implies that c.sendq is empty.
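
// As an illustrative sketch of the unbuffered exception above: a single
// goroutine evaluating
//
//	c := make(chan int)
//	select {
//	case c <- 1:
//	case <-c:
//	}
//
// parks with a sudog enqueued on both c.sendq and c.recvq of the same
// channel, so neither queue is empty.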

import (
	"runtime/internal/atomic"
	"unsafe"
)

// For gccgo, use go:linkname to rename compiler-called functions to
// themselves, so that the compiler will export them.
//
//go:linkname makechan runtime.makechan
//go:linkname chansend1 runtime.chansend1
//go:linkname chanrecv1 runtime.chanrecv1
//go:linkname chanrecv2 runtime.chanrecv2
//go:linkname closechan runtime.closechan

const (
	maxAlign  = 8
	hchanSize = unsafe.Sizeof(hchan{}) + uintptr(-int(unsafe.Sizeof(hchan{}))&(maxAlign-1))
	debugChan = false
)

type hchan struct {
	qcount   uint           // total data in the queue
	dataqsiz uint           // size of the circular queue
	buf      unsafe.Pointer // points to an array of dataqsiz elements
	elemsize uint16
	closed   uint32
	elemtype *_type // element type
	sendx    uint   // send index
	recvx    uint   // receive index
	recvq    waitq  // list of recv waiters
	sendq    waitq  // list of send waiters

	// lock protects all fields in hchan, as well as several
	// fields in sudogs blocked on this channel.
	//
	// Do not change another G's status while holding this lock
	// (in particular, do not ready a G), as this can deadlock
	// with stack shrinking.
	lock mutex
}

type waitq struct {
	first *sudog
	last  *sudog
}

//go:linkname reflect_makechan reflect.makechan
func reflect_makechan(t *chantype, size int64) *hchan {
	return makechan(t, size)
}

func makechan(t *chantype, size int64) *hchan {
	elem := t.elem

	// compiler checks this but be safe.
	if elem.size >= 1<<16 {
		throw("makechan: invalid channel element type")
	}
	if hchanSize%maxAlign != 0 || elem.align > maxAlign {
		throw("makechan: bad alignment")
	}
	if size < 0 || int64(uintptr(size)) != size || (elem.size > 0 && uintptr(size) > (_MaxMem-hchanSize)/elem.size) {
		panic(plainError("makechan: size out of range"))
	}

	var c *hchan
	if elem.kind&kindNoPointers != 0 || size == 0 {
		// Allocate memory in one call.
		// Hchan does not contain pointers interesting for GC in this case:
		// buf points into the same allocation, elemtype is persistent.
		// SudoG's are referenced from their owning thread so they can't be collected.
		// TODO(dvyukov,rlh): Rethink when collector can move allocated objects.
		c = (*hchan)(mallocgc(hchanSize+uintptr(size)*elem.size, nil, true))
		if size > 0 && elem.size != 0 {
			c.buf = add(unsafe.Pointer(c), hchanSize)
		} else {
			// race detector uses this location for synchronization
			// Also prevents us from pointing beyond the allocation (see issue 9401).
			c.buf = unsafe.Pointer(c)
		}
	} else {
		c = new(hchan)
		c.buf = newarray(elem, int(size))
	}
	c.elemsize = uint16(elem.size)
	c.elemtype = elem
	c.dataqsiz = uint(size)

	if debugChan {
		print("makechan: chan=", c, "; elemsize=", elem.size, "; dataqsiz=", size, "\n")
	}
	return c
}
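
// A note on usage: user code never calls makechan directly. The compiler
// lowers a make expression such as
//
//	ch := make(chan int, 4)
//
// into a runtime call equivalent to makechan(<type descriptor for chan int>, 4),
// where the descriptor argument here is only a sketch of what the compiler
// actually passes.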

// chanbuf(c, i) is pointer to the i'th slot in the buffer.
func chanbuf(c *hchan, i uint) unsafe.Pointer {
	return add(c.buf, uintptr(i)*uintptr(c.elemsize))
}
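
// For example, with dataqsiz == 4 and recvx == 3, chanbuf(c, 3) addresses
// the last slot of the circular buffer; the send and receive paths below
// implement wraparound by resetting the index to 0 when it reaches
// dataqsiz rather than computing a modulus on every operation.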

// entry point for c <- x from compiled code
//go:nosplit
func chansend1(t *chantype, c *hchan, elem unsafe.Pointer) {
	chansend(t, c, elem, true, getcallerpc(unsafe.Pointer(&t)))
}
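
// For example, a plain send statement
//
//	ch <- v
//
// compiles to a call of chansend1, which passes block == true to chansend,
// so the sending goroutine parks until the send can complete.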

/*
 * generic single channel send/recv
 * If block is false,
 * then the protocol will not
 * sleep but return if it could
 * not complete.
 *
 * sleep can wake up with g.param == nil
 * when a channel involved in the sleep has
 * been closed. It is easiest to loop and re-run
 * the operation; we'll see that it's now closed.
 */

func chansend(t *chantype, c *hchan, ep unsafe.Pointer, block bool, callerpc uintptr) bool {
	if raceenabled {
		raceReadObjectPC(t.elem, ep, callerpc, funcPC(chansend))
	}
	if msanenabled {
		msanread(ep, t.elem.size)
	}

	if c == nil {
		if !block {
			return false
		}
		gopark(nil, nil, "chan send (nil chan)", traceEvGoStop, 2)
		throw("unreachable")
	}

	if debugChan {
		print("chansend: chan=", c, "\n")
	}

	if raceenabled {
		racereadpc(unsafe.Pointer(c), callerpc, funcPC(chansend))
	}

	// Fast path: check for failed non-blocking operation without acquiring the lock.
	//
	// After observing that the channel is not closed, we observe that the channel is
	// not ready for sending. Each of these observations is a single word-sized read
	// (first c.closed and second c.recvq.first or c.qcount depending on kind of channel).
	// Because a closed channel cannot transition from 'ready for sending' to
	// 'not ready for sending', even if the channel is closed between the two observations,
	// they imply a moment between the two when the channel was both not yet closed
	// and not ready for sending. We behave as if we observed the channel at that moment,
	// and report that the send cannot proceed.
	//
	// It is okay if the reads are reordered here: if we observe that the channel is not
	// ready for sending and then observe that it is not closed, that implies that the
	// channel wasn't closed during the first observation.
	if !block && c.closed == 0 && ((c.dataqsiz == 0 && c.recvq.first == nil) ||
		(c.dataqsiz > 0 && c.qcount == c.dataqsiz)) {
		return false
	}

	var t0 int64
	if blockprofilerate > 0 {
		t0 = cputicks()
	}

	lock(&c.lock)

	if c.closed != 0 {
		unlock(&c.lock)
		panic(plainError("send on closed channel"))
	}

	if sg := c.recvq.dequeue(); sg != nil {
		// Found a waiting receiver. We pass the value we want to send
		// directly to the receiver, bypassing the channel buffer (if any).
		send(c, sg, ep, func() { unlock(&c.lock) })
		return true
	}

	if c.qcount < c.dataqsiz {
		// Space is available in the channel buffer. Enqueue the element to send.
		qp := chanbuf(c, c.sendx)
		if raceenabled {
			raceacquire(qp)
			racerelease(qp)
		}
		typedmemmove(c.elemtype, qp, ep)
		c.sendx++
		if c.sendx == c.dataqsiz {
			c.sendx = 0
		}
		c.qcount++
		unlock(&c.lock)
		return true
	}

	if !block {
		unlock(&c.lock)
		return false
	}

	// Block on the channel. Some receiver will complete our operation for us.
	gp := getg()
	mysg := acquireSudog()
	mysg.releasetime = 0
	if t0 != 0 {
		mysg.releasetime = -1
	}
	// No stack splits between assigning elem and enqueuing mysg
	// on gp.waiting where copystack can find it.
	mysg.elem = ep
	mysg.waitlink = nil
	mysg.g = gp
	mysg.selectdone = nil
	mysg.c = c
	gp.waiting = mysg
	gp.param = nil
	c.sendq.enqueue(mysg)
	goparkunlock(&c.lock, "chan send", traceEvGoBlockSend, 3)

	// someone woke us up.
	if mysg != gp.waiting {
		throw("G waiting list is corrupted")
	}
	gp.waiting = nil
	if gp.param == nil {
		if c.closed == 0 {
			throw("chansend: spurious wakeup")
		}
		panic(plainError("send on closed channel"))
	}
	gp.param = nil
	if mysg.releasetime > 0 {
		blockevent(mysg.releasetime-t0, 2)
	}
	mysg.c = nil
	releaseSudog(mysg)
	return true
}

// send processes a send operation on an empty channel c.
// The value ep sent by the sender is copied to the receiver sg.
// The receiver is then woken up to go on its merry way.
// Channel c must be empty and locked. send unlocks c with unlockf.
// sg must already be dequeued from c.
// ep must be non-nil and point to the heap or the caller's stack.
func send(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func()) {
	if raceenabled {
		if c.dataqsiz == 0 {
			racesync(c, sg)
		} else {
			// Pretend we go through the buffer, even though
			// we copy directly. Note that we need to increment
			// the head/tail locations only when raceenabled.
			qp := chanbuf(c, c.recvx)
			raceacquire(qp)
			racerelease(qp)
			raceacquireg(sg.g, qp)
			racereleaseg(sg.g, qp)
			c.recvx++
			if c.recvx == c.dataqsiz {
				c.recvx = 0
			}
			c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
		}
	}
	if sg.elem != nil {
		sendDirect(c.elemtype, sg, ep)
		sg.elem = nil
	}
	gp := sg.g
	unlockf()
	gp.param = unsafe.Pointer(sg)
	if sg.releasetime != 0 {
		sg.releasetime = cputicks()
	}
	goready(gp, 4)
}

// Sends and receives on unbuffered or empty-buffered channels are the
// only operations where one running goroutine writes to the stack of
// another running goroutine. The GC assumes that stack writes only
// happen when the goroutine is running and are only done by that
// goroutine. Using a write barrier is sufficient to make up for
// violating that assumption, but the write barrier has to work.
// typedmemmove will call bulkBarrierPreWrite, but the target bytes
// are not in the heap, so that will not help. We arrange to call
// memmove and typeBitsBulkBarrier instead.

func sendDirect(t *_type, sg *sudog, src unsafe.Pointer) {
	// src is on our stack, dst is a slot on another stack.

	// Once we read sg.elem out of sg, it will no longer
	// be updated if the destination's stack gets copied (shrunk).
	// So make sure that no preemption points can happen between read & use.
	dst := sg.elem
	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.size)
	memmove(dst, src, t.size)
}

func recvDirect(t *_type, sg *sudog, dst unsafe.Pointer) {
	// dst is on our stack or the heap, src is on another stack.
	// The channel is locked, so src will not move during this
	// operation.
	src := sg.elem
	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.size)
	memmove(dst, src, t.size)
}

func closechan(c *hchan) {
	if c == nil {
		panic(plainError("close of nil channel"))
	}

	lock(&c.lock)
	if c.closed != 0 {
		unlock(&c.lock)
		panic(plainError("close of closed channel"))
	}

	if raceenabled {
		callerpc := getcallerpc(unsafe.Pointer(&c))
		racewritepc(unsafe.Pointer(c), callerpc, funcPC(closechan))
		racerelease(unsafe.Pointer(c))
	}

	c.closed = 1

	var glist *g

	// release all readers
	for {
		sg := c.recvq.dequeue()
		if sg == nil {
			break
		}
		if sg.elem != nil {
			typedmemclr(c.elemtype, sg.elem)
			sg.elem = nil
		}
		if sg.releasetime != 0 {
			sg.releasetime = cputicks()
		}
		gp := sg.g
		gp.param = nil
		if raceenabled {
			raceacquireg(gp, unsafe.Pointer(c))
		}
		gp.schedlink.set(glist)
		glist = gp
	}

	// release all writers (they will panic)
	for {
		sg := c.sendq.dequeue()
		if sg == nil {
			break
		}
		sg.elem = nil
		if sg.releasetime != 0 {
			sg.releasetime = cputicks()
		}
		gp := sg.g
		gp.param = nil
		if raceenabled {
			raceacquireg(gp, unsafe.Pointer(c))
		}
		gp.schedlink.set(glist)
		glist = gp
	}
	unlock(&c.lock)

	// Ready all Gs now that we've dropped the channel lock.
	for glist != nil {
		gp := glist
		glist = glist.schedlink.ptr()
		gp.schedlink = 0
		goready(gp, 3)
	}
}
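
// To summarize the semantics implemented above: after close(c), parked
// receivers are woken with the zero value and report it via the comma-ok
// form
//
//	v, ok := <-c // ok == false when v is the zero value from a close
//
// while parked senders are woken only to panic with
// "send on closed channel".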

// entry points for <- c from compiled code
//go:nosplit
func chanrecv1(t *chantype, c *hchan, elem unsafe.Pointer) {
	chanrecv(t, c, elem, true)
}

//go:nosplit
func chanrecv2(t *chantype, c *hchan, elem unsafe.Pointer) (received bool) {
	_, received = chanrecv(t, c, elem, true)
	return
}

// chanrecv receives on channel c and writes the received data to ep.
// ep may be nil, in which case received data is ignored.
// If block == false and no elements are available, returns (false, false).
// Otherwise, if c is closed, zeros *ep and returns (true, false).
// Otherwise, fills in *ep with an element and returns (true, true).
// A non-nil ep must point to the heap or the caller's stack.
func chanrecv(t *chantype, c *hchan, ep unsafe.Pointer, block bool) (selected, received bool) {
	// raceenabled: don't need to check ep, as it is always on the stack
	// or is new memory allocated by reflect.

	if debugChan {
		print("chanrecv: chan=", c, "\n")
	}

	if c == nil {
		if !block {
			return
		}
		gopark(nil, nil, "chan receive (nil chan)", traceEvGoStop, 2)
		throw("unreachable")
	}

	// Fast path: check for failed non-blocking operation without acquiring the lock.
	//
	// After observing that the channel is not ready for receiving, we observe that the
	// channel is not closed. Each of these observations is a single word-sized read
	// (first c.sendq.first or c.qcount, and second c.closed).
	// Because a channel cannot be reopened, the later observation of the channel
	// being not closed implies that it was also not closed at the moment of the
	// first observation. We behave as if we observed the channel at that moment
	// and report that the receive cannot proceed.
	//
	// The order of operations is important here: reversing the operations can lead to
	// incorrect behavior when racing with a close.
	if !block && (c.dataqsiz == 0 && c.sendq.first == nil ||
		c.dataqsiz > 0 && atomic.Loaduint(&c.qcount) == 0) &&
		atomic.Load(&c.closed) == 0 {
		return
	}

	var t0 int64
	if blockprofilerate > 0 {
		t0 = cputicks()
	}

	lock(&c.lock)

	if c.closed != 0 && c.qcount == 0 {
		if raceenabled {
			raceacquire(unsafe.Pointer(c))
		}
		unlock(&c.lock)
		if ep != nil {
			typedmemclr(c.elemtype, ep)
		}
		return true, false
	}

	if sg := c.sendq.dequeue(); sg != nil {
		// Found a waiting sender. If buffer is size 0, receive value
		// directly from sender. Otherwise, receive from head of queue
		// and add sender's value to the tail of the queue (both map to
		// the same buffer slot because the queue is full).
		recv(c, sg, ep, func() { unlock(&c.lock) })
		return true, true
	}

	if c.qcount > 0 {
		// Receive directly from queue
		qp := chanbuf(c, c.recvx)
		if raceenabled {
			raceacquire(qp)
			racerelease(qp)
		}
		if ep != nil {
			typedmemmove(c.elemtype, ep, qp)
		}
		typedmemclr(c.elemtype, qp)
		c.recvx++
		if c.recvx == c.dataqsiz {
			c.recvx = 0
		}
		c.qcount--
		unlock(&c.lock)
		return true, true
	}

	if !block {
		unlock(&c.lock)
		return false, false
	}

	// no sender available: block on this channel.
	gp := getg()
	mysg := acquireSudog()
	mysg.releasetime = 0
	if t0 != 0 {
		mysg.releasetime = -1
	}
	// No stack splits between assigning elem and enqueuing mysg
	// on gp.waiting where copystack can find it.
	mysg.elem = ep
	mysg.waitlink = nil
	gp.waiting = mysg
	mysg.g = gp
	mysg.selectdone = nil
	mysg.c = c
	gp.param = nil
	c.recvq.enqueue(mysg)
	goparkunlock(&c.lock, "chan receive", traceEvGoBlockRecv, 3)

	// someone woke us up
	if mysg != gp.waiting {
		throw("G waiting list is corrupted")
	}
	gp.waiting = nil
	if mysg.releasetime > 0 {
		blockevent(mysg.releasetime-t0, 2)
	}
	closed := gp.param == nil
	gp.param = nil
	mysg.c = nil
	releaseSudog(mysg)
	return true, !closed
}

// recv processes a receive operation on a full channel c.
// There are 2 parts:
// 1) The value sent by the sender sg is put into the channel
//    and the sender is woken up to go on its merry way.
// 2) The value received by the receiver (the current G) is
//    written to ep.
// For synchronous channels, both values are the same.
// For asynchronous channels, the receiver gets its data from
// the channel buffer and the sender's data is put in the
// channel buffer.
// Channel c must be full and locked. recv unlocks c with unlockf.
// sg must already be dequeued from c.
// A non-nil ep must point to the heap or the caller's stack.
func recv(c *hchan, sg *sudog, ep unsafe.Pointer, unlockf func()) {
	if c.dataqsiz == 0 {
		if raceenabled {
			racesync(c, sg)
		}
		if ep != nil {
			// copy data from sender
			recvDirect(c.elemtype, sg, ep)
		}
	} else {
		// Queue is full. Take the item at the
		// head of the queue. Make the sender enqueue
		// its item at the tail of the queue. Since the
		// queue is full, those are both the same slot.
		qp := chanbuf(c, c.recvx)
		if raceenabled {
			raceacquire(qp)
			racerelease(qp)
			raceacquireg(sg.g, qp)
			racereleaseg(sg.g, qp)
		}
		// copy data from queue to receiver
		if ep != nil {
			typedmemmove(c.elemtype, ep, qp)
		}
		// copy data from sender to queue
		typedmemmove(c.elemtype, qp, sg.elem)
		c.recvx++
		if c.recvx == c.dataqsiz {
			c.recvx = 0
		}
		c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz
	}
	sg.elem = nil
	gp := sg.g
	unlockf()
	gp.param = unsafe.Pointer(sg)
	if sg.releasetime != 0 {
		sg.releasetime = cputicks()
	}
	goready(gp, 4)
}

// compiler implements
//
//	select {
//	case c <- v:
//		... foo
//	default:
//		... bar
//	}
//
// as
//
//	if selectnbsend(c, v) {
//		... foo
//	} else {
//		... bar
//	}
//
func selectnbsend(t *chantype, c *hchan, elem unsafe.Pointer) (selected bool) {
	return chansend(t, c, elem, false, getcallerpc(unsafe.Pointer(&t)))
}

// compiler implements
//
//	select {
//	case v = <-c:
//		... foo
//	default:
//		... bar
//	}
//
// as
//
//	if selectnbrecv(&v, c) {
//		... foo
//	} else {
//		... bar
//	}
//
func selectnbrecv(t *chantype, elem unsafe.Pointer, c *hchan) (selected bool) {
	selected, _ = chanrecv(t, c, elem, false)
	return
}

// compiler implements
//
//	select {
//	case v, ok = <-c:
//		... foo
//	default:
//		... bar
//	}
//
// as
//
//	if c != nil && selectnbrecv2(&v, &ok, c) {
//		... foo
//	} else {
//		... bar
//	}
//
func selectnbrecv2(t *chantype, elem unsafe.Pointer, received *bool, c *hchan) (selected bool) {
	// TODO(khr): just return 2 values from this function, now that it is in Go.
	selected, *received = chanrecv(t, c, elem, false)
	return
}

//go:linkname reflect_chansend reflect.chansend
func reflect_chansend(t *chantype, c *hchan, elem unsafe.Pointer, nb bool) (selected bool) {
	return chansend(t, c, elem, !nb, getcallerpc(unsafe.Pointer(&t)))
}

//go:linkname reflect_chanrecv reflect.chanrecv
func reflect_chanrecv(t *chantype, c *hchan, nb bool, elem unsafe.Pointer) (selected bool, received bool) {
	return chanrecv(t, c, elem, !nb)
}

//go:linkname reflect_chanlen reflect.chanlen
func reflect_chanlen(c *hchan) int {
	if c == nil {
		return 0
	}
	return int(c.qcount)
}

//go:linkname reflect_chancap reflect.chancap
func reflect_chancap(c *hchan) int {
	if c == nil {
		return 0
	}
	return int(c.dataqsiz)
}
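
// Per the go:linkname directives above, these two helpers back the reflect
// package's channel length and capacity queries; the nil checks match the
// language rule that len and cap of a nil channel are both 0.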

//go:linkname reflect_chanclose reflect.chanclose
func reflect_chanclose(c *hchan) {
	closechan(c)
}

func (q *waitq) enqueue(sgp *sudog) {
	sgp.next = nil
	x := q.last
	if x == nil {
		sgp.prev = nil
		q.first = sgp
		q.last = sgp
		return
	}
	sgp.prev = x
	x.next = sgp
	q.last = sgp
}

func (q *waitq) dequeue() *sudog {
	for {
		sgp := q.first
		if sgp == nil {
			return nil
		}
		y := sgp.next
		if y == nil {
			q.first = nil
			q.last = nil
		} else {
			y.prev = nil
			q.first = y
			sgp.next = nil // mark as removed (see dequeueSudog)
		}

		// if sgp participates in a select and is already signaled, ignore it
		if sgp.selectdone != nil {
			// claim the right to signal
			if *sgp.selectdone != 0 || !atomic.Cas(sgp.selectdone, 0, 1) {
				continue
			}
		}

		return sgp
	}
}

func racesync(c *hchan, sg *sudog) {
	racerelease(chanbuf(c, 0))
	raceacquireg(sg.g, chanbuf(c, 0))
	racereleaseg(sg.g, chanbuf(c, 0))
	raceacquire(chanbuf(c, 0))
}