1 // Copyright 2013 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // +build darwin dragonfly freebsd linux netbsd openbsd windows
14 // Map gccgo field names to gc field names.
15 // Eface aka __go_empty_interface.
16 #define type __type_descriptor
19 // Integrated network poller (platform-independent part).
20 // A particular implementation (epoll/kqueue) must define the following functions:
21 // void runtime_netpollinit(void); // to initialize the poller
22 // int32 runtime_netpollopen(uintptr fd, PollDesc *pd); // to arm edge-triggered notifications
23 // and associate fd with pd.
24 // An implementation must call the following function to denote that the pd is ready.
25 // void runtime_netpollready(G **gpp, PollDesc *pd, int32 mode);
31 PollDesc* link; // in pollcache, protected by pollcache.Lock
32 Lock; // protects the following fields
35 uintptr seq; // protects from stale timers and ready notifications
36 G* rg; // G waiting for read or READY (binary semaphore)
37 Timer rt; // read deadline timer (set if rt.fv != nil)
38 int64 rd; // read deadline
39 G* wg; // the same for writes
48 // PollDesc objects must be type-stable,
49 // because we can get ready notification from epoll/kqueue
50 // after the descriptor is closed/reused.
51 // Stale notifications are detected using seq variable,
52 // seq is incremented when deadlines are changed or descriptor is reused.
// Forward declarations for the poller helpers implemented below.
55 static bool netpollblock(PollDesc*, int32);
56 static G* netpollunblock(PollDesc*, int32, bool);
57 static void deadline(int64, Eface);
58 static void readDeadline(int64, Eface);
59 static void writeDeadline(int64, Eface);
60 static PollDesc* allocPollDesc(void);
61 static intgo checkerr(PollDesc *pd, int32 mode);
// FuncVal wrappers so the deadline callbacks can be installed as
// timer functions (Timer.fv) in runtime_pollSetDeadline below.
63 static FuncVal deadlineFn = {(void(*)(void))deadline};
64 static FuncVal readDeadlineFn = {(void(*)(void))readDeadline};
65 static FuncVal writeDeadlineFn = {(void(*)(void))writeDeadline};
// One-time initialization of the platform poller (epoll/kqueue).
67 func runtime_pollServerInit() {
68 runtime_netpollinit();
// Prepares a PollDesc for fd and registers fd with the platform
// poller via runtime_netpollopen; errno is netpollopen's result.
71 func runtime_pollOpen(fd uintptr) (pd *PollDesc, errno int) {
// A descriptor taken from the free cache must have no parked
// readers or writers left over from its previous use.
74 if(pd->wg != nil && pd->wg != READY)
75 runtime_throw("runtime_pollOpen: blocked write on free descriptor");
76 if(pd->rg != nil && pd->rg != READY)
77 runtime_throw("runtime_pollOpen: blocked read on free descriptor");
87 errno = runtime_netpollopen(fd, pd);
// Unregisters pd's fd from the poller and returns pd to pollcache.
// Callers must have unblocked waiters first (runtime_pollUnblock).
90 func runtime_pollClose(pd *PollDesc) {
92 runtime_throw("runtime_pollClose: close w/o unblock");
93 if(pd->wg != nil && pd->wg != READY)
94 runtime_throw("runtime_pollClose: blocked write on closing descriptor");
95 if(pd->rg != nil && pd->rg != READY)
96 runtime_throw("runtime_pollClose: blocked read on closing descriptor");
97 runtime_netpollclose(pd->fd);
// Push pd onto the free list; PollDesc objects are type-stable
// (see file comment above), so they are cached rather than freed.
98 runtime_lock(&pollcache);
99 pd->link = pollcache.first;
100 pollcache.first = pd;
101 runtime_unlock(&pollcache);
// Resets pd for a new I/O operation in the given mode ('r' or 'w');
// err carries checkerr's closing/timeout code (0 means OK).
104 func runtime_pollReset(pd *PollDesc, mode int) (err int) {
106 err = checkerr(pd, mode);
// Blocks until the fd is ready for mode ('r'/'w'), the deadline
// fires, or the descriptor is closed; err is checkerr's code.
117 func runtime_pollWait(pd *PollDesc, mode int) (err int) {
119 err = checkerr(pd, mode);
// Re-check closing/timeout state after every false wakeup from
// netpollblock before parking again.
121 while(!netpollblock(pd, mode)) {
122 err = checkerr(pd, mode);
125 // Can happen if timeout has fired and unblocked us,
126 // but before we had a chance to run, timeout has been reset.
127 // Pretend it has not happened and retry.
// Waits for a real readiness notification only, looping past
// closing and timeout wakeups (unlike runtime_pollWait).
// NOTE(review): presumably used after an I/O cancelation — confirm
// against callers (build tags include windows).
133 func runtime_pollWaitCanceled(pd *PollDesc, mode int) {
135 // wait for ioready, ignore closing or timeouts.
136 while(!netpollblock(pd, mode))
// Sets the read and/or write deadline d on pd (mode 'r', 'w', or
// 'r'+'w' for both) and (re)arms the corresponding deadline timers.
// d is compared against runtime_nanotime(), so it is an absolute
// nanotime value; 0 apparently means "no deadline" — see the check
// at original line 160.
141 func runtime_pollSetDeadline(pd *PollDesc, d int64, mode int) {
149 pd->seq++; // invalidate current timers
150 // Reset current timers.
152 runtime_deltimer(&pd->rt);
156 runtime_deltimer(&pd->wt);
// A nonzero deadline already in the past is handled by the unblock
// calls at the end of this function.
160 if(d != 0 && d <= runtime_nanotime())
162 if(mode == 'r' || mode == 'r'+'w')
164 if(mode == 'w' || mode == 'r'+'w')
// Equal read and write deadlines share one combined timer (rt with
// deadlineFn) instead of two separate ones.
166 if(pd->rd > 0 && pd->rd == pd->wd) {
167 pd->rt.fv = &deadlineFn;
168 pd->rt.when = pd->rd;
169 // Copy current seq into the timer arg.
170 // Timer func will check the seq against current descriptor seq,
171 // if they differ the descriptor was reused or timers were reset.
172 pd->rt.arg.type = (Type*)pd->seq;
173 pd->rt.arg.data = pd;
174 runtime_addtimer(&pd->rt);
// Separate read deadline timer.
177 pd->rt.fv = &readDeadlineFn;
178 pd->rt.when = pd->rd;
179 pd->rt.arg.type = (Type*)pd->seq;
180 pd->rt.arg.data = pd;
181 runtime_addtimer(&pd->rt);
// Separate write deadline timer.
184 pd->wt.fv = &writeDeadlineFn;
185 pd->wt.when = pd->wd;
186 pd->wt.arg.type = (Type*)pd->seq;
187 pd->wt.arg.data = pd;
188 runtime_addtimer(&pd->wt);
191 // If we set the new deadline in the past, unblock currently pending IO if any.
195 rg = netpollunblock(pd, 'r', false);
197 wg = netpollunblock(pd, 'w', false);
// Marks pd as closing: wakes any parked reader/writer (with
// ioready=false, so they see an error, not readiness) and stops
// both deadline timers. Must precede runtime_pollClose.
205 func runtime_pollUnblock(pd *PollDesc) {
210 runtime_throw("runtime_pollUnblock: already closing");
213 rg = netpollunblock(pd, 'r', false);
214 wg = netpollunblock(pd, 'w', false);
216 runtime_deltimer(&pd->rt);
220 runtime_deltimer(&pd->wt);
// Accessor for pd's file descriptor; body not visible in this
// fragment — presumably returns pd->fd, confirm in full source.
231 runtime_netpollfd(PollDesc *pd)
236 // make pd ready, newly runnable goroutines (if any) are enqueued into gpp list
238 runtime_netpollready(G **gpp, PollDesc *pd, int32 mode)
// ioready=true: record a real readiness notification for the
// requested mode(s) and collect any G that was parked waiting.
244 if(mode == 'r' || mode == 'r'+'w')
245 rg = netpollunblock(pd, 'r', true);
246 if(mode == 'w' || mode == 'r'+'w')
247 wg = netpollunblock(pd, 'w', true);
// Push each unblocked G onto the caller's list via schedlink.
250 rg->schedlink = *gpp;
254 wg->schedlink = *gpp;
// Maps pd's state to an error code for mode: 1 (errClosing) when
// the descriptor is closing, 2 (errTimeout) when the deadline for
// mode has expired — a negative rd/wd marks expiry. The success
// (0) return is outside this fragment.
260 checkerr(PollDesc *pd, int32 mode)
263 return 1; // errClosing
264 if((mode == 'r' && pd->rd < 0) || (mode == 'w' && pd->wd < 0))
265 return 2; // errTimeout
269 // returns true if IO is ready, or false if timedout or closed
271 netpollblock(PollDesc *pd, int32 mode)
283 runtime_throw("netpollblock: double wait");
// Park this goroutine; runtime_park releases pd's Lock for us.
285 runtime_park(runtime_unlock, &pd->Lock, "IO wait");
// g->param carries the unblock reason set by netpollunblock
// (nonzero means real readiness).
287 if(runtime_g()->param)
// Wakes the G parked on pd for mode, if any; ioready says whether
// the wakeup is a real readiness notification (vs. timeout or
// closing). Returns the unblocked G for the caller to schedule.
293 netpollunblock(PollDesc *pd, int32 mode, bool ioready)
303 // Only set READY for ioready. runtime_pollWait
304 // will check for timeout/cancel before waiting.
310 // pass unblock reason onto blocked g
311 old->param = (void*)(uintptr)ioready;
// Timer callback shared by the combined/read/write deadline timers;
// read and write select which deadline(s) this invocation expires.
317 deadlineimpl(int64 now, Eface arg, bool read, bool write)
324 pd = (PollDesc*)arg.data;
325 // This is the seq when the timer was set.
326 // If it's stale, ignore the timer event.
327 seq = (uintptr)arg.type;
331 // The descriptor was reused or timers were reset.
// Expire the read deadline and wake the parked reader, if any
// (ioready=false so the waiter reports errTimeout, not readiness).
336 if(pd->rd <= 0 || pd->rt.fv == nil)
337 runtime_throw("deadlineimpl: inconsistent read deadline");
340 rg = netpollunblock(pd, 'r', false);
// Same for the write deadline; wt.fv is nil when the combined
// read+write timer (rt) was used, hence the !read exemption.
343 if(pd->wd <= 0 || (pd->wt.fv == nil && !read))
344 runtime_throw("deadlineimpl: inconsistent write deadline");
347 wg = netpollunblock(pd, 'w', false);
// Combined read+write deadline timer callback.
357 deadline(int64 now, Eface arg)
359 deadlineimpl(now, arg, true, true);
// Read-only deadline timer callback.
363 readDeadline(int64 now, Eface arg)
365 deadlineimpl(now, arg, true, false);
// Write-only deadline timer callback.
369 writeDeadline(int64 now, Eface arg)
371 deadlineimpl(now, arg, false, true);
// (allocPollDesc body fragment) When the free list is empty, carve
// a page's worth of PollDescs from persistent memory, thread them
// onto the list, then pop the head under pollcache's lock.
380 runtime_lock(&pollcache);
381 if(pollcache.first == nil) {
382 n = PageSize/sizeof(*pd);
385 // Must be in non-GC memory because can be referenced
386 // only from epoll/kqueue internals.
387 pd = runtime_persistentalloc(n*sizeof(*pd), 0, &mstats.other_sys);
// Link each new descriptor onto the front of the free list.
388 for(i = 0; i < n; i++) {
389 pd[i].link = pollcache.first;
390 pollcache.first = &pd[i];
// Pop the head descriptor for the caller.
393 pd = pollcache.first;
394 pollcache.first = pd->link;
395 runtime_unlock(&pollcache);