2013-10-24 Jan-Benedict Glaw <jbglaw@lug-owl.de>
[official-gcc.git] / libgo / runtime / netpoll.goc
bloba0bd735f85cf9ac2f2bbfb02032089ee7bf1aee8
1 // Copyright 2013 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // +build darwin linux
7 package net
9 #include "runtime.h"
10 #include "defs.h"
11 #include "arch.h"
12 #include "malloc.h"
14 // Map gccgo field names to gc field names.
15 // Eface aka __go_empty_interface.
16 #define type __type_descriptor
17 #define data __object
19 // Integrated network poller (platform-independent part).
20 // A particular implementation (epoll/kqueue) must define the following functions:
21 // void runtime_netpollinit(void);                      // to initialize the poller
22 // int32 runtime_netpollopen(int32 fd, PollDesc *pd);   // to arm edge-triggered notifications
23                                                         // and associate fd with pd.
24 // An implementation must call the following function to denote that the pd is ready.
25 // void runtime_netpollready(G **gpp, PollDesc *pd, int32 mode);
27 #define READY ((G*)1)
// Per-fd poller state. A PollDesc is type-stable (allocated from
// pollcache in non-GC memory) because epoll/kqueue may deliver a stale
// notification after the descriptor has been closed or reused.
struct PollDesc
{
	PollDesc* link;	// in pollcache, protected by pollcache.Lock
	Lock;		// protects the following fields
	int32	fd;	// the file descriptor being polled
	bool	closing;	// set by runtime_pollUnblock; all later ops fail with errClosing
	uintptr	seq;	// protects from stale timers and ready notifications
	G*	rg;	// G waiting for read or READY (binary semaphore)
	Timer	rt;	// read deadline timer (set if rt.fv != nil)
	int64	rd;	// read deadline (0 = none, -1 = already expired)
	G*	wg;	// the same for writes
	Timer	wt;	// write deadline timer (set if wt.fv != nil)
	int64	wd;	// write deadline (0 = none, -1 = already expired)
};
// Free list of PollDesc objects, threaded through the link field.
static struct
{
	Lock;
	PollDesc*	first;	// head of the free list
	// PollDesc objects must be type-stable,
	// because we can get ready notification from epoll/kqueue
	// after the descriptor is closed/reused.
	// Stale notifications are detected using seq variable,
	// seq is incremented when deadlines are changed or descriptor is reused.
} pollcache;
// Internal helpers (definitions below).
static void	netpollblock(PollDesc*, int32);	// park the current G on pd for mode 'r'/'w'
static G*	netpollunblock(PollDesc*, int32);	// wake the G blocked on pd, if any
static void	deadline(int64, Eface);		// timer callback: combined read+write deadline
static void	readDeadline(int64, Eface);	// timer callback: read deadline only
static void	writeDeadline(int64, Eface);	// timer callback: write deadline only
static PollDesc*	allocPollDesc(void);	// take a PollDesc from pollcache (refilling it if empty)
static intgo	checkerr(PollDesc *pd, int32 mode);	// pending error code for pd (0/1/2)

// Closure values handed to the timer code; only the function pointer
// slot (fv) is used, so the C functions are stored directly.
static FuncVal deadlineFn	= {(void(*)(void))deadline};
static FuncVal readDeadlineFn	= {(void(*)(void))readDeadline};
static FuncVal writeDeadlineFn	= {(void(*)(void))writeDeadline};
// Called once from Go to initialize the platform poller (epoll/kqueue).
func runtime_pollServerInit() {
	runtime_netpollinit();
}
// Allocate a PollDesc for fd and register it with the poller.
// Returns the descriptor and the errno from runtime_netpollopen
// (0 on success). The reused PollDesc must not still have a parked G
// on it; seq is bumped so stale timer/ready notifications are ignored.
func runtime_pollOpen(fd int) (pd *PollDesc, errno int) {
	pd = allocPollDesc();
	runtime_lock(pd);
	if(pd->wg != nil && pd->wg != READY)
		runtime_throw("runtime_pollOpen: blocked write on free descriptor");
	if(pd->rg != nil && pd->rg != READY)
		runtime_throw("runtime_pollOpen: blocked read on free descriptor");
	pd->fd = fd;
	pd->closing = false;
	pd->seq++;	// invalidate any notifications addressed to the previous user
	pd->rg = nil;
	pd->rd = 0;
	pd->wg = nil;
	pd->wd = 0;
	runtime_unlock(pd);

	// Arm edge-triggered notifications; done outside the lock.
	errno = runtime_netpollopen(fd, pd);
}
// Unregister pd's fd from the poller and return pd to pollcache.
// The caller must have called runtime_pollUnblock first (closing set,
// no G may still be parked on the descriptor).
func runtime_pollClose(pd *PollDesc) {
	if(!pd->closing)
		runtime_throw("runtime_pollClose: close w/o unblock");
	if(pd->wg != nil && pd->wg != READY)
		runtime_throw("runtime_pollClose: blocked write on closing descriptor");
	if(pd->rg != nil && pd->rg != READY)
		runtime_throw("runtime_pollClose: blocked read on closing descriptor");
	runtime_netpollclose(pd->fd);
	// Push pd onto the free list; it stays type-stable there.
	runtime_lock(&pollcache);
	pd->link = pollcache.first;
	pollcache.first = pd;
	runtime_unlock(&pollcache);
}
// Prepare pd for a new read ('r') or write ('w') wait: clear the
// semaphore for that direction. Returns 0, or 1 (errClosing) /
// 2 (errTimeout) if the descriptor cannot be used.
func runtime_pollReset(pd *PollDesc, mode int) (err int) {
	runtime_lock(pd);
	err = checkerr(pd, mode);
	if(err)
		goto ret;
	if(mode == 'r')
		pd->rg = nil;
	else if(mode == 'w')
		pd->wg = nil;
ret:
	runtime_unlock(pd);
}
// Block until pd is ready for mode ('r' or 'w'), the deadline expires,
// or the descriptor is closed. Returns 0 on readiness, 1 (errClosing)
// or 2 (errTimeout) otherwise; the second checkerr catches errors that
// arrived while we were parked.
func runtime_pollWait(pd *PollDesc, mode int) (err int) {
	runtime_lock(pd);
	err = checkerr(pd, mode);
	if(err)
		goto ret;
	netpollblock(pd, mode);	// releases and re-acquires pd's lock
	err = checkerr(pd, mode);
ret:
	runtime_unlock(pd);
}
// Set the read ('r'), write ('w') or combined ('r'+'w') deadline for pd
// to absolute time d (nanoseconds; 0 = none). Replaces any previously
// armed deadline timers. d in the past is stored as -1 so checkerr
// reports errTimeout immediately.
func runtime_pollSetDeadline(pd *PollDesc, d int64, mode int) {
	runtime_lock(pd);
	if(pd->closing)
		goto ret;
	pd->seq++;  // invalidate current timers
	// Reset current timers.
	if(pd->rt.fv) {
		runtime_deltimer(&pd->rt);
		pd->rt.fv = nil;
	}
	if(pd->wt.fv) {
		runtime_deltimer(&pd->wt);
		pd->wt.fv = nil;
	}
	// Setup new timers.
	if(d != 0 && d <= runtime_nanotime()) {
		d = -1;	// already expired
	}
	if(mode == 'r' || mode == 'r'+'w')
		pd->rd = d;
	if(mode == 'w' || mode == 'r'+'w')
		pd->wd = d;
	if(pd->rd > 0 && pd->rd == pd->wd) {
		// Identical read and write deadlines: use a single combined timer.
		pd->rt.fv = &deadlineFn;
		pd->rt.when = pd->rd;
		// Copy current seq into the timer arg.
		// Timer func will check the seq against current descriptor seq,
		// if they differ the descriptor was reused or timers were reset.
		pd->rt.arg.type = (Type*)pd->seq;
		pd->rt.arg.data = pd;
		runtime_addtimer(&pd->rt);
	} else {
		if(pd->rd > 0) {
			pd->rt.fv = &readDeadlineFn;
			pd->rt.when = pd->rd;
			pd->rt.arg.type = (Type*)pd->seq;
			pd->rt.arg.data = pd;
			runtime_addtimer(&pd->rt);
		}
		if(pd->wd > 0) {
			pd->wt.fv = &writeDeadlineFn;
			pd->wt.when = pd->wd;
			pd->wt.arg.type = (Type*)pd->seq;
			pd->wt.arg.data = pd;
			runtime_addtimer(&pd->wt);
		}
	}
ret:
	runtime_unlock(pd);
}
// Mark pd as closing: wake any parked readers/writers (they will see
// errClosing), cancel any armed deadline timers, and bump seq so
// in-flight timer callbacks become no-ops. Must precede runtime_pollClose.
func runtime_pollUnblock(pd *PollDesc) {
	G *rg, *wg;

	runtime_lock(pd);
	if(pd->closing)
		runtime_throw("runtime_pollUnblock: already closing");
	pd->closing = true;
	pd->seq++;
	rg = netpollunblock(pd, 'r');
	wg = netpollunblock(pd, 'w');
	if(pd->rt.fv) {
		runtime_deltimer(&pd->rt);
		pd->rt.fv = nil;
	}
	if(pd->wt.fv) {
		runtime_deltimer(&pd->wt);
		pd->wt.fv = nil;
	}
	runtime_unlock(pd);
	// Ready the goroutines outside pd's lock.
	if(rg)
		runtime_ready(rg);
	if(wg)
		runtime_ready(wg);
}
// make pd ready, newly runnable goroutines (if any) are enqueued into gpp list
// (linked through G.schedlink). Called by the platform pollers.
void
runtime_netpollready(G **gpp, PollDesc *pd, int32 mode)
{
	G *rg, *wg;

	rg = wg = nil;
	runtime_lock(pd);
	if(mode == 'r' || mode == 'r'+'w')
		rg = netpollunblock(pd, 'r');
	if(mode == 'w' || mode == 'r'+'w')
		wg = netpollunblock(pd, 'w');
	runtime_unlock(pd);
	if(rg) {
		rg->schedlink = *gpp;
		*gpp = rg;
	}
	if(wg) {
		wg->schedlink = *gpp;
		*gpp = wg;
	}
}
227 static intgo
228 checkerr(PollDesc *pd, int32 mode)
230         if(pd->closing)
231                 return 1;  // errClosing
232         if((mode == 'r' && pd->rd < 0) || (mode == 'w' && pd->wd < 0))
233                 return 2;  // errTimeout
234         return 0;
// Park the current goroutine on pd waiting for mode ('r' or 'w').
// Caller holds pd's lock; the lock is released while parked (handed to
// runtime_park as the unlock function) and re-acquired before return.
// If a READY notification already arrived, it is consumed and the
// function returns without blocking.
static void
netpollblock(PollDesc *pd, int32 mode)
{
	G **gpp;

	// Select the semaphore slot for this direction.
	gpp = &pd->rg;
	if(mode == 'w')
		gpp = &pd->wg;
	if(*gpp == READY) {
		// Notification already delivered; consume it.
		*gpp = nil;
		return;
	}
	if(*gpp != nil)
		runtime_throw("epoll: double wait");
	*gpp = runtime_g();
	runtime_park(runtime_unlock, &pd->Lock, "IO wait");
	runtime_lock(pd);
}
// Deliver a readiness notification for mode ('r' or 'w').
// Caller holds pd's lock. If a goroutine is parked on the slot, clear
// the slot and return that G (caller must runtime_ready it outside the
// lock); otherwise record READY for a future netpollblock and return nil.
static G*
netpollunblock(PollDesc *pd, int32 mode)
{
	G **gpp, *old;

	gpp = &pd->rg;
	if(mode == 'w')
		gpp = &pd->wg;
	if(*gpp == READY)
		return nil;	// notification already pending
	if(*gpp == nil) {
		// Nobody waiting yet; leave the notification for the next waiter.
		*gpp = READY;
		return nil;
	}
	old = *gpp;
	*gpp = nil;
	return old;
}
275 static void
276 deadlineimpl(int64 now, Eface arg, bool read, bool write)
278         PollDesc *pd;
279         uint32 seq;
280         G *rg, *wg;
282         USED(now);
283         pd = (PollDesc*)arg.data;
284         // This is the seq when the timer was set.
285         // If it's stale, ignore the timer event.
286         seq = (uintptr)arg.type;
287         rg = wg = nil;
288         runtime_lock(pd);
289         if(seq != pd->seq) {
290                 // The descriptor was reused or timers were reset.
291                 runtime_unlock(pd);
292                 return;
293         }
294         if(read) {
295                 if(pd->rd <= 0 || pd->rt.fv == nil)
296                         runtime_throw("deadlineimpl: inconsistent read deadline");
297                 pd->rd = -1;
298                 pd->rt.fv = nil;
299                 rg = netpollunblock(pd, 'r');
300         }
301         if(write) {
302                 if(pd->wd <= 0 || (pd->wt.fv == nil && !read))
303                         runtime_throw("deadlineimpl: inconsistent write deadline");
304                 pd->wd = -1;
305                 pd->wt.fv = nil;
306                 wg = netpollunblock(pd, 'w');
307         }
308         runtime_unlock(pd);
309         if(rg)
310                 runtime_ready(rg);
311         if(wg)
312                 runtime_ready(wg);
// Timer callback for a combined read+write deadline.
static void
deadline(int64 now, Eface arg)
{
	deadlineimpl(now, arg, true, true);
}
// Timer callback for a read-only deadline.
static void
readDeadline(int64 now, Eface arg)
{
	deadlineimpl(now, arg, true, false);
}
// Timer callback for a write-only deadline.
static void
writeDeadline(int64 now, Eface arg)
{
	deadlineimpl(now, arg, false, true);
}
// Take a PollDesc from the pollcache free list, allocating a fresh
// page of descriptors if the list is empty.
static PollDesc*
allocPollDesc(void)
{
	PollDesc *pd;
	uint32 i, n;

	runtime_lock(&pollcache);
	if(pollcache.first == nil) {
		// Carve as many descriptors as fit in one page (at least one).
		n = PageSize/sizeof(*pd);
		if(n == 0)
			n = 1;
		// Must be in non-GC memory because can be referenced
		// only from epoll/kqueue internals.
		pd = runtime_SysAlloc(n*sizeof(*pd));
		for(i = 0; i < n; i++) {
			pd[i].link = pollcache.first;
			pollcache.first = &pd[i];
		}
	}
	pd = pollcache.first;
	pollcache.first = pd->link;
	runtime_unlock(&pollcache);
	return pd;
}