miniupnpd 1.9 (20160113)
[tomato.git] / release / src / router / miniupnpd / asyncsendto.c
/* $Id: asyncsendto.c,v 1.7 2015/09/03 18:19:20 nanard Exp $ */
/* MiniUPnP project
 * http://miniupnp.free.fr/ or http://miniupnp.tuxfamily.org/
 * (c) 2006-2014 Thomas Bernard
 * This software is subject to the conditions detailed
 * in the LICENCE file provided within the distribution */

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/time.h>
#include <sys/queue.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <syslog.h>
#include <errno.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <inttypes.h>

#include "asyncsendto.h"
#include "upnputils.h"
/* state diagram for a packet :
 *
 *                     |
 *                     V
 * -> ESCHEDULED -> ESENDNOW -> sent
 *                     ^   |
 *                     |   V
 *                EWAITREADY -> sent
 */
struct scheduled_send {
    LIST_ENTRY(scheduled_send) entries;
    struct timeval ts;
    enum {ESCHEDULED=1, EWAITREADY=2, ESENDNOW=3} state;
    int sockfd;
    const void * buf;
    size_t len;
    int flags;
    const struct sockaddr *dest_addr;
    socklen_t addrlen;
    const struct sockaddr_in6 *src_addr;
    char data[];
};

static LIST_HEAD(listhead, scheduled_send) send_list = { NULL };
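
/* Layout note (added for clarity, derived from the code below): each queued
 * packet is a single allocation.  The flexible array member data[] holds, in
 * order, a copy of the destination sockaddr (addrlen bytes), an optional
 * struct sockaddr_in6 source address, and finally the payload itself; the
 * dest_addr, src_addr and buf pointers all point into data[]
 * (see sendto_schedule2()). */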
/*
 * ssize_t sendto(int sockfd, const void *buf, size_t len, int flags,
 *                const struct sockaddr *dest_addr, socklen_t addrlen);
 */
static ssize_t
send_from_to(int sockfd, const void *buf, size_t len, int flags,
             const struct sockaddr_in6 *src_addr,
             const struct sockaddr *dest_addr, socklen_t addrlen)
{
#ifdef IPV6_PKTINFO
    if(src_addr) {
        struct iovec iov;
        struct in6_pktinfo ipi6;
        uint8_t c[CMSG_SPACE(sizeof(ipi6))];
        struct msghdr msg;
        struct cmsghdr* cmsg;

        /* use sendmsg() with IPV6_PKTINFO ancillary data so the kernel
         * sends the packet from the requested source address/interface */
        iov.iov_base = (void *)buf;
        iov.iov_len = len;
        memset(&msg, 0, sizeof(msg));
        msg.msg_iov = &iov;
        msg.msg_iovlen = 1;
        ipi6.ipi6_addr = src_addr->sin6_addr;
        ipi6.ipi6_ifindex = src_addr->sin6_scope_id;
        msg.msg_control = c;
        msg.msg_controllen = sizeof(c);
        cmsg = CMSG_FIRSTHDR(&msg);
        cmsg->cmsg_level = IPPROTO_IPV6;
        cmsg->cmsg_type = IPV6_PKTINFO;
        cmsg->cmsg_len = CMSG_LEN(sizeof(ipi6));
        memcpy(CMSG_DATA(cmsg), &ipi6, sizeof(ipi6));
        msg.msg_name = (void *)dest_addr;
        msg.msg_namelen = addrlen;
        return sendmsg(sockfd, &msg, flags);
    } else {
#endif /* IPV6_PKTINFO */
        return sendto(sockfd, buf, len, flags, dest_addr, addrlen);
#ifdef IPV6_PKTINFO
    }
#endif /* IPV6_PKTINFO */
}
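
/* sendto_schedule2() behaviour (descriptive comment added for clarity,
 * derived from the code below):
 * with delay == 0 an immediate send_from_to() is attempted and, on success,
 * the number of bytes sent is returned.  If the send must be deferred
 * (EAGAIN/EWOULDBLOCK, EINTR) or delay > 0, the packet is copied, queued on
 * send_list and 0 is returned.  -1 is returned on any other error
 * (gettimeofday()/malloc() failure or another sendto() error). */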
/* delay = milliseconds */
ssize_t
sendto_schedule2(int sockfd, const void *buf, size_t len, int flags,
                 const struct sockaddr *dest_addr, socklen_t addrlen,
                 const struct sockaddr_in6 *src_addr,
                 unsigned int delay)
{
    enum {ESCHEDULED, EWAITREADY, ESENDNOW} state;
    ssize_t n;
    size_t alloc_len;
    struct timeval tv;
    struct scheduled_send * elt;

    if(delay == 0) {
        /* first try to send at once */
        n = send_from_to(sockfd, buf, len, flags, src_addr, dest_addr, addrlen);
        if(n >= 0)
            return n;
        else if(errno == EAGAIN || errno == EWOULDBLOCK) {
            /* use select() on this socket */
            state = EWAITREADY;
        } else if(errno == EINTR) {
            state = ESENDNOW;
        } else {
            /* uncaught error */
            return n;
        }
    } else {
        state = ESCHEDULED;
    }

    /* schedule */
    if(gettimeofday(&tv, 0) < 0) {
        return -1;
    }
    /* allocate enough space for structure + buffers */
    alloc_len = sizeof(struct scheduled_send) + len + addrlen;
    if(src_addr)
        alloc_len += sizeof(struct sockaddr_in6);
    elt = malloc(alloc_len);
    if(elt == NULL) {
        syslog(LOG_ERR, "malloc failed to allocate %u bytes",
               (unsigned)alloc_len);
        return -1;
    }
    elt->state = state;
    /* time the packet should be sent */
    elt->ts.tv_sec = tv.tv_sec + (delay / 1000);
    elt->ts.tv_usec = tv.tv_usec + (delay % 1000) * 1000;
    if(elt->ts.tv_usec > 1000000) {
        elt->ts.tv_sec++;
        elt->ts.tv_usec -= 1000000;
    }
    elt->sockfd = sockfd;
    elt->flags = flags;
    memcpy(elt->data, dest_addr, addrlen);
    elt->dest_addr = (struct sockaddr *)elt->data;
    elt->addrlen = addrlen;
    if(src_addr) {
        elt->src_addr = (struct sockaddr_in6 *)(elt->data + addrlen);
        memcpy((void *)elt->src_addr, src_addr, sizeof(struct sockaddr_in6));
        elt->buf = (void *)(elt->data + addrlen + sizeof(struct sockaddr_in6));
    } else {
        elt->src_addr = NULL;
        elt->buf = (void *)(elt->data + addrlen);
    }
    elt->len = len;
    memcpy((void *)elt->buf, buf, len);
    /* insert */
    LIST_INSERT_HEAD(&send_list, elt, entries);
    return 0;
}
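
/* Usage sketch (illustrative only, not part of the original source):
 * queue a reply to be sent roughly 150 ms from now and let the main
 * select() loop perform the actual sendto() when the time comes.
 * s, buf, len and dest are placeholder names:
 *
 *     sendto_schedule2(s, buf, len, 0,
 *                      (struct sockaddr *)&dest, sizeof(dest),
 *                      NULL, 150);
 */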
/* try to send at once, and queue the packet if needed */
ssize_t
sendto_or_schedule(int sockfd, const void *buf, size_t len, int flags,
                   const struct sockaddr *dest_addr, socklen_t addrlen)
{
    return sendto_schedule2(sockfd, buf, len, flags, dest_addr, addrlen, NULL, 0);
}

ssize_t
sendto_or_schedule2(int sockfd, const void *buf, size_t len, int flags,
                    const struct sockaddr *dest_addr, socklen_t addrlen,
                    const struct sockaddr_in6 *src_addr)
{
    return sendto_schedule2(sockfd, buf, len, flags, dest_addr, addrlen, src_addr, 0);
}
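
/* Note (added for clarity): these two wrappers behave like sendto(), e.g.
 *
 *     sendto_or_schedule(s, buf, len, 0, (struct sockaddr *)&dest, sizeof(dest));
 *
 * except that a packet which cannot be sent immediately
 * (EAGAIN/EWOULDBLOCK/EINTR) is queued and retried later from the main
 * loop (see try_sendto() below).  s, buf, len and dest are placeholders. */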
/* get_next_scheduled_send() : returns the number of scheduled sends in the
 * list; *next_send is set to the earliest scheduled time (only meaningful
 * when the return value is > 0) */
int get_next_scheduled_send(struct timeval * next_send)
{
    int n = 0;
    struct scheduled_send * elt;
    if(next_send == NULL)
        return -1;
    for(elt = send_list.lh_first; elt != NULL; elt = elt->entries.le_next) {
        if(n == 0 || (elt->ts.tv_sec < next_send->tv_sec) ||
           (elt->ts.tv_sec == next_send->tv_sec && elt->ts.tv_usec < next_send->tv_usec)) {
            next_send->tv_sec = elt->ts.tv_sec;
            next_send->tv_usec = elt->ts.tv_usec;
        }
        n++;
    }
    return n;
}
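
/* Sketch (illustrative only, not part of the original source): how a caller
 * could turn the earliest scheduled send into a select() timeout.
 * now, next_send and timeout are placeholder variables:
 *
 *     gettimeofday(&now, NULL);
 *     if(get_next_scheduled_send(&next_send) > 0) {
 *         timeout.tv_sec = next_send.tv_sec - now.tv_sec;
 *         timeout.tv_usec = next_send.tv_usec - now.tv_usec;
 *         if(timeout.tv_usec < 0) {
 *             timeout.tv_sec--;
 *             timeout.tv_usec += 1000000;
 *         }
 *         if(timeout.tv_sec < 0) {
 *             timeout.tv_sec = 0;
 *             timeout.tv_usec = 0;
 *         }
 *     }
 */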
/* update writefds for the select() call
 * and return the number of packets to try to send at once */
int get_sendto_fds(fd_set * writefds, int * max_fd, const struct timeval * now)
{
    int n = 0;
    struct scheduled_send * elt;
    for(elt = send_list.lh_first; elt != NULL; elt = elt->entries.le_next) {
        if(elt->state == EWAITREADY) {
            /* last sendto() call returned EAGAIN/EWOULDBLOCK */
            FD_SET(elt->sockfd, writefds);
            if(elt->sockfd > *max_fd)
                *max_fd = elt->sockfd;
            n++;
        } else if((elt->ts.tv_sec < now->tv_sec) ||
                  (elt->ts.tv_sec == now->tv_sec && elt->ts.tv_usec <= now->tv_usec)) {
            /* we waited long enough, now send ! */
            elt->state = ESENDNOW;
            n++;
        }
    }
    return n;
}
/* execute sendto() when needed */
int try_sendto(fd_set * writefds)
{
    int ret = 0;
    ssize_t n;
    struct scheduled_send * elt;
    struct scheduled_send * next;
    for(elt = send_list.lh_first; elt != NULL; elt = next) {
        next = elt->entries.le_next;
        if((elt->state == ESENDNOW) ||
           (elt->state == EWAITREADY && FD_ISSET(elt->sockfd, writefds))) {
#ifdef DEBUG
            syslog(LOG_DEBUG, "%s: %d bytes on socket %d",
                   "try_sendto", (int)elt->len, elt->sockfd);
#endif
            n = send_from_to(elt->sockfd, elt->buf, elt->len, elt->flags,
                             elt->src_addr, elt->dest_addr, elt->addrlen);
            /*n = sendto(elt->sockfd, elt->buf, elt->len, elt->flags,
                         elt->dest_addr, elt->addrlen);*/
            if(n < 0) {
                if(errno == EINTR) {
                    /* retry at once */
                    elt->state = ESENDNOW;
                    continue;
                } else if(errno == EAGAIN || errno == EWOULDBLOCK) {
                    /* retry once the socket is ready for writing */
                    elt->state = EWAITREADY;
                    continue;
                } else {
                    char addr_str[64];
                    /* uncaught error */
                    if(sockaddr_to_string(elt->dest_addr, addr_str, sizeof(addr_str)) <= 0)
                        addr_str[0] = '\0';
                    syslog(LOG_ERR, "%s(sock=%d, len=%u, dest=%s): sendto: %m",
                           "try_sendto", elt->sockfd, (unsigned)elt->len,
                           addr_str);
                    ret--;
                }
            } else if((int)n != (int)elt->len) {
                syslog(LOG_WARNING, "%s: %d bytes sent out of %d",
                       "try_sendto", (int)n, (int)elt->len);
            }
            /* remove from the list */
            LIST_REMOVE(elt, entries);
            free(elt);
        }
    }
    return ret;
}
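
/* Integration sketch (illustrative only, not part of the original source):
 * a minimal main loop combining the functions above.  writefds, max_fd,
 * now and timeout are placeholder variables; a real loop would also add
 * its listening sockets to a read fd_set:
 *
 *     FD_ZERO(&writefds);
 *     max_fd = -1;
 *     gettimeofday(&now, NULL);
 *     get_sendto_fds(&writefds, &max_fd, &now);
 *     if(select(max_fd + 1, NULL, &writefds, NULL, &timeout) >= 0)
 *         try_sendto(&writefds);
 */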
/* maximum execution time for finalize_sendto() in milliseconds */
#define FINALIZE_SENDTO_DELAY (500)

/* empty the list */
void finalize_sendto(void)
{
    ssize_t n;
    struct scheduled_send * elt;
    struct scheduled_send * next;
    fd_set writefds;
    struct timeval deadline;
    struct timeval now;
    struct timeval timeout;
    int max_fd;

    if(gettimeofday(&deadline, NULL) < 0) {
        syslog(LOG_ERR, "gettimeofday: %m");
        return;
    }
    deadline.tv_usec += FINALIZE_SENDTO_DELAY*1000;
    if(deadline.tv_usec > 1000000) {
        deadline.tv_sec++;
        deadline.tv_usec -= 1000000;
    }
    while(send_list.lh_first) {
        FD_ZERO(&writefds);
        max_fd = -1;
        for(elt = send_list.lh_first; elt != NULL; elt = next) {
            next = elt->entries.le_next;
            syslog(LOG_DEBUG, "finalize_sendto(): %d bytes on socket %d",
                   (int)elt->len, elt->sockfd);
            n = send_from_to(elt->sockfd, elt->buf, elt->len, elt->flags,
                             elt->src_addr, elt->dest_addr, elt->addrlen);
            /*n = sendto(elt->sockfd, elt->buf, elt->len, elt->flags,
                         elt->dest_addr, elt->addrlen);*/
            if(n < 0) {
                if(errno == EAGAIN || errno == EWOULDBLOCK) {
                    FD_SET(elt->sockfd, &writefds);
                    if(elt->sockfd > max_fd)
                        max_fd = elt->sockfd;
                    continue;
                }
                syslog(LOG_WARNING, "finalize_sendto(): socket=%d sendto: %m", elt->sockfd);
            }
            /* remove from the list */
            LIST_REMOVE(elt, entries);
            free(elt);
        }
        /* check deadline */
        if(gettimeofday(&now, NULL) < 0) {
            syslog(LOG_ERR, "gettimeofday: %m");
            return;
        }
        if(now.tv_sec > deadline.tv_sec ||
           (now.tv_sec == deadline.tv_sec && now.tv_usec > deadline.tv_usec)) {
            /* deadline ! */
            while((elt = send_list.lh_first) != NULL) {
                LIST_REMOVE(elt, entries);
                free(elt);
            }
            return;
        }
        /* compute timeout value */
        timeout.tv_sec = deadline.tv_sec - now.tv_sec;
        timeout.tv_usec = deadline.tv_usec - now.tv_usec;
        if(timeout.tv_usec < 0) {
            timeout.tv_sec--;
            timeout.tv_usec += 1000000;
        }
        if(max_fd >= 0) {
            if(select(max_fd + 1, NULL, &writefds, NULL, &timeout) < 0) {
                syslog(LOG_ERR, "select: %m");
                return;