slirp/socket.c
/*
 * Copyright (c) 1995 Danny Gasparovski.
 *
 * Please read the file COPYRIGHT for the
 * terms and conditions of the copyright.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "slirp.h"
#include "ip_icmp.h"
#ifdef __sun__
#include <sys/filio.h>
#endif

static void sofcantrcvmore(struct socket *so);
static void sofcantsendmore(struct socket *so);

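/*
 * Find a socket on the given list whose local address matches lhost (and,
 * if fhost is non-NULL, whose foreign address matches fhost).  *last caches
 * the previous match so repeated lookups for the same flow stay cheap.
 */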
struct socket *solookup(struct socket **last, struct socket *head,
        struct sockaddr_storage *lhost, struct sockaddr_storage *fhost)
{
    struct socket *so = *last;

    /* Optimisation */
    if (so != head && sockaddr_equal(&(so->lhost.ss), lhost)
            && (!fhost || sockaddr_equal(&so->fhost.ss, fhost))) {
        return so;
    }

    for (so = head->so_next; so != head; so = so->so_next) {
        if (sockaddr_equal(&(so->lhost.ss), lhost)
                && (!fhost || sockaddr_equal(&so->fhost.ss, fhost))) {
            *last = so;
            return so;
        }
    }

    return (struct socket *)NULL;
}

/*
 * Create a new socket, initialise the fields
 * It is the responsibility of the caller to
 * insque() it into the correct linked-list
 */
struct socket *
socreate(Slirp *slirp)
{
    struct socket *so = g_new(struct socket, 1);

    memset(so, 0, sizeof(struct socket));
    so->so_state = SS_NOFDREF;
    so->s = -1;
    so->slirp = slirp;
    so->pollfds_idx = -1;

    return so;
}

/*
 * Remove references to so from the given message queue.
 */
static void
soqfree(struct socket *so, struct quehead *qh)
{
    struct mbuf *ifq;

    for (ifq = (struct mbuf *) qh->qh_link;
         (struct quehead *) ifq != qh;
         ifq = ifq->ifq_next) {
        if (ifq->ifq_so == so) {
            struct mbuf *ifm;
            ifq->ifq_so = NULL;
            for (ifm = ifq->ifs_next; ifm != ifq; ifm = ifm->ifs_next) {
                ifm->ifq_so = NULL;
            }
        }
    }
}

/*
 * remque and free a socket, clobber cache
 */
void
sofree(struct socket *so)
{
    Slirp *slirp = so->slirp;

    soqfree(so, &slirp->if_fastq);
    soqfree(so, &slirp->if_batchq);

    if (so == slirp->tcp_last_so) {
        slirp->tcp_last_so = &slirp->tcb;
    } else if (so == slirp->udp_last_so) {
        slirp->udp_last_so = &slirp->udb;
    } else if (so == slirp->icmp_last_so) {
        slirp->icmp_last_so = &slirp->icmp;
    }
    m_free(so->so_m);

    if (so->so_next && so->so_prev)
        remque(so); /* crashes if so is not in a queue */

    if (so->so_tcpcb) {
        free(so->so_tcpcb);
    }
    g_free(so);
}

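/*
 * Build an iovec (one or two segments, since so_snd is a circular buffer)
 * describing the free space in so_snd, trimmed down to a multiple of the
 * TCP MSS where possible.  Stores the segment count in *np and returns the
 * number of bytes that may be read into the buffer.
 */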
size_t sopreprbuf(struct socket *so, struct iovec *iov, int *np)
{
    int n, lss, total;
    struct sbuf *sb = &so->so_snd;
    int len = sb->sb_datalen - sb->sb_cc;
    int mss = so->so_tcpcb->t_maxseg;

    DEBUG_CALL("sopreprbuf");
    DEBUG_ARG("so = %p", so);

    if (len <= 0)
        return 0;

    iov[0].iov_base = sb->sb_wptr;
    iov[1].iov_base = NULL;
    iov[1].iov_len = 0;
    if (sb->sb_wptr < sb->sb_rptr) {
        iov[0].iov_len = sb->sb_rptr - sb->sb_wptr;
        /* Should never succeed, but... */
        if (iov[0].iov_len > len)
            iov[0].iov_len = len;
        if (iov[0].iov_len > mss)
            iov[0].iov_len -= iov[0].iov_len % mss;
        n = 1;
    } else {
        iov[0].iov_len = (sb->sb_data + sb->sb_datalen) - sb->sb_wptr;
        /* Should never succeed, but... */
        if (iov[0].iov_len > len)
            iov[0].iov_len = len;
        len -= iov[0].iov_len;
        if (len) {
            iov[1].iov_base = sb->sb_data;
            iov[1].iov_len = sb->sb_rptr - sb->sb_data;
            if (iov[1].iov_len > len)
                iov[1].iov_len = len;
            total = iov[0].iov_len + iov[1].iov_len;
            if (total > mss) {
                lss = total % mss;
                if (iov[1].iov_len > lss) {
                    iov[1].iov_len -= lss;
                    n = 2;
                } else {
                    lss -= iov[1].iov_len;
                    iov[0].iov_len -= lss;
                    n = 1;
                }
            } else
                n = 2;
        } else {
            if (iov[0].iov_len > mss)
                iov[0].iov_len -= iov[0].iov_len % mss;
            n = 1;
        }
    }
    if (np)
        *np = n;

    return iov[0].iov_len + (n - 1) * iov[1].iov_len;
}

/*
 * Read from so's socket into sb_snd, updating all relevant sbuf fields
 * NOTE: This will only be called if it is select()ed for reading, so
 * a read() of 0 (or less) means it's disconnected
 */
int
soread(struct socket *so)
{
    int n, nn;
    struct sbuf *sb = &so->so_snd;
    struct iovec iov[2];

    DEBUG_CALL("soread");
    DEBUG_ARG("so = %p", so);

    /*
     * No need to check if there's enough room to read.
     * soread wouldn't have been called if there weren't
     */
    sopreprbuf(so, iov, &n);

    nn = qemu_recv(so->s, iov[0].iov_base, iov[0].iov_len, 0);
    if (nn <= 0) {
        if (nn < 0 && (errno == EINTR || errno == EAGAIN))
            return 0;
        else {
            int err;
            socklen_t elen = sizeof err;
            struct sockaddr_storage addr;
            struct sockaddr *paddr = (struct sockaddr *) &addr;
            socklen_t alen = sizeof addr;

            err = errno;
            if (nn == 0) {
                if (getpeername(so->s, paddr, &alen) < 0) {
                    err = errno;
                } else {
                    getsockopt(so->s, SOL_SOCKET, SO_ERROR,
                               &err, &elen);
                }
            }

            DEBUG_MISC(" --- soread() disconnected, nn = %d, errno = %d-%s",
                       nn, errno, strerror(errno));
            sofcantrcvmore(so);

            if (err == ECONNRESET || err == ECONNREFUSED
                || err == ENOTCONN || err == EPIPE) {
                tcp_drop(sototcpcb(so), err);
            } else {
                tcp_sockclosed(sototcpcb(so));
            }
            return -1;
        }
    }

    /*
     * If there was no error, try and read the second time round
     * We read again if n = 2 (ie, there's another part of the buffer)
     * and we read as much as we could in the first read
     * We don't test for <= 0 this time, because there legitimately
     * might not be any more data (since the socket is non-blocking),
     * a close will be detected on next iteration.
     * A return of -1 won't (shouldn't) happen, since it didn't happen above
     */
    if (n == 2 && nn == iov[0].iov_len) {
        int ret;
        ret = qemu_recv(so->s, iov[1].iov_base, iov[1].iov_len, 0);
        if (ret > 0)
            nn += ret;
    }

    DEBUG_MISC(" ... read nn = %d bytes", nn);

    /* Update fields */
    sb->sb_cc += nn;
    sb->sb_wptr += nn;
    if (sb->sb_wptr >= (sb->sb_data + sb->sb_datalen))
        sb->sb_wptr -= sb->sb_datalen;
    return nn;
}

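/*
 * Copy size bytes from buf directly into so_snd, as if they had been
 * received on the socket.  If the buffer cannot hold them all, the
 * connection is torn down and -1 is returned.
 */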
int soreadbuf(struct socket *so, const char *buf, int size)
{
    int n, nn, copy = size;
    struct sbuf *sb = &so->so_snd;
    struct iovec iov[2];

    DEBUG_CALL("soreadbuf");
    DEBUG_ARG("so = %p", so);

    /*
     * No need to check if there's enough room to read.
     * soread wouldn't have been called if there weren't
     */
    if (sopreprbuf(so, iov, &n) < size)
        goto err;

    nn = MIN(iov[0].iov_len, copy);
    memcpy(iov[0].iov_base, buf, nn);

    copy -= nn;
    buf += nn;

    if (copy == 0)
        goto done;

    memcpy(iov[1].iov_base, buf, copy);

done:
    /* Update fields */
    sb->sb_cc += size;
    sb->sb_wptr += size;
    if (sb->sb_wptr >= (sb->sb_data + sb->sb_datalen))
        sb->sb_wptr -= sb->sb_datalen;
    return size;

err:
    sofcantrcvmore(so);
    tcp_sockclosed(sototcpcb(so));
    g_critical("soreadbuf buffer too small");
    return -1;
}

/*
 * Get urgent data
 *
 * When the socket is created, we set it SO_OOBINLINE,
 * so when OOB data arrives, we soread() it and everything
 * in the send buffer is sent as urgent data
 */
int
sorecvoob(struct socket *so)
{
    struct tcpcb *tp = sototcpcb(so);
    int ret;

    DEBUG_CALL("sorecvoob");
    DEBUG_ARG("so = %p", so);

    /*
     * We take a guess at how much urgent data has arrived.
     * In most situations, when urgent data arrives, the next
     * read() should get all the urgent data. This guess will
     * be wrong however if more data arrives just after the
     * urgent data, or the read() doesn't return all the
     * urgent data.
     */
    ret = soread(so);
    if (ret > 0) {
        tp->snd_up = tp->snd_una + so->so_snd.sb_cc;
        tp->t_force = 1;
        tcp_output(tp);
        tp->t_force = 0;
    }

    return ret;
}

/*
 * Send urgent data
 * There's a lot duplicated code here, but...
 */
int
sosendoob(struct socket *so)
{
    struct sbuf *sb = &so->so_rcv;
    char buff[2048]; /* XXX Shouldn't be sending more oob data than this */

    int n;

    DEBUG_CALL("sosendoob");
    DEBUG_ARG("so = %p", so);
    DEBUG_ARG("sb->sb_cc = %d", sb->sb_cc);

    if (so->so_urgc > 2048)
        so->so_urgc = 2048; /* XXXX */

    if (sb->sb_rptr < sb->sb_wptr) {
        /* We can send it directly */
        n = slirp_send(so, sb->sb_rptr, so->so_urgc, (MSG_OOB)); /* |MSG_DONTWAIT)); */
    } else {
        /*
         * Since there's no sendv or sendtov like writev,
         * we must copy all data to a linear buffer then
         * send it all
         */
        uint32_t urgc = so->so_urgc;
        int len = (sb->sb_data + sb->sb_datalen) - sb->sb_rptr;
        if (len > urgc) {
            len = urgc;
        }
        memcpy(buff, sb->sb_rptr, len);
        urgc -= len;
        if (urgc) {
            n = sb->sb_wptr - sb->sb_data;
            if (n > urgc) {
                n = urgc;
            }
            memcpy((buff + len), sb->sb_data, n);
            len += n;
        }
        n = slirp_send(so, buff, len, (MSG_OOB)); /* |MSG_DONTWAIT)); */
#ifdef DEBUG
        if (n != len) {
            DEBUG_ERROR("Didn't send all data urgently XXXXX");
        }
#endif
    }

    if (n < 0) {
        return n;
    }
    so->so_urgc -= n;
    DEBUG_MISC(" ---2 sent %d bytes urgent data, %d urgent bytes left",
               n, so->so_urgc);

    sb->sb_cc -= n;
    sb->sb_rptr += n;
    if (sb->sb_rptr >= (sb->sb_data + sb->sb_datalen))
        sb->sb_rptr -= sb->sb_datalen;

    return n;
}

/*
 * Write data from so_rcv to so's socket,
 * updating all sbuf fields as necessary
 */
int
sowrite(struct socket *so)
{
    int n, nn;
    struct sbuf *sb = &so->so_rcv;
    int len = sb->sb_cc;
    struct iovec iov[2];

    DEBUG_CALL("sowrite");
    DEBUG_ARG("so = %p", so);

    if (so->so_urgc) {
        uint32_t expected = so->so_urgc;
        if (sosendoob(so) < expected) {
            /* Treat a short write as a fatal error too,
             * rather than continuing on and sending the urgent
             * data as if it were non-urgent and leaving the
             * so_urgc count wrong.
             */
            goto err_disconnected;
        }
        if (sb->sb_cc == 0)
            return 0;
    }

    /*
     * No need to check if there's something to write,
     * sowrite wouldn't have been called otherwise
     */

    iov[0].iov_base = sb->sb_rptr;
    iov[1].iov_base = NULL;
    iov[1].iov_len = 0;
    if (sb->sb_rptr < sb->sb_wptr) {
        iov[0].iov_len = sb->sb_wptr - sb->sb_rptr;
        /* Should never succeed, but... */
        if (iov[0].iov_len > len)
            iov[0].iov_len = len;
        n = 1;
    } else {
        iov[0].iov_len = (sb->sb_data + sb->sb_datalen) - sb->sb_rptr;
        if (iov[0].iov_len > len)
            iov[0].iov_len = len;
        len -= iov[0].iov_len;
        if (len) {
            iov[1].iov_base = sb->sb_data;
            iov[1].iov_len = sb->sb_wptr - sb->sb_data;
            if (iov[1].iov_len > len)
                iov[1].iov_len = len;
            n = 2;
        } else
            n = 1;
    }
    /* Check if there's urgent data to send, and if so, send it */

    nn = slirp_send(so, iov[0].iov_base, iov[0].iov_len, 0);
    /* This should never happen, but people tell me it does *shrug* */
    if (nn < 0 && (errno == EAGAIN || errno == EINTR))
        return 0;

    if (nn <= 0) {
        goto err_disconnected;
    }

    if (n == 2 && nn == iov[0].iov_len) {
        int ret;
        ret = slirp_send(so, iov[1].iov_base, iov[1].iov_len, 0);
        if (ret > 0)
            nn += ret;
    }
    DEBUG_MISC(" ... wrote nn = %d bytes", nn);

    /* Update sbuf */
    sb->sb_cc -= nn;
    sb->sb_rptr += nn;
    if (sb->sb_rptr >= (sb->sb_data + sb->sb_datalen))
        sb->sb_rptr -= sb->sb_datalen;

    /*
     * If in DRAIN mode, and there's no more data, set
     * it CANTSENDMORE
     */
    if ((so->so_state & SS_FWDRAIN) && sb->sb_cc == 0)
        sofcantsendmore(so);

    return nn;

err_disconnected:
    DEBUG_MISC(" --- sowrite disconnected, so->so_state = %x, errno = %d",
               so->so_state, errno);
    sofcantsendmore(so);
    tcp_sockclosed(sototcpcb(so));
    return -1;
}

/*
 * recvfrom() a UDP socket
 */
void
sorecvfrom(struct socket *so)
{
    struct sockaddr_storage addr;
    struct sockaddr_storage saddr, daddr;
    socklen_t addrlen = sizeof(struct sockaddr_storage);

    DEBUG_CALL("sorecvfrom");
    DEBUG_ARG("so = %p", so);

    if (so->so_type == IPPROTO_ICMP) {  /* This is a "ping" reply */
        char buff[256];
        int len;

        len = recvfrom(so->s, buff, 256, 0,
                       (struct sockaddr *)&addr, &addrlen);
        /* XXX Check if reply is "correct"? */

        if (len == -1 || len == 0) {
            u_char code = ICMP_UNREACH_PORT;

            if (errno == EHOSTUNREACH) code = ICMP_UNREACH_HOST;
            else if (errno == ENETUNREACH) code = ICMP_UNREACH_NET;

            DEBUG_MISC(" udp icmp rx errno = %d-%s",
                       errno, strerror(errno));
            icmp_send_error(so->so_m, ICMP_UNREACH, code, 0, strerror(errno));
        } else {
            icmp_reflect(so->so_m);
            so->so_m = NULL; /* Don't m_free() it again! */
        }
        /* No need for this socket anymore, udp_detach it */
        udp_detach(so);
    } else {                            /* A "normal" UDP packet */
        struct mbuf *m;
        int len;
#ifdef _WIN32
        unsigned long n;
#else
        int n;
#endif

        m = m_get(so->slirp);
        if (!m) {
            return;
        }
        switch (so->so_ffamily) {
        case AF_INET:
            m->m_data += IF_MAXLINKHDR + sizeof(struct udpiphdr);
            break;
        case AF_INET6:
            m->m_data += IF_MAXLINKHDR + sizeof(struct ip6)
                                       + sizeof(struct udphdr);
            break;
        default:
            g_assert_not_reached();
            break;
        }

        /*
         * XXX Shouldn't FIONREAD packets destined for port 53,
         * but I don't know the max packet size for DNS lookups
         */
        len = M_FREEROOM(m);
        /* if (so->so_fport != htons(53)) { */
        ioctlsocket(so->s, FIONREAD, &n);

        if (n > len) {
            n = (m->m_data - m->m_dat) + m->m_len + n + 1;
            m_inc(m, n);
            len = M_FREEROOM(m);
        }
        /* } */

        m->m_len = recvfrom(so->s, m->m_data, len, 0,
                            (struct sockaddr *)&addr, &addrlen);
        DEBUG_MISC(" did recvfrom %d, errno = %d-%s",
                   m->m_len, errno, strerror(errno));
        if (m->m_len < 0) {
            /* Report error as ICMP */
            switch (so->so_lfamily) {
            uint8_t code;
            case AF_INET:
                code = ICMP_UNREACH_PORT;

                if (errno == EHOSTUNREACH) {
                    code = ICMP_UNREACH_HOST;
                } else if (errno == ENETUNREACH) {
                    code = ICMP_UNREACH_NET;
                }

                DEBUG_MISC(" rx error, tx icmp ICMP_UNREACH:%i", code);
                icmp_send_error(so->so_m, ICMP_UNREACH, code, 0, strerror(errno));
                break;
            case AF_INET6:
                code = ICMP6_UNREACH_PORT;

                if (errno == EHOSTUNREACH) {
                    code = ICMP6_UNREACH_ADDRESS;
                } else if (errno == ENETUNREACH) {
                    code = ICMP6_UNREACH_NO_ROUTE;
                }

                DEBUG_MISC(" rx error, tx icmp6 ICMP_UNREACH:%i", code);
                icmp6_send_error(so->so_m, ICMP6_UNREACH, code);
                break;
            default:
                g_assert_not_reached();
                break;
            }
            m_free(m);
        } else {
            /*
             * Hack: domain name lookup will be used the most for UDP,
             * and since they'll only be used once there's no need
             * for the 4 minute (or whatever) timeout... So we time them
             * out much quicker (10 seconds for now...)
             */
            if (so->so_expire) {
                if (so->so_fport == htons(53))
                    so->so_expire = curtime + SO_EXPIREFAST;
                else
                    so->so_expire = curtime + SO_EXPIRE;
            }

            /*
             * If this packet was destined for CTL_ADDR,
             * make it look like that's where it came from
             */
            saddr = addr;
            sotranslate_in(so, &saddr);
            daddr = so->lhost.ss;

            switch (so->so_ffamily) {
            case AF_INET:
                udp_output(so, m, (struct sockaddr_in *) &saddr,
                           (struct sockaddr_in *) &daddr,
                           so->so_iptos);
                break;
            case AF_INET6:
                udp6_output(so, m, (struct sockaddr_in6 *) &saddr,
                            (struct sockaddr_in6 *) &daddr);
                break;
            default:
                g_assert_not_reached();
                break;
            }
        } /* rx error */
    } /* if ping packet */
}

/*
 * sendto() a socket
 */
int
sosendto(struct socket *so, struct mbuf *m)
{
    int ret;
    struct sockaddr_storage addr;

    DEBUG_CALL("sosendto");
    DEBUG_ARG("so = %p", so);
    DEBUG_ARG("m = %p", m);

    addr = so->fhost.ss;
    DEBUG_CALL(" sendto()ing)");
    sotranslate_out(so, &addr);

    /* Don't care what port we get */
    ret = sendto(so->s, m->m_data, m->m_len, 0,
                 (struct sockaddr *)&addr, sockaddr_size(&addr));
    if (ret < 0)
        return -1;

    /*
     * Kill the socket if there's no reply in 4 minutes,
     * but only if it's an expirable socket
     */
    if (so->so_expire)
        so->so_expire = curtime + SO_EXPIRE;
    so->so_state &= SS_PERSISTENT_MASK;
    so->so_state |= SS_ISFCONNECTED; /* So that it gets select()ed */
    return 0;
}

/*
 * Listen for incoming TCP connections
 */
struct socket *
tcp_listen(Slirp *slirp, uint32_t haddr, u_int hport, uint32_t laddr,
           u_int lport, int flags)
{
    struct sockaddr_in addr;
    struct socket *so;
    int s, opt = 1;
    socklen_t addrlen = sizeof(addr);
    memset(&addr, 0, addrlen);

    DEBUG_CALL("tcp_listen");
    DEBUG_ARG("haddr = %s", inet_ntoa((struct in_addr){.s_addr = haddr}));
    DEBUG_ARG("hport = %d", ntohs(hport));
    DEBUG_ARG("laddr = %s", inet_ntoa((struct in_addr){.s_addr = laddr}));
    DEBUG_ARG("lport = %d", ntohs(lport));
    DEBUG_ARG("flags = %x", flags);

    so = socreate(slirp);

    /* Don't tcp_attach... we don't need so_snd nor so_rcv */
    if ((so->so_tcpcb = tcp_newtcpcb(so)) == NULL) {
        g_free(so);
        return NULL;
    }
    insque(so, &slirp->tcb);

    /*
     * SS_FACCEPTONCE sockets must time out.
     */
    if (flags & SS_FACCEPTONCE)
        so->so_tcpcb->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT * 2;

    so->so_state &= SS_PERSISTENT_MASK;
    so->so_state |= (SS_FACCEPTCONN | flags);
    so->so_lfamily = AF_INET;
    so->so_lport = lport; /* Kept in network format */
    so->so_laddr.s_addr = laddr; /* Ditto */

    addr.sin_family = AF_INET;
    addr.sin_addr.s_addr = haddr;
    addr.sin_port = hport;

    if (((s = qemu_socket(AF_INET, SOCK_STREAM, 0)) < 0) ||
        (socket_set_fast_reuse(s) < 0) ||
        (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0) ||
        (listen(s, 1) < 0)) {
        int tmperrno = errno; /* Don't clobber the real reason we failed */

        if (s >= 0) {
            closesocket(s);
        }
        sofree(so);
        /* Restore the real errno */
#ifdef _WIN32
        WSASetLastError(tmperrno);
#else
        errno = tmperrno;
#endif
        return NULL;
    }
    qemu_setsockopt(s, SOL_SOCKET, SO_OOBINLINE, &opt, sizeof(int));
    opt = 1;
    qemu_setsockopt(s, IPPROTO_TCP, TCP_NODELAY, &opt, sizeof(int));

    getsockname(s, (struct sockaddr *)&addr, &addrlen);
    so->so_ffamily = AF_INET;
    so->so_fport = addr.sin_port;
    if (addr.sin_addr.s_addr == 0 || addr.sin_addr.s_addr == loopback_addr.s_addr)
        so->so_faddr = slirp->vhost_addr;
    else
        so->so_faddr = addr.sin_addr;

    so->s = s;
    return so;
}

/*
 * Various session state calls
 * XXX Should be #define's
 * The socket state stuff needs work, these often get called 2 or 3
 * times each when only 1 was needed
 */
void
soisfconnecting(struct socket *so)
{
    so->so_state &= ~(SS_NOFDREF|SS_ISFCONNECTED|SS_FCANTRCVMORE|
                      SS_FCANTSENDMORE|SS_FWDRAIN);
    so->so_state |= SS_ISFCONNECTING; /* Clobber other states */
}

void
soisfconnected(struct socket *so)
{
    so->so_state &= ~(SS_ISFCONNECTING|SS_FWDRAIN|SS_NOFDREF);
    so->so_state |= SS_ISFCONNECTED; /* Clobber other states */
}

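/*
 * The peer has stopped sending: shut down the receive side of the host
 * socket and, if sending was already shut down as well, drop the
 * file-descriptor reference so the socket is no longer polled.
 */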
static void
sofcantrcvmore(struct socket *so)
{
    if ((so->so_state & SS_NOFDREF) == 0) {
        shutdown(so->s, 0);
    }
    so->so_state &= ~(SS_ISFCONNECTING);
    if (so->so_state & SS_FCANTSENDMORE) {
        so->so_state &= SS_PERSISTENT_MASK;
        so->so_state |= SS_NOFDREF; /* Don't select it */
    } else {
        so->so_state |= SS_FCANTRCVMORE;
    }
}

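/*
 * We have nothing more to send: shut down the send side of the host
 * socket (sending a FIN) and, if receiving was already shut down as
 * well, drop the file-descriptor reference.
 */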
static void
sofcantsendmore(struct socket *so)
{
    if ((so->so_state & SS_NOFDREF) == 0) {
        shutdown(so->s, 1); /* send FIN to fhost */
    }
    so->so_state &= ~(SS_ISFCONNECTING);
    if (so->so_state & SS_FCANTRCVMORE) {
        so->so_state &= SS_PERSISTENT_MASK;
        so->so_state |= SS_NOFDREF; /* as above */
    } else {
        so->so_state |= SS_FCANTSENDMORE;
    }
}

/*
 * Set write drain mode
 * Set CANTSENDMORE once all data has been write()n
 */
void
sofwdrain(struct socket *so)
{
    if (so->so_rcv.sb_cc)
        so->so_state |= SS_FWDRAIN;
    else
        sofcantsendmore(so);
}

/*
 * Translate addr into a host address when it is a virtual address
 */
void sotranslate_out(struct socket *so, struct sockaddr_storage *addr)
{
    Slirp *slirp = so->slirp;
    struct sockaddr_in *sin = (struct sockaddr_in *)addr;
    struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;

    switch (addr->ss_family) {
    case AF_INET:
        if ((so->so_faddr.s_addr & slirp->vnetwork_mask.s_addr) ==
                slirp->vnetwork_addr.s_addr) {
            /* It's an alias */
            if (so->so_faddr.s_addr == slirp->vnameserver_addr.s_addr) {
                if (get_dns_addr(&sin->sin_addr) < 0) {
                    sin->sin_addr = loopback_addr;
                }
            } else {
                sin->sin_addr = loopback_addr;
            }
        }

        DEBUG_MISC(" addr.sin_port=%d, addr.sin_addr.s_addr=%.16s",
                   ntohs(sin->sin_port), inet_ntoa(sin->sin_addr));
        break;

    case AF_INET6:
        if (in6_equal_net(&so->so_faddr6, &slirp->vprefix_addr6,
                          slirp->vprefix_len)) {
            if (in6_equal(&so->so_faddr6, &slirp->vnameserver_addr6)) {
                uint32_t scope_id;
                if (get_dns6_addr(&sin6->sin6_addr, &scope_id) >= 0) {
                    sin6->sin6_scope_id = scope_id;
                } else {
                    sin6->sin6_addr = in6addr_loopback;
                }
            } else {
                sin6->sin6_addr = in6addr_loopback;
            }
        }
        break;

    default:
        break;
    }
}

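/*
 * Translate the source address of data arriving from the host back into
 * the guest-visible (virtual) address the guest expects to see.
 */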
void sotranslate_in(struct socket *so, struct sockaddr_storage *addr)
{
    Slirp *slirp = so->slirp;
    struct sockaddr_in *sin = (struct sockaddr_in *)addr;
    struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)addr;

    switch (addr->ss_family) {
    case AF_INET:
        if ((so->so_faddr.s_addr & slirp->vnetwork_mask.s_addr) ==
                slirp->vnetwork_addr.s_addr) {
            uint32_t inv_mask = ~slirp->vnetwork_mask.s_addr;

            if ((so->so_faddr.s_addr & inv_mask) == inv_mask) {
                sin->sin_addr = slirp->vhost_addr;
            } else if (sin->sin_addr.s_addr == loopback_addr.s_addr ||
                       so->so_faddr.s_addr != slirp->vhost_addr.s_addr) {
                sin->sin_addr = so->so_faddr;
            }
        }
        break;

    case AF_INET6:
        if (in6_equal_net(&so->so_faddr6, &slirp->vprefix_addr6,
                          slirp->vprefix_len)) {
            if (in6_equal(&sin6->sin6_addr, &in6addr_loopback)
                    || !in6_equal(&so->so_faddr6, &slirp->vhost_addr6)) {
                sin6->sin6_addr = so->so_faddr6;
            }
        }
        break;

    default:
        break;
    }
}

/*
 * Translate connections from localhost to the real hostname
 */
void sotranslate_accept(struct socket *so)
{
    Slirp *slirp = so->slirp;

    switch (so->so_ffamily) {
    case AF_INET:
        if (so->so_faddr.s_addr == INADDR_ANY ||
            (so->so_faddr.s_addr & loopback_mask) ==
            (loopback_addr.s_addr & loopback_mask)) {
            so->so_faddr = slirp->vhost_addr;
        }
        break;

    case AF_INET6:
        if (in6_equal(&so->so_faddr6, &in6addr_any) ||
            in6_equal(&so->so_faddr6, &in6addr_loopback)) {
            so->so_faddr6 = slirp->vhost_addr6;
        }
        break;

    default:
        break;
    }
}