/*
 * Copyright (c) 1995 Danny Gasparovski.
 *
 * Please read the file COPYRIGHT for the
 * terms and conditions of the copyright.
 */

#include "qemu/osdep.h"
#include "slirp.h"
#include "qemu/timer.h"
/* Insert ifm into a session's packet list, right after ifmhead */
static void ifs_insque(struct mbuf *ifm, struct mbuf *ifmhead)
{
    ifm->ifs_next = ifmhead->ifs_next;
    ifmhead->ifs_next = ifm;
    ifm->ifs_prev = ifmhead;
    ifm->ifs_next->ifs_prev = ifm;
}
/* Unlink ifm from its session's packet list */
static void ifs_remque(struct mbuf *ifm)
{
    ifm->ifs_prev->ifs_next = ifm->ifs_next;
    ifm->ifs_next->ifs_prev = ifm->ifs_prev;
}
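
/*
 * ifs_insque()/ifs_remque() maintain the per-session packet list.  That
 * list is circular (a packet with no siblings points back at itself, see
 * the "ifm->ifs_next != ifm" test in if_start()), so inserting at
 * head->ifs_prev appends at the tail and keeps the session in FIFO order:
 *
 *     head <-> pkt2 <-> pkt3 <-> new pkt <-> (back to head)
 */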
void if_init(Slirp *slirp)
{
    slirp->if_fastq.qh_link = slirp->if_fastq.qh_rlink = &slirp->if_fastq;
    slirp->if_batchq.qh_link = slirp->if_batchq.qh_rlink = &slirp->if_batchq;
}
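
/*
 * An empty queue is one whose head points back at itself; that is the state
 * if_init() sets up here, and it is what the
 * "qh_link != &slirp->if_fastq/if_batchq" checks in if_start() test for.
 */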
/*
 * if_output: Queue packet into an output queue.
 * There are 2 output queues, if_fastq and if_batchq.
 * Each output queue is a doubly linked list of doubly linked lists
 * of mbufs, each list belonging to one "session" (socket).  This
 * way, we can output packets fairly by sending one packet from each
 * session, instead of all the packets from one session, then all packets
 * from the next session, etc.  Packets on the if_fastq get absolute
 * priority, but if one session hogs the link, it gets "downgraded"
 * to the batchq until it runs out of packets, then it'll return
 * to the fastq (eg. if the user does an ls -alR in a telnet session,
 * it'll temporarily get downgraded to the batchq)
 */
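/*
 * Roughly, the resulting structure looks like this (left/right is the
 * ifq_next/ifq_prev chain of sessions, top/down is each session's
 * ifs_next/ifs_prev chain of packets; the session names are only
 * illustrative):
 *
 *     if_batchq <-> sessA.pkt1 <-> sessB.pkt1 <-> sessC.pkt1 <-> ...
 *                       |              |
 *                   sessA.pkt2     sessB.pkt2
 *                       |
 *                   sessA.pkt3
 */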
void if_output(struct socket *so, struct mbuf *ifm)
{
    Slirp *slirp = ifm->slirp;
    struct mbuf *ifq;
    int on_fastq = 1;

    DEBUG_CALL("if_output");
    DEBUG_ARG("so = %p", so);
    DEBUG_ARG("ifm = %p", ifm);
    /*
     * First remove the mbuf from m_usedlist,
     * since we're gonna use m_next and m_prev ourselves
     * XXX Shouldn't need this, gotta change dtom() etc.
     */
    if (ifm->m_flags & M_USEDLIST) {
        remque(ifm);
        ifm->m_flags &= ~M_USEDLIST;
    }
    /*
     * See if there's already a batchq list for this session.
     * This can include an interactive session, which should go on fastq,
     * but gets too greedy... hence it'll be downgraded from fastq to batchq.
     * We mustn't put this packet back on the fastq (or we'll send it out of order)
     */
    for (ifq = (struct mbuf *) slirp->if_batchq.qh_rlink;
         (struct quehead *) ifq != &slirp->if_batchq;
         ifq = ifq->ifq_prev) {
        if (so == ifq->ifq_so) {
            /* A match: append to this session's existing list */
            ifm->ifq_so = so;
            ifs_insque(ifm, ifq->ifs_prev);
            goto diddit;
        }
    }
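
    /*
     * Note that the match above appends at ifq->ifs_prev, the tail of the
     * session's circular list, so a session's packets always leave in the
     * order they were queued, even after the session has been downgraded
     * from the fastq.
     */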
    /* No match, check which queue to put it on */
    if (so && (so->so_iptos & IPTOS_LOWDELAY)) {
        ifq = (struct mbuf *) slirp->if_fastq.qh_rlink;
        /*
         * Check if this packet is a part of the last
         * packet's session
         */
        if (ifq->ifq_so == so) {
            ifm->ifq_so = so;
            ifs_insque(ifm, ifq->ifs_prev);
            goto diddit;
        }
    } else {
        ifq = (struct mbuf *) slirp->if_batchq.qh_rlink;
        on_fastq = 0;
    }
    /* Create a new doubly linked list for this session */
    ifm->ifq_so = so;
    ifs_init(ifm);
    insque(ifm, ifq);

diddit:
    if (so) {
        /* Update *_queued */
        so->so_queued++;
        so->so_nqueued++;
        /*
         * Check if the interactive session should be downgraded to
         * the batchq.  A session is downgraded if it has queued 6
         * packets without pausing, and at least 3 of those packets
         * have been sent over the link
         * (XXX These are arbitrary numbers, probably not optimal..)
         */
        if (on_fastq && ((so->so_nqueued >= 6) &&
                         (so->so_nqueued - so->so_queued) >= 3)) {

            /* Remove from current queue... */
            remque(ifm->ifs_next);

            /* ...And insert in the new.  That'll teach ya! */
            insque(ifm->ifs_next, &slirp->if_batchq);
        }
    }
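
    /*
     * (so_nqueued counts packets queued since the session last drained,
     * so_queued counts packets still waiting.  E.g. so_nqueued == 6 with
     * so_queued == 3 means three packets already went out without a pause,
     * which is what triggers the downgrade above.)
     */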
    /*
     * This prevents us from malloc()ing too many mbufs
     */
    if_start(ifm->slirp);
}
/*
 * Send one packet from each session.
 * If there are packets on the fastq, they are sent FIFO, before
 * everything else.  Then we choose the first packet from each
 * batchq session (socket) and send it.
 * For example, if there are 3 ftp sessions fighting for bandwidth,
 * one packet will be sent from the first session, then one packet
 * from the second session, then one packet from the third.
 */
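/*
 * Concretely, the loop below walks the fastq strictly FIFO and then takes
 * at most one packet from each batchq session per call; whatever remains
 * of a batchq session stays queued in place for the next call.
 */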
void if_start(Slirp *slirp)
{
    uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    bool from_batchq = false;
    struct mbuf *ifm, *ifm_next, *ifqt;

    DEBUG_CALL("if_start");

    if (slirp->if_start_busy) {
        return;
    }
    slirp->if_start_busy = true;
    struct mbuf *batch_head = NULL;
    if (slirp->if_batchq.qh_link != &slirp->if_batchq) {
        batch_head = (struct mbuf *) slirp->if_batchq.qh_link;
    }
    if (slirp->if_fastq.qh_link != &slirp->if_fastq) {
        ifm_next = (struct mbuf *) slirp->if_fastq.qh_link;
    } else if (batch_head) {
        /* Nothing on fastq, pick up from batchq */
        ifm_next = batch_head;
        from_batchq = true;
    } else {
        ifm_next = NULL;
    }
    while (ifm_next) {
        ifm = ifm_next;

        ifm_next = ifm->ifq_next;
        if ((struct quehead *) ifm_next == &slirp->if_fastq) {
            /* No more packets in fastq, switch to batchq */
            ifm_next = batch_head;
            from_batchq = true;
        }
        if ((struct quehead *) ifm_next == &slirp->if_batchq) {
            /* end of batchq */
            ifm_next = NULL;
        }
        /* Try to send packet unless it already expired */
        if (ifm->expiration_date >= now && !if_encap(slirp, ifm)) {
            /* Packet is delayed due to pending ARP or NDP resolution */
            continue;
        }

        /* Remove it from the queue */
        ifqt = ifm->ifq_prev;
        remque(ifm);
        /* If there are more packets for this session, re-queue them */
        if (ifm->ifs_next != ifm) {
            struct mbuf *next = ifm->ifs_next;

            insque(next, ifqt);
            ifs_remque(ifm);
            if (!from_batchq) {
                ifm_next = next;
            }
        }
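
        /*
         * The re-queue above slots the session's next packet into the exact
         * queue position the sent packet occupied: a fastq session keeps
         * being serviced right away (ifm_next = next), while a batchq
         * session has to wait for a later if_start() call, which is what
         * gives the other sessions their turn.
         */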
        /* Update so_queued */
        if (ifm->ifq_so && --ifm->ifq_so->so_queued == 0) {
            /* If there's no more queued, reset nqueued */
            ifm->ifq_so->so_nqueued = 0;
        }
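
        /*
         * Resetting so_nqueued once nothing is queued gives the session a
         * clean slate: its next burst can go back on the fastq, as described
         * in the comment above if_output().
         */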
        m_free(ifm);
    }

    slirp->if_start_busy = false;
}