/*
 * Copyright (c) 1995 Danny Gasparovski.
 *
 * Please read the file COPYRIGHT for the
 * terms and conditions of the copyright.
 */

#include "qemu/osdep.h"
#include "slirp.h"
#include "qemu/timer.h"

static void
ifs_insque(struct mbuf *ifm, struct mbuf *ifmhead)
{
    ifm->ifs_next = ifmhead->ifs_next;
    ifmhead->ifs_next = ifm;
    ifm->ifs_prev = ifmhead;
    ifm->ifs_next->ifs_prev = ifm;
}

static void
ifs_remque(struct mbuf *ifm)
{
    ifm->ifs_prev->ifs_next = ifm->ifs_next;
    ifm->ifs_next->ifs_prev = ifm->ifs_prev;
}

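/*
 * ifs_insque()/ifs_remque() maintain the per-session packet ring linked
 * through ifs_next/ifs_prev; they are the counterpart of the generic
 * insque()/remque() used for the ifq_next/ifq_prev queue links.
 */
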
void
if_init(Slirp *slirp)
{
    slirp->if_fastq.qh_link = slirp->if_fastq.qh_rlink = &slirp->if_fastq;
    slirp->if_batchq.qh_link = slirp->if_batchq.qh_rlink = &slirp->if_batchq;
    slirp->next_m = (struct mbuf *) &slirp->if_batchq;
}

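/*
 * Both queue heads start out as empty circular lists, pointing at
 * themselves.  next_m pointing at &if_batchq is the sentinel for
 * "nothing pending on the batchq"; if_output() and if_start() test for
 * exactly this value.  The casts between struct quehead * and
 * struct mbuf * used throughout this file rely on an mbuf starting with
 * the same pair of forward/back link pointers as a quehead, so a queue
 * head can stand in for an mbuf at the list level.
 */
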
/*
 * if_output: Queue packet into an output queue.
 * There are 2 output queues, if_fastq and if_batchq.
 * Each output queue is a doubly linked list of doubly linked lists
 * of mbufs, each list belonging to one "session" (socket).  This
 * way, we can output packets fairly by sending one packet from each
 * session, instead of all the packets from one session, then all packets
 * from the next session, etc.  Packets on the if_fastq get absolute
 * priority, but if one session hogs the link, it gets "downgraded"
 * to the batchq until it runs out of packets, then it'll return
 * to the fastq (e.g. if the user does an ls -alR in a telnet session,
 * it'll temporarily get downgraded to the batchq)
 */

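/*
 * Roughly, the layout looks like this (illustrative sketch; the real
 * links are the ifq_next/ifq_prev and ifs_next/ifs_prev pointers of
 * struct mbuf, with the quehead acting as list head):
 *
 *   if_batchq (or if_fastq)
 *      |
 *      +-> session A: pkt A1 <-> pkt A2 <-> ...   (ifs_* ring)
 *      |
 *      +-> session B: pkt B1
 *      |
 *      +-> session C: pkt C1 <-> pkt C2
 *
 * Only the first packet of each session ring is linked into the output
 * queue itself; the rest hang off it through the ifs_* pointers.
 */
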
void
if_output(struct socket *so, struct mbuf *ifm)
{
    Slirp *slirp = ifm->slirp;
    struct mbuf *ifq;
    int on_fastq = 1;

    DEBUG_CALL("if_output");
    DEBUG_ARG("so = %p", so);
    DEBUG_ARG("ifm = %p", ifm);

    /*
     * First remove the mbuf from m_usedlist,
     * since we're gonna use m_next and m_prev ourselves
     * XXX Shouldn't need this, gotta change dtom() etc.
     */
    if (ifm->m_flags & M_USEDLIST) {
        remque(ifm);
        ifm->m_flags &= ~M_USEDLIST;
    }

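    /*
     * The output queues are walked through the generic quehead links
     * (ifq_next/ifq_prev); the search below starts at the batchq tail
     * (qh_rlink) and moves backwards, comparing the socket of each
     * session list already on the batchq.
     */
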
    /*
     * See if there's already a batchq list for this session.
     * This can include an interactive session, which should go on fastq,
     * but gets too greedy... hence it'll be downgraded from fastq to batchq.
     * We mustn't put this packet back on the fastq (or we'll send it out of order)
     */
    for (ifq = (struct mbuf *) slirp->if_batchq.qh_rlink;
         (struct quehead *) ifq != &slirp->if_batchq;
         ifq = ifq->ifq_prev) {
        if (so == ifq->ifq_so) {
            /* A match! */
            ifm->ifq_so = so;
            ifs_insque(ifm, ifq->ifs_prev);
            goto diddit;
        }
    }

    /* No match, check which queue to put it on */
    if (so && (so->so_iptos & IPTOS_LOWDELAY)) {
        ifq = (struct mbuf *) slirp->if_fastq.qh_rlink;
        on_fastq = 1;
        /*
         * Check if this packet is a part of the last
         * packet's session
         */
        if (ifq->ifq_so == so) {
            ifm->ifq_so = so;
            ifs_insque(ifm, ifq->ifs_prev);
            goto diddit;
        }
    } else {
        ifq = (struct mbuf *) slirp->if_batchq.qh_rlink;
        /* Set next_m if the queue was empty so far */
        if ((struct quehead *) slirp->next_m == &slirp->if_batchq) {
            slirp->next_m = ifm;
        }
    }

    /* Create a new doubly linked list for this session */
    ifm->ifq_so = so;
    ifs_init(ifm);
    insque(ifm, ifq);

diddit:
    if (so) {
        /* Update *_queued */
        so->so_queued++;
        so->so_nqueued++;
        /*
         * Check if the interactive session should be downgraded to
         * the batchq.  A session is downgraded if it has queued 6
         * packets without pausing, and at least 3 of those packets
         * have been sent over the link
         * (XXX These are arbitrary numbers, probably not optimal..)
         */
        if (on_fastq && ((so->so_nqueued >= 6) &&
                         (so->so_nqueued - so->so_queued) >= 3)) {

            /* Remove from current queue... */
            remque(ifm->ifs_next);

            /* ...And insert in the new.  That'll teach ya! */
            insque(ifm->ifs_next, &slirp->if_batchq);
        }
    }

    /*
     * This prevents us from malloc()ing too many mbufs
     */
    if_start(ifm->slirp);
}

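/*
 * Illustrative usage (a sketch, not code from this file): a protocol
 * layer that has built an mbuf m for socket so queues it with
 *
 *     if_output(so, m);
 *
 * so may be NULL for packets that are not tied to a socket; the checks
 * above then put them on the batchq.  if_output() finishes by calling
 * if_start() itself, so queued packets are pushed out immediately when
 * possible.
 */
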
/*
 * Send a packet.
 * We choose a packet based on its position in the output queues;
 * If there are packets on the fastq, they are sent FIFO, before
 * everything else.  Otherwise we choose the first packet from the
 * batchq and send it.  The next packet chosen will be from the session
 * after this one, then the session after that one, and so on.  So,
 * for example, if there are 3 ftp sessions fighting for bandwidth,
 * one packet will be sent from the first session, then one packet
 * from the second session, then one packet from the third, then back
 * to the first, etc. etc.
 */

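/*
 * The fastq is drained front to back before anything else.  next_m lives
 * in the Slirp state and remembers where in the batchq the previous run
 * stopped, so successive calls resume the round-robin instead of always
 * restarting at the front of the batchq.
 */
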
void if_start(Slirp *slirp)
{
    uint64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    bool from_batchq, next_from_batchq;
    struct mbuf *ifm, *ifm_next, *ifqt;

    DEBUG_CALL("if_start");

    if (slirp->if_start_busy) {
        return;
    }
    slirp->if_start_busy = true;

    if (slirp->if_fastq.qh_link != &slirp->if_fastq) {
        ifm_next = (struct mbuf *) slirp->if_fastq.qh_link;
        next_from_batchq = false;
    } else if ((struct quehead *) slirp->next_m != &slirp->if_batchq) {
        /* Nothing on fastq, pick up from batchq via next_m */
        ifm_next = slirp->next_m;
        next_from_batchq = true;
    } else {
        ifm_next = NULL;
    }

    while (ifm_next) {
        ifm = ifm_next;
        from_batchq = next_from_batchq;

        ifm_next = ifm->ifq_next;
        if ((struct quehead *) ifm_next == &slirp->if_fastq) {
            /* No more packets in fastq, switch to batchq */
            ifm_next = slirp->next_m;
            next_from_batchq = true;
        }
        if ((struct quehead *) ifm_next == &slirp->if_batchq) {
            /* end of batchq */
            ifm_next = NULL;
        }

        /* Try to send packet unless it already expired */
        if (ifm->expiration_date >= now && !if_encap(slirp, ifm)) {
            /* Packet is delayed due to pending ARP or NDP resolution */
            continue;
        }

        if (ifm == slirp->next_m) {
            /* Set which packet to send on next iteration */
            slirp->next_m = ifm->ifq_next;
        }

        /* Remove it from the queue */
        ifqt = ifm->ifq_prev;
        remque(ifm);

        /* If there are more packets for this session, re-queue them */
        if (ifm->ifs_next != ifm) {
            struct mbuf *next = ifm->ifs_next;

            insque(next, ifqt);
            ifs_remque(ifm);

            if (!from_batchq) {
                /* Next packet in fastq is from the same session */
                ifm_next = next;
                next_from_batchq = false;
            } else if ((struct quehead *) slirp->next_m == &slirp->if_batchq) {
                /* Set next_m and ifm_next if the session packet is now the
                 * only one on batchq */
                slirp->next_m = ifm_next = next;
            }
        }

        /* Update so_queued */
        if (ifm->ifq_so && --ifm->ifq_so->so_queued == 0) {
            /* If there's no more queued, reset nqueued */
            ifm->ifq_so->so_nqueued = 0;
        }

        m_free(ifm);
    }

    slirp->if_start_busy = false;
}
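
/*
 * Note on re-entrancy: if_start_busy makes the send loop above
 * non-reentrant.  A nested call simply returns while an outer one is
 * still walking the queues; the flag is cleared only when the outer
 * invocation finishes draining them.
 */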