/*
 * Copyright (c) 1995 Danny Gasparovski.
 *
 * Please read the file COPYRIGHT for the
 * terms and conditions of the copyright.
 */
#include "slirp.h"

/* Insert ifm into a session's packet list, right after ifmhead */
static void ifs_insque(struct mbuf *ifm, struct mbuf *ifmhead)
{
    ifm->ifs_next = ifmhead->ifs_next;
    ifmhead->ifs_next = ifm;
    ifm->ifs_prev = ifmhead;
    ifm->ifs_next->ifs_prev = ifm;
}
/* Unlink ifm from its session's packet list */
static void ifs_remque(struct mbuf *ifm)
{
    ifm->ifs_prev->ifs_next = ifm->ifs_next;
    ifm->ifs_next->ifs_prev = ifm->ifs_prev;
}
void if_init(Slirp *slirp)
{
    slirp->if_fastq.qh_link = slirp->if_fastq.qh_rlink = &slirp->if_fastq;
    slirp->if_batchq.qh_link = slirp->if_batchq.qh_rlink = &slirp->if_batchq;
}
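/*
 * Note on the lists used below: struct quehead and struct mbuf both begin
 * with a next/prev pointer pair (per mbuf.h, ifq_next/ifq_prev alias the
 * mbuf's m_next/m_prev), which is what makes the casts between the two
 * types legal.  An "empty" queue is a head whose two pointers point back
 * at the head itself, which is exactly what if_init() sets up.
 */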
/*
 * if_output: Queue packet into an output queue.
 * There are 2 output queues, if_fastq and if_batchq.
 * Each output queue is a doubly linked list of doubly linked lists
 * of mbufs, each list belonging to one "session" (socket).  This
 * way, we can output packets fairly by sending one packet from each
 * session, instead of all the packets from one session, then all packets
 * from the next session, etc.  Packets on the if_fastq get absolute
 * priority, but if one session hogs the link, it gets "downgraded"
 * to the batchq until it runs out of packets, then it'll return
 * to the fastq (e.g. if the user does an ls -alR in a telnet session,
 * it'll temporarily get downgraded to the batchq)
 */
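/*
 * Rough picture of what that looks like (illustrative only):
 *
 *   if_batchq <-> A1 <-> B1 <-> C1 <-> back to if_batchq   (ifq_next/ifq_prev)
 *                  |      |
 *                 A2     B2                                 (ifs_next/ifs_prev)
 *                  |
 *                 A3
 *
 * where A1..A3, B1..B2 and C1 are queued packets belonging to sessions
 * A, B and C; if_fastq has the same shape.
 */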
void if_output(struct socket *so, struct mbuf *ifm)
{
    Slirp *slirp = ifm->slirp;
    struct mbuf *ifq;
    int on_fastq = 1;

    DEBUG_CALL("if_output");
    DEBUG_ARG("so = %p", so);
    DEBUG_ARG("ifm = %p", ifm);

    /*
     * First remove the mbuf from m_usedlist,
     * since we're gonna use m_next and m_prev ourselves
     * XXX Shouldn't need this, gotta change dtom() etc.
     */
    if (ifm->m_flags & M_USEDLIST) {
        remque(ifm);
        ifm->m_flags &= ~M_USEDLIST;
    }
    /*
     * See if there's already a batchq list for this session.
     * This can include an interactive session, which should go on fastq,
     * but gets too greedy... hence it'll be downgraded from fastq to batchq.
     * We mustn't put this packet back on the fastq (or we'll send it out of order)
     */
    for (ifq = (struct mbuf *) slirp->if_batchq.qh_rlink;
         (struct quehead *) ifq != &slirp->if_batchq;
         ifq = ifq->ifq_prev) {
        if (so == ifq->ifq_so) {
            /* A match! */
            ifm->ifq_so = so;
            ifs_insque(ifm, ifq->ifs_prev);
            goto diddit;
        }
    }
    /* No match, check which queue to put it on */
    if (so && (so->so_iptos & IPTOS_LOWDELAY)) {
        ifq = (struct mbuf *) slirp->if_fastq.qh_rlink;
        on_fastq = 1;
        /*
         * Check if this packet is a part of the last
         * packet's session
         */
        if (ifq->ifq_so == so) {
            ifm->ifq_so = so;
            ifs_insque(ifm, ifq->ifs_prev);
            goto diddit;
        }
    } else {
        ifq = (struct mbuf *) slirp->if_batchq.qh_rlink;
    }
    /* Create a new doubly linked list for this session */
    ifm->ifq_so = so;
    ifs_init(ifm);
    insque(ifm, ifq);

diddit:
    if (so) {
        /* Update *_queued */
        so->so_queued++;
        so->so_nqueued++;
        /*
         * Check if the interactive session should be downgraded to
         * the batchq.  A session is downgraded if it has queued 6
         * packets without pausing, and at least 3 of those packets
         * have been sent over the link
         * (XXX These are arbitrary numbers, probably not optimal..)
         */
        if (on_fastq && ((so->so_nqueued >= 6) &&
                         (so->so_nqueued - so->so_queued) >= 3)) {

            /* Remove from current queue... */
            remque(ifm->ifs_next);

            /* ...And insert in the new.  That'll teach ya! */
            insque(ifm->ifs_next, &slirp->if_batchq);
        }
    }
    /*
     * This prevents us from malloc()ing too many mbufs
     */
    if_start(ifm->slirp);
}
/*
 * Send one packet from each session.
 * If there are packets on the fastq, they are sent FIFO, before
 * everything else.  Then we choose the first packet from each
 * batchq session (socket) and send it.
 * For example, if there are 3 ftp sessions fighting for bandwidth,
 * one packet will be sent from the first session, then one packet
 * from the second session, then one packet from the third.
 */
void if_start(Slirp *slirp)
{
    uint64_t now = slirp->cb->clock_get_ns(slirp->opaque);
    bool from_batchq = false;
    struct mbuf *ifm, *ifm_next, *ifqt;
    DEBUG_CALL("if_start");

    if (slirp->if_start_busy) {
        return;
    }
    slirp->if_start_busy = true;
    struct mbuf *batch_head = NULL;
    if (slirp->if_batchq.qh_link != &slirp->if_batchq) {
        batch_head = (struct mbuf *) slirp->if_batchq.qh_link;
    }

    if (slirp->if_fastq.qh_link != &slirp->if_fastq) {
        ifm_next = (struct mbuf *) slirp->if_fastq.qh_link;
    } else if (batch_head) {
        /* Nothing on fastq, pick up from batchq */
        ifm_next = batch_head;
        from_batchq = true;
    } else {
        ifm_next = NULL;
    }
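    /*
     * ifm_next walks the fastq first; once it hits the fastq head it jumps
     * over to batch_head, and once it hits the batchq head the walk (and
     * the loop below) ends.
     */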
    while (ifm_next) {
        ifm = ifm_next;

        ifm_next = ifm->ifq_next;
        if ((struct quehead *) ifm_next == &slirp->if_fastq) {
            /* No more packets in fastq, switch to batchq */
            ifm_next = batch_head;
            from_batchq = true;
        }
        if ((struct quehead *) ifm_next == &slirp->if_batchq) {
            /* end of batchq */
            ifm_next = NULL;
        }
        /* Try to send packet unless it already expired */
        if (ifm->expiration_date >= now && !if_encap(slirp, ifm)) {
            /* Packet is delayed due to pending ARP or NDP resolution */
            continue;
        }

        /* Remove it from the queue */
        ifqt = ifm->ifq_prev;
        remque(ifm);
        /* If there are more packets for this session, re-queue them */
        if (ifm->ifs_next != ifm) {
            struct mbuf *next = ifm->ifs_next;

            insque(next, ifqt);
            ifs_remque(ifm);
            if (!from_batchq) {
                ifm_next = next;
            }
        }
        /* Update so_queued */
        if (ifm->ifq_so && --ifm->ifq_so->so_queued == 0) {
            /* If there's no more queued, reset nqueued */
            ifm->ifq_so->so_nqueued = 0;
        }

        m_free(ifm);
    }
    slirp->if_start_busy = false;
}
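/*
 * In short: if_output() queues a packet on its session's list and, through
 * the if_start() call at its end, immediately tries to push queued packets
 * out via if_encap(); a packet that if_encap() cannot take yet (pending
 * ARP/NDP resolution) stays queued for a later if_start() pass.
 */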