/*
 * net/sched/sch_sfq.c	Stochastic Fairness Queueing discipline.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
/*	Stochastic Fairness Queuing algorithm.
	=======================================

	Source:
	Paul E. McKenney "Stochastic Fairness Queuing",
	IEEE INFOCOMM'90 Proceedings, San Francisco, 1990.

	Paul E. McKenney "Stochastic Fairness Queuing",
	"Interworking: Research and Experience", v.2, 1991, p.113-131.

	See also:
	M. Shreedhar and George Varghese "Efficient Fair
	Queuing using Deficit Round Robin", Proc. SIGCOMM 95.


	This is not the thing that is usually called (W)FQ nowadays.
	It does not use any timestamp mechanism, but instead
	processes queues in round-robin order.

	ADVANTAGE:

	- It is very cheap. Both CPU and memory requirements are minimal.

	DRAWBACKS:

	- "Stochastic" -> It is not 100% fair.
	When hash collisions occur, several flows are considered as one.

	- "Round-robin" -> It introduces larger delays than virtual clock
	based schemes, and should not be used for isolating interactive
	traffic from non-interactive. This means that this scheduler
	should be used as a leaf of CBQ or P3, which put interactive
	traffic into a higher priority band.

	We still need true WFQ for the top-level CSZ, but using WFQ
	for best-effort traffic is pointless: SFQ is superior
	for this purpose.

	IMPLEMENTATION:
	This implementation limits the maximal queue length to 128;
	the maximal mtu to 2^15-1; the number of hash buckets to 1024.
	The only goal of these restrictions was that all the data
	fit into one 4K page :-). Struct sfq_sched_data is
	organized in an anti-cache manner: all the data for a bucket
	are scattered over different locations. This is not good,
	but it allowed me to fit it into 4K.

	It is easy to increase these values, but not at runtime.  */
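/*
 * Illustrative sketch (not part of the original file): the core idea
 * above, reduced to standalone userspace C. Flows are hashed into a
 * small fixed set of slots, and the slots are served round-robin with
 * a byte "quantum" per round, in the spirit of the Shreedhar/Varghese
 * deficit round robin cited above. All names and sizes below (demo_*,
 * DEMO_*) are invented for the demo and do not appear in the scheduler
 * itself; the real code differs in detail (see sfq_dequeue()).
 */
#if 0	/* demo only, never built as part of the module */
#include <stdio.h>

#define DEMO_SLOTS	4	/* stands in for SFQ_HASH_DIVISOR */
#define DEMO_QUANTUM	1500	/* stands in for q->quantum */

struct demo_slot {
	int backlog;	/* packets waiting in this slot */
	int pkt_len;	/* every packet in this slot is this long */
	int allot;	/* byte credit left in the current round */
};

/* One round-robin pass: top up each slot's credit by one quantum,
 * then drain packets while the credit lasts. */
static void demo_round(struct demo_slot *s, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		s[i].allot += DEMO_QUANTUM;
		while (s[i].backlog > 0 && s[i].allot >= s[i].pkt_len) {
			s[i].allot -= s[i].pkt_len;
			s[i].backlog--;
			printf("slot %d: dequeued %d bytes\n",
			       i, s[i].pkt_len);
		}
	}
}

int main(void)
{
	/* Two competing flows, small packets vs. full-size packets.
	 * Each round, both move roughly DEMO_QUANTUM bytes: the small
	 * flow sends ~15 packets per round, the big flow ~1. That
	 * byte-level fairness is what SFQ approximates. */
	struct demo_slot slots[DEMO_SLOTS] = {
		{ .backlog = 30, .pkt_len = 100 },
		{ .backlog = 10, .pkt_len = 1500 },
	};

	demo_round(slots, DEMO_SLOTS);
	demo_round(slots, DEMO_SLOTS);
	return 0;
}
#endif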
#define SFQ_DEPTH		128
#define SFQ_HASH_DIVISOR	1024
/* This type should contain at least SFQ_DEPTH*2 values */
typedef unsigned char sfq_index;
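/*
 * Why unsigned char is enough (added note): with SFQ_DEPTH = 128 the
 * code below indexes dep[] with values up to SFQ_DEPTH * 2 - 1 = 255,
 * and uses SFQ_DEPTH itself as the "empty" marker in ht[] and tail, so
 * the full 0..255 range of unsigned char is exactly sufficient. Anyone
 * raising SFQ_DEPTH must widen sfq_index as well; a compile-time guard
 * along these lines (hypothetical, not in the original) would catch a
 * careless bump:
 *
 *	BUILD_BUG_ON(SFQ_DEPTH * 2 > 1 << (8 * sizeof(sfq_index)));
 */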
struct sfq_head
{
	sfq_index	next;
	sfq_index	prev;
};

struct sfq_sched_data
{
/* Parameters */
	int		perturb_period;
	unsigned	quantum;	/* Allotment per round: MUST BE >= MTU */
	int		limit;

/* Variables */
	struct tcf_proto *filter_list;
	struct timer_list perturb_timer;
	u32		perturbation;
	sfq_index	tail;		/* Index of current slot in round */
	sfq_index	max_depth;	/* Maximal depth */

	sfq_index	ht[SFQ_HASH_DIVISOR];	/* Hash table */
	sfq_index	next[SFQ_DEPTH];	/* Active slots link */
	short		allot[SFQ_DEPTH];	/* Current allotment per slot */
	unsigned short	hash[SFQ_DEPTH];	/* Hash value indexed by slots */
	struct sk_buff_head	qs[SFQ_DEPTH];	/* Slot queue */
	struct sfq_head	dep[SFQ_DEPTH*2];	/* Linked list of slots, indexed by depth */
};
static __inline__ unsigned sfq_fold_hash(struct sfq_sched_data *q, u32 h, u32 h1)
{
	return jhash_2words(h, h1, q->perturbation) & (SFQ_HASH_DIVISOR - 1);
}
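/*
 * (Added note) jhash_2words() mixes the two header-derived words with
 * the random q->perturbation seed; folding to a bucket with a plain
 * mask is only valid because SFQ_HASH_DIVISOR is a power of two.
 * Re-seeding the perturbation (see sfq_perturbation() below) reshuffles
 * the flow-to-bucket mapping, so two flows that happen to collide are
 * only stuck sharing a slot until the next re-seed.
 */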
static unsigned sfq_hash(struct sfq_sched_data *q, struct sk_buff *skb)
{
	u32 h, h2;

	switch (skb->protocol) {
	case htons(ETH_P_IP):
	{
		const struct iphdr *iph;
		int poff;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			goto err;
		iph = ip_hdr(skb);
		h = (__force u32)iph->daddr;
		h2 = (__force u32)iph->saddr ^ iph->protocol;
		if (iph->frag_off & htons(IP_MF|IP_OFFSET))
			break;
		poff = proto_ports_offset(iph->protocol);
		if (poff >= 0 &&
		    pskb_network_may_pull(skb, iph->ihl * 4 + 4 + poff)) {
			iph = ip_hdr(skb);
			h2 ^= *(u32 *)((void *)iph + iph->ihl * 4 + poff);
		}
		break;
	}
	case htons(ETH_P_IPV6):
	{
		struct ipv6hdr *iph;
		int poff;

		if (!pskb_network_may_pull(skb, sizeof(*iph)))
			goto err;
		iph = ipv6_hdr(skb);
		h = (__force u32)iph->daddr.s6_addr32[3];
		h2 = (__force u32)iph->saddr.s6_addr32[3] ^ iph->nexthdr;
		poff = proto_ports_offset(iph->nexthdr);
		if (poff >= 0 &&
		    pskb_network_may_pull(skb, sizeof(*iph) + 4 + poff)) {
			iph = ipv6_hdr(skb);
			h2 ^= *(u32 *)((void *)iph + sizeof(*iph) + poff);
		}
		break;
	}
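	/*
	 * (Added note) Non-IP packets, and packets too short to contain
	 * their network header, end up below: hash on the route (dst
	 * entry) and the owning socket instead of addresses and ports.
	 */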
	default:
err:
		h = (unsigned long)skb_dst(skb) ^ (__force u32)skb->protocol;
		h2 = (unsigned long)skb->sk;
	}

	return sfq_fold_hash(q, h, h2);
}
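/*
 * (Added note) Return convention for sfq_classify(): 0 means "drop /
 * handled by an action", anything else is a 1-based bucket number
 * (1..SFQ_HASH_DIVISOR) that sfq_enqueue() decrements back to an ht[]
 * index.
 */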
static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				 int *qerr)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= SFQ_HASH_DIVISOR)
		return TC_H_MIN(skb->priority);

	if (!q->filter_list)
		return sfq_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= SFQ_HASH_DIVISOR)
			return TC_H_MIN(res.classid);
	}
	return 0;
}
static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d = q->qs[x].qlen + SFQ_DEPTH;

	p = d;
	n = q->dep[d].next;
	q->dep[x].next = n;
	q->dep[x].prev = p;
	q->dep[p].next = q->dep[n].prev = x;
}
static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;

	n = q->dep[x].next;
	p = q->dep[x].prev;
	q->dep[p].next = n;
	q->dep[n].prev = p;

	if (n == p && q->max_depth == q->qs[x].qlen + 1)
		q->max_depth--;

	sfq_link(q, x);
}
static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	n = q->dep[x].next;
	p = q->dep[x].prev;
	q->dep[p].next = n;
	q->dep[n].prev = p;
	d = q->qs[x].qlen;
	if (q->max_depth < d)
		q->max_depth = d;

	sfq_link(q, x);
}
static unsigned int sfq_drop(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index d = q->max_depth;
	struct sk_buff *skb;
	unsigned int len;

	/* Queue is full! Find the longest slot and
	   drop a packet from it */

	if (d > 1) {
		sfq_index x = q->dep[d + SFQ_DEPTH].next;

		skb = q->qs[x].prev;
		len = qdisc_pkt_len(skb);
		__skb_unlink(skb, &q->qs[x]);
		kfree_skb(skb);
		sfq_dec(q, x);
		sch->q.qlen--;
		sch->qstats.drops++;
		sch->qstats.backlog -= len;
		return len;
	}

	if (d == 1) {
		/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
		d = q->next[q->tail];
		q->next[q->tail] = q->next[d];
		skb = q->qs[d].prev;
		len = qdisc_pkt_len(skb);
		__skb_unlink(skb, &q->qs[d]);
		kfree_skb(skb);
		sfq_dec(q, d);
		sch->q.qlen--;
		q->ht[q->hash[d]] = SFQ_DEPTH;
		sch->qstats.drops++;
		sch->qstats.backlog -= len;
		return len;
	}

	return 0;
}
static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int hash;
	sfq_index x;
	int uninitialized_var(ret);

	hash = sfq_classify(skb, sch, &ret);
	if (hash == 0) {
		if (ret & __NET_XMIT_BYPASS)
			sch->qstats.drops++;
		kfree_skb(skb);
		return ret;
	}
	hash--;

	x = q->ht[hash];
	if (x == SFQ_DEPTH) {
		q->ht[hash] = x = q->dep[SFQ_DEPTH].next;
		q->hash[x] = hash;
	}

	/* If selected queue has length q->limit, this means that
	 * all other queues are empty and that we do simple tail drop,
	 * i.e. drop _this_ packet.
	 */
	if (q->qs[x].qlen >= q->limit)
		return qdisc_drop(skb, sch);

	sch->qstats.backlog += qdisc_pkt_len(skb);
	__skb_queue_tail(&q->qs[x], skb);
	sfq_inc(q, x);
	if (q->qs[x].qlen == 1) {		/* The flow is new */
		if (q->tail == SFQ_DEPTH) {	/* It is the first flow */
			q->next[x] = x;
		} else {
			q->next[x] = q->next[q->tail];
			q->next[q->tail] = x;
		}
		q->tail = x;
		q->allot[x] = q->quantum;
	}
	if (++sch->q.qlen <= q->limit) {
		sch->bstats.bytes += qdisc_pkt_len(skb);
		sch->bstats.packets++;
		return NET_XMIT_SUCCESS;
	}

	sfq_drop(sch);
	return NET_XMIT_CN;
}
static struct sk_buff *
sfq_peek(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index a;

	/* No active slots */
	if (q->tail == SFQ_DEPTH)
		return NULL;

	a = q->next[q->tail];
	return skb_peek(&q->qs[a]);
}
static struct sk_buff *
sfq_dequeue(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	sfq_index a, next_a;

	/* No active slots */
	if (q->tail == SFQ_DEPTH)
		return NULL;

	a = q->next[q->tail];

	/* Grab packet */
	skb = __skb_dequeue(&q->qs[a]);
	sfq_dec(q, a);
	sch->q.qlen--;
	sch->qstats.backlog -= qdisc_pkt_len(skb);

	/* Is the slot empty? */
	if (q->qs[a].qlen == 0) {
		q->ht[q->hash[a]] = SFQ_DEPTH;
		next_a = q->next[a];
		if (a == next_a) {
			q->tail = SFQ_DEPTH;	/* it was the last active slot */
			return skb;
		}
		q->next[q->tail] = next_a;
	} else if ((q->allot[a] -= qdisc_pkt_len(skb)) <= 0) {
		q->allot[a] += q->quantum;
		q->tail = a;	/* allotment spent: end of this slot's turn */
	}
	return skb;
}
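/*
 * (Added note) The allot bookkeeping above is the deficit-round-robin
 * element: each dequeued packet is charged against the slot's
 * allotment, and only when the allotment goes non-positive does the
 * slot both receive a fresh quantum and yield its turn. A flow of small
 * packets therefore sends several packets per round, while a flow of
 * full-size packets sends about one, equalizing throughput in bytes.
 */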
static void
sfq_reset(struct Qdisc *sch)
{
	struct sk_buff *skb;

	while ((skb = sfq_dequeue(sch)) != NULL)
		kfree_skb(skb);
}
static void sfq_perturbation(unsigned long arg)
{
	struct Qdisc *sch = (struct Qdisc *)arg;
	struct sfq_sched_data *q = qdisc_priv(sch);

	q->perturbation = net_random();

	if (q->perturb_period)
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
}
static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tc_sfq_qopt *ctl = nla_data(opt);
	unsigned int qlen;

	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
		return -EINVAL;

	sch_tree_lock(sch);
	q->quantum = ctl->quantum ? : psched_mtu(qdisc_dev(sch));
	q->perturb_period = ctl->perturb_period * HZ;
	if (ctl->limit)
		q->limit = min_t(u32, ctl->limit, SFQ_DEPTH - 1);

	qlen = sch->q.qlen;
	while (sch->q.qlen > q->limit)
		sfq_drop(sch);
	qdisc_tree_decrease_qlen(sch, qlen - sch->q.qlen);

	del_timer(&q->perturb_timer);
	if (q->perturb_period) {
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
		q->perturbation = net_random();
	}
	sch_tree_unlock(sch);
	return 0;
}
static int sfq_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	int i;

	q->perturb_timer.function = sfq_perturbation;
	q->perturb_timer.data = (unsigned long)sch;
	init_timer_deferrable(&q->perturb_timer);

	for (i = 0; i < SFQ_HASH_DIVISOR; i++)
		q->ht[i] = SFQ_DEPTH;

	for (i = 0; i < SFQ_DEPTH; i++) {
		skb_queue_head_init(&q->qs[i]);
		q->dep[i + SFQ_DEPTH].next = i + SFQ_DEPTH;
		q->dep[i + SFQ_DEPTH].prev = i + SFQ_DEPTH;
	}

	q->limit = SFQ_DEPTH - 1;
	q->max_depth = 0;
	q->tail = SFQ_DEPTH;
	if (opt == NULL) {
		q->quantum = psched_mtu(qdisc_dev(sch));
		q->perturb_period = 0;
		q->perturbation = net_random();
	} else {
		int err = sfq_change(sch, opt);
		if (err)
			return err;
	}

	for (i = 0; i < SFQ_DEPTH; i++)
		sfq_link(q, i);
	return 0;
}
static void sfq_destroy(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	q->perturb_period = 0;
	del_timer_sync(&q->perturb_timer);
}
static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_sfq_qopt opt;

	opt.quantum = q->quantum;
	opt.perturb_period = q->perturb_period / HZ;

	opt.limit = q->limit;
	opt.divisor = SFQ_HASH_DIVISOR;
	opt.flows = q->limit;

	NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt);

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}
static unsigned long sfq_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}
static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	return 0;
}
static void sfq_put(struct Qdisc *q, unsigned long cl)
{
}
static struct tcf_proto **sfq_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}
static int sfq_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}
static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				struct gnet_dump *d)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index idx = q->ht[cl - 1];
	struct gnet_stats_queue qs = { .qlen = q->qs[idx].qlen };
	struct tc_sfq_xstats xstats = { .allot = q->allot[idx] };

	if (gnet_stats_copy_queue(d, &qs) < 0)
		return -1;
	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}
static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < SFQ_HASH_DIVISOR; i++) {
		if (q->ht[i] == SFQ_DEPTH ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}
= {
581 .tcf_chain
= sfq_find_tcf
,
582 .bind_tcf
= sfq_bind
,
583 .unbind_tcf
= sfq_put
,
584 .dump
= sfq_dump_class
,
585 .dump_stats
= sfq_dump_class_stats
,
static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
	.cl_ops		=	&sfq_class_ops,
	.id		=	"sfq",
	.priv_size	=	sizeof(struct sfq_sched_data),
	.enqueue	=	sfq_enqueue,
	.dequeue	=	sfq_dequeue,
	.peek		=	sfq_peek,
	.drop		=	sfq_drop,
	.init		=	sfq_init,
	.reset		=	sfq_reset,
	.destroy	=	sfq_destroy,
	.change		=	NULL,
	.dump		=	sfq_dump,
	.owner		=	THIS_MODULE,
};
static int __init sfq_module_init(void)
{
	return register_qdisc(&sfq_qdisc_ops);
}
static void __exit sfq_module_exit(void)
{
	unregister_qdisc(&sfq_qdisc_ops);
}
module_init(sfq_module_init)
module_exit(sfq_module_exit)
MODULE_LICENSE("GPL");