// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/sched/sch_sfq.c	Stochastic Fairness Queueing discipline.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/siphash.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <net/netlink.h>
#include <net/pkt_sched.h>
#include <net/pkt_cls.h>
#include <net/red.h>

/* Stochastic Fairness Queuing algorithm.
   =======================================

   Source:
   Paul E. McKenney "Stochastic Fairness Queuing",
   IEEE INFOCOMM'90 Proceedings, San Francisco, 1990.

   Paul E. McKenney "Stochastic Fairness Queuing",
   "Interworking: Research and Experience", v.2, 1991, p.113-131.

   See also:
   M. Shreedhar and George Varghese "Efficient Fair
   Queuing using Deficit Round Robin", Proc. SIGCOMM 95.


   This is not the thing that is usually called (W)FQ nowadays.
   It does not use any timestamp mechanism, but instead
   processes queues in round-robin order.

   ADVANTAGE:

   - It is very cheap. Both CPU and memory requirements are minimal.

   DRAWBACKS:

   - "Stochastic" -> It is not 100% fair.
   When hash collisions occur, several flows are considered as one.

   - "Round-robin" -> It introduces larger delays than virtual clock
   based schemes, and should not be used for isolating interactive
   traffic from non-interactive. This means that this scheduler
   should be used as a leaf of CBQ or P3, which put interactive
   traffic into a higher priority band.

   We still need true WFQ for the top-level CSZ, but using WFQ
   for best-effort traffic is absolutely pointless:
   SFQ is superior for this purpose.

   IMPLEMENTATION:
   This implementation limits:
   - maximal queue length per flow to 127 packets;
   - max mtu to 2^18-1;
   - max number of flows to 65408;
   - number of hash buckets to 65536.

   It is easy to increase these values, but not at run time. */
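
/* Editorial illustration (not part of the original source; assumes the
 * standard iproute2 'tc' tool):
 *
 *	tc qdisc add dev eth0 root sfq perturb 10
 *
 * attaches SFQ with the defaults below and re-keys the flow hash every
 * 10 seconds (see sfq_perturbation() below).
 */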

#define SFQ_MAX_DEPTH		127 /* max number of packets per flow */
#define SFQ_DEFAULT_FLOWS	128
#define SFQ_MAX_FLOWS		(0x10000 - SFQ_MAX_DEPTH - 1) /* max number of flows */
#define SFQ_EMPTY_SLOT		0xffff
#define SFQ_DEFAULT_HASH_DIVISOR 1024

/* We use 16 bits to store allot, and want to handle packets up to 64K.
 * Scale allot by 8 (1<<3) so that no overflow occurs.
 */
#define SFQ_ALLOT_SHIFT		3
#define SFQ_ALLOT_SIZE(X)	DIV_ROUND_UP(X, 1 << SFQ_ALLOT_SHIFT)
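
/* Worked example (editorial): SFQ_ALLOT_SIZE(1514) = DIV_ROUND_UP(1514, 8)
 * = 190 scaled units for a full Ethernet frame, and the worst-case 64K
 * packet costs 8192 units, well within the 16-bit signed 'allot' field
 * of struct sfq_slot.
 */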

/* This type should contain at least SFQ_MAX_DEPTH + 1 + SFQ_MAX_FLOWS values */
typedef u16 sfq_index;

/*
 * We don't use pointers, to save space.
 * Small indexes [0 ... SFQ_MAX_FLOWS - 1] are 'pointers' to slots[] array,
 * while the following values [SFQ_MAX_FLOWS ... SFQ_MAX_FLOWS + SFQ_MAX_DEPTH]
 * are 'pointers' to dep[] array.
 */
struct sfq_head {
	sfq_index	next;
	sfq_index	prev;
};

struct sfq_slot {
	struct sk_buff	*skblist_next;
	struct sk_buff	*skblist_prev;
	sfq_index	qlen; /* number of skbs in skblist */
	sfq_index	next; /* next slot in sfq RR chain */
	struct sfq_head dep; /* anchor in dep[] chains */
	unsigned short	hash; /* hash value (index in ht[]) */
	short		allot; /* credit for this slot */

	unsigned int	backlog;
	struct red_vars vars;
};

struct sfq_sched_data {
/* frequently used fields */
	int		limit;		/* limit of total number of packets in this qdisc */
	unsigned int	divisor;	/* number of slots in hash table */
	u8		headdrop;
	u8		maxdepth;	/* limit of packets per flow */

	siphash_key_t	perturbation;
	u8		cur_depth;	/* depth of longest slot */
	u8		flags;
	unsigned short	scaled_quantum; /* SFQ_ALLOT_SIZE(quantum) */
	struct tcf_proto __rcu *filter_list;
	struct tcf_block *block;
	sfq_index	*ht;		/* Hash table ('divisor' slots) */
	struct sfq_slot	*slots;		/* Flows table ('maxflows' entries) */

	struct red_parms *red_parms;
	struct tc_sfqred_stats stats;
	struct sfq_slot *tail;		/* current slot in round */

	struct sfq_head	dep[SFQ_MAX_DEPTH + 1];
	/* Linked lists of slots, indexed by depth
	 * dep[0] : list of unused flows
	 * dep[1] : list of flows with 1 packet
	 * dep[X] : list of flows with X packets
	 */

	unsigned int	maxflows;	/* number of flows in flows array */
	int		perturb_period;
	unsigned int	quantum;	/* Allotment per round: MUST BE >= MTU */
	struct timer_list perturb_timer;
	struct Qdisc	*sch;
};

/*
 * sfq_head are either in a sfq_slot or in dep[] array
 */
static inline struct sfq_head *sfq_dep_head(struct sfq_sched_data *q, sfq_index val)
{
	if (val < SFQ_MAX_FLOWS)
		return &q->slots[val].dep;
	return &q->dep[val - SFQ_MAX_FLOWS];
}
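
/* Worked example (editorial): with SFQ_MAX_FLOWS == 65408, the index
 * value 3 resolves to &q->slots[3].dep, while 65408 + 2 resolves to
 * &q->dep[2], the head of the chain of flows currently holding two
 * packets (see the dep[] comment in struct sfq_sched_data).
 */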

static unsigned int sfq_hash(const struct sfq_sched_data *q,
			     const struct sk_buff *skb)
{
	return skb_get_hash_perturb(skb, &q->perturbation) & (q->divisor - 1);
}

static unsigned int sfq_classify(struct sk_buff *skb, struct Qdisc *sch,
				 int *qerr)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	struct tcf_proto *fl;
	int result;

	if (TC_H_MAJ(skb->priority) == sch->handle &&
	    TC_H_MIN(skb->priority) > 0 &&
	    TC_H_MIN(skb->priority) <= q->divisor)
		return TC_H_MIN(skb->priority);

	fl = rcu_dereference_bh(q->filter_list);
	if (!fl)
		return sfq_hash(q, skb) + 1;

	*qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
	result = tcf_classify(skb, fl, &res, false);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
		case TC_ACT_TRAP:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
			/* fall through */
		case TC_ACT_SHOT:
			return 0;
		}
#endif
		if (TC_H_MIN(res.classid) <= q->divisor)
			return TC_H_MIN(res.classid);
	}
	return 0;
}

/*
 * x : slot number [0 .. SFQ_MAX_FLOWS - 1]
 */
static inline void sfq_link(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	struct sfq_slot *slot = &q->slots[x];
	int qlen = slot->qlen;

	p = qlen + SFQ_MAX_FLOWS;
	n = q->dep[qlen].next;

	slot->dep.next = n;
	slot->dep.prev = p;

	q->dep[qlen].next = x;		/* sfq_dep_head(q, p)->next = x */
	sfq_dep_head(q, n)->prev = x;
}

#define sfq_unlink(q, x, n, p)			\
	do {					\
		n = q->slots[x].dep.next;	\
		p = q->slots[x].dep.prev;	\
		sfq_dep_head(q, p)->next = n;	\
		sfq_dep_head(q, n)->prev = p;	\
	} while (0)


static inline void sfq_dec(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = q->slots[x].qlen--;
	if (n == p && q->cur_depth == d)
		q->cur_depth--;
	sfq_link(q, x);
}

static inline void sfq_inc(struct sfq_sched_data *q, sfq_index x)
{
	sfq_index p, n;
	int d;

	sfq_unlink(q, x, n, p);

	d = ++q->slots[x].qlen;
	if (q->cur_depth < d)
		q->cur_depth = d;
	sfq_link(q, x);
}
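
/* Editorial walk-through: when a slot grows from 2 to 3 packets,
 * sfq_inc() unlinks it from the dep[2] chain and sfq_link() re-inserts
 * it at the head of dep[3], raising cur_depth to 3 if it was lower, so
 * sfq_drop() can locate a longest flow in O(1) via q->dep[cur_depth].
 */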

/* helper functions : might be changed when/if skbs use a standard list_head */

/* remove one skb from tail of slot queue */
static inline struct sk_buff *slot_dequeue_tail(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_prev;

	slot->skblist_prev = skb->prev;
	skb->prev->next = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}

/* remove one skb from head of slot queue */
static inline struct sk_buff *slot_dequeue_head(struct sfq_slot *slot)
{
	struct sk_buff *skb = slot->skblist_next;

	slot->skblist_next = skb->next;
	skb->next->prev = (struct sk_buff *)slot;
	skb->next = skb->prev = NULL;
	return skb;
}

static inline void slot_queue_init(struct sfq_slot *slot)
{
	memset(slot, 0, sizeof(*slot));
	slot->skblist_prev = slot->skblist_next = (struct sk_buff *)slot;
}

/* add skb to slot queue (tail add) */
static inline void slot_queue_add(struct sfq_slot *slot, struct sk_buff *skb)
{
	skb->prev = slot->skblist_prev;
	skb->next = (struct sk_buff *)slot;
	slot->skblist_prev->next = skb;
	slot->skblist_prev = skb;
}
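
/* Editorial note: the slot itself serves as the head of a circular
 * doubly-linked skb list, hence the (struct sk_buff *)slot casts above;
 * skblist_next/skblist_prev are laid out to mirror the next/prev
 * pointers at the start of struct sk_buff.
 */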

static unsigned int sfq_drop(struct Qdisc *sch, struct sk_buff **to_free)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index x, d = q->cur_depth;
	struct sk_buff *skb;
	unsigned int len;
	struct sfq_slot *slot;

	/* Queue is full! Find the longest slot and drop tail packet from it */
	if (d > 1) {
		x = q->dep[d].next;
		slot = &q->slots[x];
drop:
		skb = q->headdrop ? slot_dequeue_head(slot) : slot_dequeue_tail(slot);
		len = qdisc_pkt_len(skb);
		slot->backlog -= len;
		sfq_dec(q, x);
		sch->q.qlen--;
		qdisc_qstats_backlog_dec(sch, skb);
		qdisc_drop(skb, sch, to_free);
		return len;
	}

	if (d == 1) {
		/* It is difficult to believe, but ALL THE SLOTS HAVE LENGTH 1. */
		x = q->tail->next;
		slot = &q->slots[x];
		q->tail->next = slot->next;
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		goto drop;
	}

	return 0;
}
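
/* Editorial note: dropping from the longest slot penalises the heaviest
 * flow first. Only when every active slot holds exactly one packet
 * (d == 1) is the slot after q->tail sacrificed instead, and its hash
 * bucket cleared because that slot becomes empty.
 */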

/* Is ECN parameter configured */
static int sfq_prob_mark(const struct sfq_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}

/* Should packets over max threshold just be marked */
static int sfq_hard_mark(const struct sfq_sched_data *q)
{
	return (q->flags & (TC_RED_ECN | TC_RED_HARDDROP)) == TC_RED_ECN;
}

static int sfq_headdrop(const struct sfq_sched_data *q)
{
	return q->headdrop;
}

static int
sfq_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int hash, dropped;
	sfq_index x, qlen;
	struct sfq_slot *slot;
	int uninitialized_var(ret);
	struct sk_buff *head;
	int delta;

	hash = sfq_classify(skb, sch, &ret);
	if (hash == 0) {
		if (ret & __NET_XMIT_BYPASS)
			qdisc_qstats_drop(sch);
		__qdisc_drop(skb, to_free);
		return ret;
	}
	hash--;

	x = q->ht[hash];
	slot = &q->slots[x];
	if (x == SFQ_EMPTY_SLOT) {
		x = q->dep[0].next; /* get a free slot */
		if (x >= SFQ_MAX_FLOWS)
			return qdisc_drop(skb, sch, to_free);
		q->ht[hash] = x;
		slot = &q->slots[x];
		slot->hash = hash;
		slot->backlog = 0; /* should already be 0 anyway... */
		red_set_vars(&slot->vars);
		goto enqueue;
	}
	if (q->red_parms) {
		slot->vars.qavg = red_calc_qavg_no_idle_time(q->red_parms,
							&slot->vars,
							slot->backlog);
		switch (red_action(q->red_parms,
				   &slot->vars,
				   slot->vars.qavg)) {
		case RED_DONT_MARK:
			break;

		case RED_PROB_MARK:
			qdisc_qstats_overlimit(sch);
			if (sfq_prob_mark(q)) {
				/* We know we have at least one packet in queue */
				if (sfq_headdrop(q) &&
				    INET_ECN_set_ce(slot->skblist_next)) {
					q->stats.prob_mark_head++;
					break;
				}
				if (INET_ECN_set_ce(skb)) {
					q->stats.prob_mark++;
					break;
				}
			}
			q->stats.prob_drop++;
			goto congestion_drop;

		case RED_HARD_MARK:
			qdisc_qstats_overlimit(sch);
			if (sfq_hard_mark(q)) {
				/* We know we have at least one packet in queue */
				if (sfq_headdrop(q) &&
				    INET_ECN_set_ce(slot->skblist_next)) {
					q->stats.forced_mark_head++;
					break;
				}
				if (INET_ECN_set_ce(skb)) {
					q->stats.forced_mark++;
					break;
				}
			}
			q->stats.forced_drop++;
			goto congestion_drop;
		}
	}

	if (slot->qlen >= q->maxdepth) {
congestion_drop:
		if (!sfq_headdrop(q))
			return qdisc_drop(skb, sch, to_free);

		/* We know we have at least one packet in queue */
		head = slot_dequeue_head(slot);
		delta = qdisc_pkt_len(head) - qdisc_pkt_len(skb);
		sch->qstats.backlog -= delta;
		slot->backlog -= delta;
		qdisc_drop(head, sch, to_free);

		slot_queue_add(slot, skb);
		qdisc_tree_reduce_backlog(sch, 0, delta);
		return NET_XMIT_CN;
	}

enqueue:
	qdisc_qstats_backlog_inc(sch, skb);
	slot->backlog += qdisc_pkt_len(skb);
	slot_queue_add(slot, skb);
	sfq_inc(q, x);
	if (slot->qlen == 1) {		/* The flow is new */
		if (q->tail == NULL) {	/* It is the first flow */
			slot->next = x;
		} else {
			slot->next = q->tail->next;
			q->tail->next = x;
		}
		/* We put this flow at the end of our flow list.
		 * It might sound unfair for a new flow to wait behind old ones,
		 * but we could end up servicing new flows only, and freeze old ones.
		 */
		q->tail = slot;
		/* We could use a bigger initial quantum for new flows */
		slot->allot = q->scaled_quantum;
	}
	if (++sch->q.qlen <= q->limit)
		return NET_XMIT_SUCCESS;

	qlen = slot->qlen;
	dropped = sfq_drop(sch, to_free);
	/* Return Congestion Notification only if we dropped a packet
	 * from this flow.
	 */
	if (qlen != slot->qlen) {
		qdisc_tree_reduce_backlog(sch, 0, dropped - qdisc_pkt_len(skb));
		return NET_XMIT_CN;
	}

	/* As we dropped a packet, better let upper stack know this */
	qdisc_tree_reduce_backlog(sch, 1, dropped);
	return NET_XMIT_SUCCESS;
}

static struct sk_buff *
sfq_dequeue(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	sfq_index a, next_a;
	struct sfq_slot *slot;

	/* No active slots */
	if (q->tail == NULL)
		return NULL;

next_slot:
	a = q->tail->next;
	slot = &q->slots[a];
	if (slot->allot <= 0) {
		q->tail = slot;
		slot->allot += q->scaled_quantum;
		goto next_slot;
	}
	skb = slot_dequeue_head(slot);
	sfq_dec(q, a);
	qdisc_bstats_update(sch, skb);
	sch->q.qlen--;
	qdisc_qstats_backlog_dec(sch, skb);
	slot->backlog -= qdisc_pkt_len(skb);
	/* Is the slot empty? */
	if (slot->qlen == 0) {
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
		next_a = slot->next;
		if (a == next_a) {
			q->tail = NULL; /* no more active slots */
			return skb;
		}
		q->tail->next = next_a;
	} else {
		slot->allot -= SFQ_ALLOT_SIZE(qdisc_pkt_len(skb));
	}
	return skb;
}
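
/* Editorial note: this is the deficit round-robin step. A slot that
 * stays active is charged SFQ_ALLOT_SIZE(len) per dequeued packet;
 * once its allot falls to zero or below, the loop above moves q->tail
 * onto it and re-credits it with scaled_quantum for the next round
 * (e.g. 190 units for a 1514 byte quantum).
 */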

static void
sfq_reset(struct Qdisc *sch)
{
	struct sk_buff *skb;

	while ((skb = sfq_dequeue(sch)) != NULL)
		rtnl_kfree_skbs(skb, skb);
}

/*
 * When q->perturbation is changed, we rehash all queued skbs
 * to avoid OOO (Out Of Order) effects.
 * We don't use sfq_dequeue()/sfq_enqueue() because we don't want to change
 * counters.
 */
static void sfq_rehash(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;
	int i;
	struct sfq_slot *slot;
	struct sk_buff_head list;
	int dropped = 0;
	unsigned int drop_len = 0;

	__skb_queue_head_init(&list);

	for (i = 0; i < q->maxflows; i++) {
		slot = &q->slots[i];
		if (!slot->qlen)
			continue;
		while (slot->qlen) {
			skb = slot_dequeue_head(slot);
			sfq_dec(q, i);
			__skb_queue_tail(&list, skb);
		}
		slot->backlog = 0;
		red_set_vars(&slot->vars);
		q->ht[slot->hash] = SFQ_EMPTY_SLOT;
	}
	q->tail = NULL;

	while ((skb = __skb_dequeue(&list)) != NULL) {
		unsigned int hash = sfq_hash(q, skb);
		sfq_index x = q->ht[hash];

		slot = &q->slots[x];
		if (x == SFQ_EMPTY_SLOT) {
			x = q->dep[0].next; /* get a free slot */
			if (x >= SFQ_MAX_FLOWS) {
drop:
				qdisc_qstats_backlog_dec(sch, skb);
				drop_len += qdisc_pkt_len(skb);
				kfree_skb(skb);
				dropped++;
				continue;
			}
			q->ht[hash] = x;
			slot = &q->slots[x];
			slot->hash = hash;
		}
		if (slot->qlen >= q->maxdepth)
			goto drop;
		slot_queue_add(slot, skb);
		if (q->red_parms)
			slot->vars.qavg = red_calc_qavg(q->red_parms,
							&slot->vars,
							slot->backlog);
		slot->backlog += qdisc_pkt_len(skb);
		sfq_inc(q, x);
		if (slot->qlen == 1) {		/* The flow is new */
			if (q->tail == NULL) {	/* It is the first flow */
				slot->next = x;
			} else {
				slot->next = q->tail->next;
				q->tail->next = x;
			}
			q->tail = slot;
			slot->allot = q->scaled_quantum;
		}
	}
	sch->q.qlen -= dropped;
	qdisc_tree_reduce_backlog(sch, dropped, drop_len);
}

static void sfq_perturbation(struct timer_list *t)
{
	struct sfq_sched_data *q = from_timer(q, t, perturb_timer);
	struct Qdisc *sch = q->sch;
	spinlock_t *root_lock = qdisc_lock(qdisc_root_sleeping(sch));
	siphash_key_t nkey;

	get_random_bytes(&nkey, sizeof(nkey));
	spin_lock(root_lock);
	q->perturbation = nkey;
	if (!q->filter_list && q->tail)
		sfq_rehash(sch);
	spin_unlock(root_lock);

	if (q->perturb_period)
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
}

static int sfq_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	struct tc_sfq_qopt *ctl = nla_data(opt);
	struct tc_sfq_qopt_v1 *ctl_v1 = NULL;
	unsigned int qlen, dropped = 0;
	struct red_parms *p = NULL;
	struct sk_buff *to_free = NULL;
	struct sk_buff *tail = NULL;

	if (opt->nla_len < nla_attr_size(sizeof(*ctl)))
		return -EINVAL;
	if (opt->nla_len >= nla_attr_size(sizeof(*ctl_v1)))
		ctl_v1 = nla_data(opt);
	if (ctl->divisor &&
	    (!is_power_of_2(ctl->divisor) || ctl->divisor > 65536))
		return -EINVAL;
	if (ctl_v1 && !red_check_params(ctl_v1->qth_min, ctl_v1->qth_max,
					ctl_v1->Wlog))
		return -EINVAL;
	if (ctl_v1 && ctl_v1->qth_min) {
		p = kmalloc(sizeof(*p), GFP_KERNEL);
		if (!p)
			return -ENOMEM;
	}
	sch_tree_lock(sch);
	if (ctl->quantum) {
		q->quantum = ctl->quantum;
		q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	}
	q->perturb_period = ctl->perturb_period * HZ;
	if (ctl->flows)
		q->maxflows = min_t(u32, ctl->flows, SFQ_MAX_FLOWS);
	if (ctl->divisor) {
		q->divisor = ctl->divisor;
		q->maxflows = min_t(u32, q->maxflows, q->divisor);
	}
	if (ctl_v1) {
		if (ctl_v1->depth)
			q->maxdepth = min_t(u32, ctl_v1->depth, SFQ_MAX_DEPTH);
		if (p) {
			swap(q->red_parms, p);
			red_set_parms(q->red_parms,
				      ctl_v1->qth_min, ctl_v1->qth_max,
				      ctl_v1->Wlog,
				      ctl_v1->Plog, ctl_v1->Scell_log,
				      NULL,
				      ctl_v1->max_P);
		}
		q->flags = ctl_v1->flags;
		q->headdrop = ctl_v1->headdrop;
	}
	if (ctl->limit) {
		q->limit = min_t(u32, ctl->limit, q->maxdepth * q->maxflows);
		q->maxflows = min_t(u32, q->maxflows, q->limit);
	}

	qlen = sch->q.qlen;
	while (sch->q.qlen > q->limit) {
		dropped += sfq_drop(sch, &to_free);
		if (!tail)
			tail = to_free;
	}

	rtnl_kfree_skbs(to_free, tail);
	qdisc_tree_reduce_backlog(sch, qlen - sch->q.qlen, dropped);

	del_timer(&q->perturb_timer);
	if (q->perturb_period) {
		mod_timer(&q->perturb_timer, jiffies + q->perturb_period);
		get_random_bytes(&q->perturbation, sizeof(q->perturbation));
	}
	sch_tree_unlock(sch);
	kfree(p);
	return 0;
}
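
/* Editorial illustration (iproute2 syntax): a runtime reconfiguration
 * such as
 *
 *	tc qdisc change dev eth0 root sfq limit 200 depth 50 headdrop
 *
 * arrives here as a struct tc_sfq_qopt_v1 and takes the ctl_v1 branch
 * above; packets beyond the new limit are dropped on the spot.
 */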

static void *sfq_alloc(size_t sz)
{
	return kvmalloc(sz, GFP_KERNEL);
}

static void sfq_free(void *addr)
{
	kvfree(addr);
}

static void sfq_destroy(struct Qdisc *sch)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	tcf_block_put(q->block);
	q->perturb_period = 0;
	del_timer_sync(&q->perturb_timer);
	sfq_free(q->ht);
	sfq_free(q->slots);
	kfree(q->red_parms);
}

static int sfq_init(struct Qdisc *sch, struct nlattr *opt,
		    struct netlink_ext_ack *extack)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	int i;
	int err;

	q->sch = sch;
	timer_setup(&q->perturb_timer, sfq_perturbation, TIMER_DEFERRABLE);

	err = tcf_block_get(&q->block, &q->filter_list, sch, extack);
	if (err)
		return err;

	for (i = 0; i < SFQ_MAX_DEPTH + 1; i++) {
		q->dep[i].next = i + SFQ_MAX_FLOWS;
		q->dep[i].prev = i + SFQ_MAX_FLOWS;
	}

	q->limit = SFQ_MAX_DEPTH;
	q->maxdepth = SFQ_MAX_DEPTH;
	q->cur_depth = 0;
	q->tail = NULL;
	q->divisor = SFQ_DEFAULT_HASH_DIVISOR;
	q->maxflows = SFQ_DEFAULT_FLOWS;
	q->quantum = psched_mtu(qdisc_dev(sch));
	q->scaled_quantum = SFQ_ALLOT_SIZE(q->quantum);
	q->perturb_period = 0;
	get_random_bytes(&q->perturbation, sizeof(q->perturbation));

	if (opt) {
		int err = sfq_change(sch, opt);
		if (err)
			return err;
	}

	q->ht = sfq_alloc(sizeof(q->ht[0]) * q->divisor);
	q->slots = sfq_alloc(sizeof(q->slots[0]) * q->maxflows);
	if (!q->ht || !q->slots) {
		/* Note: sfq_destroy() will be called by our caller */
		return -ENOMEM;
	}

	for (i = 0; i < q->divisor; i++)
		q->ht[i] = SFQ_EMPTY_SLOT;

	for (i = 0; i < q->maxflows; i++) {
		slot_queue_init(&q->slots[i]);
		sfq_link(q, i);
	}
	if (q->limit >= 1)
		sch->flags |= TCQ_F_CAN_BYPASS;
	else
		sch->flags &= ~TCQ_F_CAN_BYPASS;
	return 0;
}

static int sfq_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned char *b = skb_tail_pointer(skb);
	struct tc_sfq_qopt_v1 opt;
	struct red_parms *p = q->red_parms;

	memset(&opt, 0, sizeof(opt));
	opt.v0.quantum	= q->quantum;
	opt.v0.perturb_period = q->perturb_period / HZ;
	opt.v0.limit	= q->limit;
	opt.v0.divisor	= q->divisor;
	opt.v0.flows	= q->maxflows;
	opt.depth	= q->maxdepth;
	opt.headdrop	= q->headdrop;

	if (p) {
		opt.qth_min	= p->qth_min >> p->Wlog;
		opt.qth_max	= p->qth_max >> p->Wlog;
		opt.Wlog	= p->Wlog;
		opt.Plog	= p->Plog;
		opt.Scell_log	= p->Scell_log;
		opt.max_P	= p->max_P;
	}
	memcpy(&opt.stats, &q->stats, sizeof(opt.stats));
	opt.flags	= q->flags;

	if (nla_put(skb, TCA_OPTIONS, sizeof(opt), &opt))
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct Qdisc *sfq_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long sfq_find(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static unsigned long sfq_bind(struct Qdisc *sch, unsigned long parent,
			      u32 classid)
{
	return 0;
}

static void sfq_unbind(struct Qdisc *q, unsigned long cl)
{
}

static struct tcf_block *sfq_tcf_block(struct Qdisc *sch, unsigned long cl,
				       struct netlink_ext_ack *extack)
{
	struct sfq_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return q->block;
}

static int sfq_dump_class(struct Qdisc *sch, unsigned long cl,
			  struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}

static int sfq_dump_class_stats(struct Qdisc *sch, unsigned long cl,
				struct gnet_dump *d)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	sfq_index idx = q->ht[cl - 1];
	struct gnet_stats_queue qs = { 0 };
	struct tc_sfq_xstats xstats = { 0 };

	if (idx != SFQ_EMPTY_SLOT) {
		const struct sfq_slot *slot = &q->slots[idx];

		xstats.allot = slot->allot << SFQ_ALLOT_SHIFT;
		qs.qlen = slot->qlen;
		qs.backlog = slot->backlog;
	}
	if (gnet_stats_copy_queue(d, NULL, &qs, qs.qlen) < 0)
		return -1;
	return gnet_stats_copy_app(d, &xstats, sizeof(xstats));
}

static void sfq_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	struct sfq_sched_data *q = qdisc_priv(sch);
	unsigned int i;

	if (arg->stop)
		return;

	for (i = 0; i < q->divisor; i++) {
		if (q->ht[i] == SFQ_EMPTY_SLOT ||
		    arg->count < arg->skip) {
			arg->count++;
			continue;
		}
		if (arg->fn(sch, i + 1, arg) < 0) {
			arg->stop = 1;
			break;
		}
		arg->count++;
	}
}
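
/* Editorial note: class handles exported by sfq_walk() are hash-bucket
 * indexes plus one, matching sfq_classify(), which returns bucket + 1
 * and reserves classid 0 for "no match / drop".
 */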

static const struct Qdisc_class_ops sfq_class_ops = {
	.leaf		=	sfq_leaf,
	.find		=	sfq_find,
	.tcf_block	=	sfq_tcf_block,
	.bind_tcf	=	sfq_bind,
	.unbind_tcf	=	sfq_unbind,
	.dump		=	sfq_dump_class,
	.dump_stats	=	sfq_dump_class_stats,
	.walk		=	sfq_walk,
};

static struct Qdisc_ops sfq_qdisc_ops __read_mostly = {
	.cl_ops		=	&sfq_class_ops,
	.id		=	"sfq",
	.priv_size	=	sizeof(struct sfq_sched_data),
	.enqueue	=	sfq_enqueue,
	.dequeue	=	sfq_dequeue,
	.peek		=	qdisc_peek_dequeued,
	.init		=	sfq_init,
	.reset		=	sfq_reset,
	.destroy	=	sfq_destroy,
	.change		=	NULL,
	.dump		=	sfq_dump,
	.owner		=	THIS_MODULE,
};

static int __init sfq_module_init(void)
{
	return register_qdisc(&sfq_qdisc_ops);
}
static void __exit sfq_module_exit(void)
{
	unregister_qdisc(&sfq_qdisc_ops);
}
module_init(sfq_module_init)
module_exit(sfq_module_exit)
MODULE_LICENSE("GPL");