Committer: Michael Beasley <mike@snafu.setup>
[mikesnafu-overlay.git] / net / ipv4 / ipvs / ip_vs_nq.c
blobbc2a9e5f2a7b41c61610e64489dfa06b078eec5c
1 /*
2 * IPVS: Never Queue scheduling module
4 * Version: $Id: ip_vs_nq.c,v 1.2 2003/06/08 09:31:19 wensong Exp $
6 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * as published by the Free Software Foundation; either version
11 * 2 of the License, or (at your option) any later version.
13 * Changes:
18 * The NQ algorithm adopts a two-speed model. When there is an idle server
19 * available, the job will be sent to the idle server, instead of waiting
20 * for a fast one. When there is no idle server available, the job will be
21 * sent to the server that minimize its expected delay (The Shortest
22 * Expected Delay scheduling algorithm).
24 * See the following paper for more information:
25 * A. Weinrib and S. Shenker, Greed is not enough: Adaptive load sharing
26 * in large heterogeneous systems. In Proceedings IEEE INFOCOM'88,
27 * pages 986-994, 1988.
29 * Thanks must go to Marko Buuri <marko@buuri.name> for talking NQ to me.
31 * The difference between NQ and SED is that NQ can improve overall
32 * system utilization.
36 #include <linux/module.h>
37 #include <linux/kernel.h>
39 #include <net/ip_vs.h>
/*
 * Service initialisation hook.  The NQ scheduler keeps no per-service
 * state, so there is nothing to allocate or set up.  Always succeeds.
 */
static int
ip_vs_nq_init_svc(struct ip_vs_service *svc)
{
	return 0;
}
/*
 * Service teardown hook.  Nothing was allocated in ip_vs_nq_init_svc,
 * so there is nothing to release.  Always succeeds.
 */
static int
ip_vs_nq_done_svc(struct ip_vs_service *svc)
{
	return 0;
}
/*
 * Service update hook.  NQ recomputes its choice from live counters on
 * every scheduling decision, so a service change needs no action here.
 */
static int
ip_vs_nq_update_svc(struct ip_vs_service *svc)
{
	return 0;
}
63 static inline unsigned int
64 ip_vs_nq_dest_overhead(struct ip_vs_dest *dest)
67 * We only use the active connection number in the cost
68 * calculation here.
70 return atomic_read(&dest->activeconns) + 1;
75 * Weighted Least Connection scheduling
77 static struct ip_vs_dest *
78 ip_vs_nq_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
80 struct ip_vs_dest *dest, *least = NULL;
81 unsigned int loh = 0, doh;
83 IP_VS_DBG(6, "ip_vs_nq_schedule(): Scheduling...\n");
86 * We calculate the load of each dest server as follows:
87 * (server expected overhead) / dest->weight
89 * Remember -- no floats in kernel mode!!!
90 * The comparison of h1*w2 > h2*w1 is equivalent to that of
91 * h1/w1 > h2/w2
92 * if every weight is larger than zero.
94 * The server with weight=0 is quiesced and will not receive any
95 * new connections.
98 list_for_each_entry(dest, &svc->destinations, n_list) {
100 if (dest->flags & IP_VS_DEST_F_OVERLOAD ||
101 !atomic_read(&dest->weight))
102 continue;
104 doh = ip_vs_nq_dest_overhead(dest);
106 /* return the server directly if it is idle */
107 if (atomic_read(&dest->activeconns) == 0) {
108 least = dest;
109 loh = doh;
110 goto out;
113 if (!least ||
114 (loh * atomic_read(&dest->weight) >
115 doh * atomic_read(&least->weight))) {
116 least = dest;
117 loh = doh;
121 if (!least)
122 return NULL;
124 out:
125 IP_VS_DBG(6, "NQ: server %u.%u.%u.%u:%u "
126 "activeconns %d refcnt %d weight %d overhead %d\n",
127 NIPQUAD(least->addr), ntohs(least->port),
128 atomic_read(&least->activeconns),
129 atomic_read(&least->refcnt),
130 atomic_read(&least->weight), loh);
132 return least;
136 static struct ip_vs_scheduler ip_vs_nq_scheduler =
138 .name = "nq",
139 .refcnt = ATOMIC_INIT(0),
140 .module = THIS_MODULE,
141 .init_service = ip_vs_nq_init_svc,
142 .done_service = ip_vs_nq_done_svc,
143 .update_service = ip_vs_nq_update_svc,
144 .schedule = ip_vs_nq_schedule,
148 static int __init ip_vs_nq_init(void)
150 INIT_LIST_HEAD(&ip_vs_nq_scheduler.n_list);
151 return register_ip_vs_scheduler(&ip_vs_nq_scheduler);
154 static void __exit ip_vs_nq_cleanup(void)
156 unregister_ip_vs_scheduler(&ip_vs_nq_scheduler);
159 module_init(ip_vs_nq_init);
160 module_exit(ip_vs_nq_cleanup);
161 MODULE_LICENSE("GPL");