/*
 * IPVS:        Locality-Based Least-Connection with Replication scheduler
 *
 * Authors:     Wensong Zhang <wensong@gnuchina.org>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Changes:
 *     Julian Anastasov        :    Added the missing (dest->weight>0)
 *                                  condition in the ip_vs_dest_set_max.
 *
 */
/*
 * The lblc/r algorithm is as follows (pseudo code):
 *
 *       if serverSet[dest_ip] is null then
 *               n, serverSet[dest_ip] <- {weighted least-conn node};
 *       else
 *               n <- {least-conn (alive) node in serverSet[dest_ip]};
 *               if (n is null) OR
 *                  (n.conns>n.weight AND
 *                   there is a node m with m.conns<m.weight/2) then
 *                   n <- {weighted least-conn node};
 *                   add n to serverSet[dest_ip];
 *       if |serverSet[dest_ip]| > 1 AND
 *           now - serverSet[dest_ip].lastMod > T then
 *           m <- {most conn node in serverSet[dest_ip]};
 *           remove m from serverSet[dest_ip];
 *       if serverSet[dest_ip] changed then
 *           serverSet[dest_ip].lastMod <- now;
 *
 *       return n;
 *
 */
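/*
 * Usage note (editorial addition, not part of the original header): a
 * virtual service is typically bound to this scheduler from user space
 * with ipvsadm, e.g.:
 *
 *       ipvsadm -A -t 192.168.0.1:80 -s lblcr
 *       ipvsadm -a -t 192.168.0.1:80 -r 10.0.0.2:80 -g -w 2
 *
 * The addresses and weight above are hypothetical placeholders.
 */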
#include <linux/ip.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/jiffies.h>

/* for sysctl */
#include <linux/fs.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

#include <net/ip_vs.h>
/*
 *    It is for garbage collection of stale IPVS lblcr entries,
 *    when the table is full.
 */
#define CHECK_EXPIRE_INTERVAL   (60*HZ)
#define ENTRY_TIMEOUT           (6*60*HZ)
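/*
 * Example (derived from the constants above): a partial garbage-collection
 * pass runs every CHECK_EXPIRE_INTERVAL (60 seconds), and an entry that has
 * been idle longer than ENTRY_TIMEOUT (6*60 = 360 seconds) becomes eligible
 * for collection during such a pass.
 */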
/*
 *    It is for full expiration check.
 *    When there is no partial expiration check (garbage collection)
 *    in a half hour, do a full expiration check to collect stale
 *    entries that haven't been touched for a day.
 */
#define COUNT_FOR_FULL_EXPIRATION   30
static int sysctl_ip_vs_lblcr_expiration = 24*60*60*HZ;
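/*
 * Note: proc_dointvec_jiffies (set below in vs_vars_table) converts between
 * seconds in the user view and jiffies in the kernel, so the default above
 * reads back as 86400 seconds. A sketch of tuning it at run time, assuming
 * the standard net/ipv4/vs sysctl path:
 *
 *       echo 3600 > /proc/sys/net/ipv4/vs/lblcr_expiration
 */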
/*
 *     for IPVS lblcr entry hash table
 */
#ifndef CONFIG_IP_VS_LBLCR_TAB_BITS
#define CONFIG_IP_VS_LBLCR_TAB_BITS      10
#endif
#define IP_VS_LBLCR_TAB_BITS     CONFIG_IP_VS_LBLCR_TAB_BITS
#define IP_VS_LBLCR_TAB_SIZE     (1 << IP_VS_LBLCR_TAB_BITS)
#define IP_VS_LBLCR_TAB_MASK     (IP_VS_LBLCR_TAB_SIZE - 1)
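/*
 * Example: with the default CONFIG_IP_VS_LBLCR_TAB_BITS of 10, the table
 * has 1 << 10 = 1024 buckets and IP_VS_LBLCR_TAB_MASK is 0x3ff, so a hash
 * value is reduced to a bucket index with a single bitwise AND.
 */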
/*
 *      IPVS destination set structure and operations
 */
struct ip_vs_dest_list {
        struct ip_vs_dest_list  *next;          /* list link */
        struct ip_vs_dest       *dest;          /* destination server */
};

struct ip_vs_dest_set {
        atomic_t                size;           /* set size */
        unsigned long           lastmod;        /* last modified time */
        struct ip_vs_dest_list  *list;          /* destination list */
        rwlock_t                lock;           /* lock for this list */
};
static struct ip_vs_dest_list *
ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
{
        struct ip_vs_dest_list *e;

        for (e=set->list; e!=NULL; e=e->next) {
                if (e->dest == dest) {
                        /* already existed */
                        return NULL;
                }
        }

        e = kmalloc(sizeof(*e), GFP_ATOMIC);
        if (e == NULL) {
                IP_VS_ERR("ip_vs_dest_set_insert(): no memory\n");
                return NULL;
        }

        atomic_inc(&dest->refcnt);
        e->dest = dest;

        /* link it to the list */
        e->next = set->list;
        set->list = e;
        atomic_inc(&set->size);

        set->lastmod = jiffies;
        return e;
}
static void
ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
{
        struct ip_vs_dest_list *e, **ep;

        for (ep=&set->list, e=*ep; e!=NULL; e=*ep) {
                if (e->dest == dest) {
                        /* HIT */
                        *ep = e->next;
                        atomic_dec(&set->size);
                        set->lastmod = jiffies;
                        atomic_dec(&e->dest->refcnt);
                        kfree(e);
                        break;
                }
                ep = &e->next;
        }
}
static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
{
        struct ip_vs_dest_list *e, **ep;

        write_lock(&set->lock);
        for (ep=&set->list, e=*ep; e!=NULL; e=*ep) {
                *ep = e->next;
                /*
                 * We don't kfree dest because it is referred either
                 * by its service or by the trash dest list.
                 */
                atomic_dec(&e->dest->refcnt);
                kfree(e);
        }
        write_unlock(&set->lock);
}
/* get weighted least-connection node in the destination set */
static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
{
        register struct ip_vs_dest_list *e;
        struct ip_vs_dest *dest, *least;
        int loh, doh;

        if (set == NULL)
                return NULL;

        /* select the first destination server, whose weight > 0 */
        for (e=set->list; e!=NULL; e=e->next) {
                least = e->dest;
                if (least->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;

                if ((atomic_read(&least->weight) > 0)
                    && (least->flags & IP_VS_DEST_F_AVAILABLE)) {
                        loh = atomic_read(&least->activeconns) * 50
                                + atomic_read(&least->inactconns);
                        goto nextstage;
                }
        }
        return NULL;

        /* find the destination with the weighted least load */
  nextstage:
        for (e=e->next; e!=NULL; e=e->next) {
                dest = e->dest;
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;

                doh = atomic_read(&dest->activeconns) * 50
                        + atomic_read(&dest->inactconns);
                if ((loh * atomic_read(&dest->weight) >
                     doh * atomic_read(&least->weight))
                    && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
                        least = dest;
                        loh = doh;
                }
        }

        IP_VS_DBG(6, "ip_vs_dest_set_min: server %d.%d.%d.%d:%d "
                  "activeconns %d refcnt %d weight %d overhead %d\n",
                  NIPQUAD(least->addr), ntohs(least->port),
                  atomic_read(&least->activeconns),
                  atomic_read(&least->refcnt),
                  atomic_read(&least->weight), loh);
        return least;
}
/* get weighted most-connection node in the destination set */
static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
{
        register struct ip_vs_dest_list *e;
        struct ip_vs_dest *dest, *most;
        int moh, doh;

        if (set == NULL)
                return NULL;

        /* select the first destination server, whose weight > 0 */
        for (e=set->list; e!=NULL; e=e->next) {
                most = e->dest;
                if (atomic_read(&most->weight) > 0) {
                        moh = atomic_read(&most->activeconns) * 50
                                + atomic_read(&most->inactconns);
                        goto nextstage;
                }
        }
        return NULL;

        /* find the destination with the weighted most load */
  nextstage:
        for (e=e->next; e!=NULL; e=e->next) {
                dest = e->dest;
                doh = atomic_read(&dest->activeconns) * 50
                        + atomic_read(&dest->inactconns);
                /* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
                if ((moh * atomic_read(&dest->weight) <
                     doh * atomic_read(&most->weight))
                    && (atomic_read(&dest->weight) > 0)) {
                        most = dest;
                        moh = doh;
                }
        }

        IP_VS_DBG(6, "ip_vs_dest_set_max: server %d.%d.%d.%d:%d "
                  "activeconns %d refcnt %d weight %d overhead %d\n",
                  NIPQUAD(most->addr), ntohs(most->port),
                  atomic_read(&most->activeconns),
                  atomic_read(&most->refcnt),
                  atomic_read(&most->weight), moh);
        return most;
}
/*
 *      IPVS lblcr entry represents an association between destination
 *      IP address and its destination server set
 */
struct ip_vs_lblcr_entry {
        struct list_head        list;
        __be32                  addr;           /* destination IP address */
        struct ip_vs_dest_set   set;            /* destination server set */
        unsigned long           lastuse;        /* last used time */
};
/*
 *      IPVS lblcr hash table
 */
struct ip_vs_lblcr_table {
        struct list_head        bucket[IP_VS_LBLCR_TAB_SIZE];  /* hash bucket */
        atomic_t                entries;        /* number of entries */
        int                     max_size;       /* maximum size of entries */
        struct timer_list       periodic_timer; /* collect stale entries */
        int                     rover;          /* rover for expire check */
        int                     counter;        /* counter for no expire */
};
/*
 *      IPVS LBLCR sysctl table
 */
static ctl_table vs_vars_table[] = {
        {
                .procname       = "lblcr_expiration",
                .data           = &sysctl_ip_vs_lblcr_expiration,
                .maxlen         = sizeof(int),
                .mode           = 0644,
                .proc_handler   = &proc_dointvec_jiffies,
        },
        { .ctl_name = 0 }
};

static struct ctl_table_header * sysctl_header;
static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
{
        list_del(&en->list);
        ip_vs_dest_set_eraseall(&en->set);
        kfree(en);
}
/*
 *      Returns hash value for IPVS LBLCR entry
 */
static inline unsigned ip_vs_lblcr_hashkey(__be32 addr)
{
        return (ntohl(addr)*2654435761UL) & IP_VS_LBLCR_TAB_MASK;
}
/*
 *      Hash an entry in the ip_vs_lblcr_table.
 */
static void
ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en)
{
        unsigned hash = ip_vs_lblcr_hashkey(en->addr);

        list_add(&en->list, &tbl->bucket[hash]);
        atomic_inc(&tbl->entries);
}
/*
 *  Get ip_vs_lblcr_entry associated with supplied parameters. Called under
 *  read lock.
 */
static inline struct ip_vs_lblcr_entry *
ip_vs_lblcr_get(struct ip_vs_lblcr_table *tbl, __be32 addr)
{
        unsigned hash = ip_vs_lblcr_hashkey(addr);
        struct ip_vs_lblcr_entry *en;

        list_for_each_entry(en, &tbl->bucket[hash], list)
                if (en->addr == addr)
                        return en;

        return NULL;
}
/*
 * Create or update an ip_vs_lblcr_entry, which is a mapping of a destination
 * IP address to a server. Called under write lock.
 */
static inline struct ip_vs_lblcr_entry *
ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, __be32 daddr,
                struct ip_vs_dest *dest)
{
        struct ip_vs_lblcr_entry *en;

        en = ip_vs_lblcr_get(tbl, daddr);
        if (!en) {
                en = kmalloc(sizeof(*en), GFP_ATOMIC);
                if (!en) {
                        IP_VS_ERR("ip_vs_lblcr_new(): no memory\n");
                        return NULL;
                }

                en->addr = daddr;
                en->lastuse = jiffies;

                /* initialize its dest set */
                atomic_set(&(en->set.size), 0);
                en->set.list = NULL;
                rwlock_init(&en->set.lock);

                ip_vs_lblcr_hash(tbl, en);
        }

        write_lock(&en->set.lock);
        ip_vs_dest_set_insert(&en->set, dest);
        write_unlock(&en->set.lock);

        return en;
}
/*
 *      Flush all the entries of the specified table.
 */
static void ip_vs_lblcr_flush(struct ip_vs_lblcr_table *tbl)
{
        int i;
        struct ip_vs_lblcr_entry *en, *nxt;

        /* No locking required, only called during cleanup. */
        for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
                list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) {
                        ip_vs_lblcr_free(en);
                }
        }
}
static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
{
        struct ip_vs_lblcr_table *tbl = svc->sched_data;
        unsigned long now = jiffies;
        int i, j;
        struct ip_vs_lblcr_entry *en, *nxt;

        for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLCR_TAB_MASK;

                write_lock(&svc->sched_lock);
                list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
                        if (time_after(en->lastuse +
                                       sysctl_ip_vs_lblcr_expiration, now))
                                continue;

                        ip_vs_lblcr_free(en);
                        atomic_dec(&tbl->entries);
                }
                write_unlock(&svc->sched_lock);
        }
        tbl->rover = j;
}
/*
 *      Periodical timer handler for IPVS lblcr table
 *      It is used to collect stale entries when the number of entries
 *      exceeds the maximum size of the table.
 *
 *      Fixme: we probably need more complicated algorithm to collect
 *             entries that have not been used for a long time even
 *             if the number of entries doesn't exceed the maximum size
 *             of the table.
 *      The full expiration check is for this purpose now.
 */
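/*
 * Worked example (illustrative numbers): with the default table,
 * max_size = 1024*16 = 16384. If 17000 entries have accumulated, the
 * handler below computes goal = (17000 - 16384)*4/3 = 821 entries to
 * collect in this pass, capped at max_size/2 = 8192.
 */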
static void ip_vs_lblcr_check_expire(unsigned long data)
{
        struct ip_vs_service *svc = (struct ip_vs_service *) data;
        struct ip_vs_lblcr_table *tbl = svc->sched_data;
        unsigned long now = jiffies;
        int goal;
        int i, j;
        struct ip_vs_lblcr_entry *en, *nxt;

        if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
                /* do full expiration check */
                ip_vs_lblcr_full_check(svc);
                tbl->counter = 1;
                goto out;
        }

        if (atomic_read(&tbl->entries) <= tbl->max_size) {
                tbl->counter++;
                goto out;
        }

        goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;
        if (goal > tbl->max_size/2)
                goal = tbl->max_size/2;

        for (i=0, j=tbl->rover; i<IP_VS_LBLCR_TAB_SIZE; i++) {
                j = (j + 1) & IP_VS_LBLCR_TAB_MASK;

                write_lock(&svc->sched_lock);
                list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
                        if (time_before(now, en->lastuse+ENTRY_TIMEOUT))
                                continue;

                        ip_vs_lblcr_free(en);
                        atomic_dec(&tbl->entries);
                        goal--;
                }
                write_unlock(&svc->sched_lock);
                if (goal <= 0)
                        break;
        }
        tbl->rover = j;

  out:
        mod_timer(&tbl->periodic_timer, jiffies+CHECK_EXPIRE_INTERVAL);
}
static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
{
        int i;
        struct ip_vs_lblcr_table *tbl;

        /*
         *    Allocate the ip_vs_lblcr_table for this service
         */
        tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC);
        if (tbl == NULL) {
                IP_VS_ERR("ip_vs_lblcr_init_svc(): no memory\n");
                return -ENOMEM;
        }
        svc->sched_data = tbl;
        IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) allocated for "
                  "current service\n", sizeof(*tbl));

        /*
         *    Initialize the hash buckets
         */
        for (i=0; i<IP_VS_LBLCR_TAB_SIZE; i++) {
                INIT_LIST_HEAD(&tbl->bucket[i]);
        }
        tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16;
        tbl->rover = 0;
        tbl->counter = 1;

        /*
         *    Hook periodic timer for garbage collection
         */
        setup_timer(&tbl->periodic_timer, ip_vs_lblcr_check_expire,
                    (unsigned long)svc);
        mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);

        return 0;
}
static int ip_vs_lblcr_done_svc(struct ip_vs_service *svc)
{
        struct ip_vs_lblcr_table *tbl = svc->sched_data;

        /* remove periodic timer */
        del_timer_sync(&tbl->periodic_timer);

        /* got to clean up table entries here */
        ip_vs_lblcr_flush(tbl);

        /* release the table itself */
        kfree(tbl);
        IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) released\n",
                  sizeof(*tbl));

        return 0;
}
static inline struct ip_vs_dest *
__ip_vs_lblcr_schedule(struct ip_vs_service *svc, struct iphdr *iph)
{
        struct ip_vs_dest *dest, *least;
        int loh, doh;

        /*
         * We think the overhead of processing active connections is fifty
         * times higher than that of inactive connections in average. (This
         * fifty times might not be accurate, we will change it later.) We
         * use the following formula to estimate the overhead:
         *                dest->activeconns*50 + dest->inactconns
         * and the load:
         *                (dest overhead) / dest->weight
         *
         * Remember -- no floats in kernel mode!!!
         * The comparison of h1*w2 > h2*w1 is equivalent to that of
         * h1/w1 > h2/w2
         * if every weight is larger than zero.
         *
         * The server with weight=0 is quiesced and will not receive any
         * new connection.
         */
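        /*
         * Worked example (hypothetical numbers): a server A with 10 active
         * and 20 inactive connections has overhead 10*50 + 20 = 520; a
         * server B with 3 active and 5 inactive has 3*50 + 5 = 155. With
         * weights wA=1 and wB=2, the cross comparison 520*2 > 155*1 holds,
         * so B is the lighter choice, computed without any division.
         */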
        list_for_each_entry(dest, &svc->destinations, n_list) {
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;

                if (atomic_read(&dest->weight) > 0) {
                        least = dest;
                        loh = atomic_read(&least->activeconns) * 50
                                + atomic_read(&least->inactconns);
                        goto nextstage;
                }
        }
        return NULL;

        /*
         *    Find the destination with the least load.
         */
  nextstage:
        list_for_each_entry_continue(dest, &svc->destinations, n_list) {
                if (dest->flags & IP_VS_DEST_F_OVERLOAD)
                        continue;

                doh = atomic_read(&dest->activeconns) * 50
                        + atomic_read(&dest->inactconns);
                if (loh * atomic_read(&dest->weight) >
                    doh * atomic_read(&least->weight)) {
                        least = dest;
                        loh = doh;
                }
        }

        IP_VS_DBG(6, "LBLCR: server %d.%d.%d.%d:%d "
                  "activeconns %d refcnt %d weight %d overhead %d\n",
                  NIPQUAD(least->addr), ntohs(least->port),
                  atomic_read(&least->activeconns),
                  atomic_read(&least->refcnt),
                  atomic_read(&least->weight), loh);

        return least;
}
/*
 *   If this destination server is overloaded and there is a less loaded
 *   server, then return true.
 */
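/*
 * Example (hypothetical numbers): a dest with weight 4 and 5 active
 * connections counts as overloaded here only if some other dest in the
 * service has activeconns*2 < weight, e.g. weight 8 with 3 active
 * connections (3*2 < 8), in which case the caller reschedules.
 */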
static inline int
is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
{
        if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) {
                struct ip_vs_dest *d;

                list_for_each_entry(d, &svc->destinations, n_list) {
                        if (atomic_read(&d->activeconns)*2
                            < atomic_read(&d->weight)) {
                                return 1;
                        }
                }
        }
        return 0;
}
/*
 *    Locality-Based (weighted) Least-Connection scheduling
 */
static struct ip_vs_dest *
ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
{
        struct ip_vs_lblcr_table *tbl = svc->sched_data;
        struct iphdr *iph = ip_hdr(skb);
        struct ip_vs_dest *dest = NULL;
        struct ip_vs_lblcr_entry *en;

        IP_VS_DBG(6, "ip_vs_lblcr_schedule(): Scheduling...\n");

        /* First look in our cache */
        read_lock(&svc->sched_lock);
        en = ip_vs_lblcr_get(tbl, iph->daddr);
        if (en) {
                /* We only hold a read lock, but this is atomic */
                en->lastuse = jiffies;

                /* Get the least loaded destination */
                read_lock(&en->set.lock);
                dest = ip_vs_dest_set_min(&en->set);
                read_unlock(&en->set.lock);

                /* More than one destination + enough time passed by, cleanup */
                if (atomic_read(&en->set.size) > 1 &&
                    time_after(jiffies, en->set.lastmod +
                               sysctl_ip_vs_lblcr_expiration)) {
                        struct ip_vs_dest *m;

                        write_lock(&en->set.lock);
                        m = ip_vs_dest_set_max(&en->set);
                        if (m)
                                ip_vs_dest_set_erase(&en->set, m);
                        write_unlock(&en->set.lock);
                }

                /* If the destination is not overloaded, use it */
                if (dest && !is_overloaded(dest, svc)) {
                        read_unlock(&svc->sched_lock);
                        goto out;
                }

                /* The cache entry is invalid, time to schedule */
                dest = __ip_vs_lblcr_schedule(svc, iph);
                if (!dest) {
                        IP_VS_DBG(1, "no destination available\n");
                        read_unlock(&svc->sched_lock);
                        return NULL;
                }

                /* Update our cache entry */
                write_lock(&en->set.lock);
                ip_vs_dest_set_insert(&en->set, dest);
                write_unlock(&en->set.lock);

                goto out;
        }
        read_unlock(&svc->sched_lock);

        /* No cache entry, time to schedule */
        dest = __ip_vs_lblcr_schedule(svc, iph);
        if (!dest) {
                IP_VS_DBG(1, "no destination available\n");
                return NULL;
        }

        /* If we fail to create a cache entry, we'll just use the valid dest */
        write_lock(&svc->sched_lock);
        ip_vs_lblcr_new(tbl, iph->daddr, dest);
        write_unlock(&svc->sched_lock);

out:
        IP_VS_DBG(6, "LBLCR: destination IP address %u.%u.%u.%u "
                  "--> server %u.%u.%u.%u:%d\n",
                  NIPQUAD(iph->daddr),
                  NIPQUAD(dest->addr),
                  ntohs(dest->port));

        return dest;
}
/*
 *      IPVS LBLCR Scheduler structure
 */
static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
{
        .name =                 "lblcr",
        .refcnt =               ATOMIC_INIT(0),
        .module =               THIS_MODULE,
        .n_list =               LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list),
        .init_service =         ip_vs_lblcr_init_svc,
        .done_service =         ip_vs_lblcr_done_svc,
        .schedule =             ip_vs_lblcr_schedule,
};
static int __init ip_vs_lblcr_init(void)
{
        int ret;

        sysctl_header = register_sysctl_paths(net_vs_ctl_path, vs_vars_table);
        ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
        if (ret)
                unregister_sysctl_table(sysctl_header);
        return ret;
}
static void __exit ip_vs_lblcr_cleanup(void)
{
        unregister_sysctl_table(sysctl_header);
        unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
}
module_init(ip_vs_lblcr_init);
module_exit(ip_vs_lblcr_cleanup);
MODULE_LICENSE("GPL");