2 * IPVS: Locality-Based Least-Connection with Replication scheduler
4 * Authors: Wensong Zhang <wensong@gnuchina.org>
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
12 * Julian Anastasov : Added the missing (dest->weight>0)
13 * condition in the ip_vs_dest_set_max.
18 * The lblc/r algorithm is as follows (pseudo code):
20 * if serverSet[dest_ip] is null then
21 * n, serverSet[dest_ip] <- {weighted least-conn node};
23 * n <- {least-conn (alive) node in serverSet[dest_ip]};
25 * (n.conns>n.weight AND
26 * there is a node m with m.conns<m.weight/2) then
27 * n <- {weighted least-conn node};
28 * add n to serverSet[dest_ip];
29 * if |serverSet[dest_ip]| > 1 AND
30 * now - serverSet[dest_ip].lastMod > T then
31 * m <- {most conn node in serverSet[dest_ip]};
32 * remove m from serverSet[dest_ip];
33 * if serverSet[dest_ip] changed then
34 * serverSet[dest_ip].lastMod <- now;
40 #define KMSG_COMPONENT "IPVS"
41 #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
44 #include <linux/module.h>
45 #include <linux/kernel.h>
46 #include <linux/skbuff.h>
47 #include <linux/jiffies.h>
51 #include <linux/sysctl.h>
52 #include <net/net_namespace.h>
54 #include <net/ip_vs.h>
58 * It is for garbage collection of stale IPVS lblcr entries,
59 * when the table is full.
61 #define CHECK_EXPIRE_INTERVAL (60*HZ)
62 #define ENTRY_TIMEOUT (6*60*HZ)
65 * It is for full expiration check.
66 * When there is no partial expiration check (garbage collection)
67 * in a half hour, do a full expiration check to collect stale
68 * entries that haven't been touched for a day.
70 #define COUNT_FOR_FULL_EXPIRATION 30
71 static int sysctl_ip_vs_lblcr_expiration
= 24*60*60*HZ
;
75 * for IPVS lblcr entry hash table
77 #ifndef CONFIG_IP_VS_LBLCR_TAB_BITS
78 #define CONFIG_IP_VS_LBLCR_TAB_BITS 10
80 #define IP_VS_LBLCR_TAB_BITS CONFIG_IP_VS_LBLCR_TAB_BITS
81 #define IP_VS_LBLCR_TAB_SIZE (1 << IP_VS_LBLCR_TAB_BITS)
82 #define IP_VS_LBLCR_TAB_MASK (IP_VS_LBLCR_TAB_SIZE - 1)
86 * IPVS destination set structure and operations
88 struct ip_vs_dest_list
{
89 struct ip_vs_dest_list
*next
; /* list link */
90 struct ip_vs_dest
*dest
; /* destination server */
93 struct ip_vs_dest_set
{
94 atomic_t size
; /* set size */
95 unsigned long lastmod
; /* last modified time */
96 struct ip_vs_dest_list
*list
; /* destination list */
97 rwlock_t lock
; /* lock for this list */
101 static struct ip_vs_dest_list
*
102 ip_vs_dest_set_insert(struct ip_vs_dest_set
*set
, struct ip_vs_dest
*dest
)
104 struct ip_vs_dest_list
*e
;
106 for (e
=set
->list
; e
!=NULL
; e
=e
->next
) {
108 /* already existed */
112 e
= kmalloc(sizeof(*e
), GFP_ATOMIC
);
114 pr_err("%s(): no memory\n", __func__
);
118 atomic_inc(&dest
->refcnt
);
121 /* link it to the list */
124 atomic_inc(&set
->size
);
126 set
->lastmod
= jiffies
;
131 ip_vs_dest_set_erase(struct ip_vs_dest_set
*set
, struct ip_vs_dest
*dest
)
133 struct ip_vs_dest_list
*e
, **ep
;
135 for (ep
=&set
->list
, e
=*ep
; e
!=NULL
; e
=*ep
) {
136 if (e
->dest
== dest
) {
139 atomic_dec(&set
->size
);
140 set
->lastmod
= jiffies
;
141 atomic_dec(&e
->dest
->refcnt
);
149 static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set
*set
)
151 struct ip_vs_dest_list
*e
, **ep
;
153 write_lock(&set
->lock
);
154 for (ep
=&set
->list
, e
=*ep
; e
!=NULL
; e
=*ep
) {
157 * We don't kfree dest because it is refered either
158 * by its service or by the trash dest list.
160 atomic_dec(&e
->dest
->refcnt
);
163 write_unlock(&set
->lock
);
166 /* get weighted least-connection node in the destination set */
167 static inline struct ip_vs_dest
*ip_vs_dest_set_min(struct ip_vs_dest_set
*set
)
169 register struct ip_vs_dest_list
*e
;
170 struct ip_vs_dest
*dest
, *least
;
176 /* select the first destination server, whose weight > 0 */
177 for (e
=set
->list
; e
!=NULL
; e
=e
->next
) {
179 if (least
->flags
& IP_VS_DEST_F_OVERLOAD
)
182 if ((atomic_read(&least
->weight
) > 0)
183 && (least
->flags
& IP_VS_DEST_F_AVAILABLE
)) {
184 loh
= atomic_read(&least
->activeconns
) * 50
185 + atomic_read(&least
->inactconns
);
191 /* find the destination with the weighted least load */
193 for (e
=e
->next
; e
!=NULL
; e
=e
->next
) {
195 if (dest
->flags
& IP_VS_DEST_F_OVERLOAD
)
198 doh
= atomic_read(&dest
->activeconns
) * 50
199 + atomic_read(&dest
->inactconns
);
200 if ((loh
* atomic_read(&dest
->weight
) >
201 doh
* atomic_read(&least
->weight
))
202 && (dest
->flags
& IP_VS_DEST_F_AVAILABLE
)) {
208 IP_VS_DBG_BUF(6, "%s(): server %s:%d "
209 "activeconns %d refcnt %d weight %d overhead %d\n",
211 IP_VS_DBG_ADDR(least
->af
, &least
->addr
),
213 atomic_read(&least
->activeconns
),
214 atomic_read(&least
->refcnt
),
215 atomic_read(&least
->weight
), loh
);
220 /* get weighted most-connection node in the destination set */
221 static inline struct ip_vs_dest
*ip_vs_dest_set_max(struct ip_vs_dest_set
*set
)
223 register struct ip_vs_dest_list
*e
;
224 struct ip_vs_dest
*dest
, *most
;
230 /* select the first destination server, whose weight > 0 */
231 for (e
=set
->list
; e
!=NULL
; e
=e
->next
) {
233 if (atomic_read(&most
->weight
) > 0) {
234 moh
= atomic_read(&most
->activeconns
) * 50
235 + atomic_read(&most
->inactconns
);
241 /* find the destination with the weighted most load */
243 for (e
=e
->next
; e
!=NULL
; e
=e
->next
) {
245 doh
= atomic_read(&dest
->activeconns
) * 50
246 + atomic_read(&dest
->inactconns
);
247 /* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
248 if ((moh
* atomic_read(&dest
->weight
) <
249 doh
* atomic_read(&most
->weight
))
250 && (atomic_read(&dest
->weight
) > 0)) {
256 IP_VS_DBG_BUF(6, "%s(): server %s:%d "
257 "activeconns %d refcnt %d weight %d overhead %d\n",
259 IP_VS_DBG_ADDR(most
->af
, &most
->addr
), ntohs(most
->port
),
260 atomic_read(&most
->activeconns
),
261 atomic_read(&most
->refcnt
),
262 atomic_read(&most
->weight
), moh
);
268 * IPVS lblcr entry represents an association between destination
269 * IP address and its destination server set
271 struct ip_vs_lblcr_entry
{
272 struct list_head list
;
273 int af
; /* address family */
274 union nf_inet_addr addr
; /* destination IP address */
275 struct ip_vs_dest_set set
; /* destination server set */
276 unsigned long lastuse
; /* last used time */
281 * IPVS lblcr hash table
283 struct ip_vs_lblcr_table
{
284 struct list_head bucket
[IP_VS_LBLCR_TAB_SIZE
]; /* hash bucket */
285 atomic_t entries
; /* number of entries */
286 int max_size
; /* maximum size of entries */
287 struct timer_list periodic_timer
; /* collect stale entries */
288 int rover
; /* rover for expire check */
289 int counter
; /* counter for no expire */
294 * IPVS LBLCR sysctl table
297 static ctl_table vs_vars_table
[] = {
299 .procname
= "lblcr_expiration",
300 .data
= &sysctl_ip_vs_lblcr_expiration
,
301 .maxlen
= sizeof(int),
303 .proc_handler
= proc_dointvec_jiffies
,
308 static struct ctl_table_header
* sysctl_header
;
310 static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry
*en
)
313 ip_vs_dest_set_eraseall(&en
->set
);
319 * Returns hash value for IPVS LBLCR entry
321 static inline unsigned
322 ip_vs_lblcr_hashkey(int af
, const union nf_inet_addr
*addr
)
324 __be32 addr_fold
= addr
->ip
;
326 #ifdef CONFIG_IP_VS_IPV6
328 addr_fold
= addr
->ip6
[0]^addr
->ip6
[1]^
329 addr
->ip6
[2]^addr
->ip6
[3];
331 return (ntohl(addr_fold
)*2654435761UL) & IP_VS_LBLCR_TAB_MASK
;
336 * Hash an entry in the ip_vs_lblcr_table.
337 * returns bool success.
340 ip_vs_lblcr_hash(struct ip_vs_lblcr_table
*tbl
, struct ip_vs_lblcr_entry
*en
)
342 unsigned hash
= ip_vs_lblcr_hashkey(en
->af
, &en
->addr
);
344 list_add(&en
->list
, &tbl
->bucket
[hash
]);
345 atomic_inc(&tbl
->entries
);
350 * Get ip_vs_lblcr_entry associated with supplied parameters. Called under
353 static inline struct ip_vs_lblcr_entry
*
354 ip_vs_lblcr_get(int af
, struct ip_vs_lblcr_table
*tbl
,
355 const union nf_inet_addr
*addr
)
357 unsigned hash
= ip_vs_lblcr_hashkey(af
, addr
);
358 struct ip_vs_lblcr_entry
*en
;
360 list_for_each_entry(en
, &tbl
->bucket
[hash
], list
)
361 if (ip_vs_addr_equal(af
, &en
->addr
, addr
))
369 * Create or update an ip_vs_lblcr_entry, which is a mapping of a destination
370 * IP address to a server. Called under write lock.
372 static inline struct ip_vs_lblcr_entry
*
373 ip_vs_lblcr_new(struct ip_vs_lblcr_table
*tbl
, const union nf_inet_addr
*daddr
,
374 struct ip_vs_dest
*dest
)
376 struct ip_vs_lblcr_entry
*en
;
378 en
= ip_vs_lblcr_get(dest
->af
, tbl
, daddr
);
380 en
= kmalloc(sizeof(*en
), GFP_ATOMIC
);
382 pr_err("%s(): no memory\n", __func__
);
387 ip_vs_addr_copy(dest
->af
, &en
->addr
, daddr
);
388 en
->lastuse
= jiffies
;
390 /* initilize its dest set */
391 atomic_set(&(en
->set
.size
), 0);
393 rwlock_init(&en
->set
.lock
);
395 ip_vs_lblcr_hash(tbl
, en
);
398 write_lock(&en
->set
.lock
);
399 ip_vs_dest_set_insert(&en
->set
, dest
);
400 write_unlock(&en
->set
.lock
);
407 * Flush all the entries of the specified table.
409 static void ip_vs_lblcr_flush(struct ip_vs_lblcr_table
*tbl
)
412 struct ip_vs_lblcr_entry
*en
, *nxt
;
414 /* No locking required, only called during cleanup. */
415 for (i
=0; i
<IP_VS_LBLCR_TAB_SIZE
; i
++) {
416 list_for_each_entry_safe(en
, nxt
, &tbl
->bucket
[i
], list
) {
417 ip_vs_lblcr_free(en
);
423 static inline void ip_vs_lblcr_full_check(struct ip_vs_service
*svc
)
425 struct ip_vs_lblcr_table
*tbl
= svc
->sched_data
;
426 unsigned long now
= jiffies
;
428 struct ip_vs_lblcr_entry
*en
, *nxt
;
430 for (i
=0, j
=tbl
->rover
; i
<IP_VS_LBLCR_TAB_SIZE
; i
++) {
431 j
= (j
+ 1) & IP_VS_LBLCR_TAB_MASK
;
433 write_lock(&svc
->sched_lock
);
434 list_for_each_entry_safe(en
, nxt
, &tbl
->bucket
[j
], list
) {
435 if (time_after(en
->lastuse
+sysctl_ip_vs_lblcr_expiration
,
439 ip_vs_lblcr_free(en
);
440 atomic_dec(&tbl
->entries
);
442 write_unlock(&svc
->sched_lock
);
449 * Periodical timer handler for IPVS lblcr table
450 * It is used to collect stale entries when the number of entries
451 * exceeds the maximum size of the table.
453 * Fixme: we probably need more complicated algorithm to collect
454 * entries that have not been used for a long time even
455 * if the number of entries doesn't exceed the maximum size
457 * The full expiration check is for this purpose now.
459 static void ip_vs_lblcr_check_expire(unsigned long data
)
461 struct ip_vs_service
*svc
= (struct ip_vs_service
*) data
;
462 struct ip_vs_lblcr_table
*tbl
= svc
->sched_data
;
463 unsigned long now
= jiffies
;
466 struct ip_vs_lblcr_entry
*en
, *nxt
;
468 if ((tbl
->counter
% COUNT_FOR_FULL_EXPIRATION
) == 0) {
469 /* do full expiration check */
470 ip_vs_lblcr_full_check(svc
);
475 if (atomic_read(&tbl
->entries
) <= tbl
->max_size
) {
480 goal
= (atomic_read(&tbl
->entries
) - tbl
->max_size
)*4/3;
481 if (goal
> tbl
->max_size
/2)
482 goal
= tbl
->max_size
/2;
484 for (i
=0, j
=tbl
->rover
; i
<IP_VS_LBLCR_TAB_SIZE
; i
++) {
485 j
= (j
+ 1) & IP_VS_LBLCR_TAB_MASK
;
487 write_lock(&svc
->sched_lock
);
488 list_for_each_entry_safe(en
, nxt
, &tbl
->bucket
[j
], list
) {
489 if (time_before(now
, en
->lastuse
+ENTRY_TIMEOUT
))
492 ip_vs_lblcr_free(en
);
493 atomic_dec(&tbl
->entries
);
496 write_unlock(&svc
->sched_lock
);
503 mod_timer(&tbl
->periodic_timer
, jiffies
+CHECK_EXPIRE_INTERVAL
);
506 static int ip_vs_lblcr_init_svc(struct ip_vs_service
*svc
)
509 struct ip_vs_lblcr_table
*tbl
;
512 * Allocate the ip_vs_lblcr_table for this service
514 tbl
= kmalloc(sizeof(*tbl
), GFP_ATOMIC
);
516 pr_err("%s(): no memory\n", __func__
);
519 svc
->sched_data
= tbl
;
520 IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) allocated for "
521 "current service\n", sizeof(*tbl
));
524 * Initialize the hash buckets
526 for (i
=0; i
<IP_VS_LBLCR_TAB_SIZE
; i
++) {
527 INIT_LIST_HEAD(&tbl
->bucket
[i
]);
529 tbl
->max_size
= IP_VS_LBLCR_TAB_SIZE
*16;
534 * Hook periodic timer for garbage collection
536 setup_timer(&tbl
->periodic_timer
, ip_vs_lblcr_check_expire
,
538 mod_timer(&tbl
->periodic_timer
, jiffies
+ CHECK_EXPIRE_INTERVAL
);
544 static int ip_vs_lblcr_done_svc(struct ip_vs_service
*svc
)
546 struct ip_vs_lblcr_table
*tbl
= svc
->sched_data
;
548 /* remove periodic timer */
549 del_timer_sync(&tbl
->periodic_timer
);
551 /* got to clean up table entries here */
552 ip_vs_lblcr_flush(tbl
);
554 /* release the table itself */
556 IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) released\n",
563 static inline struct ip_vs_dest
*
564 __ip_vs_lblcr_schedule(struct ip_vs_service
*svc
)
566 struct ip_vs_dest
*dest
, *least
;
570 * We think the overhead of processing active connections is fifty
571 * times higher than that of inactive connections in average. (This
572 * fifty times might not be accurate, we will change it later.) We
573 * use the following formula to estimate the overhead:
574 * dest->activeconns*50 + dest->inactconns
576 * (dest overhead) / dest->weight
578 * Remember -- no floats in kernel mode!!!
579 * The comparison of h1*w2 > h2*w1 is equivalent to that of
581 * if every weight is larger than zero.
583 * The server with weight=0 is quiesced and will not receive any
586 list_for_each_entry(dest
, &svc
->destinations
, n_list
) {
587 if (dest
->flags
& IP_VS_DEST_F_OVERLOAD
)
590 if (atomic_read(&dest
->weight
) > 0) {
592 loh
= atomic_read(&least
->activeconns
) * 50
593 + atomic_read(&least
->inactconns
);
600 * Find the destination with the least load.
603 list_for_each_entry_continue(dest
, &svc
->destinations
, n_list
) {
604 if (dest
->flags
& IP_VS_DEST_F_OVERLOAD
)
607 doh
= atomic_read(&dest
->activeconns
) * 50
608 + atomic_read(&dest
->inactconns
);
609 if (loh
* atomic_read(&dest
->weight
) >
610 doh
* atomic_read(&least
->weight
)) {
616 IP_VS_DBG_BUF(6, "LBLCR: server %s:%d "
617 "activeconns %d refcnt %d weight %d overhead %d\n",
618 IP_VS_DBG_ADDR(least
->af
, &least
->addr
),
620 atomic_read(&least
->activeconns
),
621 atomic_read(&least
->refcnt
),
622 atomic_read(&least
->weight
), loh
);
629 * If this destination server is overloaded and there is a less loaded
630 * server, then return true.
633 is_overloaded(struct ip_vs_dest
*dest
, struct ip_vs_service
*svc
)
635 if (atomic_read(&dest
->activeconns
) > atomic_read(&dest
->weight
)) {
636 struct ip_vs_dest
*d
;
638 list_for_each_entry(d
, &svc
->destinations
, n_list
) {
639 if (atomic_read(&d
->activeconns
)*2
640 < atomic_read(&d
->weight
)) {
650 * Locality-Based (weighted) Least-Connection scheduling
652 static struct ip_vs_dest
*
653 ip_vs_lblcr_schedule(struct ip_vs_service
*svc
, const struct sk_buff
*skb
)
655 struct ip_vs_lblcr_table
*tbl
= svc
->sched_data
;
656 struct ip_vs_iphdr iph
;
657 struct ip_vs_dest
*dest
= NULL
;
658 struct ip_vs_lblcr_entry
*en
;
660 ip_vs_fill_iphdr(svc
->af
, skb_network_header(skb
), &iph
);
662 IP_VS_DBG(6, "%s(): Scheduling...\n", __func__
);
664 /* First look in our cache */
665 read_lock(&svc
->sched_lock
);
666 en
= ip_vs_lblcr_get(svc
->af
, tbl
, &iph
.daddr
);
668 /* We only hold a read lock, but this is atomic */
669 en
->lastuse
= jiffies
;
671 /* Get the least loaded destination */
672 read_lock(&en
->set
.lock
);
673 dest
= ip_vs_dest_set_min(&en
->set
);
674 read_unlock(&en
->set
.lock
);
676 /* More than one destination + enough time passed by, cleanup */
677 if (atomic_read(&en
->set
.size
) > 1 &&
678 time_after(jiffies
, en
->set
.lastmod
+
679 sysctl_ip_vs_lblcr_expiration
)) {
680 struct ip_vs_dest
*m
;
682 write_lock(&en
->set
.lock
);
683 m
= ip_vs_dest_set_max(&en
->set
);
685 ip_vs_dest_set_erase(&en
->set
, m
);
686 write_unlock(&en
->set
.lock
);
689 /* If the destination is not overloaded, use it */
690 if (dest
&& !is_overloaded(dest
, svc
)) {
691 read_unlock(&svc
->sched_lock
);
695 /* The cache entry is invalid, time to schedule */
696 dest
= __ip_vs_lblcr_schedule(svc
);
698 IP_VS_ERR_RL("LBLCR: no destination available\n");
699 read_unlock(&svc
->sched_lock
);
703 /* Update our cache entry */
704 write_lock(&en
->set
.lock
);
705 ip_vs_dest_set_insert(&en
->set
, dest
);
706 write_unlock(&en
->set
.lock
);
708 read_unlock(&svc
->sched_lock
);
713 /* No cache entry, time to schedule */
714 dest
= __ip_vs_lblcr_schedule(svc
);
716 IP_VS_DBG(1, "no destination available\n");
720 /* If we fail to create a cache entry, we'll just use the valid dest */
721 write_lock(&svc
->sched_lock
);
722 ip_vs_lblcr_new(tbl
, &iph
.daddr
, dest
);
723 write_unlock(&svc
->sched_lock
);
726 IP_VS_DBG_BUF(6, "LBLCR: destination IP address %s --> server %s:%d\n",
727 IP_VS_DBG_ADDR(svc
->af
, &iph
.daddr
),
728 IP_VS_DBG_ADDR(svc
->af
, &dest
->addr
), ntohs(dest
->port
));
735 * IPVS LBLCR Scheduler structure
737 static struct ip_vs_scheduler ip_vs_lblcr_scheduler
=
740 .refcnt
= ATOMIC_INIT(0),
741 .module
= THIS_MODULE
,
742 .n_list
= LIST_HEAD_INIT(ip_vs_lblcr_scheduler
.n_list
),
743 .init_service
= ip_vs_lblcr_init_svc
,
744 .done_service
= ip_vs_lblcr_done_svc
,
745 .schedule
= ip_vs_lblcr_schedule
,
749 static int __init
ip_vs_lblcr_init(void)
753 sysctl_header
= register_sysctl_paths(net_vs_ctl_path
, vs_vars_table
);
754 ret
= register_ip_vs_scheduler(&ip_vs_lblcr_scheduler
);
756 unregister_sysctl_table(sysctl_header
);
761 static void __exit
ip_vs_lblcr_cleanup(void)
763 unregister_sysctl_table(sysctl_header
);
764 unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler
);
768 module_init(ip_vs_lblcr_init
);
769 module_exit(ip_vs_lblcr_cleanup
);
770 MODULE_LICENSE("GPL");