/*
 * IPVS:        Locality-Based Least-Connection with Replication scheduler
 *
 * Authors:     Wensong Zhang <wensong@gnuchina.org>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Changes:
 *     Julian Anastasov        :    Added the missing (dest->weight>0)
 *                                  condition in the ip_vs_dest_set_max.
 *
 */

/*
 * The lblc/r algorithm is as follows (pseudo code):
 *
 *       if serverSet[dest_ip] is null then
 *               n, serverSet[dest_ip] <- {weighted least-conn node};
 *       else
 *               n <- {least-conn (alive) node in serverSet[dest_ip]};
 *               if (n is null) OR
 *                  (n.conns>n.weight AND
 *                   there is a node m with m.conns<m.weight/2) then
 *                   n <- {weighted least-conn node};
 *                   add n to serverSet[dest_ip];
 *       if |serverSet[dest_ip]| > 1 AND
 *           now - serverSet[dest_ip].lastMod > T then
 *           m <- {most conn node in serverSet[dest_ip]};
 *           remove m from serverSet[dest_ip];
 *       if serverSet[dest_ip] changed then
 *           serverSet[dest_ip].lastMod <- now;
 *
 *       return n;
 *
 */
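/*
 * Rough mapping from the pseudo code above to this file: serverSet[dest_ip]
 * is the per-destination-IP struct ip_vs_dest_set stored in a
 * struct ip_vs_lblcr_entry; the threshold T is the lblcr_expiration sysctl;
 * the weighted least-conn choices are made by ip_vs_dest_set_min() and
 * __ip_vs_lblcr_schedule(); and the overload test is is_overloaded().
 */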
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/ip.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/jiffies.h>
#include <linux/list.h>
#include <linux/slab.h>

/* for sysctl */
#include <linux/fs.h>
#include <linux/sysctl.h>
#include <net/net_namespace.h>

#include <net/ip_vs.h>
/*
 *    It is for garbage collection of stale IPVS lblcr entries,
 *    when the table is full.
 */
#define CHECK_EXPIRE_INTERVAL   (60*HZ)
#define ENTRY_TIMEOUT           (6*60*HZ)

#define DEFAULT_EXPIRATION      (24*60*60*HZ)

/*
 *    It is for full expiration check.
 *    When there is no partial expiration check (garbage collection)
 *    in a half hour, do a full expiration check to collect stale
 *    entries that haven't been touched for a day.
 */
#define COUNT_FOR_FULL_EXPIRATION   30
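/*
 * In jiffies terms the constants above mean: a partial garbage-collection
 * pass every minute (CHECK_EXPIRE_INTERVAL), a 6 minute idle timeout for
 * that pass (ENTRY_TIMEOUT), a 24 hour default expiration, and a full
 * expiration check after 30 quiet timer runs, i.e. roughly every half hour,
 * as described in the comment above.
 */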
/*
 *     for IPVS lblcr entry hash table
 */
#ifndef CONFIG_IP_VS_LBLCR_TAB_BITS
#define CONFIG_IP_VS_LBLCR_TAB_BITS      10
#endif
#define IP_VS_LBLCR_TAB_BITS     CONFIG_IP_VS_LBLCR_TAB_BITS
#define IP_VS_LBLCR_TAB_SIZE     (1 << IP_VS_LBLCR_TAB_BITS)
#define IP_VS_LBLCR_TAB_MASK     (IP_VS_LBLCR_TAB_SIZE - 1)
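/*
 * With the default CONFIG_IP_VS_LBLCR_TAB_BITS of 10 this gives
 * IP_VS_LBLCR_TAB_SIZE = 1 << 10 = 1024 hash buckets and a mask of 0x3ff,
 * so ip_vs_lblcr_hashkey() below selects a bucket with a single bitwise AND.
 */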
/*
 *      IPVS destination set structure and operations
 */
struct ip_vs_dest_set_elem {
	struct list_head	list;		/* list link */
	struct ip_vs_dest	*dest;		/* destination server */
};

struct ip_vs_dest_set {
	atomic_t		size;		/* set size */
	unsigned long		lastmod;	/* last modified time */
	struct list_head	list;		/* destination list */
	rwlock_t		lock;		/* lock for this list */
};
static struct ip_vs_dest_set_elem *
ip_vs_dest_set_insert(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
{
	struct ip_vs_dest_set_elem *e;

	list_for_each_entry(e, &set->list, list) {
		if (e->dest == dest)
			/* already existed */
			return NULL;
	}

	e = kmalloc(sizeof(*e), GFP_ATOMIC);
	if (e == NULL) {
		pr_err("%s(): no memory\n", __func__);
		return NULL;
	}

	atomic_inc(&dest->refcnt);
	e->dest = dest;

	list_add(&e->list, &set->list);
	atomic_inc(&set->size);

	set->lastmod = jiffies;
	return e;
}
static void
ip_vs_dest_set_erase(struct ip_vs_dest_set *set, struct ip_vs_dest *dest)
{
	struct ip_vs_dest_set_elem *e;

	list_for_each_entry(e, &set->list, list) {
		if (e->dest == dest) {
			/* HIT */
			atomic_dec(&set->size);
			set->lastmod = jiffies;
			atomic_dec(&e->dest->refcnt);
			list_del(&e->list);
			kfree(e);
			break;
		}
	}
}
static void ip_vs_dest_set_eraseall(struct ip_vs_dest_set *set)
{
	struct ip_vs_dest_set_elem *e, *ep;

	write_lock(&set->lock);
	list_for_each_entry_safe(e, ep, &set->list, list) {
		/*
		 * We don't kfree dest because it is referred either
		 * by its service or by the trash dest list.
		 */
		atomic_dec(&e->dest->refcnt);
		list_del(&e->list);
		kfree(e);
	}
	write_unlock(&set->lock);
}
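/*
 * Reference counting for the set, as implemented above:
 * ip_vs_dest_set_insert() takes a reference on the destination with
 * atomic_inc(&dest->refcnt) and the erase paths drop it again; the
 * ip_vs_dest itself is never freed here, because it is owned either by its
 * service or by the trash dest list (see ip_vs_dest_set_eraseall).
 */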
/* get weighted least-connection node in the destination set */
static inline struct ip_vs_dest *ip_vs_dest_set_min(struct ip_vs_dest_set *set)
{
	register struct ip_vs_dest_set_elem *e;
	struct ip_vs_dest *dest, *least;
	int loh, doh;

	if (set == NULL)
		return NULL;

	/* select the first destination server, whose weight > 0 */
	list_for_each_entry(e, &set->list, list) {
		least = e->dest;
		if (least->flags & IP_VS_DEST_F_OVERLOAD)
			continue;

		if ((atomic_read(&least->weight) > 0)
		    && (least->flags & IP_VS_DEST_F_AVAILABLE)) {
			loh = ip_vs_dest_conn_overhead(least);
			goto nextstage;
		}
	}
	return NULL;

	/* find the destination with the weighted least load */
  nextstage:
	list_for_each_entry(e, &set->list, list) {
		dest = e->dest;
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			continue;

		doh = ip_vs_dest_conn_overhead(dest);
		if ((loh * atomic_read(&dest->weight) >
		     doh * atomic_read(&least->weight))
		    && (dest->flags & IP_VS_DEST_F_AVAILABLE)) {
			least = dest;
			loh = doh;
		}
	}

	IP_VS_DBG_BUF(6, "%s(): server %s:%d "
		      "activeconns %d refcnt %d weight %d overhead %d\n",
		      __func__,
		      IP_VS_DBG_ADDR(least->af, &least->addr),
		      ntohs(least->port),
		      atomic_read(&least->activeconns),
		      atomic_read(&least->refcnt),
		      atomic_read(&least->weight), loh);
	return least;
}
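/*
 * The comparison above avoids division (no floats in kernel mode): choosing
 * the smaller of loh/least->weight and doh/dest->weight is done by
 * cross-multiplying, loh*dest->weight > doh*least->weight, which is
 * equivalent as long as both weights are positive.
 */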
/* get weighted most-connection node in the destination set */
static inline struct ip_vs_dest *ip_vs_dest_set_max(struct ip_vs_dest_set *set)
{
	register struct ip_vs_dest_set_elem *e;
	struct ip_vs_dest *dest, *most;
	int moh, doh;

	if (set == NULL)
		return NULL;

	/* select the first destination server, whose weight > 0 */
	list_for_each_entry(e, &set->list, list) {
		most = e->dest;
		if (atomic_read(&most->weight) > 0) {
			moh = ip_vs_dest_conn_overhead(most);
			goto nextstage;
		}
	}
	return NULL;

	/* find the destination with the weighted most load */
  nextstage:
	list_for_each_entry(e, &set->list, list) {
		dest = e->dest;
		doh = ip_vs_dest_conn_overhead(dest);
		/* moh/mw < doh/dw ==> moh*dw < doh*mw, where mw,dw>0 */
		if ((moh * atomic_read(&dest->weight) <
		     doh * atomic_read(&most->weight))
		    && (atomic_read(&dest->weight) > 0)) {
			most = dest;
			moh = doh;
		}
	}

	IP_VS_DBG_BUF(6, "%s(): server %s:%d "
		      "activeconns %d refcnt %d weight %d overhead %d\n",
		      __func__,
		      IP_VS_DBG_ADDR(most->af, &most->addr), ntohs(most->port),
		      atomic_read(&most->activeconns),
		      atomic_read(&most->refcnt),
		      atomic_read(&most->weight), moh);
	return most;
}
/*
 *      IPVS lblcr entry represents an association between destination
 *      IP address and its destination server set
 */
struct ip_vs_lblcr_entry {
	struct list_head	list;
	int			af;		/* address family */
	union nf_inet_addr	addr;		/* destination IP address */
	struct ip_vs_dest_set	set;		/* destination server set */
	unsigned long		lastuse;	/* last used time */
};

/*
 *      IPVS lblcr hash table
 */
struct ip_vs_lblcr_table {
	struct list_head	bucket[IP_VS_LBLCR_TAB_SIZE];	/* hash bucket */
	atomic_t		entries;	/* number of entries */
	int			max_size;	/* maximum size of entries */
	struct timer_list	periodic_timer;	/* collect stale entries */
	int			rover;		/* rover for expire check */
	int			counter;	/* counter for no expire */
};
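/*
 * Size note: the bucket array alone is IP_VS_LBLCR_TAB_SIZE list heads,
 * i.e. 1024 * sizeof(struct list_head) (16 KiB with 8-byte pointers), which
 * is why one table is allocated per service in ip_vs_lblcr_init_svc()
 * rather than anything per packet.
 */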
#ifdef CONFIG_SYSCTL
/*
 *      IPVS LBLCR sysctl table
 */

static ctl_table vs_vars_table[] = {
	{
		.procname	= "lblcr_expiration",
		.data		= NULL,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};
#endif
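/*
 * The table above is registered in __ip_vs_lblcr_init() below under the
 * IPVS sysctl directory, so with the usual net_vs_ctl_path it should appear
 * as /proc/sys/net/ipv4/vs/lblcr_expiration. proc_dointvec_jiffies lets the
 * value be read and written in seconds while it is stored in jiffies.
 */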
static inline void ip_vs_lblcr_free(struct ip_vs_lblcr_entry *en)
{
	list_del(&en->list);
	ip_vs_dest_set_eraseall(&en->set);
	kfree(en);
}
/*
 *	Returns hash value for IPVS LBLCR entry
 */
static inline unsigned
ip_vs_lblcr_hashkey(int af, const union nf_inet_addr *addr)
{
	__be32 addr_fold = addr->ip;

#ifdef CONFIG_IP_VS_IPV6
	if (af == AF_INET6)
		addr_fold = addr->ip6[0]^addr->ip6[1]^
			    addr->ip6[2]^addr->ip6[3];
#endif
	return (ntohl(addr_fold)*2654435761UL) & IP_VS_LBLCR_TAB_MASK;
}
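/*
 * 2654435761 is the 32-bit golden-ratio multiplier commonly used for
 * multiplicative hashing; multiplying the folded address by it and masking
 * with IP_VS_LBLCR_TAB_MASK spreads nearby addresses over the buckets.
 */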
/*
 *	Hash an entry in the ip_vs_lblcr_table.
 */
static void
ip_vs_lblcr_hash(struct ip_vs_lblcr_table *tbl, struct ip_vs_lblcr_entry *en)
{
	unsigned hash = ip_vs_lblcr_hashkey(en->af, &en->addr);

	list_add(&en->list, &tbl->bucket[hash]);
	atomic_inc(&tbl->entries);
}
/*
 *  Get ip_vs_lblcr_entry associated with supplied parameters. Called under
 *  read lock.
 */
static inline struct ip_vs_lblcr_entry *
ip_vs_lblcr_get(int af, struct ip_vs_lblcr_table *tbl,
		const union nf_inet_addr *addr)
{
	unsigned hash = ip_vs_lblcr_hashkey(af, addr);
	struct ip_vs_lblcr_entry *en;

	list_for_each_entry(en, &tbl->bucket[hash], list)
		if (ip_vs_addr_equal(af, &en->addr, addr))
			return en;

	return NULL;
}
/*
 * Create or update an ip_vs_lblcr_entry, which is a mapping of a destination
 * IP address to a server. Called under write lock.
 */
static inline struct ip_vs_lblcr_entry *
ip_vs_lblcr_new(struct ip_vs_lblcr_table *tbl, const union nf_inet_addr *daddr,
		struct ip_vs_dest *dest)
{
	struct ip_vs_lblcr_entry *en;

	en = ip_vs_lblcr_get(dest->af, tbl, daddr);
	if (!en) {
		en = kmalloc(sizeof(*en), GFP_ATOMIC);
		if (!en) {
			pr_err("%s(): no memory\n", __func__);
			return NULL;
		}

		en->af = dest->af;
		ip_vs_addr_copy(dest->af, &en->addr, daddr);
		en->lastuse = jiffies;

		/* initialize its dest set */
		atomic_set(&(en->set.size), 0);
		INIT_LIST_HEAD(&en->set.list);
		rwlock_init(&en->set.lock);

		ip_vs_lblcr_hash(tbl, en);
	}

	write_lock(&en->set.lock);
	ip_vs_dest_set_insert(&en->set, dest);
	write_unlock(&en->set.lock);

	return en;
}
/*
 *      Flush all the entries of the specified table.
 */
static void ip_vs_lblcr_flush(struct ip_vs_lblcr_table *tbl)
{
	int i;
	struct ip_vs_lblcr_entry *en, *nxt;

	/* No locking required, only called during cleanup. */
	for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++) {
		list_for_each_entry_safe(en, nxt, &tbl->bucket[i], list) {
			ip_vs_lblcr_free(en);
		}
	}
}
static int sysctl_lblcr_expiration(struct ip_vs_service *svc)
{
#ifdef CONFIG_SYSCTL
	struct netns_ipvs *ipvs = net_ipvs(svc->net);
	return ipvs->sysctl_lblcr_expiration;
#else
	return DEFAULT_EXPIRATION;
#endif
}
static inline void ip_vs_lblcr_full_check(struct ip_vs_service *svc)
{
	struct ip_vs_lblcr_table *tbl = svc->sched_data;
	unsigned long now = jiffies;
	int i, j;
	struct ip_vs_lblcr_entry *en, *nxt;

	for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
		j = (j + 1) & IP_VS_LBLCR_TAB_MASK;

		write_lock(&svc->sched_lock);
		list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
			if (time_after(en->lastuse +
				       sysctl_lblcr_expiration(svc), now))
				continue;

			ip_vs_lblcr_free(en);
			atomic_dec(&tbl->entries);
		}
		write_unlock(&svc->sched_lock);
	}
	tbl->rover = j;
}
/*
 *      Periodical timer handler for IPVS lblcr table
 *      It is used to collect stale entries when the number of entries
 *      exceeds the maximum size of the table.
 *
 *      Fixme: we probably need more complicated algorithm to collect
 *             entries that have not been used for a long time even
 *             if the number of entries doesn't exceed the maximum size
 *             of the table.
 *      The full expiration check is for this purpose now.
 */
static void ip_vs_lblcr_check_expire(unsigned long data)
{
	struct ip_vs_service *svc = (struct ip_vs_service *) data;
	struct ip_vs_lblcr_table *tbl = svc->sched_data;
	unsigned long now = jiffies;
	int goal;
	int i, j;
	struct ip_vs_lblcr_entry *en, *nxt;

	if ((tbl->counter % COUNT_FOR_FULL_EXPIRATION) == 0) {
		/* do full expiration check */
		ip_vs_lblcr_full_check(svc);
		tbl->counter = 1;
		goto out;
	}

	if (atomic_read(&tbl->entries) <= tbl->max_size) {
		tbl->counter++;
		goto out;
	}

	goal = (atomic_read(&tbl->entries) - tbl->max_size)*4/3;
	if (goal > tbl->max_size/2)
		goal = tbl->max_size/2;

	for (i = 0, j = tbl->rover; i < IP_VS_LBLCR_TAB_SIZE; i++) {
		j = (j + 1) & IP_VS_LBLCR_TAB_MASK;

		write_lock(&svc->sched_lock);
		list_for_each_entry_safe(en, nxt, &tbl->bucket[j], list) {
			if (time_before(now, en->lastuse + ENTRY_TIMEOUT))
				continue;

			ip_vs_lblcr_free(en);
			atomic_dec(&tbl->entries);
			goal--;
		}
		write_unlock(&svc->sched_lock);
		if (goal <= 0)
			break;
	}
	tbl->rover = j;

out:
	mod_timer(&tbl->periodic_timer, jiffies+CHECK_EXPIRE_INTERVAL);
}
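/*
 * The partial collection above tries to free "goal" entries per run: 4/3 of
 * the amount by which the table exceeds max_size, capped at max_size/2, and
 * it only removes entries idle for more than ENTRY_TIMEOUT. tbl->rover
 * remembers where the scan stopped so successive runs walk other buckets.
 */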
static int ip_vs_lblcr_init_svc(struct ip_vs_service *svc)
{
	int i;
	struct ip_vs_lblcr_table *tbl;

	/*
	 *    Allocate the ip_vs_lblcr_table for this service
	 */
	tbl = kmalloc(sizeof(*tbl), GFP_ATOMIC);
	if (tbl == NULL) {
		pr_err("%s(): no memory\n", __func__);
		return -ENOMEM;
	}

	svc->sched_data = tbl;
	IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) allocated for "
		  "current service\n", sizeof(*tbl));

	/*
	 *    Initialize the hash buckets
	 */
	for (i = 0; i < IP_VS_LBLCR_TAB_SIZE; i++) {
		INIT_LIST_HEAD(&tbl->bucket[i]);
	}
	tbl->max_size = IP_VS_LBLCR_TAB_SIZE*16;
	tbl->rover = 0;
	tbl->counter = 1;

	/*
	 *    Hook periodic timer for garbage collection
	 */
	setup_timer(&tbl->periodic_timer, ip_vs_lblcr_check_expire,
		    (unsigned long)svc);
	mod_timer(&tbl->periodic_timer, jiffies + CHECK_EXPIRE_INTERVAL);

	return 0;
}
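/*
 * Note on the limit chosen above: with the default 1024-bucket table,
 * max_size = IP_VS_LBLCR_TAB_SIZE * 16 allows up to 16384 entries before
 * the periodic timer starts partial garbage collection.
 */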
static int ip_vs_lblcr_done_svc(struct ip_vs_service *svc)
{
	struct ip_vs_lblcr_table *tbl = svc->sched_data;

	/* remove periodic timer */
	del_timer_sync(&tbl->periodic_timer);

	/* got to clean up table entries here */
	ip_vs_lblcr_flush(tbl);

	/* release the table itself */
	kfree(tbl);
	IP_VS_DBG(6, "LBLCR hash table (memory=%Zdbytes) released\n",
		  sizeof(*tbl));

	return 0;
}
static inline struct ip_vs_dest *
__ip_vs_lblcr_schedule(struct ip_vs_service *svc)
{
	struct ip_vs_dest *dest, *least;
	int loh, doh;

	/*
	 * We use the following formula to estimate the load:
	 *                (dest overhead) / dest->weight
	 *
	 * Remember -- no floats in kernel mode!!!
	 * The comparison of h1*w2 > h2*w1 is equivalent to that of
	 *                h1/w1 > h2/w2
	 * if every weight is larger than zero.
	 *
	 * The server with weight=0 is quiesced and will not receive any
	 * new connection.
	 */
	list_for_each_entry(dest, &svc->destinations, n_list) {
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			continue;

		if (atomic_read(&dest->weight) > 0) {
			least = dest;
			loh = ip_vs_dest_conn_overhead(least);
			goto nextstage;
		}
	}
	return NULL;

	/*
	 *    Find the destination with the least load.
	 */
  nextstage:
	list_for_each_entry_continue(dest, &svc->destinations, n_list) {
		if (dest->flags & IP_VS_DEST_F_OVERLOAD)
			continue;

		doh = ip_vs_dest_conn_overhead(dest);
		if (loh * atomic_read(&dest->weight) >
		    doh * atomic_read(&least->weight)) {
			least = dest;
			loh = doh;
		}
	}

	IP_VS_DBG_BUF(6, "LBLCR: server %s:%d "
		      "activeconns %d refcnt %d weight %d overhead %d\n",
		      IP_VS_DBG_ADDR(least->af, &least->addr),
		      ntohs(least->port),
		      atomic_read(&least->activeconns),
		      atomic_read(&least->refcnt),
		      atomic_read(&least->weight), loh);

	return least;
}
/*
 *   If this destination server is overloaded and there is a less loaded
 *   server, then return true.
 */
static inline int
is_overloaded(struct ip_vs_dest *dest, struct ip_vs_service *svc)
{
	if (atomic_read(&dest->activeconns) > atomic_read(&dest->weight)) {
		struct ip_vs_dest *d;

		list_for_each_entry(d, &svc->destinations, n_list) {
			if (atomic_read(&d->activeconns)*2
			    < atomic_read(&d->weight)) {
				return 1;
			}
		}
	}
	return 0;
}
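/*
 * This is the overload condition from the pseudo code in the header:
 * n.conns > n.weight AND there is a node m with m.conns < m.weight/2,
 * written with a multiplication (activeconns*2 < weight) instead of a
 * division.
 */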
/*
 *    Locality-Based (weighted) Least-Connection scheduling
 */
static struct ip_vs_dest *
ip_vs_lblcr_schedule(struct ip_vs_service *svc, const struct sk_buff *skb)
{
	struct ip_vs_lblcr_table *tbl = svc->sched_data;
	struct ip_vs_iphdr iph;
	struct ip_vs_dest *dest = NULL;
	struct ip_vs_lblcr_entry *en;

	ip_vs_fill_iphdr(svc->af, skb_network_header(skb), &iph);

	IP_VS_DBG(6, "%s(): Scheduling...\n", __func__);

	/* First look in our cache */
	read_lock(&svc->sched_lock);
	en = ip_vs_lblcr_get(svc->af, tbl, &iph.daddr);
	if (en) {
		/* We only hold a read lock, but this is atomic */
		en->lastuse = jiffies;

		/* Get the least loaded destination */
		read_lock(&en->set.lock);
		dest = ip_vs_dest_set_min(&en->set);
		read_unlock(&en->set.lock);

		/* More than one destination + enough time passed by, cleanup */
		if (atomic_read(&en->set.size) > 1 &&
		    time_after(jiffies, en->set.lastmod +
				sysctl_lblcr_expiration(svc))) {
			struct ip_vs_dest *m;

			write_lock(&en->set.lock);
			m = ip_vs_dest_set_max(&en->set);
			if (m)
				ip_vs_dest_set_erase(&en->set, m);
			write_unlock(&en->set.lock);
		}

		/* If the destination is not overloaded, use it */
		if (dest && !is_overloaded(dest, svc)) {
			read_unlock(&svc->sched_lock);
			goto out;
		}

		/* The cache entry is invalid, time to schedule */
		dest = __ip_vs_lblcr_schedule(svc);
		if (!dest) {
			ip_vs_scheduler_err(svc, "no destination available");
			read_unlock(&svc->sched_lock);
			return NULL;
		}

		/* Update our cache entry */
		write_lock(&en->set.lock);
		ip_vs_dest_set_insert(&en->set, dest);
		write_unlock(&en->set.lock);
	}
	read_unlock(&svc->sched_lock);

	if (dest)
		goto out;

	/* No cache entry, time to schedule */
	dest = __ip_vs_lblcr_schedule(svc);
	if (!dest) {
		IP_VS_DBG(1, "no destination available\n");
		return NULL;
	}

	/* If we fail to create a cache entry, we'll just use the valid dest */
	write_lock(&svc->sched_lock);
	ip_vs_lblcr_new(tbl, &iph.daddr, dest);
	write_unlock(&svc->sched_lock);

out:
	IP_VS_DBG_BUF(6, "LBLCR: destination IP address %s --> server %s:%d\n",
		      IP_VS_DBG_ADDR(svc->af, &iph.daddr),
		      IP_VS_DBG_ADDR(svc->af, &dest->addr), ntohs(dest->port));

	return dest;
}
/*
 *      IPVS LBLCR Scheduler structure
 */
static struct ip_vs_scheduler ip_vs_lblcr_scheduler =
{
	.name =			"lblcr",
	.refcnt =		ATOMIC_INIT(0),
	.module =		THIS_MODULE,
	.n_list =		LIST_HEAD_INIT(ip_vs_lblcr_scheduler.n_list),
	.init_service =		ip_vs_lblcr_init_svc,
	.done_service =		ip_vs_lblcr_done_svc,
	.schedule =		ip_vs_lblcr_schedule,
};
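/*
 * Usage sketch (addresses here are hypothetical; adjust to your setup):
 * once this module is loaded, the scheduler is selected by the name
 * registered above, e.g.
 *
 *	ipvsadm -A -t 192.168.0.1:80 -s lblcr
 *	ipvsadm -a -t 192.168.0.1:80 -r 10.0.0.2:80 -m
 *
 * which creates a virtual service using lblcr and adds one NAT-forwarded
 * real server to it.
 */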
/*
 *  per netns init.
 */
#ifdef CONFIG_SYSCTL
static int __net_init __ip_vs_lblcr_init(struct net *net)
{
	struct netns_ipvs *ipvs = net_ipvs(net);

	if (!net_eq(net, &init_net)) {
		ipvs->lblcr_ctl_table = kmemdup(vs_vars_table,
						sizeof(vs_vars_table),
						GFP_KERNEL);
		if (ipvs->lblcr_ctl_table == NULL)
			return -ENOMEM;
	} else
		ipvs->lblcr_ctl_table = vs_vars_table;
	ipvs->sysctl_lblcr_expiration = DEFAULT_EXPIRATION;
	ipvs->lblcr_ctl_table[0].data = &ipvs->sysctl_lblcr_expiration;

	ipvs->lblcr_ctl_header =
		register_net_sysctl_table(net, net_vs_ctl_path,
					  ipvs->lblcr_ctl_table);
	if (!ipvs->lblcr_ctl_header) {
		if (!net_eq(net, &init_net))
			kfree(ipvs->lblcr_ctl_table);
		return -ENOMEM;
	}

	return 0;
}

static void __net_exit __ip_vs_lblcr_exit(struct net *net)
{
	struct netns_ipvs *ipvs = net_ipvs(net);

	unregister_net_sysctl_table(ipvs->lblcr_ctl_header);

	if (!net_eq(net, &init_net))
		kfree(ipvs->lblcr_ctl_table);
}

#else

static int __net_init __ip_vs_lblcr_init(struct net *net) { return 0; }
static void __net_exit __ip_vs_lblcr_exit(struct net *net) { }

#endif
static struct pernet_operations ip_vs_lblcr_ops = {
	.init = __ip_vs_lblcr_init,
	.exit = __ip_vs_lblcr_exit,
};

static int __init ip_vs_lblcr_init(void)
{
	int ret;

	ret = register_pernet_subsys(&ip_vs_lblcr_ops);
	if (ret)
		return ret;

	ret = register_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
	if (ret)
		unregister_pernet_subsys(&ip_vs_lblcr_ops);
	return ret;
}

static void __exit ip_vs_lblcr_cleanup(void)
{
	unregister_ip_vs_scheduler(&ip_vs_lblcr_scheduler);
	unregister_pernet_subsys(&ip_vs_lblcr_ops);
}

module_init(ip_vs_lblcr_init);
module_exit(ip_vs_lblcr_cleanup);
MODULE_LICENSE("GPL");
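/*
 * The module is normally loaded with "modprobe ip_vs_lblcr", or on demand
 * by the IPVS core when a service requests the scheduler name "lblcr";
 * ip_vs_lblcr_init() then registers both the per-netns sysctl state and the
 * scheduler itself.
 */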