/*
 * ip_vs_app.c: Application module support for IPVS
 *
 * Version:	$Id: ip_vs_app.c,v 1.17 2003/03/22 06:31:21 wensong Exp $
 *
 * Authors:	Wensong Zhang <wensong@linuxvirtualserver.org>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Most code here is taken from ip_masq_app.c in kernel 2.2. The difference
 * is that ip_vs_app module handles the reverse direction (incoming requests
 * and outgoing responses).
 *
 *		IP_MASQ_APP application masquerading module
 *
 * Author:	Juan Jose Ciarlante, <jjciarla@raiz.uncu.edu.ar>
 *
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <asm/system.h>
#include <linux/stat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>

#include <net/ip_vs.h>
EXPORT_SYMBOL(register_ip_vs_app);
EXPORT_SYMBOL(unregister_ip_vs_app);
EXPORT_SYMBOL(register_ip_vs_app_inc);

/* ipvs application list head */
static LIST_HEAD(ip_vs_app_list);

/* serializes changes to ip_vs_app_list and to each app's incs_list */
static DEFINE_MUTEX(__ip_vs_app_mutex);
/*
 *	Get an ip_vs_app object
 */
static inline int ip_vs_app_get(struct ip_vs_app *app)
{
	/* test and get the module atomically */
	if (app->module)
		return try_module_get(app->module);
	else
		return 1;
}

static inline void ip_vs_app_put(struct ip_vs_app *app)
{
	if (app->module)
		module_put(app->module);
}
/*
 *	Allocate/initialize app incarnation and register it in proto apps.
 */
static int
ip_vs_app_inc_new(struct ip_vs_app *app, __u16 proto, __u16 port)
{
	struct ip_vs_protocol *pp;
	struct ip_vs_app *inc;
	int ret;

	if (!(pp = ip_vs_proto_get(proto)))
		return -EPROTONOSUPPORT;

	if (!pp->unregister_app)
		return -EOPNOTSUPP;

	inc = kmemdup(app, sizeof(*inc), GFP_KERNEL);
	if (!inc)
		return -ENOMEM;
	INIT_LIST_HEAD(&inc->p_list);
	INIT_LIST_HEAD(&inc->incs_list);
	inc->app = app;
	inc->port = htons(port);
	atomic_set(&inc->usecnt, 0);

	if (app->timeouts) {
		inc->timeout_table =
			ip_vs_create_timeout_table(app->timeouts,
						   app->timeouts_size);
		if (!inc->timeout_table) {
			ret = -ENOMEM;
			goto out;
		}
	}

	ret = pp->register_app(inc);
	if (ret)
		goto out;

	list_add(&inc->a_list, &app->incs_list);
	IP_VS_DBG(9, "%s application %s:%u registered\n",
		  pp->name, inc->name, inc->port);

	return 0;

  out:
	kfree(inc->timeout_table);
	kfree(inc);
	return ret;
}
/*
 *	Release app incarnation
 */
static void
ip_vs_app_inc_release(struct ip_vs_app *inc)
{
	struct ip_vs_protocol *pp;

	if (!(pp = ip_vs_proto_get(inc->protocol)))
		return;

	if (pp->unregister_app)
		pp->unregister_app(inc);

	IP_VS_DBG(9, "%s App %s:%u unregistered\n",
		  pp->name, inc->name, inc->port);

	list_del(&inc->a_list);

	kfree(inc->timeout_table);
	kfree(inc);
}
/*
 *	Get reference to app inc (only called from softirq)
 */
int ip_vs_app_inc_get(struct ip_vs_app *inc)
{
	int result;

	atomic_inc(&inc->usecnt);
	if (unlikely((result = ip_vs_app_get(inc->app)) != 1))
		atomic_dec(&inc->usecnt);
	return result;
}

/*
 *	Put the app inc (only called from timer or net softirq)
 */
void ip_vs_app_inc_put(struct ip_vs_app *inc)
{
	ip_vs_app_put(inc->app);
	atomic_dec(&inc->usecnt);
}
/*
 *	Register an application incarnation in protocol applications
 */
int
register_ip_vs_app_inc(struct ip_vs_app *app, __u16 proto, __u16 port)
{
	int result;

	mutex_lock(&__ip_vs_app_mutex);
	result = ip_vs_app_inc_new(app, proto, port);
	mutex_unlock(&__ip_vs_app_mutex);

	return result;
}
/*
 *	ip_vs_app registration routine
 */
int register_ip_vs_app(struct ip_vs_app *app)
{
	/* increase the module use count */
	ip_vs_use_count_inc();

	mutex_lock(&__ip_vs_app_mutex);
	list_add(&app->a_list, &ip_vs_app_list);
	mutex_unlock(&__ip_vs_app_mutex);

	return 0;
}
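/*
 * Illustrative sketch (not part of this file): an application helper
 * module would typically fill in a struct ip_vs_app and register it
 * together with one incarnation per configured port.  Only fields that
 * this file actually references (name, protocol, module, incs_list,
 * pkt_out, pkt_in) are shown; my_ftp_app, my_ftp_out and my_ftp_in are
 * hypothetical names.
 *
 *	static struct ip_vs_app my_ftp_app = {
 *		.name		= "ftp",
 *		.protocol	= IPPROTO_TCP,
 *		.module		= THIS_MODULE,
 *		.incs_list	= LIST_HEAD_INIT(my_ftp_app.incs_list),
 *		.pkt_out	= my_ftp_out,
 *		.pkt_in		= my_ftp_in,
 *	};
 *
 *	static int __init my_app_init(void)
 *	{
 *		int ret = register_ip_vs_app(&my_ftp_app);
 *		if (ret)
 *			return ret;
 *		ret = register_ip_vs_app_inc(&my_ftp_app, IPPROTO_TCP, 21);
 *		if (ret)
 *			unregister_ip_vs_app(&my_ftp_app);
 *		return ret;
 *	}
 *	module_init(my_app_init);
 */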
/*
 *	ip_vs_app unregistration routine
 *	We are sure there are no app incarnations attached to services
 */
void unregister_ip_vs_app(struct ip_vs_app *app)
{
	struct ip_vs_app *inc, *nxt;

	mutex_lock(&__ip_vs_app_mutex);
	list_for_each_entry_safe(inc, nxt, &app->incs_list, a_list) {
		ip_vs_app_inc_release(inc);
	}
	list_del(&app->a_list);
	mutex_unlock(&__ip_vs_app_mutex);

	/* decrease the module use count */
	ip_vs_use_count_dec();
}
/*
 *	Bind ip_vs_conn to its ip_vs_app (called by cp constructor)
 */
int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp)
{
	return pp->app_conn_bind(cp);
}

/*
 *	Unbind cp from application incarnation (called by cp destructor)
 */
void ip_vs_unbind_app(struct ip_vs_conn *cp)
{
	struct ip_vs_app *inc = cp->app;

	if (!inc)
		return;

	if (inc->unbind_conn)
		inc->unbind_conn(inc, cp);
	if (inc->done_conn)
		inc->done_conn(inc, cp);
	ip_vs_app_inc_put(inc);
	cp->app = NULL;
}
/*
 *	Fixes th->seq based on ip_vs_seq info.
 */
static inline void vs_fix_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
{
	__u32 seq = ntohl(th->seq);

	/*
	 *	Adjust seq with delta-offset for all packets after
	 *	the most recent resized pkt seq and with previous_delta offset
	 *	for all packets before most recent resized pkt seq.
	 */
	if (vseq->delta || vseq->previous_delta) {
		if (after(seq, vseq->init_seq)) {
			th->seq = htonl(seq + vseq->delta);
			IP_VS_DBG(9, "vs_fix_seq(): added delta (%d) to seq\n",
				  vseq->delta);
		} else {
			th->seq = htonl(seq + vseq->previous_delta);
			IP_VS_DBG(9, "vs_fix_seq(): added previous_delta "
				  "(%d) to seq\n", vseq->previous_delta);
		}
	}
}
/*
 *	Fixes th->ack_seq based on ip_vs_seq info.
 */
static inline void
vs_fix_ack_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
{
	__u32 ack_seq = ntohl(th->ack_seq);

	/*
	 *	Adjust ack_seq with delta-offset for packets AFTER the most
	 *	recent resized pkt; for packets before it, use previous_delta.
	 */
	if (vseq->delta || vseq->previous_delta) {
		/* ack_seq is the number of the next octet the sender expects
		   to receive, so compare it with init_seq+delta */
		if (after(ack_seq, vseq->init_seq + vseq->delta)) {
			th->ack_seq = htonl(ack_seq - vseq->delta);
			IP_VS_DBG(9, "vs_fix_ack_seq(): subtracted delta "
				  "(%d) from ack_seq\n", vseq->delta);
		} else {
			th->ack_seq = htonl(ack_seq - vseq->previous_delta);
			IP_VS_DBG(9, "vs_fix_ack_seq(): subtracted "
				  "previous_delta (%d) from ack_seq\n",
				  vseq->previous_delta);
		}
	}
}
/*
 *	Updates ip_vs_seq if pkt has been resized
 *	Assumes already checked proto==IPPROTO_TCP and diff!=0.
 */
static inline void vs_seq_update(struct ip_vs_conn *cp, struct ip_vs_seq *vseq,
				 unsigned flag, __u32 seq, int diff)
{
	/* spinlock is to keep updating cp->flags atomic */
	spin_lock(&cp->lock);
	if (!(cp->flags & flag) || after(seq, vseq->init_seq)) {
		vseq->previous_delta = vseq->delta;
		vseq->delta += diff;
		vseq->init_seq = seq;
		cp->flags |= flag;
	}
	spin_unlock(&cp->lock);
}
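/*
 * Worked example of the sequence bookkeeping above (illustrative
 * numbers): suppose the pkt_out hook grows a segment whose original
 * seq was 1000 by 7 bytes (diff = 7).  vs_seq_update() then records
 * init_seq = 1000, delta = 7, previous_delta = 0 in cp->out_seq and
 * sets IP_VS_CONN_F_OUT_SEQ.  From then on vs_fix_seq() adds 7 to
 * th->seq of every later packet in that direction (seq after 1000),
 * while vs_fix_ack_seq() subtracts 7 from th->ack_seq of packets
 * flowing the other way (ack_seq after 1007), so both ends keep seeing
 * consistent sequence numbers.  Packets from before the resized one
 * still get previous_delta applied.
 */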
static inline int app_tcp_pkt_out(struct ip_vs_conn *cp, struct sk_buff **pskb,
				  struct ip_vs_app *app)
{
	int diff;
	const unsigned int tcp_offset = ip_hdrlen(*pskb);
	struct tcphdr *th;
	__u32 seq;

	if (!ip_vs_make_skb_writable(pskb, tcp_offset + sizeof(*th)))
		return 0;

	th = (struct tcphdr *)(skb_network_header(*pskb) + tcp_offset);

	/*
	 *	Remember seq number in case this pkt gets resized
	 */
	seq = ntohl(th->seq);

	/*
	 *	Fix seq stuff if flagged as so.
	 */
	if (cp->flags & IP_VS_CONN_F_OUT_SEQ)
		vs_fix_seq(&cp->out_seq, th);
	if (cp->flags & IP_VS_CONN_F_IN_SEQ)
		vs_fix_ack_seq(&cp->in_seq, th);

	/*
	 *	Call private output hook function
	 */
	if (app->pkt_out == NULL)
		return 1;

	if (!app->pkt_out(app, cp, pskb, &diff))
		return 0;

	/*
	 *	Update ip_vs seq stuff if len has changed.
	 */
	if (diff != 0)
		vs_seq_update(cp, &cp->out_seq,
			      IP_VS_CONN_F_OUT_SEQ, seq, diff);

	return 1;
}
/*
 *	Output pkt hook. Calls the bound ip_vs_app's output function.
 *	Called by the ipvs packet handler; assumes cp != NULL has already
 *	been checked.  Returns false if the packet cannot be handled (oom).
 */
int ip_vs_app_pkt_out(struct ip_vs_conn *cp, struct sk_buff **pskb)
{
	struct ip_vs_app *app;

	/*
	 *	check if application module is bound to
	 *	this ip_vs_conn.
	 */
	if ((app = cp->app) == NULL)
		return 1;

	/* TCP is complicated */
	if (cp->protocol == IPPROTO_TCP)
		return app_tcp_pkt_out(cp, pskb, app);

	/*
	 *	Call private output hook function
	 */
	if (app->pkt_out == NULL)
		return 1;

	return app->pkt_out(app, cp, pskb, NULL);
}
static inline int app_tcp_pkt_in(struct ip_vs_conn *cp, struct sk_buff **pskb,
				 struct ip_vs_app *app)
{
	int diff;
	const unsigned int tcp_offset = ip_hdrlen(*pskb);
	struct tcphdr *th;
	__u32 seq;

	if (!ip_vs_make_skb_writable(pskb, tcp_offset + sizeof(*th)))
		return 0;

	th = (struct tcphdr *)(skb_network_header(*pskb) + tcp_offset);

	/*
	 *	Remember seq number in case this pkt gets resized
	 */
	seq = ntohl(th->seq);

	/*
	 *	Fix seq stuff if flagged as so.
	 */
	if (cp->flags & IP_VS_CONN_F_IN_SEQ)
		vs_fix_seq(&cp->in_seq, th);
	if (cp->flags & IP_VS_CONN_F_OUT_SEQ)
		vs_fix_ack_seq(&cp->out_seq, th);

	/*
	 *	Call private input hook function
	 */
	if (app->pkt_in == NULL)
		return 1;

	if (!app->pkt_in(app, cp, pskb, &diff))
		return 0;

	/*
	 *	Update ip_vs seq stuff if len has changed.
	 */
	if (diff != 0)
		vs_seq_update(cp, &cp->in_seq,
			      IP_VS_CONN_F_IN_SEQ, seq, diff);

	return 1;
}
/*
 *	Input pkt hook. Calls the bound ip_vs_app's input function.
 *	Called by the ipvs packet handler; assumes cp != NULL has already
 *	been checked.  Returns false if the packet cannot be handled (oom).
 */
int ip_vs_app_pkt_in(struct ip_vs_conn *cp, struct sk_buff **pskb)
{
	struct ip_vs_app *app;

	/*
	 *	check if application module is bound to
	 *	this ip_vs_conn.
	 */
	if ((app = cp->app) == NULL)
		return 1;

	/* TCP is complicated */
	if (cp->protocol == IPPROTO_TCP)
		return app_tcp_pkt_in(cp, pskb, app);

	/*
	 *	Call private input hook function
	 */
	if (app->pkt_in == NULL)
		return 1;

	return app->pkt_in(app, cp, pskb, NULL);
}
#ifdef CONFIG_PROC_FS
/*
 *	/proc/net/ip_vs_app entry function
 */
static struct ip_vs_app *ip_vs_app_idx(loff_t pos)
{
	struct ip_vs_app *app, *inc;

	list_for_each_entry(app, &ip_vs_app_list, a_list) {
		list_for_each_entry(inc, &app->incs_list, a_list) {
			if (pos-- == 0)
				return inc;
		}
	}
	return NULL;
}
static void *ip_vs_app_seq_start(struct seq_file *seq, loff_t *pos)
{
	mutex_lock(&__ip_vs_app_mutex);

	return *pos ? ip_vs_app_idx(*pos - 1) : SEQ_START_TOKEN;
}

static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip_vs_app *inc, *app;
	struct list_head *e;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ip_vs_app_idx(0);

	inc = v;
	app = inc->app;

	if ((e = inc->a_list.next) != &app->incs_list)
		return list_entry(e, struct ip_vs_app, a_list);

	/* go on to next application */
	for (e = app->a_list.next; e != &ip_vs_app_list; e = e->next) {
		app = list_entry(e, struct ip_vs_app, a_list);
		list_for_each_entry(inc, &app->incs_list, a_list) {
			return inc;
		}
	}
	return NULL;
}

static void ip_vs_app_seq_stop(struct seq_file *seq, void *v)
{
	mutex_unlock(&__ip_vs_app_mutex);
}
static int ip_vs_app_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "prot port usecnt name\n");
	else {
		const struct ip_vs_app *inc = v;

		seq_printf(seq, "%-3s %-7u %-6d %-17s\n",
			   ip_vs_proto_name(inc->protocol),
			   ntohs(inc->port),
			   atomic_read(&inc->usecnt),
			   inc->name);
	}
	return 0;
}
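/*
 * The resulting /proc/net/ip_vs_app listing depends on which application
 * helpers and ports are registered; for a TCP helper registered on port
 * 21 under the name "ftp", the output would look roughly like
 * (illustrative alignment):
 *
 *	prot port usecnt name
 *	TCP  21      0      ftp
 */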
static const struct seq_operations ip_vs_app_seq_ops = {
	.start = ip_vs_app_seq_start,
	.next  = ip_vs_app_seq_next,
	.stop  = ip_vs_app_seq_stop,
	.show  = ip_vs_app_seq_show,
};

static int ip_vs_app_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &ip_vs_app_seq_ops);
}

static const struct file_operations ip_vs_app_fops = {
	.owner	 = THIS_MODULE,
	.open	 = ip_vs_app_open,
	.read	 = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release,
};
#endif
/*
 *	Replace a segment of data with a new segment
 */
int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri,
		      char *o_buf, int o_len, char *n_buf, int n_len)
{
	int diff;
	int o_offset;
	int o_left;

	EnterFunction(9);

	diff = n_len - o_len;
	o_offset = o_buf - (char *)skb->data;
	/* The length of left data after o_buf+o_len in the skb data */
	o_left = skb->len - (o_offset + o_len);

	if (diff <= 0) {
		/* new data is not longer: shift the tail left and trim */
		memmove(o_buf + n_len, o_buf + o_len, o_left);
		memcpy(o_buf, n_buf, n_len);
		skb_trim(skb, skb->len + diff);
	} else if (diff <= skb_tailroom(skb)) {
		/* new data is longer but still fits in the tailroom */
		skb_put(skb, diff);
		memmove(o_buf + n_len, o_buf + o_len, o_left);
		memcpy(o_buf, n_buf, n_len);
	} else {
		/* not enough room: expand the skb head first */
		if (pskb_expand_head(skb, skb_headroom(skb), diff, pri))
			return -ENOMEM;
		skb_put(skb, diff);
		memmove(skb->data + o_offset + n_len,
			skb->data + o_offset + o_len, o_left);
		skb_copy_to_linear_data_offset(skb, o_offset, n_buf, n_len);
	}

	/* must update the iph total length here */
	ip_hdr(skb)->tot_len = htons(skb->len);

	LeaveFunction(9);
	return 0;
}
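/*
 * Illustrative sketch (not part of this file): a pkt_out hook of an
 * application helper might use ip_vs_skb_replace() to rewrite part of
 * the TCP payload and report the size change back through *diff, so
 * that app_tcp_pkt_out() can update cp->out_seq.  Returning 0 tells
 * the caller the packet could not be handled (out of memory).  The
 * hook below is hypothetical and deliberately simple: it blindly
 * replaces the first old_len payload bytes, whereas real helpers parse
 * the payload and make it writable first.
 *
 *	static int my_app_pkt_out(struct ip_vs_app *app, struct ip_vs_conn *cp,
 *				  struct sk_buff **pskb, int *diff)
 *	{
 *		struct sk_buff *skb = *pskb;
 *		struct tcphdr *th;
 *		char *payload;
 *		static char repl[] = "10.0.0.1,8080";
 *		int old_len = 8;
 *
 *		th = (struct tcphdr *)(skb_network_header(skb) + ip_hdrlen(skb));
 *		payload = (char *)th + th->doff * 4;
 *
 *		*diff = (int)(sizeof(repl) - 1) - old_len;
 *		if (ip_vs_skb_replace(skb, GFP_ATOMIC, payload, old_len,
 *				      repl, sizeof(repl) - 1))
 *			return 0;
 *		return 1;
 *	}
 */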
int ip_vs_app_init(void)
{
	/* we will replace it with proc_net_ipvs_create() soon */
	proc_net_fops_create("ip_vs_app", 0, &ip_vs_app_fops);
	return 0;
}

void ip_vs_app_cleanup(void)
{
	proc_net_remove("ip_vs_app");
}