net/ipv4/ipvs/ip_vs_app.c

/*
 * ip_vs_app.c: Application module support for IPVS
 *
 * Version:     $Id: ip_vs_app.c,v 1.17 2003/03/22 06:31:21 wensong Exp $
 *
 * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 *
 * Most code here is taken from ip_masq_app.c in kernel 2.2. The difference
 * is that the ip_vs_app module handles the reverse direction (incoming
 * requests and outgoing responses).
 *
 *              IP_MASQ_APP application masquerading module
 *
 * Author:      Juan Jose Ciarlante, <jjciarla@raiz.uncu.edu.ar>
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/netfilter.h>
#include <net/net_namespace.h>
#include <net/protocol.h>
#include <net/tcp.h>
#include <asm/system.h>
#include <linux/stat.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>

#include <net/ip_vs.h>

EXPORT_SYMBOL(register_ip_vs_app);
EXPORT_SYMBOL(unregister_ip_vs_app);
EXPORT_SYMBOL(register_ip_vs_app_inc);

/* ipvs application list head */
static LIST_HEAD(ip_vs_app_list);
static DEFINE_MUTEX(__ip_vs_app_mutex);

/*
 *      Get an ip_vs_app object
 */
static inline int ip_vs_app_get(struct ip_vs_app *app)
{
        return try_module_get(app->module);
}

static inline void ip_vs_app_put(struct ip_vs_app *app)
{
        module_put(app->module);
}

/*
 *      Allocate/initialize app incarnation and register it in proto apps.
 */
static int
ip_vs_app_inc_new(struct ip_vs_app *app, __u16 proto, __u16 port)
{
        struct ip_vs_protocol *pp;
        struct ip_vs_app *inc;
        int ret;

        if (!(pp = ip_vs_proto_get(proto)))
                return -EPROTONOSUPPORT;

        if (!pp->unregister_app)
                return -EOPNOTSUPP;

        inc = kmemdup(app, sizeof(*inc), GFP_KERNEL);
        if (!inc)
                return -ENOMEM;
        INIT_LIST_HEAD(&inc->p_list);
        INIT_LIST_HEAD(&inc->incs_list);
        inc->app = app;
        inc->port = htons(port);
        atomic_set(&inc->usecnt, 0);

        if (app->timeouts) {
                inc->timeout_table =
                        ip_vs_create_timeout_table(app->timeouts,
                                                   app->timeouts_size);
                if (!inc->timeout_table) {
                        ret = -ENOMEM;
                        goto out;
                }
        }

        ret = pp->register_app(inc);
        if (ret)
                goto out;

        list_add(&inc->a_list, &app->incs_list);
        IP_VS_DBG(9, "%s application %s:%u registered\n",
                  pp->name, inc->name, inc->port);

        return 0;

  out:
        kfree(inc->timeout_table);
        kfree(inc);
        return ret;
}

/*
 *      Release app incarnation
 */
static void
ip_vs_app_inc_release(struct ip_vs_app *inc)
{
        struct ip_vs_protocol *pp;

        if (!(pp = ip_vs_proto_get(inc->protocol)))
                return;

        if (pp->unregister_app)
                pp->unregister_app(inc);

        IP_VS_DBG(9, "%s App %s:%u unregistered\n",
                  pp->name, inc->name, inc->port);

        list_del(&inc->a_list);

        kfree(inc->timeout_table);
        kfree(inc);
}

/*
 *      Get reference to app inc (only called from softirq)
 */
int ip_vs_app_inc_get(struct ip_vs_app *inc)
{
        int result;

        atomic_inc(&inc->usecnt);
        if (unlikely((result = ip_vs_app_get(inc->app)) != 1))
                atomic_dec(&inc->usecnt);
        return result;
}

/*
 *      Put the app inc (only called from timer or net softirq)
 */
void ip_vs_app_inc_put(struct ip_vs_app *inc)
{
        ip_vs_app_put(inc->app);
        atomic_dec(&inc->usecnt);
}

/*
 *      Register an application incarnation in protocol applications
 */
int
register_ip_vs_app_inc(struct ip_vs_app *app, __u16 proto, __u16 port)
{
        int result;

        mutex_lock(&__ip_vs_app_mutex);

        result = ip_vs_app_inc_new(app, proto, port);

        mutex_unlock(&__ip_vs_app_mutex);

        return result;
}

/*
 *      ip_vs_app registration routine
 */
int register_ip_vs_app(struct ip_vs_app *app)
{
        /* increase the module use count */
        ip_vs_use_count_inc();

        mutex_lock(&__ip_vs_app_mutex);

        list_add(&app->a_list, &ip_vs_app_list);

        mutex_unlock(&__ip_vs_app_mutex);

        return 0;
}
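
/*
 * Registration sketch (illustrative only, not part of the original file):
 * a helper module, loosely modelled on ip_vs_ftp, would typically declare
 * one struct ip_vs_app and register an incarnation per port it handles.
 * The "example_*" names and port 12345 are hypothetical.
 *
 *      static struct ip_vs_app ip_vs_example = {
 *              .name      = "example",
 *              .protocol  = IPPROTO_TCP,
 *              .module    = THIS_MODULE,
 *              .incs_list = LIST_HEAD_INIT(ip_vs_example.incs_list),
 *              .pkt_out   = example_pkt_out,
 *              .pkt_in    = example_pkt_in,
 *      };
 *
 *      ret = register_ip_vs_app(&ip_vs_example);
 *      if (!ret)
 *              ret = register_ip_vs_app_inc(&ip_vs_example, IPPROTO_TCP,
 *                                           12345);
 *
 * unregister_ip_vs_app() below releases every incarnation created this way.
 */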

/*
 *      ip_vs_app unregistration routine
 *      We are sure there are no app incarnations attached to services
 */
void unregister_ip_vs_app(struct ip_vs_app *app)
{
        struct ip_vs_app *inc, *nxt;

        mutex_lock(&__ip_vs_app_mutex);

        list_for_each_entry_safe(inc, nxt, &app->incs_list, a_list) {
                ip_vs_app_inc_release(inc);
        }

        list_del(&app->a_list);

        mutex_unlock(&__ip_vs_app_mutex);

        /* decrease the module use count */
        ip_vs_use_count_dec();
}

/*
 *      Bind ip_vs_conn to its ip_vs_app (called by cp constructor)
 */
int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp)
{
        return pp->app_conn_bind(cp);
}

/*
 *      Unbind cp from application incarnation (called by cp destructor)
 */
void ip_vs_unbind_app(struct ip_vs_conn *cp)
{
        struct ip_vs_app *inc = cp->app;

        if (!inc)
                return;

        if (inc->unbind_conn)
                inc->unbind_conn(inc, cp);
        if (inc->done_conn)
                inc->done_conn(inc, cp);
        ip_vs_app_inc_put(inc);
        cp->app = NULL;
}

/*
 *      Fixes th->seq based on ip_vs_seq info.
 */
static inline void vs_fix_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
{
        __u32 seq = ntohl(th->seq);

        /*
         *      Adjust seq with the delta offset for all packets after the
         *      most recently resized packet, and with the previous_delta
         *      offset for all packets before it.
         */
        if (vseq->delta || vseq->previous_delta) {
                if (after(seq, vseq->init_seq)) {
                        th->seq = htonl(seq + vseq->delta);
                        IP_VS_DBG(9, "vs_fix_seq(): added delta (%d) to seq\n",
                                  vseq->delta);
                } else {
                        th->seq = htonl(seq + vseq->previous_delta);
                        IP_VS_DBG(9, "vs_fix_seq(): added previous_delta "
                                  "(%d) to seq\n", vseq->previous_delta);
                }
        }
}

/*
 *      Fixes th->ack_seq based on ip_vs_seq info.
 */
static inline void
vs_fix_ack_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
{
        __u32 ack_seq = ntohl(th->ack_seq);

        /*
         *      Adjust ack_seq with the delta offset for packets after the
         *      most recently resized packet; for packets before it, use
         *      previous_delta.
         */
        if (vseq->delta || vseq->previous_delta) {
                /* ack_seq is the sequence number of the next octet the peer
                   expects to receive, so compare it with init_seq+delta */
                if (after(ack_seq, vseq->init_seq + vseq->delta)) {
                        th->ack_seq = htonl(ack_seq - vseq->delta);
                        IP_VS_DBG(9, "vs_fix_ack_seq(): subtracted delta "
                                  "(%d) from ack_seq\n", vseq->delta);
                } else {
                        th->ack_seq = htonl(ack_seq - vseq->previous_delta);
                        IP_VS_DBG(9, "vs_fix_ack_seq(): subtracted "
                                  "previous_delta (%d) from ack_seq\n",
                                  vseq->previous_delta);
                }
        }
}

/*
 *      Updates ip_vs_seq if the pkt has been resized.
 *      Assumes proto==IPPROTO_TCP and diff!=0 have already been checked.
 */
static inline void vs_seq_update(struct ip_vs_conn *cp, struct ip_vs_seq *vseq,
                                 unsigned flag, __u32 seq, int diff)
{
        /* the spinlock keeps the update of cp->flags atomic */
        spin_lock(&cp->lock);
        if (!(cp->flags & flag) || after(seq, vseq->init_seq)) {
                vseq->previous_delta = vseq->delta;
                vseq->delta += diff;
                vseq->init_seq = seq;
                cp->flags |= flag;
        }
        spin_unlock(&cp->lock);
}
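
/*
 * Worked example (illustrative, not from the original source): suppose an
 * application hook grows an outgoing payload by 4 bytes (diff = 4) in the
 * packet whose original sequence number is S.  vs_seq_update() records
 * init_seq = S, previous_delta = 0 and delta = 4 in cp->out_seq.  From then
 * on vs_fix_seq() adds 4 to the seq of every later outgoing packet (and 0
 * to retransmissions from before S), while vs_fix_ack_seq() subtracts 4
 * from the peer's ack_seq, so neither end sees the length change.  A second
 * resize of +2 bytes would shift previous_delta to 4 and delta to 6.
 */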

static inline int app_tcp_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb,
                                  struct ip_vs_app *app)
{
        int diff;
        const unsigned int tcp_offset = ip_hdrlen(skb);
        struct tcphdr *th;
        __u32 seq;

        if (!skb_make_writable(skb, tcp_offset + sizeof(*th)))
                return 0;

        th = (struct tcphdr *)(skb_network_header(skb) + tcp_offset);

        /*
         *      Remember the seq number in case this pkt gets resized
         */
        seq = ntohl(th->seq);

        /*
         *      Fix the sequence numbers if flagged to do so.
         */
        if (cp->flags & IP_VS_CONN_F_OUT_SEQ)
                vs_fix_seq(&cp->out_seq, th);
        if (cp->flags & IP_VS_CONN_F_IN_SEQ)
                vs_fix_ack_seq(&cp->in_seq, th);

        /*
         *      Call the private output hook function
         */
        if (app->pkt_out == NULL)
                return 1;

        if (!app->pkt_out(app, cp, skb, &diff))
                return 0;

        /*
         *      Update the ip_vs seq state if the packet length has changed.
         */
        if (diff != 0)
                vs_seq_update(cp, &cp->out_seq,
                              IP_VS_CONN_F_OUT_SEQ, seq, diff);

        return 1;
}

/*
 *      Output pkt hook. Called by the ipvs packet handler; calls the bound
 *      ip_vs_app specific function. Assumes cp has already been checked for
 *      NULL. Returns false if it cannot handle the packet (e.g. oom).
 */
int ip_vs_app_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb)
{
        struct ip_vs_app *app;

        /*
         *      Check if an application module is bound to this ip_vs_conn.
         */
        if ((app = cp->app) == NULL)
                return 1;

        /* TCP is complicated */
        if (cp->protocol == IPPROTO_TCP)
                return app_tcp_pkt_out(cp, skb, app);

        /*
         *      Call the private output hook function
         */
        if (app->pkt_out == NULL)
                return 1;

        return app->pkt_out(app, cp, skb, NULL);
}

static inline int app_tcp_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb,
                                 struct ip_vs_app *app)
{
        int diff;
        const unsigned int tcp_offset = ip_hdrlen(skb);
        struct tcphdr *th;
        __u32 seq;

        if (!skb_make_writable(skb, tcp_offset + sizeof(*th)))
                return 0;

        th = (struct tcphdr *)(skb_network_header(skb) + tcp_offset);

        /*
         *      Remember the seq number in case this pkt gets resized
         */
        seq = ntohl(th->seq);

        /*
         *      Fix the sequence numbers if flagged to do so.
         */
        if (cp->flags & IP_VS_CONN_F_IN_SEQ)
                vs_fix_seq(&cp->in_seq, th);
        if (cp->flags & IP_VS_CONN_F_OUT_SEQ)
                vs_fix_ack_seq(&cp->out_seq, th);

        /*
         *      Call the private input hook function
         */
        if (app->pkt_in == NULL)
                return 1;

        if (!app->pkt_in(app, cp, skb, &diff))
                return 0;

        /*
         *      Update the ip_vs seq state if the packet length has changed.
         */
        if (diff != 0)
                vs_seq_update(cp, &cp->in_seq,
                              IP_VS_CONN_F_IN_SEQ, seq, diff);

        return 1;
}

/*
 *      Input pkt hook. Called by the ipvs packet handler; calls the bound
 *      ip_vs_app specific function. Assumes cp has already been checked for
 *      NULL. Returns false if it cannot handle the packet (e.g. oom).
 */
int ip_vs_app_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb)
{
        struct ip_vs_app *app;

        /*
         *      Check if an application module is bound to this ip_vs_conn.
         */
        if ((app = cp->app) == NULL)
                return 1;

        /* TCP is complicated */
        if (cp->protocol == IPPROTO_TCP)
                return app_tcp_pkt_in(cp, skb, app);

        /*
         *      Call the private input hook function
         */
        if (app->pkt_in == NULL)
                return 1;

        return app->pkt_in(app, cp, skb, NULL);
}
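
/*
 * Minimal sketch of a private output hook matching the calls above; the
 * "example_" names are hypothetical, and a real helper (e.g. ip_vs_ftp)
 * would also parse the payload and set up related connections:
 *
 *      static int example_pkt_out(struct ip_vs_app *app,
 *                                 struct ip_vs_conn *cp,
 *                                 struct sk_buff *skb, int *diff)
 *      {
 *              *diff = 0;
 *              if (!skb_make_writable(skb, skb->len))
 *                      return 0;               - 0 means cannot handle (oom)
 *
 *              - scan the payload, rewrite it with ip_vs_skb_replace(),
 *                then set *diff = n_len - o_len
 *
 *              return 1;                       - 1 means handled
 *      }
 *
 * For TCP, app_tcp_pkt_out()/app_tcp_pkt_in() feed *diff into
 * vs_seq_update() so that later sequence numbers are adjusted; for non-TCP
 * protocols the diff pointer is passed as NULL.
 */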

#ifdef CONFIG_PROC_FS
/*
 *      /proc/net/ip_vs_app entry function
 */

static struct ip_vs_app *ip_vs_app_idx(loff_t pos)
{
        struct ip_vs_app *app, *inc;

        list_for_each_entry(app, &ip_vs_app_list, a_list) {
                list_for_each_entry(inc, &app->incs_list, a_list) {
                        if (pos-- == 0)
                                return inc;
                }
        }
        return NULL;
}

static void *ip_vs_app_seq_start(struct seq_file *seq, loff_t *pos)
{
        mutex_lock(&__ip_vs_app_mutex);

        return *pos ? ip_vs_app_idx(*pos - 1) : SEQ_START_TOKEN;
}

static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
        struct ip_vs_app *inc, *app;
        struct list_head *e;

        ++*pos;
        if (v == SEQ_START_TOKEN)
                return ip_vs_app_idx(0);

        inc = v;
        app = inc->app;

        if ((e = inc->a_list.next) != &app->incs_list)
                return list_entry(e, struct ip_vs_app, a_list);

        /* go on to the next application */
        for (e = app->a_list.next; e != &ip_vs_app_list; e = e->next) {
                app = list_entry(e, struct ip_vs_app, a_list);
                list_for_each_entry(inc, &app->incs_list, a_list) {
                        return inc;
                }
        }
        return NULL;
}

static void ip_vs_app_seq_stop(struct seq_file *seq, void *v)
{
        mutex_unlock(&__ip_vs_app_mutex);
}

static int ip_vs_app_seq_show(struct seq_file *seq, void *v)
{
        if (v == SEQ_START_TOKEN)
                seq_puts(seq, "prot port    usecnt name\n");
        else {
                const struct ip_vs_app *inc = v;

                seq_printf(seq, "%-3s  %-7u %-6d %-17s\n",
                           ip_vs_proto_name(inc->protocol),
                           ntohs(inc->port),
                           atomic_read(&inc->usecnt),
                           inc->name);
        }
        return 0;
}

static const struct seq_operations ip_vs_app_seq_ops = {
        .start = ip_vs_app_seq_start,
        .next  = ip_vs_app_seq_next,
        .stop  = ip_vs_app_seq_stop,
        .show  = ip_vs_app_seq_show,
};

static int ip_vs_app_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &ip_vs_app_seq_ops);
}

static const struct file_operations ip_vs_app_fops = {
        .owner   = THIS_MODULE,
        .open    = ip_vs_app_open,
        .read    = seq_read,
        .llseek  = seq_lseek,
        .release = seq_release,
};
#endif
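
/*
 * Illustrative /proc/net/ip_vs_app output with a single FTP incarnation
 * registered (spacing follows the format strings above; the usecnt value
 * is just an example):
 *
 *      prot port    usecnt name
 *      TCP  21      0      ftp
 */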

/*
 *      Replace a segment of data with a new segment
 */
int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri,
                      char *o_buf, int o_len, char *n_buf, int n_len)
{
        int diff;
        int o_offset;
        int o_left;

        EnterFunction(9);

        diff = n_len - o_len;
        o_offset = o_buf - (char *)skb->data;
        /* length of the data remaining after o_buf+o_len in the skb */
        o_left = skb->len - (o_offset + o_len);

        if (diff <= 0) {
                memmove(o_buf + n_len, o_buf + o_len, o_left);
                memcpy(o_buf, n_buf, n_len);
                skb_trim(skb, skb->len + diff);
        } else if (diff <= skb_tailroom(skb)) {
                skb_put(skb, diff);
                memmove(o_buf + n_len, o_buf + o_len, o_left);
                memcpy(o_buf, n_buf, n_len);
        } else {
                if (pskb_expand_head(skb, skb_headroom(skb), diff, pri))
                        return -ENOMEM;
                skb_put(skb, diff);
                memmove(skb->data + o_offset + n_len,
                        skb->data + o_offset + o_len, o_left);
                skb_copy_to_linear_data_offset(skb, o_offset, n_buf, n_len);
        }

        /* the IP header total length must be updated here */
        ip_hdr(skb)->tot_len = htons(skb->len);

        LeaveFunction(9);
        return 0;
}
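
/*
 * Usage sketch (illustrative): replacing a 4-byte token in the payload with
 * a 6-byte one, where "p" is assumed to point into the linear skb data:
 *
 *      ret = ip_vs_skb_replace(skb, GFP_ATOMIC, p, 4, "newtok", 6);
 *
 * Here diff = 2; depending on the available tailroom either the second or
 * the third branch above runs.  The caller is expected to report the same
 * n_len - o_len difference through its *diff argument so the TCP sequence
 * bookkeeping earlier in this file stays consistent.
 */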

int ip_vs_app_init(void)
{
        /* we will replace it with proc_net_ipvs_create() soon */
        proc_net_fops_create(&init_net, "ip_vs_app", 0, &ip_vs_app_fops);
        return 0;
}

void ip_vs_app_cleanup(void)
{
        proc_net_remove(&init_net, "ip_vs_app");
}