xen: handle highmem pages correctly when shrinking a domain
[linux-2.6/mini2440.git] / net / netfilter / ipvs / ip_vs_app.c
blob201b8ea3020dbe4e55edb7c089c0b4bc7c5f0a79
1 /*
2 * ip_vs_app.c: Application module support for IPVS
4 * Authors: Wensong Zhang <wensong@linuxvirtualserver.org>
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
11 * Most code here is taken from ip_masq_app.c in kernel 2.2. The difference
12 * is that ip_vs_app module handles the reverse direction (incoming requests
13 * and outgoing responses).
15 * IP_MASQ_APP application masquerading module
17 * Author: Juan Jose Ciarlante, <jjciarla@raiz.uncu.edu.ar>
21 #include <linux/module.h>
22 #include <linux/kernel.h>
23 #include <linux/skbuff.h>
24 #include <linux/in.h>
25 #include <linux/ip.h>
26 #include <linux/netfilter.h>
27 #include <net/net_namespace.h>
28 #include <net/protocol.h>
29 #include <net/tcp.h>
30 #include <asm/system.h>
31 #include <linux/stat.h>
32 #include <linux/proc_fs.h>
33 #include <linux/seq_file.h>
34 #include <linux/mutex.h>
36 #include <net/ip_vs.h>
/* Symbols used by application-helper modules (e.g. ip_vs_ftp) */
EXPORT_SYMBOL(register_ip_vs_app);
EXPORT_SYMBOL(unregister_ip_vs_app);
EXPORT_SYMBOL(register_ip_vs_app_inc);
/* ipvs application list head */
static LIST_HEAD(ip_vs_app_list);

/* serializes modifications of ip_vs_app_list and each app's incs_list */
static DEFINE_MUTEX(__ip_vs_app_mutex);
48 * Get an ip_vs_app object
50 static inline int ip_vs_app_get(struct ip_vs_app *app)
52 return try_module_get(app->module);
56 static inline void ip_vs_app_put(struct ip_vs_app *app)
58 module_put(app->module);
63 * Allocate/initialize app incarnation and register it in proto apps.
65 static int
66 ip_vs_app_inc_new(struct ip_vs_app *app, __u16 proto, __u16 port)
68 struct ip_vs_protocol *pp;
69 struct ip_vs_app *inc;
70 int ret;
72 if (!(pp = ip_vs_proto_get(proto)))
73 return -EPROTONOSUPPORT;
75 if (!pp->unregister_app)
76 return -EOPNOTSUPP;
78 inc = kmemdup(app, sizeof(*inc), GFP_KERNEL);
79 if (!inc)
80 return -ENOMEM;
81 INIT_LIST_HEAD(&inc->p_list);
82 INIT_LIST_HEAD(&inc->incs_list);
83 inc->app = app;
84 inc->port = htons(port);
85 atomic_set(&inc->usecnt, 0);
87 if (app->timeouts) {
88 inc->timeout_table =
89 ip_vs_create_timeout_table(app->timeouts,
90 app->timeouts_size);
91 if (!inc->timeout_table) {
92 ret = -ENOMEM;
93 goto out;
97 ret = pp->register_app(inc);
98 if (ret)
99 goto out;
101 list_add(&inc->a_list, &app->incs_list);
102 IP_VS_DBG(9, "%s application %s:%u registered\n",
103 pp->name, inc->name, inc->port);
105 return 0;
107 out:
108 kfree(inc->timeout_table);
109 kfree(inc);
110 return ret;
115 * Release app incarnation
117 static void
118 ip_vs_app_inc_release(struct ip_vs_app *inc)
120 struct ip_vs_protocol *pp;
122 if (!(pp = ip_vs_proto_get(inc->protocol)))
123 return;
125 if (pp->unregister_app)
126 pp->unregister_app(inc);
128 IP_VS_DBG(9, "%s App %s:%u unregistered\n",
129 pp->name, inc->name, inc->port);
131 list_del(&inc->a_list);
133 kfree(inc->timeout_table);
134 kfree(inc);
139 * Get reference to app inc (only called from softirq)
142 int ip_vs_app_inc_get(struct ip_vs_app *inc)
144 int result;
146 atomic_inc(&inc->usecnt);
147 if (unlikely((result = ip_vs_app_get(inc->app)) != 1))
148 atomic_dec(&inc->usecnt);
149 return result;
154 * Put the app inc (only called from timer or net softirq)
156 void ip_vs_app_inc_put(struct ip_vs_app *inc)
158 ip_vs_app_put(inc->app);
159 atomic_dec(&inc->usecnt);
164 * Register an application incarnation in protocol applications
167 register_ip_vs_app_inc(struct ip_vs_app *app, __u16 proto, __u16 port)
169 int result;
171 mutex_lock(&__ip_vs_app_mutex);
173 result = ip_vs_app_inc_new(app, proto, port);
175 mutex_unlock(&__ip_vs_app_mutex);
177 return result;
182 * ip_vs_app registration routine
184 int register_ip_vs_app(struct ip_vs_app *app)
186 /* increase the module use count */
187 ip_vs_use_count_inc();
189 mutex_lock(&__ip_vs_app_mutex);
191 list_add(&app->a_list, &ip_vs_app_list);
193 mutex_unlock(&__ip_vs_app_mutex);
195 return 0;
200 * ip_vs_app unregistration routine
201 * We are sure there are no app incarnations attached to services
203 void unregister_ip_vs_app(struct ip_vs_app *app)
205 struct ip_vs_app *inc, *nxt;
207 mutex_lock(&__ip_vs_app_mutex);
209 list_for_each_entry_safe(inc, nxt, &app->incs_list, a_list) {
210 ip_vs_app_inc_release(inc);
213 list_del(&app->a_list);
215 mutex_unlock(&__ip_vs_app_mutex);
217 /* decrease the module use count */
218 ip_vs_use_count_dec();
223 * Bind ip_vs_conn to its ip_vs_app (called by cp constructor)
225 int ip_vs_bind_app(struct ip_vs_conn *cp, struct ip_vs_protocol *pp)
227 return pp->app_conn_bind(cp);
232 * Unbind cp from application incarnation (called by cp destructor)
234 void ip_vs_unbind_app(struct ip_vs_conn *cp)
236 struct ip_vs_app *inc = cp->app;
238 if (!inc)
239 return;
241 if (inc->unbind_conn)
242 inc->unbind_conn(inc, cp);
243 if (inc->done_conn)
244 inc->done_conn(inc, cp);
245 ip_vs_app_inc_put(inc);
246 cp->app = NULL;
251 * Fixes th->seq based on ip_vs_seq info.
253 static inline void vs_fix_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
255 __u32 seq = ntohl(th->seq);
258 * Adjust seq with delta-offset for all packets after
259 * the most recent resized pkt seq and with previous_delta offset
260 * for all packets before most recent resized pkt seq.
262 if (vseq->delta || vseq->previous_delta) {
263 if(after(seq, vseq->init_seq)) {
264 th->seq = htonl(seq + vseq->delta);
265 IP_VS_DBG(9, "vs_fix_seq(): added delta (%d) to seq\n",
266 vseq->delta);
267 } else {
268 th->seq = htonl(seq + vseq->previous_delta);
269 IP_VS_DBG(9, "vs_fix_seq(): added previous_delta "
270 "(%d) to seq\n", vseq->previous_delta);
277 * Fixes th->ack_seq based on ip_vs_seq info.
279 static inline void
280 vs_fix_ack_seq(const struct ip_vs_seq *vseq, struct tcphdr *th)
282 __u32 ack_seq = ntohl(th->ack_seq);
285 * Adjust ack_seq with delta-offset for
286 * the packets AFTER most recent resized pkt has caused a shift
287 * for packets before most recent resized pkt, use previous_delta
289 if (vseq->delta || vseq->previous_delta) {
290 /* since ack_seq is the number of octet that is expected
291 to receive next, so compare it with init_seq+delta */
292 if(after(ack_seq, vseq->init_seq+vseq->delta)) {
293 th->ack_seq = htonl(ack_seq - vseq->delta);
294 IP_VS_DBG(9, "vs_fix_ack_seq(): subtracted delta "
295 "(%d) from ack_seq\n", vseq->delta);
297 } else {
298 th->ack_seq = htonl(ack_seq - vseq->previous_delta);
299 IP_VS_DBG(9, "vs_fix_ack_seq(): subtracted "
300 "previous_delta (%d) from ack_seq\n",
301 vseq->previous_delta);
308 * Updates ip_vs_seq if pkt has been resized
309 * Assumes already checked proto==IPPROTO_TCP and diff!=0.
311 static inline void vs_seq_update(struct ip_vs_conn *cp, struct ip_vs_seq *vseq,
312 unsigned flag, __u32 seq, int diff)
314 /* spinlock is to keep updating cp->flags atomic */
315 spin_lock(&cp->lock);
316 if (!(cp->flags & flag) || after(seq, vseq->init_seq)) {
317 vseq->previous_delta = vseq->delta;
318 vseq->delta += diff;
319 vseq->init_seq = seq;
320 cp->flags |= flag;
322 spin_unlock(&cp->lock);
325 static inline int app_tcp_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb,
326 struct ip_vs_app *app)
328 int diff;
329 const unsigned int tcp_offset = ip_hdrlen(skb);
330 struct tcphdr *th;
331 __u32 seq;
333 if (!skb_make_writable(skb, tcp_offset + sizeof(*th)))
334 return 0;
336 th = (struct tcphdr *)(skb_network_header(skb) + tcp_offset);
339 * Remember seq number in case this pkt gets resized
341 seq = ntohl(th->seq);
344 * Fix seq stuff if flagged as so.
346 if (cp->flags & IP_VS_CONN_F_OUT_SEQ)
347 vs_fix_seq(&cp->out_seq, th);
348 if (cp->flags & IP_VS_CONN_F_IN_SEQ)
349 vs_fix_ack_seq(&cp->in_seq, th);
352 * Call private output hook function
354 if (app->pkt_out == NULL)
355 return 1;
357 if (!app->pkt_out(app, cp, skb, &diff))
358 return 0;
361 * Update ip_vs seq stuff if len has changed.
363 if (diff != 0)
364 vs_seq_update(cp, &cp->out_seq,
365 IP_VS_CONN_F_OUT_SEQ, seq, diff);
367 return 1;
371 * Output pkt hook. Will call bound ip_vs_app specific function
372 * called by ipvs packet handler, assumes previously checked cp!=NULL
373 * returns false if it can't handle packet (oom)
375 int ip_vs_app_pkt_out(struct ip_vs_conn *cp, struct sk_buff *skb)
377 struct ip_vs_app *app;
380 * check if application module is bound to
381 * this ip_vs_conn.
383 if ((app = cp->app) == NULL)
384 return 1;
386 /* TCP is complicated */
387 if (cp->protocol == IPPROTO_TCP)
388 return app_tcp_pkt_out(cp, skb, app);
391 * Call private output hook function
393 if (app->pkt_out == NULL)
394 return 1;
396 return app->pkt_out(app, cp, skb, NULL);
400 static inline int app_tcp_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb,
401 struct ip_vs_app *app)
403 int diff;
404 const unsigned int tcp_offset = ip_hdrlen(skb);
405 struct tcphdr *th;
406 __u32 seq;
408 if (!skb_make_writable(skb, tcp_offset + sizeof(*th)))
409 return 0;
411 th = (struct tcphdr *)(skb_network_header(skb) + tcp_offset);
414 * Remember seq number in case this pkt gets resized
416 seq = ntohl(th->seq);
419 * Fix seq stuff if flagged as so.
421 if (cp->flags & IP_VS_CONN_F_IN_SEQ)
422 vs_fix_seq(&cp->in_seq, th);
423 if (cp->flags & IP_VS_CONN_F_OUT_SEQ)
424 vs_fix_ack_seq(&cp->out_seq, th);
427 * Call private input hook function
429 if (app->pkt_in == NULL)
430 return 1;
432 if (!app->pkt_in(app, cp, skb, &diff))
433 return 0;
436 * Update ip_vs seq stuff if len has changed.
438 if (diff != 0)
439 vs_seq_update(cp, &cp->in_seq,
440 IP_VS_CONN_F_IN_SEQ, seq, diff);
442 return 1;
446 * Input pkt hook. Will call bound ip_vs_app specific function
447 * called by ipvs packet handler, assumes previously checked cp!=NULL.
448 * returns false if can't handle packet (oom).
450 int ip_vs_app_pkt_in(struct ip_vs_conn *cp, struct sk_buff *skb)
452 struct ip_vs_app *app;
455 * check if application module is bound to
456 * this ip_vs_conn.
458 if ((app = cp->app) == NULL)
459 return 1;
461 /* TCP is complicated */
462 if (cp->protocol == IPPROTO_TCP)
463 return app_tcp_pkt_in(cp, skb, app);
466 * Call private input hook function
468 if (app->pkt_in == NULL)
469 return 1;
471 return app->pkt_in(app, cp, skb, NULL);
475 #ifdef CONFIG_PROC_FS
477 * /proc/net/ip_vs_app entry function
480 static struct ip_vs_app *ip_vs_app_idx(loff_t pos)
482 struct ip_vs_app *app, *inc;
484 list_for_each_entry(app, &ip_vs_app_list, a_list) {
485 list_for_each_entry(inc, &app->incs_list, a_list) {
486 if (pos-- == 0)
487 return inc;
490 return NULL;
494 static void *ip_vs_app_seq_start(struct seq_file *seq, loff_t *pos)
496 mutex_lock(&__ip_vs_app_mutex);
498 return *pos ? ip_vs_app_idx(*pos - 1) : SEQ_START_TOKEN;
501 static void *ip_vs_app_seq_next(struct seq_file *seq, void *v, loff_t *pos)
503 struct ip_vs_app *inc, *app;
504 struct list_head *e;
506 ++*pos;
507 if (v == SEQ_START_TOKEN)
508 return ip_vs_app_idx(0);
510 inc = v;
511 app = inc->app;
513 if ((e = inc->a_list.next) != &app->incs_list)
514 return list_entry(e, struct ip_vs_app, a_list);
516 /* go on to next application */
517 for (e = app->a_list.next; e != &ip_vs_app_list; e = e->next) {
518 app = list_entry(e, struct ip_vs_app, a_list);
519 list_for_each_entry(inc, &app->incs_list, a_list) {
520 return inc;
523 return NULL;
526 static void ip_vs_app_seq_stop(struct seq_file *seq, void *v)
528 mutex_unlock(&__ip_vs_app_mutex);
531 static int ip_vs_app_seq_show(struct seq_file *seq, void *v)
533 if (v == SEQ_START_TOKEN)
534 seq_puts(seq, "prot port usecnt name\n");
535 else {
536 const struct ip_vs_app *inc = v;
538 seq_printf(seq, "%-3s %-7u %-6d %-17s\n",
539 ip_vs_proto_name(inc->protocol),
540 ntohs(inc->port),
541 atomic_read(&inc->usecnt),
542 inc->name);
544 return 0;
547 static const struct seq_operations ip_vs_app_seq_ops = {
548 .start = ip_vs_app_seq_start,
549 .next = ip_vs_app_seq_next,
550 .stop = ip_vs_app_seq_stop,
551 .show = ip_vs_app_seq_show,
554 static int ip_vs_app_open(struct inode *inode, struct file *file)
556 return seq_open(file, &ip_vs_app_seq_ops);
559 static const struct file_operations ip_vs_app_fops = {
560 .owner = THIS_MODULE,
561 .open = ip_vs_app_open,
562 .read = seq_read,
563 .llseek = seq_lseek,
564 .release = seq_release,
566 #endif
570 * Replace a segment of data with a new segment
572 int ip_vs_skb_replace(struct sk_buff *skb, gfp_t pri,
573 char *o_buf, int o_len, char *n_buf, int n_len)
575 int diff;
576 int o_offset;
577 int o_left;
579 EnterFunction(9);
581 diff = n_len - o_len;
582 o_offset = o_buf - (char *)skb->data;
583 /* The length of left data after o_buf+o_len in the skb data */
584 o_left = skb->len - (o_offset + o_len);
586 if (diff <= 0) {
587 memmove(o_buf + n_len, o_buf + o_len, o_left);
588 memcpy(o_buf, n_buf, n_len);
589 skb_trim(skb, skb->len + diff);
590 } else if (diff <= skb_tailroom(skb)) {
591 skb_put(skb, diff);
592 memmove(o_buf + n_len, o_buf + o_len, o_left);
593 memcpy(o_buf, n_buf, n_len);
594 } else {
595 if (pskb_expand_head(skb, skb_headroom(skb), diff, pri))
596 return -ENOMEM;
597 skb_put(skb, diff);
598 memmove(skb->data + o_offset + n_len,
599 skb->data + o_offset + o_len, o_left);
600 skb_copy_to_linear_data_offset(skb, o_offset, n_buf, n_len);
603 /* must update the iph total length here */
604 ip_hdr(skb)->tot_len = htons(skb->len);
606 LeaveFunction(9);
607 return 0;
611 int __init ip_vs_app_init(void)
613 /* we will replace it with proc_net_ipvs_create() soon */
614 proc_net_fops_create(&init_net, "ip_vs_app", 0, &ip_vs_app_fops);
615 return 0;
619 void ip_vs_app_cleanup(void)
621 proc_net_remove(&init_net, "ip_vs_app");