Linux 2.6.33.1
[wandboard.git] / net / x25 / x25_forward.c
blob056a55f3a8719f9bc6e7ff2336b16cc5e6994292
1 /*
2 * This module:
3 * This module is free software; you can redistribute it and/or
4 * modify it under the terms of the GNU General Public License
5 * as published by the Free Software Foundation; either version
6 * 2 of the License, or (at your option) any later version.
8 * History
9 * 03-01-2007 Added forwarding for x.25 Andrew Hendry
11 #include <linux/if_arp.h>
12 #include <linux/init.h>
13 #include <net/x25.h>
/* List of active call forwardings (struct x25_forward entries), one per
 * forwarded LCI.  All traversal and modification of the list is guarded
 * by x25_forward_list_lock (readers take it _bh read-side, writers _bh
 * write-side, since the list is touched from softirq context).
 */
15 LIST_HEAD(x25_forward_list);
16 DEFINE_RWLOCK(x25_forward_list_lock);
18 int x25_forward_call(struct x25_address *dest_addr, struct x25_neigh *from,
19 struct sk_buff *skb, int lci)
21 struct x25_route *rt;
22 struct x25_neigh *neigh_new = NULL;
23 struct list_head *entry;
24 struct x25_forward *x25_frwd, *new_frwd;
25 struct sk_buff *skbn;
26 short same_lci = 0;
27 int rc = 0;
29 if ((rt = x25_get_route(dest_addr)) == NULL)
30 goto out_no_route;
32 if ((neigh_new = x25_get_neigh(rt->dev)) == NULL) {
33 /* This shouldnt happen, if it occurs somehow
34 * do something sensible
36 goto out_put_route;
39 /* Avoid a loop. This is the normal exit path for a
40 * system with only one x.25 iface and default route
42 if (rt->dev == from->dev) {
43 goto out_put_nb;
46 /* Remote end sending a call request on an already
47 * established LCI? It shouldnt happen, just in case..
49 read_lock_bh(&x25_forward_list_lock);
50 list_for_each(entry, &x25_forward_list) {
51 x25_frwd = list_entry(entry, struct x25_forward, node);
52 if (x25_frwd->lci == lci) {
53 printk(KERN_WARNING "X.25: call request for lci which is already registered!, transmitting but not registering new pair\n");
54 same_lci = 1;
57 read_unlock_bh(&x25_forward_list_lock);
59 /* Save the forwarding details for future traffic */
60 if (!same_lci){
61 if ((new_frwd = kmalloc(sizeof(struct x25_forward),
62 GFP_ATOMIC)) == NULL){
63 rc = -ENOMEM;
64 goto out_put_nb;
66 new_frwd->lci = lci;
67 new_frwd->dev1 = rt->dev;
68 new_frwd->dev2 = from->dev;
69 write_lock_bh(&x25_forward_list_lock);
70 list_add(&new_frwd->node, &x25_forward_list);
71 write_unlock_bh(&x25_forward_list_lock);
74 /* Forward the call request */
75 if ( (skbn = skb_clone(skb, GFP_ATOMIC)) == NULL){
76 goto out_put_nb;
78 x25_transmit_link(skbn, neigh_new);
79 rc = 1;
82 out_put_nb:
83 x25_neigh_put(neigh_new);
85 out_put_route:
86 x25_route_put(rt);
88 out_no_route:
89 return rc;
93 int x25_forward_data(int lci, struct x25_neigh *from, struct sk_buff *skb) {
95 struct x25_forward *frwd;
96 struct list_head *entry;
97 struct net_device *peer = NULL;
98 struct x25_neigh *nb;
99 struct sk_buff *skbn;
100 int rc = 0;
102 read_lock_bh(&x25_forward_list_lock);
103 list_for_each(entry, &x25_forward_list) {
104 frwd = list_entry(entry, struct x25_forward, node);
105 if (frwd->lci == lci) {
106 /* The call is established, either side can send */
107 if (from->dev == frwd->dev1) {
108 peer = frwd->dev2;
109 } else {
110 peer = frwd->dev1;
112 break;
115 read_unlock_bh(&x25_forward_list_lock);
117 if ( (nb = x25_get_neigh(peer)) == NULL)
118 goto out;
120 if ( (skbn = pskb_copy(skb, GFP_ATOMIC)) == NULL){
121 goto output;
124 x25_transmit_link(skbn, nb);
126 rc = 1;
127 output:
128 x25_neigh_put(nb);
129 out:
130 return rc;
133 void x25_clear_forward_by_lci(unsigned int lci)
135 struct x25_forward *fwd;
136 struct list_head *entry, *tmp;
138 write_lock_bh(&x25_forward_list_lock);
140 list_for_each_safe(entry, tmp, &x25_forward_list) {
141 fwd = list_entry(entry, struct x25_forward, node);
142 if (fwd->lci == lci) {
143 list_del(&fwd->node);
144 kfree(fwd);
147 write_unlock_bh(&x25_forward_list_lock);
151 void x25_clear_forward_by_dev(struct net_device *dev)
153 struct x25_forward *fwd;
154 struct list_head *entry, *tmp;
156 write_lock_bh(&x25_forward_list_lock);
158 list_for_each_safe(entry, tmp, &x25_forward_list) {
159 fwd = list_entry(entry, struct x25_forward, node);
160 if ((fwd->dev1 == dev) || (fwd->dev2 == dev)){
161 list_del(&fwd->node);
162 kfree(fwd);
165 write_unlock_bh(&x25_forward_list_lock);