/* include/net/pkt_act.h */
#ifndef __NET_PKT_ACT_H
#define __NET_PKT_ACT_H

#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/proc_fs.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

#define tca_st(val) (struct tcf_##val *)
#define PRIV(a,name) ( tca_st(name) (a)->priv)

#if 0 /* control */
#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
#else
#define DPRINTK(format,args...)
#endif

#if 0 /* data */
#define D2PRINTK(format,args...) printk(KERN_DEBUG format,##args)
#else
#define D2PRINTK(format,args...)
#endif
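
/* Map an action index onto a bucket of the tcf_ht hash table.
 * MY_TAB_MASK (like tcf_ht, tcf_t_lock and MY_TAB_SIZE below) is expected
 * to be defined by the action module that includes this header.
 */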
static __inline__ unsigned
tcf_hash(u32 index)
{
	return index & MY_TAB_MASK;
}
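
/* Unlink an action from its hash bucket under the write lock, kill its
 * rate estimator and free it.
 */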
/* probably move this from being inline
 * and put into act_generic
 */
static inline void
tcf_hash_destroy(struct tcf_st *p)
{
	unsigned h = tcf_hash(p->index);
	struct tcf_st **p1p;

	for (p1p = &tcf_ht[h]; *p1p; p1p = &(*p1p)->next) {
		if (*p1p == p) {
			write_lock_bh(&tcf_t_lock);
			*p1p = p->next;
			write_unlock_bh(&tcf_t_lock);
#ifdef CONFIG_NET_ESTIMATOR
			gen_kill_estimator(&p->bstats, &p->rate_est);
#endif
			kfree(p);
			return;
		}
	}
	BUG_TRAP(0);
}
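
/* Drop one reference (and one binding when 'bind' is set) and destroy the
 * action once both counts have fallen to zero.  Returns 1 when the action
 * was destroyed, 0 otherwise.
 */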
static inline int
tcf_hash_release(struct tcf_st *p, int bind)
{
	int ret = 0;

	if (p) {
		if (bind)
			p->bindcnt--;
		p->refcnt--;
		if (p->bindcnt <= 0 && p->refcnt <= 0) {
			tcf_hash_destroy(p);
			ret = 1;
		}
	}
	return ret;
}
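
/* Dump up to TCA_ACT_MAX_PRIO actions from the table as nested rtattrs,
 * resuming from the offset saved in cb->args[0] by a previous call.
 */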
static __inline__ int
tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
		struct tc_action *a)
{
	struct tcf_st *p;
	int err = 0, index = -1, i = 0, s_i = 0, n_i = 0;
	struct rtattr *r;

	read_lock(&tcf_t_lock);

	s_i = cb->args[0];

	for (i = 0; i < MY_TAB_SIZE; i++) {
		p = tcf_ht[tcf_hash(i)];

		for (; p; p = p->next) {
			index++;
			if (index < s_i)
				continue;
			a->priv = p;
			a->order = n_i;
			r = (struct rtattr *) skb->tail;
			RTA_PUT(skb, a->order, 0, NULL);
			err = tcf_action_dump_1(skb, a, 0, 0);
			if (err < 0) {
				index--;
				skb_trim(skb, (u8 *)r - skb->data);
				goto done;
			}
			r->rta_len = skb->tail - (u8 *)r;
			n_i++;
			if (n_i >= TCA_ACT_MAX_PRIO)
				goto done;
		}
	}
done:
	read_unlock(&tcf_t_lock);
	if (n_i)
		cb->args[0] += n_i;
	return n_i;

rtattr_failure:
	skb_trim(skb, (u8 *)r - skb->data);
	goto done;
}
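
/* Flush the whole table: release every action and report how many entries
 * were walked in a TCA_FCNT attribute appended to the reply.
 */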
static __inline__ int
tcf_del_walker(struct sk_buff *skb, struct tc_action *a)
{
	struct tcf_st *p, *s_p;
	struct rtattr *r;
	int i = 0, n_i = 0;

	r = (struct rtattr *) skb->tail;
	RTA_PUT(skb, a->order, 0, NULL);
	RTA_PUT(skb, TCA_KIND, IFNAMSIZ, a->ops->kind);
	for (i = 0; i < MY_TAB_SIZE; i++) {
		p = tcf_ht[tcf_hash(i)];

		while (p != NULL) {
			s_p = p->next;
			if (ACT_P_DELETED == tcf_hash_release(p, 0))
				module_put(a->ops->owner);
			n_i++;
			p = s_p;
		}
	}
	RTA_PUT(skb, TCA_FCNT, 4, &n_i);
	r->rta_len = skb->tail - (u8 *)r;

	return n_i;
rtattr_failure:
	skb_trim(skb, (u8 *)r - skb->data);
	return -EINVAL;
}
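
/* Generic 'walk' operation shared by the actions: dispatch to the delete
 * or dump walker according to the netlink message type.
 */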
static __inline__ int
tcf_generic_walker(struct sk_buff *skb, struct netlink_callback *cb, int type,
		struct tc_action *a)
{
	if (type == RTM_DELACTION) {
		return tcf_del_walker(skb, a);
	} else if (type == RTM_GETACTION) {
		return tcf_dump_walker(skb, cb, a);
	} else {
		printk("tcf_generic_walker: unknown action %d\n", type);
		return -EINVAL;
	}
}
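
/* Find an action by its index under the read lock; returns NULL if the
 * index is not in the table.
 */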
static __inline__ struct tcf_st *
tcf_hash_lookup(u32 index)
{
	struct tcf_st *p;

	read_lock(&tcf_t_lock);
	for (p = tcf_ht[tcf_hash(index)]; p; p = p->next) {
		if (p->index == index)
			break;
	}
	read_unlock(&tcf_t_lock);
	return p;
}
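
/* Hand out the next unused, non-zero automatic index (idx_gen is the
 * index generator maintained by the including action module).
 */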
static __inline__ u32
tcf_hash_new_index(void)
{
	do {
		if (++idx_gen == 0)
			idx_gen = 1;
	} while (tcf_hash_lookup(idx_gen));

	return idx_gen;
}
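
/* Look up 'index' and, if found, attach the action to 'a'.  Returns 1 on
 * success, 0 otherwise.
 */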
static inline int
tcf_hash_search(struct tc_action *a, u32 index)
{
	struct tcf_st *p = tcf_hash_lookup(index);

	if (p != NULL) {
		a->priv = p;
		return 1;
	}
	return 0;
}

#ifdef CONFIG_NET_ACT_INIT
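
/* If 'index' names an existing action, attach it to 'a' and, when binding,
 * take both a bind and an ordinary reference.  Returns the action or NULL.
 */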
static inline struct tcf_st *
tcf_hash_check(u32 index, struct tc_action *a, int ovr, int bind)
{
	struct tcf_st *p = NULL;

	if (index && (p = tcf_hash_lookup(index)) != NULL) {
		if (bind) {
			p->bindcnt++;
			p->refcnt++;
		}
		a->priv = p;
	}
	return p;
}
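
/* Allocate a zeroed action entry of 'size' bytes, initialise its lock,
 * index, timestamps and (optionally) its rate estimator, and attach it to
 * 'a'.  The caller still has to add it to the table with tcf_hash_insert().
 */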
static inline struct tcf_st *
tcf_hash_create(u32 index, struct rtattr *est, struct tc_action *a, int size, int ovr, int bind)
{
	struct tcf_st *p = NULL;

	p = kmalloc(size, GFP_KERNEL);
	if (p == NULL)
		return p;

	memset(p, 0, size);
	p->refcnt = 1;

	if (bind) {
		p->bindcnt = 1;
	}

	spin_lock_init(&p->lock);
	p->stats_lock = &p->lock;
	p->index = index ? : tcf_hash_new_index();
	p->tm.install = jiffies;
	p->tm.lastuse = jiffies;
#ifdef CONFIG_NET_ESTIMATOR
	if (est)
		gen_new_estimator(&p->bstats, &p->rate_est, p->stats_lock, est);
#endif
	a->priv = (void *) p;
	return p;
}
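
/* Link a newly created action into its hash bucket under the write lock. */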
static inline void tcf_hash_insert(struct tcf_st *p)
{
	unsigned h = tcf_hash(p->index);

	write_lock_bh(&tcf_t_lock);
	p->next = tcf_ht[h];
	tcf_ht[h] = p;
	write_unlock_bh(&tcf_t_lock);
}

#endif /* CONFIG_NET_ACT_INIT */

#endif /* __NET_PKT_ACT_H */