replace same ds with generic one
[ana-net.git] / src / xt_engine.c
bloba07e2d651f51f6cf9b9d99da74d4dcac1e9cf4c2
1 /*
2 * Lightweight Autonomic Network Architecture
4 * LANA packet processing engines (ppe).
6 * Copyright 2011 Daniel Borkmann <dborkma@tik.ee.ethz.ch>,
7 * Swiss federal institute of technology (ETH Zurich)
8 * Subject to the GPL.
9 */
11 #include <linux/kernel.h>
12 #include <linux/skbuff.h>
13 #include <linux/percpu.h>
14 #include <linux/cache.h>
15 #include <linux/proc_fs.h>
16 #include <linux/rcupdate.h>
18 #include "xt_engine.h"
19 #include "xt_skb.h"
20 #include "xt_fblock.h"
/* Per-CPU I/O statistics of the packet processing engine. */
struct engine_iostats {
	unsigned long long bytes;	/* total payload bytes seen on this CPU */
	unsigned long long pkts;	/* total packets processed on this CPU */
	unsigned long long fblocks;	/* total fblock invocations on this CPU */
} ____cacheline_aligned;
/* Per-CPU skb queues of the engine: an emergency queue and a backlog
 * queue for packets that cannot be processed immediately. */
struct engine_disc {
	struct sk_buff_head ppe_emerg_queue;	/* skbs parked on emergencies */
	struct sk_buff_head ppe_backlog_queue;	/* skbs deferred for later processing */
} ____cacheline_aligned;
/* Per-CPU engine state; both allocated in init_engine(), freed in
 * cleanup_engine(). */
static struct engine_iostats __percpu *iostats;
static struct engine_disc __percpu *emdiscs;

/* LANA procfs directory, owned and created by the core module. */
extern struct proc_dir_entry *lana_proc_dir;
/* The "ppe" statistics file registered below lana_proc_dir. */
static struct proc_dir_entry *engine_proc;
39 static inline void engine_inc_pkts_stats(void)
41 this_cpu_inc(iostats->pkts);
44 static inline void engine_inc_fblock_stats(void)
46 this_cpu_inc(iostats->fblocks);
49 static inline void engine_add_bytes_stats(unsigned long bytes)
51 this_cpu_add(iostats->bytes, bytes);
54 static inline void engine_emerg_tail(struct sk_buff *skb)
56 skb_queue_tail(&(this_cpu_ptr(emdiscs)->ppe_emerg_queue), skb);
59 void engine_backlog_tail(struct sk_buff *skb, enum path_type dir)
61 //TODO: path information
62 skb_queue_tail(&(this_cpu_ptr(emdiscs)->ppe_backlog_queue), skb);
64 EXPORT_SYMBOL(engine_backlog_tail);
66 static inline struct sk_buff *engine_emerg_test_reduce(void)
68 return skb_dequeue(&(this_cpu_ptr(emdiscs)->ppe_emerg_queue));
71 static inline struct sk_buff *engine_backlog_test_reduce(void)
73 return skb_dequeue(&(this_cpu_ptr(emdiscs)->ppe_backlog_queue));
/* TODO: handle the emergency queue, or backlog.
 * Idea: mark with jiffies when we definitely expect the fblock to be
 * present again, peek the skbs, test for jiffies and unlink conditionally;
 * if after a certain period the fblock is still missing, drop the skb.
 */
82 /* Main function, must be called in rcu_read_lock context */
83 int process_packet(struct sk_buff *skb, enum path_type dir)
85 int ret = PPE_ERROR;
86 idp_t cont;
87 struct fblock *fb;
89 BUG_ON(!rcu_read_lock_held());
91 engine_inc_pkts_stats();
92 engine_add_bytes_stats(skb->len);
94 while ((cont = read_next_idp_from_skb(skb))) {
95 fb = __search_fblock(cont);
96 if (unlikely(!fb)) {
97 ret = PPE_ERROR;
98 break;
101 ret = fb->netfb_rx(fb, skb, &dir);
102 put_fblock(fb);
103 engine_inc_fblock_stats();
104 if (ret == PPE_DROPPED) {
105 ret = PPE_DROPPED;
106 break;
110 return ret;
112 EXPORT_SYMBOL_GPL(process_packet);
114 static int engine_procfs(char *page, char **start, off_t offset,
115 int count, int *eof, void *data)
117 unsigned int cpu;
118 off_t len = 0;
120 get_online_cpus();
121 for_each_online_cpu(cpu) {
122 struct engine_iostats *iostats_cpu;
123 struct engine_disc *emdisc_cpu;
124 iostats_cpu = per_cpu_ptr(iostats, cpu);
125 emdisc_cpu = per_cpu_ptr(emdiscs, cpu);
126 len += sprintf(page + len, "CPU%u:\t%llu\t%llu\t%llu\t%u\t%u\n",
127 cpu, iostats_cpu->pkts, iostats_cpu->bytes,
128 iostats_cpu->fblocks,
129 skb_queue_len(&emdisc_cpu->ppe_emerg_queue),
130 skb_queue_len(&emdisc_cpu->ppe_backlog_queue));
132 put_online_cpus();
134 /* FIXME: fits in page? */
135 *eof = 1;
136 return len;
139 int init_engine(void)
141 unsigned int cpu;
142 iostats = alloc_percpu(struct engine_iostats);
143 if (!iostats)
144 return -ENOMEM;
145 get_online_cpus();
146 for_each_online_cpu(cpu) {
147 struct engine_iostats *iostats_cpu;
148 iostats_cpu = per_cpu_ptr(iostats, cpu);
149 iostats_cpu->bytes = 0;
150 iostats_cpu->pkts = 0;
151 iostats_cpu->fblocks = 0;
153 put_online_cpus();
155 emdiscs = alloc_percpu(struct engine_disc);
156 if (!emdiscs)
157 goto err;
158 get_online_cpus();
159 for_each_online_cpu(cpu) {
160 struct engine_disc *emdisc_cpu;
161 emdisc_cpu = per_cpu_ptr(emdiscs, cpu);
162 skb_queue_head_init(&emdisc_cpu->ppe_emerg_queue);
163 skb_queue_head_init(&emdisc_cpu->ppe_backlog_queue);
165 put_online_cpus();
167 engine_proc = create_proc_read_entry("ppe", 0400, lana_proc_dir,
168 engine_procfs, NULL);
169 if (!engine_proc)
170 goto err1;
172 return 0;
173 err1:
174 free_percpu(emdiscs);
175 err:
176 free_percpu(iostats);
177 return -ENOMEM;
179 EXPORT_SYMBOL_GPL(init_engine);
181 void cleanup_engine(void)
183 unsigned int cpu;
184 if (iostats)
185 free_percpu(iostats);
186 if (emdiscs) {
187 get_online_cpus();
188 for_each_online_cpu(cpu) {
189 struct engine_disc *emdisc_cpu;
190 emdisc_cpu = per_cpu_ptr(emdiscs, cpu);
191 skb_queue_purge(&emdisc_cpu->ppe_emerg_queue);
192 skb_queue_purge(&emdisc_cpu->ppe_backlog_queue);
194 put_online_cpus();
195 free_percpu(emdiscs);
197 remove_proc_entry("ppe", lana_proc_dir);
199 EXPORT_SYMBOL_GPL(cleanup_engine);