/*
 * Lightweight Autonomic Network Architecture
 * LANA packet processing engines (ppe).
 *
 * Copyright 2011 Daniel Borkmann <dborkma@tik.ee.ethz.ch>,
 * Swiss Federal Institute of Technology (ETH Zurich)
 * Subject to the GPL.
 */
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/percpu.h>
#include <linux/cache.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>

#include "xt_engine.h"
#include "xt_skb.h"
#include "xt_fblock.h"
/* Per-CPU I/O statistics of the packet processing engine */
struct engine_iostats {
	unsigned long long bytes;
	unsigned long long pkts;
	unsigned long long fblocks;
	unsigned long long timer;
	unsigned long long timer_cpu_miss;
} ____cacheline_aligned;
/* Per-CPU engine state: backlog queue plus the timer that services it */
struct engine_disc {
	struct sk_buff_head ppe_backlog_queue;
	struct tasklet_hrtimer htimer;
	int active, cpu;
} ____cacheline_aligned;
static struct engine_iostats __percpu *iostats;
static struct engine_disc __percpu *emdiscs;
extern struct proc_dir_entry *lana_proc_dir;
static struct proc_dir_entry *engine_proc;
static inline void engine_inc_pkts_stats(void)
{
	this_cpu_inc(iostats->pkts);
}

static inline void engine_inc_fblock_stats(void)
{
	this_cpu_inc(iostats->fblocks);
}

static inline void engine_inc_timer_stats(void)
{
	this_cpu_inc(iostats->timer);
}

static inline void engine_inc_timer_cpu_miss_stats(void)
{
	this_cpu_inc(iostats->timer_cpu_miss);
}

static inline void engine_add_bytes_stats(unsigned long bytes)
{
	this_cpu_add(iostats->bytes, bytes);
}
/* Enqueue an skb on this CPU's backlog; the traversal direction is
 * recorded in the skb so processing can be resumed later. */
void engine_backlog_tail(struct sk_buff *skb, enum path_type dir)
{
	write_path_to_skb(skb, dir);
	skb_queue_tail(&(this_cpu_ptr(emdiscs)->ppe_backlog_queue), skb);
}
EXPORT_SYMBOL(engine_backlog_tail);
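/*
 * Illustrative sketch (not part of the original file): a functional block
 * that cannot process a packet inline could defer it back to the engine
 * from its receive handler, e.g.:
 *
 *	static int fb_foo_netrx(struct fblock *fb, struct sk_buff *skb,
 *				enum path_type *dir)
 *	{
 *		if (congested) {
 *			engine_backlog_tail(skb, *dir);
 *			return PPE_SUCCESS;
 *		}
 *		...
 *	}
 *
 * fb_foo_netrx, congested and the PPE_SUCCESS return code are hypothetical
 * names; the netfb_rx signature is inferred from the call site in
 * process_packet() below.
 */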
static inline struct sk_buff *engine_backlog_test_reduce(enum path_type *dir)
{
	struct sk_buff *skb = NULL;
	if ((skb = skb_dequeue(&(this_cpu_ptr(emdiscs)->ppe_backlog_queue))))
		(*dir) = read_path_from_skb(skb);
	return skb;
}

static inline struct sk_buff *
engine_backlog_queue_test_reduce(enum path_type *dir, struct sk_buff_head *list)
{
	struct sk_buff *skb = NULL;
	if ((skb = skb_dequeue(list)))
		(*dir) = read_path_from_skb(skb);
	return skb;
}
static inline void engine_this_cpu_set_active(void)
{
	this_cpu_write(emdiscs->active, 1);
}

static inline void engine_this_cpu_set_inactive(void)
{
	this_cpu_write(emdiscs->active, 0);
}

static inline int engine_this_cpu_is_active(void)
{
	return this_cpu_read(emdiscs->active);
}
int process_packet(struct sk_buff *skb, enum path_type dir)
{
	int ret;
	idp_t cont;
	struct fblock *fb;

	BUG_ON(!rcu_read_lock_held());

	/* If the engine is already active on this CPU, defer the packet
	 * to the backlog queue instead of recursing. */
	if (engine_this_cpu_is_active()) {
		engine_backlog_tail(skb, dir);
		return 0;
	}
pkt:
	ret = PPE_ERROR;

	engine_this_cpu_set_active();
	engine_inc_pkts_stats();
	engine_add_bytes_stats(skb->len);

	while ((cont = read_next_idp_from_skb(skb))) {
		fb = __search_fblock(cont);
		if (unlikely(!fb)) {
			/* We free the skb since the fb doesn't exist! */
			kfree_skb(skb);
			ret = PPE_ERROR;
			break;
		}

		ret = fb->netfb_rx(fb, skb, &dir);
		/* Depending on its binding, the fblock either consumed or
		 * freed the skb; we must not touch it from here on! */
		put_fblock(fb);
		engine_inc_fblock_stats();
		if (ret == PPE_DROPPED)
			break;
	}

	/* Drain our backlog before declaring this CPU's engine idle. */
	if ((skb = engine_backlog_test_reduce(&dir)))
		goto pkt;

	engine_this_cpu_set_inactive();
	return ret;
}
EXPORT_SYMBOL_GPL(process_packet);
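/*
 * Usage sketch (assumption, not from this file): a receive hook hands
 * packets to the engine with the RCU read lock held, which the BUG_ON
 * above enforces:
 *
 *	rcu_read_lock();
 *	ret = process_packet(skb, TYPE_INGRESS);
 *	rcu_read_unlock();
 *
 * TYPE_INGRESS is assumed to be one of the enum path_type values
 * declared in xt_fblock.h.
 */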
static enum hrtimer_restart engine_timer_handler(struct hrtimer *self)
{
	/* Note: we could end up on a different CPU */
	enum path_type dir;
	struct sk_buff *skb;
	struct tasklet_hrtimer *thr = container_of(self, struct tasklet_hrtimer, timer);
	struct engine_disc *disc = container_of(thr, struct engine_disc, htimer);

	/* If the engine is busy on this CPU, it drains the backlog itself;
	 * likewise there is nothing to do on an empty queue. */
	if (likely(ACCESS_ONCE(disc->active)))
		goto out;
	if (skb_queue_empty(&disc->ppe_backlog_queue))
		goto out;
	if (disc->cpu != smp_processor_id()) {
		engine_inc_timer_cpu_miss_stats();
		/* Only steal work from a foreign CPU's queue once it has
		 * grown considerably. */
		if (skb_queue_len(&disc->ppe_backlog_queue) <= 150)
			goto out;
	}

	rcu_read_lock();

	skb = engine_backlog_queue_test_reduce(&dir, &disc->ppe_backlog_queue);
	BUG_ON(!skb);
	process_packet(skb, dir);

	rcu_read_unlock();
out:
	engine_inc_timer_stats();
	/* Re-arm for another 100 ms; we return HRTIMER_NORESTART and
	 * restart the tasklet_hrtimer manually. */
	tasklet_hrtimer_start(thr, ktime_set(0, 100000000),
			      HRTIMER_MODE_REL);
	return HRTIMER_NORESTART;
}
static int engine_procfs(char *page, char **start, off_t offset,
			 int count, int *eof, void *data)
{
	unsigned int cpu;
	off_t len = 0;

	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct engine_iostats *iostats_cpu;
		struct engine_disc *emdisc_cpu;
		iostats_cpu = per_cpu_ptr(iostats, cpu);
		emdisc_cpu = per_cpu_ptr(emdiscs, cpu);
		len += sprintf(page + len, "CPU%u:\t%llu\t%llu\t%llu\t%llu\t%llu\t%u\n",
			       cpu, iostats_cpu->pkts, iostats_cpu->bytes,
			       iostats_cpu->fblocks, iostats_cpu->timer,
			       iostats_cpu->timer_cpu_miss,
			       skb_queue_len(&emdisc_cpu->ppe_backlog_queue));
	}
	put_online_cpus();

	/* FIXME: fits in page? */
	*eof = 1;
	return len;
}
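/*
 * Reading the stats file (sketch; the exact path depends on where
 * lana_proc_dir is registered, presumably under /proc/net/lana). One
 * line per online CPU, columns matching the sprintf format above:
 *
 *	$ cat /proc/net/lana/ppe
 *	CPU0:	<pkts>	<bytes>	<fblocks>	<timer>	<timer_cpu_miss>	<qlen>
 *	...
 */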
int init_engine(void)
{
	unsigned int cpu;

	iostats = alloc_percpu(struct engine_iostats);
	if (!iostats)
		return -ENOMEM;
	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct engine_iostats *iostats_cpu;
		iostats_cpu = per_cpu_ptr(iostats, cpu);
		iostats_cpu->bytes = 0;
		iostats_cpu->pkts = 0;
		iostats_cpu->fblocks = 0;
		iostats_cpu->timer = 0;
		iostats_cpu->timer_cpu_miss = 0;
	}
	put_online_cpus();

	emdiscs = alloc_percpu(struct engine_disc);
	if (!emdiscs)
		goto err;
	get_online_cpus();
	for_each_online_cpu(cpu) {
		struct engine_disc *emdisc_cpu;
		emdisc_cpu = per_cpu_ptr(emdiscs, cpu);
		emdisc_cpu->active = 0;
		emdisc_cpu->cpu = cpu;
		skb_queue_head_init(&emdisc_cpu->ppe_backlog_queue);
		tasklet_hrtimer_init(&emdisc_cpu->htimer,
				     engine_timer_handler,
				     CLOCK_REALTIME, HRTIMER_MODE_ABS);
		/* First expiry after 100 ms; the handler re-arms itself. */
		tasklet_hrtimer_start(&emdisc_cpu->htimer,
				      ktime_set(0, 100000000),
				      HRTIMER_MODE_REL);
	}
	put_online_cpus();

	engine_proc = create_proc_read_entry("ppe", 0400, lana_proc_dir,
					     engine_procfs, NULL);
	if (!engine_proc)
		goto err1;

	return 0;
err1:
	free_percpu(emdiscs);
err:
	free_percpu(iostats);
	return -ENOMEM;
}
EXPORT_SYMBOL_GPL(init_engine);
void cleanup_engine(void)
{
	unsigned int cpu;

	if (iostats)
		free_percpu(iostats);
	if (emdiscs) {
		get_online_cpus();
		for_each_online_cpu(cpu) {
			struct engine_disc *emdisc_cpu;
			emdisc_cpu = per_cpu_ptr(emdiscs, cpu);
			tasklet_hrtimer_cancel(&emdisc_cpu->htimer);
			skb_queue_purge(&emdisc_cpu->ppe_backlog_queue);
		}
		put_online_cpus();
		free_percpu(emdiscs);
	}
	remove_proc_entry("ppe", lana_proc_dir);
}
EXPORT_SYMBOL_GPL(cleanup_engine);
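/*
 * Sketch (assumption, not part of this file): the LANA core module is
 * expected to pair these on load/unload, roughly:
 *
 *	static int __init lana_core_init(void)
 *	{
 *		int ret = init_engine();
 *		if (ret)
 *			return ret;
 *		...
 *		return 0;
 *	}
 *
 *	static void __exit lana_core_exit(void)
 *	{
 *		...
 *		cleanup_engine();
 *	}
 *
 * lana_core_init and lana_core_exit are hypothetical names.
 */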