/*
 * Lightweight Autonomic Network Architecture
 *
 * LANA packet processing engines (ppe).
 *
 * Copyright 2011 Daniel Borkmann <dborkma@tik.ee.ethz.ch>,
 * Swiss federal institute of technology (ETH Zurich)
 */
11 #include <linux/kernel.h>
12 #include <linux/skbuff.h>
13 #include <linux/percpu.h>
14 #include <linux/cache.h>
15 #include <linux/proc_fs.h>
16 #include <linux/rcupdate.h>
17 #include <linux/hrtimer.h>
18 #include <linux/interrupt.h>
20 #include "xt_engine.h"
22 #include "xt_fblock.h"
/* Per-CPU engine I/O statistics; cacheline-aligned per-CPU instance. */
24 struct engine_iostats
{
25 unsigned long long bytes
; /* total payload bytes accounted via engine_add_bytes_stats() */
26 unsigned long long pkts
; /* total packets entering process_packet() on this CPU */
27 unsigned long long fblocks
; /* number of functional-block (fb->netfb_rx) invocations */
28 unsigned long long timer
; /* number of backlog-timer handler runs */
29 unsigned long long timer_cpu_miss
; /* timer runs that landed on a CPU other than disc->cpu */
30 } ____cacheline_aligned
;
/* NOTE(review): fragment — the opening `struct engine_disc {` line and the
 * `active`/`cpu` members referenced later in this file (engine_this_cpu_*,
 * engine_timer_handler) appear to be missing from this view of the source. */
33 struct sk_buff_head ppe_backlog_queue
; /* per-CPU backlog of skbs deferred by engine_backlog_tail() */
34 struct tasklet_hrtimer htimer
; /* per-CPU timer (engine_timer_handler) that drains the backlog */
36 } ____cacheline_aligned
;
/* Per-CPU I/O statistics; allocated/zeroed in init_engine(). */
38 static struct engine_iostats __percpu
*iostats
;
/* Per-CPU engine discipline (backlog queue, timer); allocated in init_engine(). */
39 static struct engine_disc __percpu
*emdiscs
;
/* LANA procfs directory; defined in another translation unit. */
40 extern struct proc_dir_entry
*lana_proc_dir
;
/* Read-only "ppe" proc entry created in init_engine(), removed in cleanup_engine(). */
41 static struct proc_dir_entry
*engine_proc
;
43 static inline void engine_inc_pkts_stats(void)
45 this_cpu_inc(iostats
->pkts
);
48 static inline void engine_inc_fblock_stats(void)
50 this_cpu_inc(iostats
->fblocks
);
53 static inline void engine_inc_timer_stats(void)
55 this_cpu_inc(iostats
->timer
);
58 static inline void engine_inc_timer_cpu_miss_stats(void)
60 this_cpu_inc(iostats
->timer_cpu_miss
);
63 static inline void engine_add_bytes_stats(unsigned long bytes
)
65 this_cpu_add(iostats
->bytes
, bytes
);
68 void engine_backlog_tail(struct sk_buff
*skb
, enum path_type dir
)
70 write_path_to_skb(skb
, dir
);
71 skb_queue_tail(&(this_cpu_ptr(emdiscs
)->ppe_backlog_queue
), skb
);
73 EXPORT_SYMBOL(engine_backlog_tail
);
75 static inline struct sk_buff
*engine_backlog_test_reduce(enum path_type
*dir
)
77 struct sk_buff
*skb
= NULL
;
78 if ((skb
= skb_dequeue(&(this_cpu_ptr(emdiscs
)->ppe_backlog_queue
))))
79 (*dir
) = read_path_from_skb(skb
);
83 static inline struct sk_buff
*engine_backlog_queue_test_reduce(enum path_type
*dir
,
84 struct sk_buff_head
*list
)
86 struct sk_buff
*skb
= NULL
;
87 if ((skb
= skb_dequeue(list
)))
88 (*dir
) = read_path_from_skb(skb
);
92 static inline void engine_this_cpu_set_active(void)
94 this_cpu_write(emdiscs
->active
, 1);
97 static inline void engine_this_cpu_set_inactive(void)
99 this_cpu_write(emdiscs
->active
, 0);
102 static inline int engine_this_cpu_is_active(void)
104 return this_cpu_read(emdiscs
->active
);
107 /* Main function, must be called in rcu_read_lock context */
/* NOTE(review): several original source lines are missing from this view
 * (declarations of cont/fb/ret, the fb == NULL error path, branch/loop
 * closers and return statements); comments below cover only what is visible. */
108 int process_packet(struct sk_buff
*skb
, enum path_type dir
)
/* Caller contract: RCU read lock must be held. */
114 BUG_ON(!rcu_read_lock_held());
/* Re-entry on this CPU: defer the packet to the backlog instead of recursing. */
116 if (engine_this_cpu_is_active()) {
117 engine_backlog_tail(skb
, dir
);
/* Mark this CPU busy for the duration of the processing loop. */
123 engine_this_cpu_set_active();
125 engine_inc_pkts_stats();
126 engine_add_bytes_stats(skb
->len
);
/* Walk the skb's IDP chain: look up each functional block and hand
 * the packet to it. */
128 while ((cont
= read_next_idp_from_skb(skb
))) {
129 fb
= __search_fblock(cont
);
131 /* We free the skb since the fb doesn't exist! */
137 ret
= fb
->netfb_rx(fb
, skb
, &dir
);
138 /* The FB frees the skb or not depending on its binding
139 * and we must not touch it! */
141 engine_inc_fblock_stats();
142 if (ret
== PPE_DROPPED
) {
/* Drain deferred packets that accumulated while we were active. */
147 if ((skb
= engine_backlog_test_reduce(&dir
)))
150 engine_this_cpu_set_inactive();
153 EXPORT_SYMBOL_GPL(process_packet
);
/* Periodic backlog drain. Recovers the per-CPU engine_disc from the timer
 * pointer, processes a queued skb when appropriate, and re-arms itself.
 * NOTE(review): declarations of skb/dir and several branch bodies/closers
 * are missing from this view of the source. */
155 static enum hrtimer_restart
engine_timer_handler(struct hrtimer
*self
)
157 /* Note: we could end up on a different CPU */
160 struct tasklet_hrtimer
*thr
= container_of(self
, struct tasklet_hrtimer
, timer
);
161 struct engine_disc
*disc
= container_of(thr
, struct engine_disc
, htimer
);
/* Skip draining while the owning CPU is actively processing. */
163 if (likely(ACCESS_ONCE(disc
->active
)))
/* Nothing queued: just re-arm. */
165 if (skb_queue_empty(&disc
->ppe_backlog_queue
))
/* Timer migrated to a foreign CPU: account it, and only drain when the
 * backlog is short (threshold 150). */
167 if (disc
->cpu
!= smp_processor_id()) {
168 engine_inc_timer_cpu_miss_stats();
169 if (skb_queue_len(&disc
->ppe_backlog_queue
) <= 150)
175 skb
= engine_backlog_queue_test_reduce(&dir
, &disc
->ppe_backlog_queue
);
177 process_packet(skb
, dir
);
181 engine_inc_timer_stats();
/* Re-arm for another 50 ms (50000000 ns). */
182 tasklet_hrtimer_start(thr
, ktime_set(0, 50000000),
184 return HRTIMER_NORESTART
;
/* procfs read callback (legacy read_proc interface): dumps one line of
 * per-CPU statistics — pkts, bytes, fblocks, timer runs, timer CPU misses,
 * and current backlog length.
 * NOTE(review): declarations of len/cpu and the function epilogue (eof
 * handling, return) are missing from this view of the source. */
187 static int engine_procfs(char *page
, char **start
, off_t offset
,
188 int count
, int *eof
, void *data
)
194 for_each_online_cpu(cpu
) {
195 struct engine_iostats
*iostats_cpu
;
196 struct engine_disc
*emdisc_cpu
;
197 iostats_cpu
= per_cpu_ptr(iostats
, cpu
);
198 emdisc_cpu
= per_cpu_ptr(emdiscs
, cpu
);
199 len
+= sprintf(page
+ len
, "CPU%u:\t%llu\t%llu\t%llu\t%llu\t%llu\t%u\n",
200 cpu
, iostats_cpu
->pkts
, iostats_cpu
->bytes
,
201 iostats_cpu
->fblocks
, iostats_cpu
->timer
,
202 iostats_cpu
->timer_cpu_miss
,
203 skb_queue_len(&emdisc_cpu
->ppe_backlog_queue
));
207 /* FIXME: fits in page? */
/* Module init for the engine: allocates and zeroes the per-CPU statistics,
 * allocates and initializes the per-CPU engine discs (backlog queue plus a
 * 50 ms tasklet-hrtimer), and registers the read-only /proc "ppe" entry.
 * NOTE(review): allocation NULL checks, error labels, return statements and
 * some closing braces are missing from this view; lines 256/258 look like
 * the error-unwind path freeing what was allocated. */
212 int init_engine(void)
215 iostats
= alloc_percpu(struct engine_iostats
);
/* Zero all statistics counters on every online CPU. */
219 for_each_online_cpu(cpu
) {
220 struct engine_iostats
*iostats_cpu
;
221 iostats_cpu
= per_cpu_ptr(iostats
, cpu
);
222 iostats_cpu
->bytes
= 0;
223 iostats_cpu
->pkts
= 0;
224 iostats_cpu
->fblocks
= 0;
225 iostats_cpu
->timer
= 0;
226 iostats_cpu
->timer_cpu_miss
= 0;
230 emdiscs
= alloc_percpu(struct engine_disc
);
/* Initialize each CPU's disc: inactive, bound to its CPU, empty backlog,
 * and a periodic 50 ms (50000000 ns) drain timer. */
234 for_each_online_cpu(cpu
) {
235 struct engine_disc
*emdisc_cpu
;
236 emdisc_cpu
= per_cpu_ptr(emdiscs
, cpu
);
237 emdisc_cpu
->active
= 0;
238 emdisc_cpu
->cpu
= cpu
;
239 skb_queue_head_init(&emdisc_cpu
->ppe_backlog_queue
);
240 tasklet_hrtimer_init(&emdisc_cpu
->htimer
,
241 engine_timer_handler
,
242 CLOCK_REALTIME
, HRTIMER_MODE_ABS
);
243 tasklet_hrtimer_start(&emdisc_cpu
->htimer
,
244 ktime_set(0, 50000000),
/* Register /proc/<lana>/ppe, read-only (0400), served by engine_procfs. */
249 engine_proc
= create_proc_read_entry("ppe", 0400, lana_proc_dir
,
250 engine_procfs
, NULL
);
/* Error unwind: release what was allocated above. */
256 free_percpu(emdiscs
);
258 free_percpu(iostats
);
261 EXPORT_SYMBOL_GPL(init_engine
);
/* Module teardown: frees the statistics, cancels each CPU's backlog timer,
 * purges the backlogs, frees the discs and removes the proc entry.
 * NOTE(review): iostats is freed *before* the per-CPU timers are cancelled
 * and before the proc entry is removed, yet engine_timer_handler (via the
 * stats helpers) and engine_procfs still dereference iostats — looks like a
 * potential use-after-free ordering; consider freeing iostats last. Some
 * declaration/guard lines are missing from this view of the source. */
263 void cleanup_engine(void)
267 free_percpu(iostats
);
/* Stop and drain each CPU's engine before freeing the discs. */
270 for_each_online_cpu(cpu
) {
271 struct engine_disc
*emdisc_cpu
;
272 emdisc_cpu
= per_cpu_ptr(emdiscs
, cpu
);
273 tasklet_hrtimer_cancel(&emdisc_cpu
->htimer
);
274 skb_queue_purge(&emdisc_cpu
->ppe_backlog_queue
);
277 free_percpu(emdiscs
);
279 remove_proc_entry("ppe", lana_proc_dir
);
281 EXPORT_SYMBOL_GPL(cleanup_engine
);