/*
 * Lightweight Autonomic Network Architecture
 *
 * LANA packet processing engines (ppe).
 *
 * Copyright 2011 Daniel Borkmann <dborkma@tik.ee.ethz.ch>,
 * Swiss federal institute of technology (ETH Zurich)
 */
11 #include <linux/kernel.h>
12 #include <linux/skbuff.h>
13 #include <linux/percpu.h>
14 #include <linux/cache.h>
15 #include <linux/proc_fs.h>
16 #include <linux/rcupdate.h>
17 #include <linux/hrtimer.h>
18 #include <linux/interrupt.h>
20 #include "xt_engine.h"
22 #include "xt_fblock.h"
/*
 * Per-CPU I/O counters for the packet processing engine. Kept on its
 * own cache line (____cacheline_aligned) so per-CPU updates do not
 * false-share with neighbouring data.
 */
struct engine_iostats {
	unsigned long long bytes;          /* total payload bytes processed */
	unsigned long long pkts;           /* total packets processed */
	unsigned long long fblocks;        /* functional-block invocations */
	unsigned long long timer;          /* timer handler runs */
	unsigned long long timer_cpu_miss; /* timer fired on a foreign CPU */
} ____cacheline_aligned;
33 struct sk_buff_head ppe_backlog_queue
;
34 struct tasklet_hrtimer htimer
;
36 } ____cacheline_aligned
;
38 static struct engine_iostats __percpu
*iostats
;
39 static struct engine_disc __percpu
*emdiscs
;
40 extern struct proc_dir_entry
*lana_proc_dir
;
41 static struct proc_dir_entry
*engine_proc
;
43 static inline void engine_inc_pkts_stats(void)
45 this_cpu_inc(iostats
->pkts
);
48 static inline void engine_inc_fblock_stats(void)
50 this_cpu_inc(iostats
->fblocks
);
53 static inline void engine_inc_timer_stats(void)
55 this_cpu_inc(iostats
->timer
);
58 static inline void engine_inc_timer_cpu_miss_stats(void)
60 this_cpu_inc(iostats
->timer_cpu_miss
);
63 static inline void engine_add_bytes_stats(unsigned long bytes
)
65 this_cpu_add(iostats
->bytes
, bytes
);
68 void engine_backlog_tail(struct sk_buff
*skb
, enum path_type dir
)
70 write_path_to_skb(skb
, dir
);
71 skb_queue_tail(&(this_cpu_ptr(emdiscs
)->ppe_backlog_queue
), skb
);
73 EXPORT_SYMBOL(engine_backlog_tail
);
75 static inline struct sk_buff
*engine_backlog_test_reduce(enum path_type
*dir
)
77 struct sk_buff
*skb
= NULL
;
78 if ((skb
= skb_dequeue(&(this_cpu_ptr(emdiscs
)->ppe_backlog_queue
))))
79 (*dir
) = read_path_from_skb(skb
);
83 static inline struct sk_buff
*
84 engine_backlog_queue_test_reduce(enum path_type
*dir
, struct sk_buff_head
*list
)
86 struct sk_buff
*skb
= NULL
;
87 if ((skb
= skb_dequeue(list
)))
88 (*dir
) = read_path_from_skb(skb
);
92 static inline void engine_this_cpu_set_active(void)
94 this_cpu_write(emdiscs
->active
, 1);
97 static inline void engine_this_cpu_set_inactive(void)
99 this_cpu_write(emdiscs
->active
, 0);
102 static inline int engine_this_cpu_is_active(void)
104 return this_cpu_read(emdiscs
->active
);
107 int process_packet(struct sk_buff
*skb
, enum path_type dir
)
113 BUG_ON(!rcu_read_lock_held());
114 if (engine_this_cpu_is_active()) {
115 engine_backlog_tail(skb
, dir
);
121 engine_this_cpu_set_active();
122 engine_inc_pkts_stats();
123 engine_add_bytes_stats(skb
->len
);
125 while ((cont
= read_next_idp_from_skb(skb
))) {
126 fb
= __search_fblock(cont
);
128 /* We free the skb since the fb doesn't exist! */
134 ret
= fb
->netfb_rx(fb
, skb
, &dir
);
135 /* The FB frees the skb or not depending on its binding
136 * and we must not touch it! */
138 engine_inc_fblock_stats();
139 if (ret
== PPE_DROPPED
) {
145 if ((skb
= engine_backlog_test_reduce(&dir
)))
148 engine_this_cpu_set_inactive();
151 EXPORT_SYMBOL_GPL(process_packet
);
153 static enum hrtimer_restart
engine_timer_handler(struct hrtimer
*self
)
155 /* Note: we could end up on a different CPU */
158 struct tasklet_hrtimer
*thr
= container_of(self
, struct tasklet_hrtimer
, timer
);
159 struct engine_disc
*disc
= container_of(thr
, struct engine_disc
, htimer
);
161 if (likely(ACCESS_ONCE(disc
->active
)))
163 if (skb_queue_empty(&disc
->ppe_backlog_queue
))
165 if (disc
->cpu
!= smp_processor_id()) {
166 engine_inc_timer_cpu_miss_stats();
167 if (skb_queue_len(&disc
->ppe_backlog_queue
) <= 150)
173 skb
= engine_backlog_queue_test_reduce(&dir
, &disc
->ppe_backlog_queue
);
175 process_packet(skb
, dir
);
179 engine_inc_timer_stats();
180 tasklet_hrtimer_start(thr
, ktime_set(0, 100000000),
182 return HRTIMER_NORESTART
;
185 static int engine_procfs(char *page
, char **start
, off_t offset
,
186 int count
, int *eof
, void *data
)
192 for_each_online_cpu(cpu
) {
193 struct engine_iostats
*iostats_cpu
;
194 struct engine_disc
*emdisc_cpu
;
195 iostats_cpu
= per_cpu_ptr(iostats
, cpu
);
196 emdisc_cpu
= per_cpu_ptr(emdiscs
, cpu
);
197 len
+= sprintf(page
+ len
, "CPU%u:\t%llu\t%llu\t%llu\t%llu\t%llu\t%u\n",
198 cpu
, iostats_cpu
->pkts
, iostats_cpu
->bytes
,
199 iostats_cpu
->fblocks
, iostats_cpu
->timer
,
200 iostats_cpu
->timer_cpu_miss
,
201 skb_queue_len(&emdisc_cpu
->ppe_backlog_queue
));
205 /* FIXME: fits in page? */
210 int init_engine(void)
213 iostats
= alloc_percpu(struct engine_iostats
);
217 for_each_online_cpu(cpu
) {
218 struct engine_iostats
*iostats_cpu
;
219 iostats_cpu
= per_cpu_ptr(iostats
, cpu
);
220 iostats_cpu
->bytes
= 0;
221 iostats_cpu
->pkts
= 0;
222 iostats_cpu
->fblocks
= 0;
223 iostats_cpu
->timer
= 0;
224 iostats_cpu
->timer_cpu_miss
= 0;
228 emdiscs
= alloc_percpu(struct engine_disc
);
232 for_each_online_cpu(cpu
) {
233 struct engine_disc
*emdisc_cpu
;
234 emdisc_cpu
= per_cpu_ptr(emdiscs
, cpu
);
235 emdisc_cpu
->active
= 0;
236 emdisc_cpu
->cpu
= cpu
;
237 skb_queue_head_init(&emdisc_cpu
->ppe_backlog_queue
);
238 tasklet_hrtimer_init(&emdisc_cpu
->htimer
,
239 engine_timer_handler
,
240 CLOCK_REALTIME
, HRTIMER_MODE_ABS
);
241 tasklet_hrtimer_start(&emdisc_cpu
->htimer
,
242 ktime_set(0, 100000000),
247 engine_proc
= create_proc_read_entry("ppe", 0400, lana_proc_dir
,
248 engine_procfs
, NULL
);
254 free_percpu(emdiscs
);
256 free_percpu(iostats
);
259 EXPORT_SYMBOL_GPL(init_engine
);
261 void cleanup_engine(void)
265 free_percpu(iostats
);
268 for_each_online_cpu(cpu
) {
269 struct engine_disc
*emdisc_cpu
;
270 emdisc_cpu
= per_cpu_ptr(emdiscs
, cpu
);
271 tasklet_hrtimer_cancel(&emdisc_cpu
->htimer
);
272 skb_queue_purge(&emdisc_cpu
->ppe_backlog_queue
);
275 free_percpu(emdiscs
);
277 remove_proc_entry("ppe", lana_proc_dir
);
279 EXPORT_SYMBOL_GPL(cleanup_engine
);