/*
 * Lightweight Autonomic Network Architecture
 *
 * LANA packet processing engines (ppe).
 *
 * Copyright 2011 Daniel Borkmann <dborkma@tik.ee.ethz.ch>,
 * Swiss federal institute of technology (ETH Zurich)
 */
11 #include <linux/kernel.h>
12 #include <linux/skbuff.h>
13 #include <linux/percpu.h>
14 #include <linux/cache.h>
15 #include <linux/proc_fs.h>
16 #include <linux/rcupdate.h>
18 #include "xt_engine.h"
20 #include "xt_fblock.h"
/*
 * Per-cpu I/O accounting for the packet processing engine.
 * Updated lock-free via this_cpu_inc()/this_cpu_add() on the fast path,
 * read per cpu by the procfs handler.
 */
struct engine_iostats {
	unsigned long long bytes;	/* sum of skb->len over processed packets */
	unsigned long long pkts;	/* packets run through process_packet() */
	unsigned long long fblocks;	/* functional block rx invocations */
} ____cacheline_aligned;
29 struct sk_buff_head ppe_emerg_queue
;
30 struct sk_buff_head ppe_backlog_queue
;
31 } ____cacheline_aligned
;
33 static struct engine_iostats __percpu
*iostats
;
34 static struct engine_disc __percpu
*emdiscs
;
36 extern struct proc_dir_entry
*lana_proc_dir
;
37 static struct proc_dir_entry
*engine_proc
;
39 static inline void engine_inc_pkts_stats(void)
41 this_cpu_inc(iostats
->pkts
);
44 static inline void engine_inc_fblock_stats(void)
46 this_cpu_inc(iostats
->fblocks
);
49 static inline void engine_add_bytes_stats(unsigned long bytes
)
51 this_cpu_add(iostats
->bytes
, bytes
);
54 static inline void engine_emerg_tail(struct sk_buff
*skb
)
56 skb_queue_tail(&(this_cpu_ptr(emdiscs
)->ppe_emerg_queue
), skb
);
59 void engine_backlog_tail(struct sk_buff
*skb
, enum path_type dir
)
61 //TODO: path information
62 skb_queue_tail(&(this_cpu_ptr(emdiscs
)->ppe_backlog_queue
), skb
);
64 EXPORT_SYMBOL(engine_backlog_tail
);
66 static inline struct sk_buff
*engine_emerg_test_reduce(void)
68 return skb_dequeue(&(this_cpu_ptr(emdiscs
)->ppe_emerg_queue
));
71 static inline struct sk_buff
*engine_backlog_test_reduce(void)
73 return skb_dequeue(&(this_cpu_ptr(emdiscs
)->ppe_backlog_queue
));
/* TODO: handle emergency queue, or backlog.
 * Idea: mark with jiffies where we definitely expect the fblock to be
 * present again, peek the skbs, test for jiffies and unlink conditionally;
 * if after certain periods the fblock is still missing, drop the skb.
 */

/* Main function, must be called in rcu_read_lock context */
/*
 * process_packet - run @skb through the chain of functional blocks
 * referenced by the IDPs stored in the skb. Caller must hold
 * rcu_read_lock (enforced by the BUG_ON below).
 * NOTE(review): this extract has gaps; the opening brace, the local
 * declarations (ret, cont, fb), the fb == NULL handling, the drop/exit
 * path and the return statement are not visible here.
 */
83 int process_packet(struct sk_buff
*skb
, enum path_type dir
)
/* gap: function body opens here; locals presumably declared — confirm */
89 BUG_ON(!rcu_read_lock_held());
/* per-cpu accounting: one packet, skb->len bytes */
91 engine_inc_pkts_stats();
92 engine_add_bytes_stats(skb
->len
);
/* walk the IDPs in the skb; each one resolves to a functional block */
94 while ((cont
= read_next_idp_from_skb(skb
))) {
95 fb
= __search_fblock(cont
);
/* gap: handling of a failed __search_fblock lookup not visible here */
/* hand the skb to the block's rx callback; &dir lets it update the path */
101 ret
= fb
->netfb_rx(fb
, skb
, &dir
);
103 engine_inc_fblock_stats();
/* a block may consume the skb; stop traversal once it reports a drop */
104 if (ret
== PPE_DROPPED
) {
/* gap: loop exit, cleanup and return of ret not visible here */
112 EXPORT_SYMBOL_GPL(process_packet
);
/*
 * engine_procfs - legacy (pre-3.10) procfs read callback for the "ppe"
 * entry: prints one line per online cpu with packet/byte/fblock counters
 * and the current emergency/backlog queue lengths.
 * NOTE(review): this extract has gaps; the opening brace, the locals
 * (len, cpu at minimum), the loop close and the return are not visible.
 */
114 static int engine_procfs(char *page
, char **start
, off_t offset
,
115 int count
, int *eof
, void *data
)
/* gap: body opens here; len/cpu presumably declared — confirm */
121 for_each_online_cpu(cpu
) {
122 struct engine_iostats
*iostats_cpu
;
123 struct engine_disc
*emdisc_cpu
;
/* fetch this cpu's stats and queue discipline */
124 iostats_cpu
= per_cpu_ptr(iostats
, cpu
);
125 emdisc_cpu
= per_cpu_ptr(emdiscs
, cpu
);
/* format: CPU<n>: pkts bytes fblocks emerg-qlen backlog-qlen */
126 len
+= sprintf(page
+ len
, "CPU%u:\t%llu\t%llu\t%llu\t%u\t%u\n",
127 cpu
, iostats_cpu
->pkts
, iostats_cpu
->bytes
,
128 iostats_cpu
->fblocks
,
129 skb_queue_len(&emdisc_cpu
->ppe_emerg_queue
),
130 skb_queue_len(&emdisc_cpu
->ppe_backlog_queue
));
/* unbounded sprintf into a single page — overflow risk on many cpus */
134 /* FIXME: fits in page? */
/* gap: *eof handling and 'return len' not visible in this extract */
/*
 * init_engine - allocate per-cpu stats and queues and register the
 * "ppe" procfs entry. Returns 0 on success, negative error otherwise
 * (error paths are partially elided in this extract).
 * NOTE(review): gaps here hide the locals, the NULL checks after each
 * alloc_percpu/create_proc_read_entry, and the error-unwind labels that
 * the two trailing free_percpu() calls belong to — confirm against the
 * full source.
 */
139 int init_engine(void)
/* gap: body opens here; cpu (and return value) presumably declared */
142 iostats
= alloc_percpu(struct engine_iostats
);
/* gap: iostats == NULL check not visible here */
/* NOTE(review): alloc_percpu() returns zeroed memory, so this zeroing
 * loop appears redundant — confirm */
146 for_each_online_cpu(cpu
) {
147 struct engine_iostats
*iostats_cpu
;
148 iostats_cpu
= per_cpu_ptr(iostats
, cpu
);
149 iostats_cpu
->bytes
= 0;
150 iostats_cpu
->pkts
= 0;
151 iostats_cpu
->fblocks
= 0;
155 emdiscs
= alloc_percpu(struct engine_disc
);
/* gap: emdiscs == NULL check / unwind not visible here */
/* initialize both skb queues (head, lock) on every online cpu */
159 for_each_online_cpu(cpu
) {
160 struct engine_disc
*emdisc_cpu
;
161 emdisc_cpu
= per_cpu_ptr(emdiscs
, cpu
);
162 skb_queue_head_init(&emdisc_cpu
->ppe_emerg_queue
);
163 skb_queue_head_init(&emdisc_cpu
->ppe_backlog_queue
);
/* legacy procfs API (removed in Linux 3.10); read-only, mode 0400 */
167 engine_proc
= create_proc_read_entry("ppe", 0400, lana_proc_dir
,
168 engine_procfs
, NULL
);
/* gap: these frees are presumably error-unwind labels — confirm */
174 free_percpu(emdiscs
);
176 free_percpu(iostats
);
/* gap: error return not visible in this extract */
179 EXPORT_SYMBOL_GPL(init_engine
);
/*
 * cleanup_engine - teardown counterpart of init_engine(): release the
 * per-cpu stats, purge and free the per-cpu queues, and remove the
 * "ppe" procfs entry.
 * NOTE(review): the opening brace and local declarations (cpu) are not
 * visible in this extract — confirm against the full source.
 */
181 void cleanup_engine(void)
/* gap: body opens here */
185 free_percpu(iostats
);
/* drop any skbs still sitting in the per-cpu queues before freeing */
188 for_each_online_cpu(cpu
) {
189 struct engine_disc
*emdisc_cpu
;
190 emdisc_cpu
= per_cpu_ptr(emdiscs
, cpu
);
191 skb_queue_purge(&emdisc_cpu
->ppe_emerg_queue
);
192 skb_queue_purge(&emdisc_cpu
->ppe_backlog_queue
);
195 free_percpu(emdiscs
);
197 remove_proc_entry("ppe", lana_proc_dir
);
199 EXPORT_SYMBOL_GPL(cleanup_engine
);