/*
 * Lightweight Autonomic Network Architecture
 *
 * LANA packet processing engines (ppe).
 *
 * Copyright 2011 Daniel Borkmann <dborkma@tik.ee.ethz.ch>,
 * Swiss federal institute of technology (ETH Zurich)
 */
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/percpu.h>
#include <linux/cache.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>
#include <linux/cpu.h>
#include <linux/errno.h>

#include "xt_engine.h"
#include "xt_fblock.h"
/* Per-CPU packet engine I/O statistics, updated lock-free via this_cpu_*
 * ops on the fast path and summed per CPU by the /proc read handler. */
struct engine_iostats {
	unsigned long long bytes;   /* total skb payload bytes seen (skb->len) */
	unsigned long long pkts;    /* total packets entering process_packet() */
	unsigned long long fblocks; /* total functional-block invocations */
} ____cacheline_aligned;
29 struct sk_buff_head ppe_emerg_queue
;
30 struct sk_buff_head ppe_backlog_queue
;
31 } ____cacheline_aligned
;
/* Per-CPU I/O statistics, allocated in init_engine(). */
static struct engine_iostats __percpu *iostats;
/* Per-CPU queueing discipline, allocated in init_engine(). */
static struct engine_disc __percpu *emdiscs;

/* Parent /proc directory, defined by the LANA core. */
extern struct proc_dir_entry *lana_proc_dir;
/* The "ppe" read-only /proc entry exposing per-CPU statistics. */
static struct proc_dir_entry *engine_proc;
39 static inline void engine_inc_pkts_stats(void)
41 this_cpu_inc(iostats
->pkts
);
44 static inline void engine_inc_fblock_stats(void)
46 this_cpu_inc(iostats
->fblocks
);
49 static inline void engine_add_bytes_stats(unsigned long bytes
)
51 this_cpu_add(iostats
->bytes
, bytes
);
54 void engine_backlog_tail(struct sk_buff
*skb
, enum path_type dir
)
56 write_path_to_skb(skb
, dir
);
57 skb_queue_tail(&(this_cpu_ptr(emdiscs
)->ppe_backlog_queue
), skb
);
59 EXPORT_SYMBOL(engine_backlog_tail
);
61 static inline struct sk_buff
*engine_backlog_test_reduce(enum path_type
*dir
)
63 struct sk_buff
*skb
= NULL
;
64 if ((skb
= skb_dequeue(&(this_cpu_ptr(emdiscs
)->ppe_backlog_queue
))))
65 (*dir
) = read_path_from_skb(skb
);
69 /* Main function, must be called in rcu_read_lock context */
70 int process_packet(struct sk_buff
*skb
, enum path_type dir
)
76 BUG_ON(!rcu_read_lock_held());
79 engine_inc_pkts_stats();
80 engine_add_bytes_stats(skb
->len
);
82 while ((cont
= read_next_idp_from_skb(skb
))) {
83 fb
= __search_fblock(cont
);
85 /* We free the skb since the fb doesn't exist! */
91 ret
= fb
->netfb_rx(fb
, skb
, &dir
);
92 /* The FB frees the skb or not depending on its binding
93 * and we must not touch it! */
95 engine_inc_fblock_stats();
96 if (ret
== PPE_DROPPED
) {
101 if ((skb
= engine_backlog_test_reduce(&dir
)))
105 EXPORT_SYMBOL_GPL(process_packet
);
107 static int engine_procfs(char *page
, char **start
, off_t offset
,
108 int count
, int *eof
, void *data
)
114 for_each_online_cpu(cpu
) {
115 struct engine_iostats
*iostats_cpu
;
116 struct engine_disc
*emdisc_cpu
;
117 iostats_cpu
= per_cpu_ptr(iostats
, cpu
);
118 emdisc_cpu
= per_cpu_ptr(emdiscs
, cpu
);
119 len
+= sprintf(page
+ len
, "CPU%u:\t%llu\t%llu\t%llu\t%u\t%u\n",
120 cpu
, iostats_cpu
->pkts
, iostats_cpu
->bytes
,
121 iostats_cpu
->fblocks
,
122 skb_queue_len(&emdisc_cpu
->ppe_emerg_queue
),
123 skb_queue_len(&emdisc_cpu
->ppe_backlog_queue
));
127 /* FIXME: fits in page? */
132 int init_engine(void)
135 iostats
= alloc_percpu(struct engine_iostats
);
139 for_each_online_cpu(cpu
) {
140 struct engine_iostats
*iostats_cpu
;
141 iostats_cpu
= per_cpu_ptr(iostats
, cpu
);
142 iostats_cpu
->bytes
= 0;
143 iostats_cpu
->pkts
= 0;
144 iostats_cpu
->fblocks
= 0;
148 emdiscs
= alloc_percpu(struct engine_disc
);
152 for_each_online_cpu(cpu
) {
153 struct engine_disc
*emdisc_cpu
;
154 emdisc_cpu
= per_cpu_ptr(emdiscs
, cpu
);
155 skb_queue_head_init(&emdisc_cpu
->ppe_emerg_queue
);
156 skb_queue_head_init(&emdisc_cpu
->ppe_backlog_queue
);
160 engine_proc
= create_proc_read_entry("ppe", 0400, lana_proc_dir
,
161 engine_procfs
, NULL
);
167 free_percpu(emdiscs
);
169 free_percpu(iostats
);
172 EXPORT_SYMBOL_GPL(init_engine
);
174 void cleanup_engine(void)
178 free_percpu(iostats
);
181 for_each_online_cpu(cpu
) {
182 struct engine_disc
*emdisc_cpu
;
183 emdisc_cpu
= per_cpu_ptr(emdiscs
, cpu
);
184 skb_queue_purge(&emdisc_cpu
->ppe_emerg_queue
);
185 skb_queue_purge(&emdisc_cpu
->ppe_backlog_queue
);
188 free_percpu(emdiscs
);
190 remove_proc_entry("ppe", lana_proc_dir
);
192 EXPORT_SYMBOL_GPL(cleanup_engine
);