backup ideas
[ana-net.git] / src / xt_engine.c
blobe7644b43c1899b99e926c7c7871b377b85427039
/*
 * Lightweight Autonomic Network Architecture
 *
 * LANA packet processing engines (ppe).
 *
 * Copyright 2011 Daniel Borkmann <dborkma@tik.ee.ethz.ch>,
 * Swiss federal institute of technology (ETH Zurich)
 * Subject to the GPL.
 */
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/percpu.h>
#include <linux/cache.h>
#include <linux/proc_fs.h>
#include <linux/rcupdate.h>

#include "xt_engine.h"
#include "xt_skb.h"
#include "xt_fblock.h"
/*
 * Per-CPU I/O statistics of the packet processing engine.
 * One instance per CPU (see the iostats percpu allocation below),
 * so counters are updated without locking.
 */
struct engine_iostats {
	unsigned long long bytes;	/* total skb payload bytes processed */
	unsigned long long pkts;	/* total packets entering the engine */
	unsigned long long fblocks;	/* total functional-block invocations */
} ____cacheline_aligned;
28 struct engine_disc {
29 struct sk_buff_head ppe_emerg_queue;
30 struct sk_buff_head ppe_backlog_queue;
31 } ____cacheline_aligned;
33 static struct engine_iostats __percpu *iostats;
34 static struct engine_disc __percpu *emdiscs;
36 extern struct proc_dir_entry *lana_proc_dir;
37 static struct proc_dir_entry *engine_proc;
39 static inline void engine_inc_pkts_stats(void)
41 this_cpu_inc(iostats->pkts);
44 static inline void engine_inc_fblock_stats(void)
46 this_cpu_inc(iostats->fblocks);
49 static inline void engine_add_bytes_stats(unsigned long bytes)
51 this_cpu_add(iostats->bytes, bytes);
54 void engine_backlog_tail(struct sk_buff *skb, enum path_type dir)
56 write_path_to_skb(skb, dir);
57 skb_queue_tail(&(this_cpu_ptr(emdiscs)->ppe_backlog_queue), skb);
59 EXPORT_SYMBOL(engine_backlog_tail);
61 static inline struct sk_buff *engine_backlog_test_reduce(enum path_type *dir)
63 struct sk_buff *skb = NULL;
64 if ((skb = skb_dequeue(&(this_cpu_ptr(emdiscs)->ppe_backlog_queue))))
65 (*dir) = read_path_from_skb(skb);
66 return skb;
69 /* Main function, must be called in rcu_read_lock context */
70 int process_packet(struct sk_buff *skb, enum path_type dir)
72 int ret;
73 idp_t cont;
74 struct fblock *fb;
76 BUG_ON(!rcu_read_lock_held());
77 pkt:
78 ret = PPE_ERROR;
79 engine_inc_pkts_stats();
80 engine_add_bytes_stats(skb->len);
82 while ((cont = read_next_idp_from_skb(skb))) {
83 fb = __search_fblock(cont);
84 if (unlikely(!fb)) {
85 /* We free the skb since the fb doesn't exist! */
86 kfree_skb(skb);
87 ret = PPE_ERROR;
88 break;
91 ret = fb->netfb_rx(fb, skb, &dir);
92 /* The FB frees the skb or not depending on its binding
93 * and we must not touch it! */
94 put_fblock(fb);
95 engine_inc_fblock_stats();
96 if (ret == PPE_DROPPED) {
97 ret = PPE_DROPPED;
98 break;
101 if ((skb = engine_backlog_test_reduce(&dir)))
102 goto pkt;
103 return ret;
105 EXPORT_SYMBOL_GPL(process_packet);
107 static int engine_procfs(char *page, char **start, off_t offset,
108 int count, int *eof, void *data)
110 unsigned int cpu;
111 off_t len = 0;
113 get_online_cpus();
114 for_each_online_cpu(cpu) {
115 struct engine_iostats *iostats_cpu;
116 struct engine_disc *emdisc_cpu;
117 iostats_cpu = per_cpu_ptr(iostats, cpu);
118 emdisc_cpu = per_cpu_ptr(emdiscs, cpu);
119 len += sprintf(page + len, "CPU%u:\t%llu\t%llu\t%llu\t%u\t%u\n",
120 cpu, iostats_cpu->pkts, iostats_cpu->bytes,
121 iostats_cpu->fblocks,
122 skb_queue_len(&emdisc_cpu->ppe_emerg_queue),
123 skb_queue_len(&emdisc_cpu->ppe_backlog_queue));
125 put_online_cpus();
127 /* FIXME: fits in page? */
128 *eof = 1;
129 return len;
132 int init_engine(void)
134 unsigned int cpu;
135 iostats = alloc_percpu(struct engine_iostats);
136 if (!iostats)
137 return -ENOMEM;
138 get_online_cpus();
139 for_each_online_cpu(cpu) {
140 struct engine_iostats *iostats_cpu;
141 iostats_cpu = per_cpu_ptr(iostats, cpu);
142 iostats_cpu->bytes = 0;
143 iostats_cpu->pkts = 0;
144 iostats_cpu->fblocks = 0;
146 put_online_cpus();
148 emdiscs = alloc_percpu(struct engine_disc);
149 if (!emdiscs)
150 goto err;
151 get_online_cpus();
152 for_each_online_cpu(cpu) {
153 struct engine_disc *emdisc_cpu;
154 emdisc_cpu = per_cpu_ptr(emdiscs, cpu);
155 skb_queue_head_init(&emdisc_cpu->ppe_emerg_queue);
156 skb_queue_head_init(&emdisc_cpu->ppe_backlog_queue);
158 put_online_cpus();
160 engine_proc = create_proc_read_entry("ppe", 0400, lana_proc_dir,
161 engine_procfs, NULL);
162 if (!engine_proc)
163 goto err1;
165 return 0;
166 err1:
167 free_percpu(emdiscs);
168 err:
169 free_percpu(iostats);
170 return -ENOMEM;
172 EXPORT_SYMBOL_GPL(init_engine);
174 void cleanup_engine(void)
176 unsigned int cpu;
177 if (iostats)
178 free_percpu(iostats);
179 if (emdiscs) {
180 get_online_cpus();
181 for_each_online_cpu(cpu) {
182 struct engine_disc *emdisc_cpu;
183 emdisc_cpu = per_cpu_ptr(emdiscs, cpu);
184 skb_queue_purge(&emdisc_cpu->ppe_emerg_queue);
185 skb_queue_purge(&emdisc_cpu->ppe_backlog_queue);
187 put_online_cpus();
188 free_percpu(emdiscs);
190 remove_proc_entry("ppe", lana_proc_dir);
192 EXPORT_SYMBOL_GPL(cleanup_engine);