update on engines
[ana-net.git] / src / xt_engine.h
/*
 * Lightweight Autonomic Network Architecture
 *
 * Copyright 2011 Daniel Borkmann <dborkma@tik.ee.ethz.ch>,
 * Swiss federal institute of technology (ETH Zurich)
 * Subject to the GPL.
 */
#ifndef XT_ENGINE_H
#define XT_ENGINE_H

#include <linux/skbuff.h>
#include <linux/wait.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/u64_stats_sync.h>
#include <linux/atomic.h>
#include <linux/hrtimer.h>
#include <linux/interrupt.h>

#include "xt_conf.h"
#include "xt_fblock.h"

/* One input queue per path type (see enum path_type). */
#define NUM_QUEUES NUM_TYPES

/* Return codes of the packet processing engine (PPE). */
#define PPE_SUCCESS 0
#define PPE_DROPPED 1
#define PPE_ERROR 2
/* Per-queue packet statistics of a worker engine. */
struct worker_estats {
        u64 packets;
        u64 bytes;
        u64 dropped;
        struct u64_stats_sync syncp;
        u32 errors;
};

/* One input queue of a worker engine, bound to a single path type. */
struct ppe_queue {
        enum path_type type;
        struct sk_buff_head queue;
        struct worker_estats stats;
};

/* Per-CPU packet processing engine. */
struct worker_engine {
        struct ppe_queue inqs[NUM_QUEUES];
        ktime_t timef, timel;
        struct proc_dir_entry *proc;
        struct task_struct *thread;
        struct tasklet_hrtimer htimer;
        unsigned int cpu;
        unsigned int pkts;
        volatile int ppe_timer_set;
} ____cacheline_aligned;
extern int init_worker_engines(void);
extern void cleanup_worker_engines(void);
extern struct worker_engine __percpu *engines;

/* Bounds (in ns) for the deferred wakeup delay computed in wake_engine_cond(). */
#define WAKE_TIME_MAX (1 << 30)
#define WAKE_TIME_MIN (1 << 15)
static inline void wake_engine_cond(unsigned int cpu)
{
        unsigned long n = 0;
        struct worker_engine *ppe = per_cpu_ptr(engines, cpu);
#ifdef __MIGRATE
        /* Never wake an engine on the CPU reserved for user space. */
        if (cpu == USERSPACECPU)
                return;
#endif /* __MIGRATE */
        if (ppe->ppe_timer_set)
                return;
        ppe->ppe_timer_set = 1;
        /*
         * Derive the wakeup delay from the engine's packet counter:
         * OR in WAKE_TIME_MIN so the value is never zero, bit-reverse
         * the 32-bit word, then mask it below WAKE_TIME_MAX.
         */
        n = (WAKE_TIME_MIN | ppe->pkts) & 0xffffffff;
        n = ((n >> 1) & 0x55555555) | ((n << 1) & 0xaaaaaaaa);
        n = ((n >> 2) & 0x33333333) | ((n << 2) & 0xcccccccc);
        n = ((n >> 4) & 0x0f0f0f0f) | ((n << 4) & 0xf0f0f0f0);
        n = ((n >> 8) & 0x00ff00ff) | ((n << 8) & 0xff00ff00);
        n = ((n >> 16) & 0x0000ffff) | ((n << 16) & 0xffff0000);
        n = n & (WAKE_TIME_MAX - 1);
        /* Arm the tasklet hrtimer; the engine runs after n nanoseconds. */
        tasklet_hrtimer_start(&ppe->htimer, ktime_set(0, n),
                              HRTIMER_MODE_REL);
}
static inline void enqueue_egress_on_engine(struct sk_buff *skb,
                                            unsigned int cpu)
{
        struct worker_engine *ppe = per_cpu_ptr(engines, cpu);
#ifdef __MIGRATE
        if (cpu == USERSPACECPU)
                return;
#endif /* __MIGRATE */
        /* Queue on the egress path of this CPU's engine and kick it. */
        skb_queue_tail(&ppe->inqs[TYPE_EGRESS].queue, skb);
        wake_engine_cond(cpu);
}
static inline void enqueue_ingress_on_engine(struct sk_buff *skb,
                                             unsigned int cpu)
{
        struct worker_engine *ppe = per_cpu_ptr(engines, cpu);
#ifdef __MIGRATE
        if (cpu == USERSPACECPU)
                return;
#endif /* __MIGRATE */
        /* Queue on the ingress path of this CPU's engine and kick it. */
        skb_queue_tail(&ppe->inqs[TYPE_INGRESS].queue, skb);
        wake_engine_cond(cpu);
}
static inline void enqueue_on_engine(struct sk_buff *skb,
                                     unsigned int cpu,
                                     enum path_type type)
{
        struct worker_engine *ppe = per_cpu_ptr(engines, cpu);
#ifdef __MIGRATE
        if (cpu == USERSPACECPU)
                return;
#endif /* __MIGRATE */
        /* Generic variant: queue on the path selected by @type. */
        skb_queue_tail(&ppe->inqs[type].queue, skb);
        wake_engine_cond(cpu);
}
#endif /* XT_ENGINE_H */
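
For illustration, a minimal usage sketch, not part of ana-net: the caller name example_hand_off_ingress and its calling context are assumptions, and it relies only on the enqueue_ingress_on_engine() helper declared above plus the engines having been brought up via init_worker_engines(). It hands an ingress skb to the packet processing engine of the CPU it is currently running on.

#include <linux/skbuff.h>
#include <linux/smp.h>

#include "xt_engine.h"

/* Hypothetical caller: queue an ingress buffer on the local CPU's engine. */
static void example_hand_off_ingress(struct sk_buff *skb)
{
        unsigned int cpu = get_cpu();   /* current CPU, preemption disabled */

        /*
         * Tail-queue the buffer on this CPU's ingress queue; the helper
         * arms the engine's hrtimer so the worker picks it up shortly.
         */
        enqueue_ingress_on_engine(skb, cpu);

        put_cpu();
}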