/*
 * Lightweight Autonomic Network Architecture
 *
 * Copyright 2011 Daniel Borkmann <dborkma@tik.ee.ethz.ch>,
 * Swiss federal institute of technology (ETH Zurich)
 */
12 #include <linux/skbuff.h>
13 #include <linux/wait.h>
14 #include <linux/cpu.h>
15 #include <linux/sched.h>
16 #include <linux/u64_stats_sync.h>
17 #include <linux/atomic.h>
18 #include <linux/hrtimer.h>
19 #include <linux/interrupt.h>
22 #include "xt_fblock.h"
24 #define NUM_QUEUES NUM_TYPES
30 struct worker_estats
{
34 struct u64_stats_sync syncp
;
40 struct sk_buff_head queue
;
41 struct worker_estats stats
;
44 struct worker_engine
{
45 struct ppe_queue inqs
[NUM_QUEUES
];
47 struct proc_dir_entry
*proc
;
48 struct task_struct
*thread
;
49 struct tasklet_hrtimer htimer
;
52 volatile int ppe_timer_set
;
53 } ____cacheline_aligned
;
/* Engine pool lifecycle; implemented in the corresponding .c file. */
extern int init_worker_engines(void);
extern void cleanup_worker_engines(void);

/* One worker_engine per CPU; access via per_cpu_ptr(engines, cpu). */
extern struct worker_engine __percpu *engines;

/*
 * Bounds on the wakeup-timer delay computed in wake_engine_cond();
 * the value is passed as the nanosecond argument of ktime_set().
 */
#define WAKE_TIME_MAX (1 << 30)
#define WAKE_TIME_MIN (1 << 15)
62 static inline void wake_engine_cond(unsigned int cpu
)
65 struct worker_engine
*ppe
= per_cpu_ptr(engines
, cpu
);
67 if (cpu
== USERSPACECPU
)
69 #endif /* __MIGRATE */
70 if (ppe
->ppe_timer_set
)
72 ppe
->ppe_timer_set
= 1;
73 n
= (WAKE_TIME_MIN
| ppe
->pkts
) & 0xffffffff;
74 n
= ((n
>> 1) & 0x55555555) | ((n
<< 1) & 0xaaaaaaaa);
75 n
= ((n
>> 2) & 0x33333333) | ((n
<< 2) & 0xcccccccc);
76 n
= ((n
>> 4) & 0x0f0f0f0f) | ((n
<< 4) & 0xf0f0f0f0);
77 n
= ((n
>> 8) & 0x00ff00ff) | ((n
<< 8) & 0xff00ff00);
78 n
= ((n
>> 16) & 0x0000ffff) | ((n
<< 16) & 0xffff0000);
79 n
= n
& (WAKE_TIME_MAX
- 1);
80 tasklet_hrtimer_start(&ppe
->htimer
, ktime_set(0, n
),
84 static inline void enqueue_egress_on_engine(struct sk_buff
*skb
,
87 struct worker_engine
*ppe
= per_cpu_ptr(engines
, cpu
);
89 if (cpu
== USERSPACECPU
)
91 #endif /* __MIGRATE */
92 skb_queue_tail(&ppe
->inqs
[TYPE_EGRESS
].queue
, skb
);
93 wake_engine_cond(cpu
);
96 static inline void enqueue_ingress_on_engine(struct sk_buff
*skb
,
99 struct worker_engine
*ppe
= per_cpu_ptr(engines
, cpu
);
101 if (cpu
== USERSPACECPU
)
103 #endif /* __MIGRATE */
104 skb_queue_tail(&ppe
->inqs
[TYPE_INGRESS
].queue
, skb
);
105 wake_engine_cond(cpu
);
108 static inline void enqueue_on_engine(struct sk_buff
*skb
,
112 struct worker_engine
*ppe
= per_cpu_ptr(engines
, cpu
);
114 if (cpu
== USERSPACECPU
)
116 #endif /* __MIGRATE */
117 skb_queue_tail(&ppe
->inqs
[type
].queue
, skb
);
118 wake_engine_cond(cpu
);
121 #endif /* XT_ENGINE_H */