From 202a26dbc1d0a1cbb3f2ce9001df80eed5f6e27d Mon Sep 17 00:00:00 2001
From: Daniel Borkmann
Date: Fri, 3 Jun 2011 11:47:35 +0200
Subject: [PATCH] added emergency queue, so that packets can be queued up if
 an fblock is unavailable for a short period of time

---
 src/xt_engine.c | 58 +++++++++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 52 insertions(+), 6 deletions(-)

diff --git a/src/xt_engine.c b/src/xt_engine.c
index 9042a71..ecaa632 100644
--- a/src/xt_engine.c
+++ b/src/xt_engine.c
@@ -24,7 +24,12 @@ struct engine_iostats {
 	unsigned long long fblocks;
 } ____cacheline_aligned;
 
+struct engine_disc {
+	struct sk_buff_head ppe_rcv_emerg_queue;
+} ____cacheline_aligned;
+
 static struct engine_iostats __percpu *iostats;
+static struct engine_disc __percpu *emdiscs;
 
 extern struct proc_dir_entry *lana_proc_dir;
 static struct proc_dir_entry *engine_proc;
@@ -44,6 +49,18 @@ static inline void engine_add_bytes_stats(unsigned long bytes)
 	this_cpu_add(iostats->bytes, bytes);
 }
 
+static inline void engine_rcv_emerg_tail(struct sk_buff *skb)
+{
+	skb_queue_tail(&(this_cpu_ptr(emdiscs)->ppe_rcv_emerg_queue), skb);
+}
+
+static inline struct sk_buff *engine_rcv_emerg_test_reduce(void)
+{
+	return skb_dequeue(&(this_cpu_ptr(emdiscs)->ppe_rcv_emerg_queue));
+}
+
+/* TODO: handle emergency queue, or backlog */
+
 /* Main function, must be called in rcu_read_lock context */
 int process_packet(struct sk_buff *skb, enum path_type dir)
 {
@@ -82,10 +99,13 @@ static int engine_procfs(char *page, char **start, off_t offset,
 	get_online_cpus();
 	for_each_online_cpu(cpu) {
 		struct engine_iostats *iostats_cpu;
+		struct engine_disc *emdisc_cpu;
 		iostats_cpu = per_cpu_ptr(iostats, cpu);
-		len += sprintf(page + len, "CPU%u:\t%llu\t%llu\t%llu\n",
+		emdisc_cpu = per_cpu_ptr(emdiscs, cpu);
+		len += sprintf(page + len, "CPU%u:\t%llu\t%llu\t%llu\t%u\n",
 			       cpu, iostats_cpu->pkts, iostats_cpu->bytes,
-			       iostats_cpu->fblocks);
+			       iostats_cpu->fblocks,
+			       skb_queue_len(&emdisc_cpu->ppe_rcv_emerg_queue));
 	}
 	put_online_cpus();
 
@@ -109,21 +129,47 @@ int init_engine(void)
 		iostats_cpu->fblocks = 0;
 	}
 	put_online_cpus();
+
+	emdiscs = alloc_percpu(struct engine_disc);
+	if (!emdiscs)
+		goto err;
+	get_online_cpus();
+	for_each_online_cpu(cpu) {
+		struct engine_disc *emdisc_cpu;
+		emdisc_cpu = per_cpu_ptr(emdiscs, cpu);
+		skb_queue_head_init(&emdisc_cpu->ppe_rcv_emerg_queue);
+	}
+	put_online_cpus();
+
 	engine_proc = create_proc_read_entry("ppe", 0400, lana_proc_dir,
 					     engine_procfs, NULL);
-	if (!engine_proc) {
-		free_percpu(iostats);
-		return -ENOMEM;
-	}
+	if (!engine_proc)
+		goto err1;
 
 	return 0;
+err1:
+	free_percpu(emdiscs);
+err:
+	free_percpu(iostats);
+	return -ENOMEM;
 }
 EXPORT_SYMBOL_GPL(init_engine);
 
 void cleanup_engine(void)
 {
+	unsigned int cpu;
 	if (iostats)
 		free_percpu(iostats);
+	if (emdiscs) {
+		get_online_cpus();
+		for_each_online_cpu(cpu) {
+			struct engine_disc *emdisc_cpu;
+			emdisc_cpu = per_cpu_ptr(emdiscs, cpu);
+			skb_queue_purge(&emdisc_cpu->ppe_rcv_emerg_queue);
+		}
+		put_online_cpus();
+		free_percpu(emdiscs);
+	}
 	remove_proc_entry("ppe", lana_proc_dir);
 }
 EXPORT_SYMBOL_GPL(cleanup_engine);
-- 
2.11.4.GIT
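
Note on the TODO above: the patch introduces the per-CPU emergency queue and its
enqueue/dequeue helpers, but leaves the actual handling ("handle emergency queue,
or backlog") open. Below is a minimal sketch of one way src/xt_engine.c could
retry parked packets. It is not part of the patch: engine_rcv_retry_emerg() is a
hypothetical helper, and it assumes that process_packet() returns a negative
value when the needed fblock is missing and leaves the skb intact in that case,
and that all parked packets share the same path direction; none of this is shown
by the patch itself.

	/*
	 * Sketch only: re-inject packets parked via engine_rcv_emerg_tail()
	 * once the missing fblock may have become available again. Must run
	 * on the CPU that owns the queue, under rcu_read_lock(), like
	 * process_packet() itself.
	 */
	static void engine_rcv_retry_emerg(enum path_type dir)
	{
		struct sk_buff *skb;

		while ((skb = engine_rcv_emerg_test_reduce()) != NULL) {
			if (process_packet(skb, dir) < 0) {
				/* Still no fblock: park the skb again and
				 * stop. Re-queueing at the tail may reorder
				 * packets; a head-requeue helper would
				 * preserve ordering. */
				engine_rcv_emerg_tail(skb);
				break;
			}
		}
	}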