net/netfilter/nf_conntrack_extend.c
/* Structure dynamic extension infrastructure
 * Copyright (C) 2004 Rusty Russell IBM Corporation
 * Copyright (C) 2007 Netfilter Core Team <coreteam@netfilter.org>
 * Copyright (C) 2007 USAGI/WIDE Project <http://www.linux-ipv6.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <net/netfilter/nf_conntrack_extend.h>
static struct nf_ct_ext_type __rcu *nf_ct_ext_types[NF_CT_EXT_NUM];
static DEFINE_MUTEX(nf_ct_ext_type_mutex);
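
/* Invoke the ->destroy() callback of every extension type still present
 * on @ct.  Each type pointer is fetched under rcu_read_lock() since the
 * type may be unregistered concurrently.
 */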
void __nf_ct_ext_destroy(struct nf_conn *ct)
{
	unsigned int i;
	struct nf_ct_ext_type *t;
	struct nf_ct_ext *ext = ct->ext;

	for (i = 0; i < NF_CT_EXT_NUM; i++) {
		if (!__nf_ct_ext_exist(ext, i))
			continue;

		rcu_read_lock();
		t = rcu_dereference(nf_ct_ext_types[i]);
		/* Here the nf_ct_ext_type might have been unregistered.
		 * I.e., the extension type is responsible for cleaning up
		 * its private area in all conntracks when it is unregistered.
		 */
		if (t && t->destroy)
			t->destroy(ct);
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL(__nf_ct_ext_destroy);
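
/* Allocate a fresh extension area containing extension @id.  The
 * allocation uses the type's precomputed alloc_size, which already
 * reserves room for all NF_CT_EXT_F_PREALLOC types.
 */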
static void *
nf_ct_ext_create(struct nf_ct_ext **ext, enum nf_ct_ext_id id,
		 size_t var_alloc_len, gfp_t gfp)
{
	unsigned int off, len;
	struct nf_ct_ext_type *t;
	size_t alloc_size;

	rcu_read_lock();
	t = rcu_dereference(nf_ct_ext_types[id]);
	BUG_ON(t == NULL);
	off = ALIGN(sizeof(struct nf_ct_ext), t->align);
	len = off + t->len + var_alloc_len;
	alloc_size = t->alloc_size + var_alloc_len;
	rcu_read_unlock();

	*ext = kzalloc(alloc_size, gfp);
	if (!*ext)
		return NULL;

	(*ext)->offset[id] = off;
	(*ext)->len = len;

	return (void *)(*ext) + off;
}
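
/* Attach extension @id to @ct, growing any existing extension area via
 * reallocation.  Returns a pointer to the zeroed private area of the
 * new extension, or NULL if it is already present or allocation fails.
 */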
void *__nf_ct_ext_add_length(struct nf_conn *ct, enum nf_ct_ext_id id,
			     size_t var_alloc_len, gfp_t gfp)
{
	struct nf_ct_ext *old, *new;
	int i, newlen, newoff;
	struct nf_ct_ext_type *t;

	/* Conntrack must not be confirmed to avoid races on reallocation. */
	NF_CT_ASSERT(!nf_ct_is_confirmed(ct));

	old = ct->ext;
	if (!old)
		return nf_ct_ext_create(&ct->ext, id, var_alloc_len, gfp);

	if (__nf_ct_ext_exist(old, id))
		return NULL;

	rcu_read_lock();
	t = rcu_dereference(nf_ct_ext_types[id]);
	BUG_ON(t == NULL);

	newoff = ALIGN(old->len, t->align);
	newlen = newoff + t->len + var_alloc_len;
	rcu_read_unlock();

	new = __krealloc(old, newlen, gfp);
	if (!new)
		return NULL;

	if (new != old) {
		for (i = 0; i < NF_CT_EXT_NUM; i++) {
			if (!__nf_ct_ext_exist(old, i))
				continue;

			rcu_read_lock();
			t = rcu_dereference(nf_ct_ext_types[i]);
			if (t && t->move)
				t->move((void *)new + new->offset[i],
					(void *)old + old->offset[i]);
			rcu_read_unlock();
		}
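		/* Free the old area only after an RCU grace period, in
		 * case a reader is still dereferencing it under
		 * rcu_read_lock().
		 */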
		kfree_rcu(old, rcu);
		ct->ext = new;
	}

	new->offset[id] = newoff;
	new->len = newlen;
	memset((void *)new + newoff, 0, newlen - newoff);
	return (void *)new + newoff;
}
EXPORT_SYMBOL(__nf_ct_ext_add_length);
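
/* Recompute each type's alloc_size so that a single allocation in
 * nf_ct_ext_create() has room for every registered type with
 * NF_CT_EXT_F_PREALLOC set, appended in id order.
 */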
static void update_alloc_size(struct nf_ct_ext_type *type)
{
	int i, j;
	struct nf_ct_ext_type *t1, *t2;
	enum nf_ct_ext_id min = 0, max = NF_CT_EXT_NUM - 1;

	/* unnecessary to update all types */
	if ((type->flags & NF_CT_EXT_F_PREALLOC) == 0) {
		min = type->id;
		max = type->id;
	}

	/* This assumes that extended areas in conntrack for the types
	 * whose NF_CT_EXT_F_PREALLOC bit is set are allocated in order.
	 */
	for (i = min; i <= max; i++) {
		t1 = rcu_dereference_protected(nf_ct_ext_types[i],
				lockdep_is_held(&nf_ct_ext_type_mutex));
		if (!t1)
			continue;

		t1->alloc_size = ALIGN(sizeof(struct nf_ct_ext), t1->align) +
				 t1->len;
		for (j = 0; j < NF_CT_EXT_NUM; j++) {
			t2 = rcu_dereference_protected(nf_ct_ext_types[j],
				lockdep_is_held(&nf_ct_ext_type_mutex));
			if (t2 == NULL || t2 == t1 ||
			    (t2->flags & NF_CT_EXT_F_PREALLOC) == 0)
				continue;

			t1->alloc_size = ALIGN(t1->alloc_size, t2->align)
					 + t2->len;
		}
	}
}

/* This MUST be called in process context. */
int nf_ct_extend_register(struct nf_ct_ext_type *type)
{
	int ret = 0;

	mutex_lock(&nf_ct_ext_type_mutex);
	if (nf_ct_ext_types[type->id]) {
		ret = -EBUSY;
		goto out;
	}

	/* This ensures that nf_ct_ext_create() can allocate enough area
	 * before updating alloc_size.
	 */
	type->alloc_size = ALIGN(sizeof(struct nf_ct_ext), type->align)
			   + type->len;
	rcu_assign_pointer(nf_ct_ext_types[type->id], type);
	update_alloc_size(type);
out:
	mutex_unlock(&nf_ct_ext_type_mutex);
	return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_extend_register);
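
/* The rcu_barrier() below waits for any pending call_rcu() callbacks
 * (e.g. from kfree_rcu() above) to finish, so it is safe for the
 * caller, typically a module, to go away afterwards.
 */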
/* This MUST be called in process context. */
void nf_ct_extend_unregister(struct nf_ct_ext_type *type)
{
	mutex_lock(&nf_ct_ext_type_mutex);
	RCU_INIT_POINTER(nf_ct_ext_types[type->id], NULL);
	update_alloc_size(type);
	mutex_unlock(&nf_ct_ext_type_mutex);
	rcu_barrier(); /* Wait for completion of call_rcu()'s */
}
EXPORT_SYMBOL_GPL(nf_ct_extend_unregister);
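
/* A minimal usage sketch (hypothetical names: NF_CT_EXT_MY and
 * struct my_ext_data are illustrative, not part of this file):
 *
 *	static struct nf_ct_ext_type my_extend __read_mostly = {
 *		.len	= sizeof(struct my_ext_data),
 *		.align	= __alignof__(struct my_ext_data),
 *		.id	= NF_CT_EXT_MY,
 *	};
 *
 *	static int __init my_ext_init(void)
 *	{
 *		return nf_ct_extend_register(&my_extend);
 *	}
 *
 *	static void __exit my_ext_exit(void)
 *	{
 *		nf_ct_extend_unregister(&my_extend);
 *	}
 *
 * Per-conntrack data is then attached with nf_ct_ext_add() and looked
 * up with nf_ct_ext_find() from nf_conntrack_extend.h.
 */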