/*
 *  net/dccp/ccid.c
 *
 *  An implementation of the DCCP protocol
 *  Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *  CCID infrastructure
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 */

#include "ccid.h"
static u8 builtin_ccids[] = {
	DCCPC_CCID2,		/* CCID2 is supported by default */
#if defined(CONFIG_IP_DCCP_CCID3) || defined(CONFIG_IP_DCCP_CCID3_MODULE)
	DCCPC_CCID3,
#endif
};

static struct ccid_operations *ccids[CCID_MAX];
#if defined(CONFIG_SMP) || defined(CONFIG_PREEMPT)
static atomic_t ccids_lockct = ATOMIC_INIT(0);
static DEFINE_SPINLOCK(ccids_lock);
/*
 * The strategy is: modifications to the ccids vector are short, do not
 * sleep, and are very rare; read access, however, should be free of any
 * exclusive locks.
 */
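/*
 * In effect this is a hand-rolled reader/writer scheme: readers announce
 * themselves by bumping ccids_lockct (with a full memory barrier) and then
 * merely wait for any in-flight writer to leave its critical section, while
 * a writer takes ccids_lock and spins (yielding the CPU) until the reader
 * count drains to zero.
 */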
static void ccids_write_lock(void)
{
	spin_lock(&ccids_lock);
	while (atomic_read(&ccids_lockct) != 0) {
		spin_unlock(&ccids_lock);
		yield();
		spin_lock(&ccids_lock);
	}
}

static inline void ccids_write_unlock(void)
{
	spin_unlock(&ccids_lock);
}

static inline void ccids_read_lock(void)
{
	atomic_inc(&ccids_lockct);
	smp_mb__after_atomic_inc();
	spin_unlock_wait(&ccids_lock);
}

static inline void ccids_read_unlock(void)
{
	atomic_dec(&ccids_lockct);
}

#else
#define ccids_write_lock()   do { } while (0)
#define ccids_write_unlock() do { } while (0)
#define ccids_read_lock()    do { } while (0)
#define ccids_read_unlock()  do { } while (0)
#endif
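/*
 * Each CCID gets two slab caches, one per half-connection direction. A cache
 * object is a struct ccid header followed immediately by obj_size bytes of
 * CCID-private state, which is why ccid_new() below addresses the private
 * area as (ccid + 1).
 */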
static struct kmem_cache *ccid_kmem_cache_create(int obj_size, const char *fmt, ...)
{
	struct kmem_cache *slab;
	char slab_name_fmt[32], *slab_name;
	va_list args;

	va_start(args, fmt);
	vsnprintf(slab_name_fmt, sizeof(slab_name_fmt), fmt, args);
	va_end(args);

	slab_name = kstrdup(slab_name_fmt, GFP_KERNEL);
	if (slab_name == NULL)
		return NULL;
	slab = kmem_cache_create(slab_name, sizeof(struct ccid) + obj_size, 0,
				 SLAB_HWCACHE_ALIGN, NULL);
	if (slab == NULL)
		kfree(slab_name);
	return slab;
}
static void ccid_kmem_cache_destroy(struct kmem_cache *slab)
{
	if (slab != NULL) {
		/* the name was kstrdup()'ed in ccid_kmem_cache_create() and
		 * must be freed after the cache itself is destroyed */
		const char *name = kmem_cache_name(slab);

		kmem_cache_destroy(slab);
		kfree(name);
	}
}
/* check that up to @array_len members in @ccid_array are supported */
bool ccid_support_check(u8 const *ccid_array, u8 array_len)
{
	u8 i, j, found;

	for (i = 0, found = 0; i < array_len; i++, found = 0) {
		for (j = 0; !found && j < ARRAY_SIZE(builtin_ccids); j++)
			found = (ccid_array[i] == builtin_ccids[j]);
		if (!found)
			return false;
	}
	return true;
}
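/*
 * For example, with only CCID2 built in, the array { DCCPC_CCID2 } passes
 * the check above, while { DCCPC_CCID2, DCCPC_CCID3 } fails as soon as the
 * unsupported entry is reached.
 */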
/**
 * ccid_get_builtin_ccids  -  Provide copy of `builtin' CCID array
 * @ccid_array: pointer to copy into
 * @array_len: value to return length into
 * This function allocates memory - caller must see that it is freed after use.
 */
int ccid_get_builtin_ccids(u8 **ccid_array, u8 *array_len)
{
	*ccid_array = kmemdup(builtin_ccids, sizeof(builtin_ccids), gfp_any());
	if (*ccid_array == NULL)
		return -ENOBUFS;
	*array_len = ARRAY_SIZE(builtin_ccids);
	return 0;
}
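/*
 * Copies the list of built-in CCIDs to userspace; the likely caller is the
 * DCCP getsockopt() path (DCCP_SOCKOPT_AVAILABLE_CCIDS in proto.c), though
 * that is outside this file.
 */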
int ccid_getsockopt_builtin_ccids(struct sock *sk, int len,
				  char __user *optval, int __user *optlen)
{
	if (len < sizeof(builtin_ccids))
		return -EINVAL;

	if (put_user(sizeof(builtin_ccids), optlen) ||
	    copy_to_user(optval, builtin_ccids, sizeof(builtin_ccids)))
		return -EFAULT;
	return 0;
}
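/*
 * ccid_register() creates the per-direction slab caches for a CCID and
 * publishes its operations in the ccids[] table; it fails with -EEXIST if
 * the slot for this CCID id is already taken.
 */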
int ccid_register(struct ccid_operations *ccid_ops)
{
	int err = -ENOBUFS;

	ccid_ops->ccid_hc_rx_slab =
			ccid_kmem_cache_create(ccid_ops->ccid_hc_rx_obj_size,
					       "ccid%u_hc_rx_sock",
					       ccid_ops->ccid_id);
	if (ccid_ops->ccid_hc_rx_slab == NULL)
		goto out;

	ccid_ops->ccid_hc_tx_slab =
			ccid_kmem_cache_create(ccid_ops->ccid_hc_tx_obj_size,
					       "ccid%u_hc_tx_sock",
					       ccid_ops->ccid_id);
	if (ccid_ops->ccid_hc_tx_slab == NULL)
		goto out_free_rx_slab;

	ccids_write_lock();
	err = -EEXIST;
	if (ccids[ccid_ops->ccid_id] == NULL) {
		ccids[ccid_ops->ccid_id] = ccid_ops;
		err = 0;
	}
	ccids_write_unlock();
	if (err != 0)
		goto out_free_tx_slab;

	pr_info("CCID: Registered CCID %d (%s)\n",
		ccid_ops->ccid_id, ccid_ops->ccid_name);
out:
	return err;
out_free_tx_slab:
	ccid_kmem_cache_destroy(ccid_ops->ccid_hc_tx_slab);
	ccid_ops->ccid_hc_tx_slab = NULL;
	goto out;
out_free_rx_slab:
	ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab);
	ccid_ops->ccid_hc_rx_slab = NULL;
	goto out;
}
EXPORT_SYMBOL_GPL(ccid_register);
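/*
 * Illustrative sketch, not part of this file: a loadable CCID module is
 * expected to pair these calls in its init/exit hooks, e.g. for a
 * hypothetical "ccid_foo" with its own struct ccid_operations:
 *
 *	static int __init ccid_foo_module_init(void)
 *	{
 *		return ccid_register(&ccid_foo);
 *	}
 *
 *	static void __exit ccid_foo_module_exit(void)
 *	{
 *		ccid_unregister(&ccid_foo);
 *	}
 */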
int ccid_unregister(struct ccid_operations *ccid_ops)
{
	ccids_write_lock();
	ccids[ccid_ops->ccid_id] = NULL;
	ccids_write_unlock();

	ccid_kmem_cache_destroy(ccid_ops->ccid_hc_tx_slab);
	ccid_ops->ccid_hc_tx_slab = NULL;
	ccid_kmem_cache_destroy(ccid_ops->ccid_hc_rx_slab);
	ccid_ops->ccid_hc_rx_slab = NULL;

	pr_info("CCID: Unregistered CCID %d (%s)\n",
		ccid_ops->ccid_id, ccid_ops->ccid_name);
	return 0;
}
EXPORT_SYMBOL_GPL(ccid_unregister);
struct ccid *ccid_new(unsigned char id, struct sock *sk, int rx, gfp_t gfp)
{
	struct ccid_operations *ccid_ops;
	struct ccid *ccid = NULL;

	ccids_read_lock();
#ifdef CONFIG_MODULES
	if (ccids[id] == NULL) {
		/* We only try to load if in process context */
		ccids_read_unlock();
		if (gfp & GFP_ATOMIC)
			goto out;
		request_module("net-dccp-ccid-%d", id);
		ccids_read_lock();
	}
#endif
	ccid_ops = ccids[id];
	if (ccid_ops == NULL)
		goto out_unlock;

	if (!try_module_get(ccid_ops->ccid_owner))
		goto out_unlock;

	ccids_read_unlock();

	ccid = kmem_cache_alloc(rx ? ccid_ops->ccid_hc_rx_slab :
				     ccid_ops->ccid_hc_tx_slab, gfp);
	if (ccid == NULL)
		goto out_module_put;
	ccid->ccid_ops = ccid_ops;
	if (rx) {
		memset(ccid + 1, 0, ccid_ops->ccid_hc_rx_obj_size);
		if (ccid->ccid_ops->ccid_hc_rx_init != NULL &&
		    ccid->ccid_ops->ccid_hc_rx_init(ccid, sk) != 0)
			goto out_free_ccid;
	} else {
		memset(ccid + 1, 0, ccid_ops->ccid_hc_tx_obj_size);
		if (ccid->ccid_ops->ccid_hc_tx_init != NULL &&
		    ccid->ccid_ops->ccid_hc_tx_init(ccid, sk) != 0)
			goto out_free_ccid;
	}
out:
	return ccid;
out_unlock:
	ccids_read_unlock();
	goto out;
out_free_ccid:
	kmem_cache_free(rx ? ccid_ops->ccid_hc_rx_slab :
			ccid_ops->ccid_hc_tx_slab, ccid);
	ccid = NULL;
out_module_put:
	module_put(ccid_ops->ccid_owner);
	goto out;
}
EXPORT_SYMBOL_GPL(ccid_new);
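/*
 * Sketch of the expected use (the real call sites live in the DCCP core,
 * not in this file): each socket instantiates one CCID per half-connection,
 * e.g.
 *
 *	dp->dccps_hc_rx_ccid = ccid_new(rx_id, sk, 1, gfp);
 *	dp->dccps_hc_tx_ccid = ccid_new(tx_id, sk, 0, gfp);
 *
 * where rx_id/tx_id stand in for the negotiated CCID numbers; the instances
 * are later released via ccid_hc_rx_delete()/ccid_hc_tx_delete().
 */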
static void ccid_delete(struct ccid *ccid, struct sock *sk, int rx)
{
	struct ccid_operations *ccid_ops;

	if (ccid == NULL)
		return;

	ccid_ops = ccid->ccid_ops;
	if (rx) {
		if (ccid_ops->ccid_hc_rx_exit != NULL)
			ccid_ops->ccid_hc_rx_exit(sk);
		kmem_cache_free(ccid_ops->ccid_hc_rx_slab, ccid);
	} else {
		if (ccid_ops->ccid_hc_tx_exit != NULL)
			ccid_ops->ccid_hc_tx_exit(sk);
		kmem_cache_free(ccid_ops->ccid_hc_tx_slab, ccid);
	}
	ccids_read_lock();
	/*
	 * Drop the module reference taken in ccid_new(), but only while the
	 * CCID is still registered.
	 */
	if (ccids[ccid_ops->ccid_id] != NULL)
		module_put(ccid_ops->ccid_owner);
	ccids_read_unlock();
}
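/*
 * Thin public wrappers around ccid_delete(), one per half-connection
 * direction.
 */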
void ccid_hc_rx_delete(struct ccid *ccid, struct sock *sk)
{
	ccid_delete(ccid, sk, 1);
}

EXPORT_SYMBOL_GPL(ccid_hc_rx_delete);
void ccid_hc_tx_delete(struct ccid *ccid, struct sock *sk)
{
	ccid_delete(ccid, sk, 0);
}

EXPORT_SYMBOL_GPL(ccid_hc_tx_delete);