/* key.c: basic authentication token and access key management
 *
 * Copyright (C) 2004-6 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/workqueue.h>
#include <linux/random.h>
#include <linux/err.h>
#include "internal.h"
static struct kmem_cache *key_jar;
struct rb_root key_serial_tree; /* tree of keys indexed by serial */
DEFINE_SPINLOCK(key_serial_lock);

struct rb_root key_user_tree; /* tree of quota records indexed by UID */
DEFINE_SPINLOCK(key_user_lock);

static LIST_HEAD(key_types_list);
static DECLARE_RWSEM(key_types_sem);

static void key_cleanup(struct work_struct *work);
static DECLARE_WORK(key_cleanup_task, key_cleanup);

/* we serialise key instantiation and link */
DECLARE_RWSEM(key_construction_sem);
/* any key whose type gets unregistered will be re-typed to this */
static struct key_type key_type_dead = {
        .name   = "dead",
};
#ifdef KEY_DEBUGGING
void __key_check(const struct key *key)
{
        printk("__key_check: key %p {%08x} should be {%08x}\n",
               key, key->magic, KEY_DEBUG_MAGIC);
        BUG();
}
#endif
/*****************************************************************************/
/*
 * get the key quota record for a user, allocating a new record if one doesn't
 * already exist
 */
struct key_user *key_user_lookup(uid_t uid)
{
        struct key_user *candidate = NULL, *user;
        struct rb_node *parent = NULL;
        struct rb_node **p;

try_again:
        p = &key_user_tree.rb_node;
        spin_lock(&key_user_lock);

        /* search the tree for a user record with a matching UID */
        while (*p) {
                parent = *p;
                user = rb_entry(parent, struct key_user, node);

                if (uid < user->uid)
                        p = &(*p)->rb_left;
                else if (uid > user->uid)
                        p = &(*p)->rb_right;
                else
                        goto found;
        }

        /* if we get here, we failed to find a match in the tree */
        if (!candidate) {
                /* allocate a candidate user record if we don't already have
                 * one */
                spin_unlock(&key_user_lock);

                user = NULL;
                candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
                if (unlikely(!candidate))
                        goto out;

                /* the allocation may have scheduled, so we need to repeat the
                 * search lest someone else added the record whilst we were
                 * asleep */
                goto try_again;
        }

        /* if we get here, then the user record still hadn't appeared on the
         * second pass - so we use the candidate record */
        atomic_set(&candidate->usage, 1);
        atomic_set(&candidate->nkeys, 0);
        atomic_set(&candidate->nikeys, 0);
        candidate->uid = uid;
        candidate->qnkeys = 0;
        candidate->qnbytes = 0;
        spin_lock_init(&candidate->lock);
        INIT_LIST_HEAD(&candidate->consq);

        rb_link_node(&candidate->node, parent, p);
        rb_insert_color(&candidate->node, &key_user_tree);
        spin_unlock(&key_user_lock);
        user = candidate;
        goto out;

        /* okay - we found a user record for this UID */
found:
        atomic_inc(&user->usage);
        spin_unlock(&key_user_lock);
        kfree(candidate);
out:
        return user;

} /* end key_user_lookup() */
/*****************************************************************************/
/*
 * dispose of a user structure
 */
void key_user_put(struct key_user *user)
{
        if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
                rb_erase(&user->node, &key_user_tree);
                spin_unlock(&key_user_lock);

                kfree(user);
        }

} /* end key_user_put() */
/*****************************************************************************/
/*
 * insert a key with a fixed serial number
 */
static void __init __key_insert_serial(struct key *key)
{
        struct rb_node *parent, **p;
        struct key *xkey;

        parent = NULL;
        p = &key_serial_tree.rb_node;

        while (*p) {
                parent = *p;
                xkey = rb_entry(parent, struct key, serial_node);

                if (key->serial < xkey->serial)
                        p = &(*p)->rb_left;
                else if (key->serial > xkey->serial)
                        p = &(*p)->rb_right;
                else
                        BUG();
        }

        /* we've found a suitable hole - arrange for this key to occupy it */
        rb_link_node(&key->serial_node, parent, p);
        rb_insert_color(&key->serial_node, &key_serial_tree);

} /* end __key_insert_serial() */
/*****************************************************************************/
/*
 * assign a key the next unique serial number
 * - these are assigned randomly to avoid security issues through covert
 *   channel problems
 */
static inline void key_alloc_serial(struct key *key)
{
        struct rb_node *parent, **p;
        struct key *xkey;

        /* propose a random serial number and look for a hole for it in the
         * serial number tree */
        do {
                get_random_bytes(&key->serial, sizeof(key->serial));

                key->serial >>= 1; /* negative numbers are not permitted */
        } while (key->serial < 3);

        spin_lock(&key_serial_lock);

        parent = NULL;
        p = &key_serial_tree.rb_node;

        while (*p) {
                parent = *p;
                xkey = rb_entry(parent, struct key, serial_node);

                if (key->serial < xkey->serial)
                        p = &(*p)->rb_left;
                else if (key->serial > xkey->serial)
                        p = &(*p)->rb_right;
                else
                        goto serial_exists;
        }
        goto insert_here;

        /* we found a key with the proposed serial number - walk the tree from
         * that point looking for the next unused serial number */
serial_exists:
        for (;;) {
                key->serial++;
                if (key->serial < 2)
                        key->serial = 2;

                if (!rb_parent(parent))
                        p = &key_serial_tree.rb_node;
                else if (rb_parent(parent)->rb_left == parent)
                        p = &(rb_parent(parent)->rb_left);
                else
                        p = &(rb_parent(parent)->rb_right);

                parent = rb_next(parent);
                if (!parent)
                        break;

                xkey = rb_entry(parent, struct key, serial_node);
                if (key->serial < xkey->serial)
                        goto insert_here;
        }

        /* we've found a suitable hole - arrange for this key to occupy it */
insert_here:
        rb_link_node(&key->serial_node, parent, p);
        rb_insert_color(&key->serial_node, &key_serial_tree);

        spin_unlock(&key_serial_lock);

} /* end key_alloc_serial() */
/*****************************************************************************/
/*
 * allocate a key of the specified type
 * - update the user's quota to reflect the existence of the key
 * - called from a key-type operation with key_types_sem read-locked by
 *   key_create_or_update()
 *   - this prevents unregistration of the key type
 * - upon return the key is as yet uninstantiated; the caller needs to either
 *   instantiate the key or discard it before returning
 */
struct key *key_alloc(struct key_type *type, const char *desc,
                      uid_t uid, gid_t gid, struct task_struct *ctx,
                      key_perm_t perm, unsigned long flags)
{
        struct key_user *user = NULL;
        struct key *key;
        size_t desclen, quotalen;
        int ret;

        key = ERR_PTR(-EINVAL);
        if (!desc || !*desc)
                goto error;

        desclen = strlen(desc) + 1;
        quotalen = desclen + type->def_datalen;

        /* get hold of the key tracking for this user */
        user = key_user_lookup(uid);
        if (!user)
                goto no_memory_1;

        /* check that the user's quota permits allocation of another key and
         * its description */
        if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
                spin_lock(&user->lock);
                if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
                        if (user->qnkeys + 1 >= KEYQUOTA_MAX_KEYS ||
                            user->qnbytes + quotalen >= KEYQUOTA_MAX_BYTES)
                                goto no_quota;
                }

                user->qnkeys++;
                user->qnbytes += quotalen;
                spin_unlock(&user->lock);
        }

        /* allocate and initialise the key and its description */
        key = kmem_cache_alloc(key_jar, GFP_KERNEL);
        if (!key)
                goto no_memory_2;

        if (desc) {
                key->description = kmemdup(desc, desclen, GFP_KERNEL);
                if (!key->description)
                        goto no_memory_3;
        }

        atomic_set(&key->usage, 1);
        init_rwsem(&key->sem);
        key->type = type;
        key->user = user;
        key->quotalen = quotalen;
        key->datalen = type->def_datalen;
        key->uid = uid;
        key->gid = gid;
        key->perm = perm;
        key->flags = 0;
        key->expiry = 0;
        key->payload.data = NULL;
        key->security = NULL;

        if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
                key->flags |= 1 << KEY_FLAG_IN_QUOTA;

        memset(&key->type_data, 0, sizeof(key->type_data));

#ifdef KEY_DEBUGGING
        key->magic = KEY_DEBUG_MAGIC;
#endif

        /* let the security module know about the key */
        ret = security_key_alloc(key, ctx, flags);
        if (ret < 0)
                goto security_error;

        /* publish the key by giving it a serial number */
        atomic_inc(&user->nkeys);
        key_alloc_serial(key);

error:
        return key;

security_error:
        kfree(key->description);
        kmem_cache_free(key_jar, key);
        if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
                spin_lock(&user->lock);
                user->qnkeys--;
                user->qnbytes -= quotalen;
                spin_unlock(&user->lock);
        }
        key_user_put(user);
        key = ERR_PTR(ret);
        goto error;

no_memory_3:
        kmem_cache_free(key_jar, key);
no_memory_2:
        if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
                spin_lock(&user->lock);
                user->qnkeys--;
                user->qnbytes -= quotalen;
                spin_unlock(&user->lock);
        }
        key_user_put(user);
no_memory_1:
        key = ERR_PTR(-ENOMEM);
        goto error;

no_quota:
        spin_unlock(&user->lock);
        key_user_put(user);
        key = ERR_PTR(-EDQUOT);
        goto error;

} /* end key_alloc() */

EXPORT_SYMBOL(key_alloc);
/*****************************************************************************/
/*
 * reserve an amount of quota for the key's payload
 */
int key_payload_reserve(struct key *key, size_t datalen)
{
        int delta = (int) datalen - key->datalen;
        int ret = 0;

        key_check(key);

        /* contemplate the quota adjustment */
        if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
                spin_lock(&key->user->lock);

                if (delta > 0 &&
                    key->user->qnbytes + delta > KEYQUOTA_MAX_BYTES) {
                        ret = -EDQUOT;
                }
                else {
                        key->user->qnbytes += delta;
                        key->quotalen += delta;
                }

                spin_unlock(&key->user->lock);
        }

        /* change the recorded data length if that didn't generate an error */
        if (ret == 0)
                key->datalen = datalen;

        return ret;

} /* end key_payload_reserve() */

EXPORT_SYMBOL(key_payload_reserve);
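
/*
 * Illustrative sketch only, not part of the original file: a key type's
 * ->update() op would typically call key_payload_reserve() to charge the new
 * payload size against the owner's quota before swapping in the new data.
 * example_update() and its flat payload layout are hypothetical, and a real
 * type with lockless readers would need RCU rather than a plain swap.
 */
static int example_update(struct key *key, const void *data, size_t datalen)
{
        void *copy;
        int ret;

        /* build the replacement payload first so that failure leaves the key
         * exactly as it was */
        copy = kmalloc(datalen, GFP_KERNEL);
        if (!copy)
                return -ENOMEM;
        memcpy(copy, data, datalen);

        /* charge the new payload size to the key owner's quota */
        ret = key_payload_reserve(key, datalen);
        if (ret < 0) {
                kfree(copy);
                return ret;
        }

        /* key_update() holds key->sem writelocked around ->update(), so a
         * plain swap is sufficient for this simple type */
        kfree(key->payload.data);
        key->payload.data = copy;
        return 0;
}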
/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 * - called with the target keyring's semaphore writelocked
 */
static int __key_instantiate_and_link(struct key *key,
                                      const void *data,
                                      size_t datalen,
                                      struct key *keyring,
                                      struct key *instkey)
{
        int ret, awaken;

        key_check(key);
        key_check(keyring);

        awaken = 0;
        ret = -EBUSY;

        down_write(&key_construction_sem);

        /* can't instantiate twice */
        if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
                /* instantiate the key */
                ret = key->type->instantiate(key, data, datalen);

                if (ret == 0) {
                        /* mark the key as being instantiated */
                        atomic_inc(&key->user->nikeys);
                        set_bit(KEY_FLAG_INSTANTIATED, &key->flags);

                        if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
                                awaken = 1;

                        /* and link it into the destination keyring */
                        if (keyring)
                                ret = __key_link(keyring, key);

                        /* disable the authorisation key */
                        if (instkey)
                                key_revoke(instkey);
                }
        }

        up_write(&key_construction_sem);

        /* wake up anyone waiting for a key to be constructed */
        if (awaken)
                wake_up_all(&request_key_conswq);

        return ret;

} /* end __key_instantiate_and_link() */
/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 */
int key_instantiate_and_link(struct key *key,
                             const void *data,
                             size_t datalen,
                             struct key *keyring,
                             struct key *instkey)
{
        int ret;

        if (keyring)
                down_write(&keyring->sem);

        ret = __key_instantiate_and_link(key, data, datalen, keyring, instkey);

        if (keyring)
                up_write(&keyring->sem);

        return ret;

} /* end key_instantiate_and_link() */

EXPORT_SYMBOL(key_instantiate_and_link);
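
/*
 * Illustrative sketch only, not part of the original file: a typical
 * in-kernel caller pairs key_alloc() with key_instantiate_and_link() and
 * discards the half-built key with key_put() on failure.
 * example_add_user_key(), the description string and the permission mask are
 * arbitrary choices made for the example.
 */
static struct key *example_add_user_key(struct key *dest_keyring,
                                        const void *payload, size_t plen)
{
        struct key *key;
        int ret;

        /* allocate an uninstantiated key owned by the current fs ids and
         * counted against that user's quota */
        key = key_alloc(&key_type_user, "example:demo",
                        current->fsuid, current->fsgid, current,
                        KEY_POS_ALL | KEY_USR_VIEW | KEY_USR_READ,
                        KEY_ALLOC_IN_QUOTA);
        if (IS_ERR(key))
                return key;

        /* attach the payload and, if a keyring was given, link the key
         * into it atomically */
        ret = key_instantiate_and_link(key, payload, plen, dest_keyring, NULL);
        if (ret < 0) {
                key_put(key);           /* discard the uninstantiated key */
                return ERR_PTR(ret);
        }

        /* the caller now owns a reference and must key_put() it when done */
        return key;
}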
/*****************************************************************************/
/*
 * negatively instantiate a key and link it into the target keyring atomically
 */
int key_negate_and_link(struct key *key,
                        unsigned timeout,
                        struct key *keyring,
                        struct key *instkey)
{
        struct timespec now;
        int ret, awaken;

        key_check(key);
        key_check(keyring);

        awaken = 0;
        ret = -EBUSY;

        if (keyring)
                down_write(&keyring->sem);

        down_write(&key_construction_sem);

        /* can't instantiate twice */
        if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
                /* mark the key as being negatively instantiated */
                atomic_inc(&key->user->nikeys);
                set_bit(KEY_FLAG_NEGATIVE, &key->flags);
                set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
                now = current_kernel_time();
                key->expiry = now.tv_sec + timeout;

                if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
                        awaken = 1;

                ret = 0;

                /* and link it into the destination keyring */
                if (keyring)
                        ret = __key_link(keyring, key);

                /* disable the authorisation key */
                if (instkey)
                        key_revoke(instkey);
        }

        up_write(&key_construction_sem);

        if (keyring)
                up_write(&keyring->sem);

        /* wake up anyone waiting for a key to be constructed */
        if (awaken)
                wake_up_all(&request_key_conswq);

        return ret;

} /* end key_negate_and_link() */

EXPORT_SYMBOL(key_negate_and_link);
/*****************************************************************************/
/*
 * do cleaning up in process context so that we don't have to disable
 * interrupts all over the place
 */
static void key_cleanup(struct work_struct *work)
{
        struct rb_node *_n;
        struct key *key;

go_again:
        /* look for a dead key in the tree */
        spin_lock(&key_serial_lock);

        for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
                key = rb_entry(_n, struct key, serial_node);

                if (atomic_read(&key->usage) == 0)
                        goto found_dead_key;
        }

        spin_unlock(&key_serial_lock);
        return;

found_dead_key:
        /* we found a dead key - once we've removed it from the tree, we can
         * drop the lock */
        rb_erase(&key->serial_node, &key_serial_tree);
        spin_unlock(&key_serial_lock);

        key_check(key);

        security_key_free(key);

        /* deal with the user's key tracking and quota */
        if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
                spin_lock(&key->user->lock);
                key->user->qnkeys--;
                key->user->qnbytes -= key->quotalen;
                spin_unlock(&key->user->lock);
        }

        atomic_dec(&key->user->nkeys);
        if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
                atomic_dec(&key->user->nikeys);

        key_user_put(key->user);

        /* now throw away the key memory */
        if (key->type->destroy)
                key->type->destroy(key);

        kfree(key->description);

#ifdef KEY_DEBUGGING
        key->magic = KEY_DEBUG_MAGIC_X;
#endif
        kmem_cache_free(key_jar, key);

        /* there may, of course, be more than one key to destroy */
        goto go_again;

} /* end key_cleanup() */
/*****************************************************************************/
/*
 * dispose of a reference to a key
 * - when all the references are gone, we schedule the cleanup task to come and
 *   pull it out of the tree in definite process context
 */
void key_put(struct key *key)
{
        if (key) {
                key_check(key);

                if (atomic_dec_and_test(&key->usage))
                        schedule_work(&key_cleanup_task);
        }

} /* end key_put() */

EXPORT_SYMBOL(key_put);
/*****************************************************************************/
/*
 * find a key by its serial number
 */
struct key *key_lookup(key_serial_t id)
{
        struct rb_node *n;
        struct key *key;

        spin_lock(&key_serial_lock);

        /* search the tree for the specified key */
        n = key_serial_tree.rb_node;
        while (n) {
                key = rb_entry(n, struct key, serial_node);

                if (id < key->serial)
                        n = n->rb_left;
                else if (id > key->serial)
                        n = n->rb_right;
                else
                        goto found;
        }

not_found:
        key = ERR_PTR(-ENOKEY);
        goto error;

found:
        /* pretend it doesn't exist if it's dead */
        if (atomic_read(&key->usage) == 0 ||
            test_bit(KEY_FLAG_DEAD, &key->flags) ||
            key->type == &key_type_dead)
                goto not_found;

        /* this races with key_put(), but that doesn't matter since key_put()
         * doesn't actually change the key
         */
        atomic_inc(&key->usage);

error:
        spin_unlock(&key_serial_lock);
        return key;

} /* end key_lookup() */
/*****************************************************************************/
/*
 * find and lock the specified key type against removal
 * - we return with the sem readlocked
 */
struct key_type *key_type_lookup(const char *type)
{
        struct key_type *ktype;

        down_read(&key_types_sem);

        /* look up the key type to see if it's one of the registered kernel
         * types */
        list_for_each_entry(ktype, &key_types_list, link) {
                if (strcmp(ktype->name, type) == 0)
                        goto found_kernel_type;
        }

        up_read(&key_types_sem);
        ktype = ERR_PTR(-ENOKEY);

found_kernel_type:
        return ktype;

} /* end key_type_lookup() */
/*****************************************************************************/
/*
 * unlock a key type
 */
void key_type_put(struct key_type *ktype)
{
        up_read(&key_types_sem);

} /* end key_type_put() */
/*****************************************************************************/
/*
 * attempt to update an existing key
 * - the key has an incremented refcount
 * - we need to put the key if we get an error
 */
static inline key_ref_t __key_update(key_ref_t key_ref,
                                     const void *payload, size_t plen)
{
        struct key *key = key_ref_to_ptr(key_ref);
        int ret;

        /* need write permission on the key to update it */
        ret = key_permission(key_ref, KEY_WRITE);
        if (ret < 0)
                goto error;

        ret = -EEXIST;
        if (!key->type->update)
                goto error;

        down_write(&key->sem);

        ret = key->type->update(key, payload, plen);
        if (ret == 0)
                /* updating a negative key instantiates it */
                clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

        up_write(&key->sem);

        if (ret < 0)
                goto error;
out:
        return key_ref;

error:
        key_put(key);
        key_ref = ERR_PTR(ret);
        goto out;

} /* end __key_update() */
/*****************************************************************************/
/*
 * search the specified keyring for a key of the same description; if one is
 * found, update it, otherwise add a new one
 */
key_ref_t key_create_or_update(key_ref_t keyring_ref,
                               const char *type,
                               const char *description,
                               const void *payload,
                               size_t plen,
                               unsigned long flags)
{
        struct key_type *ktype;
        struct key *keyring, *key = NULL;
        key_perm_t perm;
        key_ref_t key_ref;
        int ret;

        /* look up the key type to see if it's one of the registered kernel
         * types */
        ktype = key_type_lookup(type);
        if (IS_ERR(ktype)) {
                key_ref = ERR_PTR(-ENODEV);
                goto error;
        }

        key_ref = ERR_PTR(-EINVAL);
        if (!ktype->match || !ktype->instantiate)
                goto error_2;

        keyring = key_ref_to_ptr(keyring_ref);

        key_check(keyring);

        key_ref = ERR_PTR(-ENOTDIR);
        if (keyring->type != &key_type_keyring)
                goto error_2;

        down_write(&keyring->sem);

        /* if we're going to allocate a new key, we're going to have
         * to modify the keyring */
        ret = key_permission(keyring_ref, KEY_WRITE);
        if (ret < 0) {
                key_ref = ERR_PTR(ret);
                goto error_3;
        }

        /* if it's possible to update this type of key, search for an existing
         * key of the same type and description in the destination keyring and
         * update that instead if possible
         */
        if (ktype->update) {
                key_ref = __keyring_search_one(keyring_ref, ktype, description,
                                               0);
                if (!IS_ERR(key_ref))
                        goto found_matching_key;
        }

        /* decide on the permissions we want */
        perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
        perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR;

        if (ktype->read)
                perm |= KEY_POS_READ | KEY_USR_READ;

        if (ktype == &key_type_keyring || ktype->update)
                perm |= KEY_USR_WRITE;

        /* allocate a new key */
        key = key_alloc(ktype, description, current->fsuid, current->fsgid,
                        current, perm, flags);
        if (IS_ERR(key)) {
                key_ref = ERR_PTR(PTR_ERR(key));
                goto error_3;
        }

        /* instantiate it and link it into the target keyring */
        ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL);
        if (ret < 0) {
                key_put(key);
                key_ref = ERR_PTR(ret);
                goto error_3;
        }

        key_ref = make_key_ref(key, is_key_possessed(keyring_ref));

error_3:
        up_write(&keyring->sem);
error_2:
        key_type_put(ktype);
error:
        return key_ref;

found_matching_key:
        /* we found a matching key, so we're going to try to update it
         * - we can drop the locks first as we have the key pinned
         */
        up_write(&keyring->sem);
        key_type_put(ktype);

        key_ref = __key_update(key_ref, payload, plen);
        goto error;

} /* end key_create_or_update() */

EXPORT_SYMBOL(key_create_or_update);
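
/*
 * Illustrative sketch only, not part of the original file: given a keyring
 * the caller already holds a reference to, key_create_or_update() either
 * refreshes a matching key or adds a new one.  example_refresh_token() and
 * the type/description strings are hypothetical choices for the example.
 */
static int example_refresh_token(struct key *keyring,
                                 const void *payload, size_t plen)
{
        key_ref_t kref;

        /* treat the keyring as possessed so possessor permissions apply */
        kref = key_create_or_update(make_key_ref(keyring, 1), "user",
                                    "example:token", payload, plen,
                                    KEY_ALLOC_IN_QUOTA);
        if (IS_ERR(kref))
                return PTR_ERR(kref);

        /* drop the reference handed back on success */
        key_ref_put(kref);
        return 0;
}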
/*****************************************************************************/
/*
 * update a key
 */
int key_update(key_ref_t key_ref, const void *payload, size_t plen)
{
        struct key *key = key_ref_to_ptr(key_ref);
        int ret;

        key_check(key);

        /* the key must be writable */
        ret = key_permission(key_ref, KEY_WRITE);
        if (ret < 0)
                goto error;

        /* attempt to update it if supported */
        ret = -EOPNOTSUPP;
        if (key->type->update) {
                down_write(&key->sem);

                ret = key->type->update(key, payload, plen);
                if (ret == 0)
                        /* updating a negative key instantiates it */
                        clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

                up_write(&key->sem);
        }

error:
        return ret;

} /* end key_update() */

EXPORT_SYMBOL(key_update);
/*****************************************************************************/
/*
 * revoke a key
 */
void key_revoke(struct key *key)
{
        key_check(key);

        /* make sure no one's trying to change or use the key when we mark
         * it */
        down_write(&key->sem);
        set_bit(KEY_FLAG_REVOKED, &key->flags);

        if (key->type->revoke)
                key->type->revoke(key);

        up_write(&key->sem);

} /* end key_revoke() */

EXPORT_SYMBOL(key_revoke);
/*****************************************************************************/
/*
 * register a type of key
 */
int register_key_type(struct key_type *ktype)
{
        struct key_type *p;
        int ret;

        ret = -EEXIST;
        down_write(&key_types_sem);

        /* disallow key types with the same name */
        list_for_each_entry(p, &key_types_list, link) {
                if (strcmp(p->name, ktype->name) == 0)
                        goto out;
        }

        /* store the type */
        list_add(&ktype->link, &key_types_list);
        ret = 0;

out:
        up_write(&key_types_sem);
        return ret;

} /* end register_key_type() */

EXPORT_SYMBOL(register_key_type);
/*****************************************************************************/
/*
 * unregister a type of key
 */
void unregister_key_type(struct key_type *ktype)
{
        struct rb_node *_n;
        struct key *key;

        down_write(&key_types_sem);

        /* withdraw the key type */
        list_del_init(&ktype->link);

        /* mark all the keys of this type dead */
        spin_lock(&key_serial_lock);

        for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
                key = rb_entry(_n, struct key, serial_node);

                if (key->type == ktype)
                        key->type = &key_type_dead;
        }

        spin_unlock(&key_serial_lock);

        /* make sure everyone revalidates their keys */
        synchronize_rcu();

        /* we should now be able to destroy the payloads of all the keys of
         * this type with impunity */
        spin_lock(&key_serial_lock);

        for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
                key = rb_entry(_n, struct key, serial_node);

                if (key->type == ktype) {
                        if (ktype->destroy)
                                ktype->destroy(key);
                        memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
                }
        }

        spin_unlock(&key_serial_lock);
        up_write(&key_types_sem);

} /* end unregister_key_type() */

EXPORT_SYMBOL(unregister_key_type);
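
/*
 * Illustrative sketch only, not part of the original file: the smallest
 * useful key type a module might register and later unregister.  All of the
 * example_* names are hypothetical; a real module would wire the init/exit
 * functions up with module_init()/module_exit() and usually provide
 * ->destroy(), ->describe() and ->read() ops as well.
 */
static int example_instantiate(struct key *key, const void *data,
                               size_t datalen)
{
        /* this trivial type stores no payload; it merely bounds the size */
        return datalen <= 512 ? 0 : -EINVAL;
}

static int example_match(const struct key *key, const void *description)
{
        /* match purely on the description string */
        return strcmp(key->description, description) == 0;
}

static struct key_type key_type_example = {
        .name           = "example",
        .instantiate    = example_instantiate,
        .match          = example_match,
};

static int __init example_key_type_init(void)
{
        return register_key_type(&key_type_example);
}

static void __exit example_key_type_exit(void)
{
        /* marks remaining keys of this type dead and destroys their payloads */
        unregister_key_type(&key_type_example);
}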
/*****************************************************************************/
/*
 * initialise the key management stuff
 */
void __init key_init(void)
{
        /* allocate a slab in which we can store keys */
        key_jar = kmem_cache_create("key_jar", sizeof(struct key),
                        0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);

        /* add the special key types */
        list_add_tail(&key_type_keyring.link, &key_types_list);
        list_add_tail(&key_type_dead.link, &key_types_list);
        list_add_tail(&key_type_user.link, &key_types_list);

        /* record the root user tracking */
        rb_link_node(&root_key_user.node,
                     NULL,
                     &key_user_tree.rb_node);

        rb_insert_color(&root_key_user.node,
                        &key_user_tree);

        /* record root's user standard keyrings */
        key_check(&root_user_keyring);
        key_check(&root_session_keyring);

        __key_insert_serial(&root_user_keyring);
        __key_insert_serial(&root_session_keyring);

        keyring_publish_name(&root_user_keyring);
        keyring_publish_name(&root_session_keyring);

        /* link the two root keyrings together */
        key_link(&root_session_keyring, &root_user_keyring);

} /* end key_init() */