/* key.c: basic authentication token and access key management
 *
 * Copyright (C) 2004-6 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/workqueue.h>
#include <linux/random.h>
#include <linux/err.h>
#include "internal.h"

static kmem_cache_t	*key_jar;
struct rb_root		key_serial_tree; /* tree of keys indexed by serial */
DEFINE_SPINLOCK(key_serial_lock);

struct rb_root	key_user_tree; /* tree of quota records indexed by UID */
DEFINE_SPINLOCK(key_user_lock);

static LIST_HEAD(key_types_list);
static DECLARE_RWSEM(key_types_sem);

static void key_cleanup(void *data);
static DECLARE_WORK(key_cleanup_task, key_cleanup, NULL);

/* we serialise key instantiation and link */
DECLARE_RWSEM(key_construction_sem);

/* any key whose type gets unregistered will be re-typed to this */
static struct key_type key_type_dead = {
	.name	= "dead",
};

#ifdef KEY_DEBUGGING
void __key_check(const struct key *key)
{
	printk("__key_check: key %p {%08x} should be {%08x}\n",
	       key, key->magic, KEY_DEBUG_MAGIC);
	BUG();
}
#endif

/*****************************************************************************/
/*
 * get the key quota record for a user, allocating a new record if one doesn't
 * already exist
 */
struct key_user *key_user_lookup(uid_t uid)
{
	struct key_user *candidate = NULL, *user;
	struct rb_node *parent = NULL;
	struct rb_node **p;

try_again:
	p = &key_user_tree.rb_node;
	spin_lock(&key_user_lock);

	/* search the tree for a user record with a matching UID */
	while (*p) {
		parent = *p;
		user = rb_entry(parent, struct key_user, node);

		if (uid < user->uid)
			p = &(*p)->rb_left;
		else if (uid > user->uid)
			p = &(*p)->rb_right;
		else
			goto found;
	}

	/* if we get here, we failed to find a match in the tree */
	if (!candidate) {
		/* allocate a candidate user record if we don't already have
		 * one */
		spin_unlock(&key_user_lock);

		user = NULL;
		candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
		if (unlikely(!candidate))
			goto out;

		/* the allocation may have scheduled, so we need to repeat the
		 * search lest someone else added the record whilst we were
		 * asleep */
		goto try_again;
	}

	/* if we get here, then the user record still hadn't appeared on the
	 * second pass - so we use the candidate record */
	atomic_set(&candidate->usage, 1);
	atomic_set(&candidate->nkeys, 0);
	atomic_set(&candidate->nikeys, 0);
	candidate->uid = uid;
	candidate->qnkeys = 0;
	candidate->qnbytes = 0;
	spin_lock_init(&candidate->lock);
	INIT_LIST_HEAD(&candidate->consq);

	rb_link_node(&candidate->node, parent, p);
	rb_insert_color(&candidate->node, &key_user_tree);
	spin_unlock(&key_user_lock);
	user = candidate;
	goto out;

	/* okay - we found a user record for this UID */
found:
	atomic_inc(&user->usage);
	spin_unlock(&key_user_lock);
	kfree(candidate);
out:
	return user;

} /* end key_user_lookup() */

/*****************************************************************************/
/*
 * dispose of a user structure
 */
void key_user_put(struct key_user *user)
{
	if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
		rb_erase(&user->node, &key_user_tree);
		spin_unlock(&key_user_lock);

		kfree(user);
	}

} /* end key_user_put() */

/*****************************************************************************/
/*
 * insert a key with a fixed serial number
 */
static void __init __key_insert_serial(struct key *key)
{
	struct rb_node *parent, **p;
	struct key *xkey;

	parent = NULL;
	p = &key_serial_tree.rb_node;

	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct key, serial_node);

		if (key->serial < xkey->serial)
			p = &(*p)->rb_left;
		else if (key->serial > xkey->serial)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	/* we've found a suitable hole - arrange for this key to occupy it */
	rb_link_node(&key->serial_node, parent, p);
	rb_insert_color(&key->serial_node, &key_serial_tree);

} /* end __key_insert_serial() */

/*****************************************************************************/
/*
 * assign a key the next unique serial number
 * - these are assigned randomly to avoid security issues through covert
 *   channel problems
 */
static inline void key_alloc_serial(struct key *key)
{
	struct rb_node *parent, **p;
	struct key *xkey;

	/* propose a random serial number and look for a hole for it in the
	 * serial number tree */
	do {
		get_random_bytes(&key->serial, sizeof(key->serial));

		key->serial >>= 1; /* negative numbers are not permitted */
	} while (key->serial < 3);

	spin_lock(&key_serial_lock);

	parent = NULL;
	p = &key_serial_tree.rb_node;

	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct key, serial_node);

		if (key->serial < xkey->serial)
			p = &(*p)->rb_left;
		else if (key->serial > xkey->serial)
			p = &(*p)->rb_right;
		else
			goto serial_exists;
	}

	goto insert_here;

	/* we found a key with the proposed serial number - walk the tree from
	 * that point looking for the next unused serial number */
serial_exists:
	for (;;) {
		key->serial++;
		if (key->serial < 3)
			key->serial = 3;

		if (!rb_parent(parent))
			p = &key_serial_tree.rb_node;
		else if (rb_parent(parent)->rb_left == parent)
			p = &(rb_parent(parent)->rb_left);
		else
			p = &(rb_parent(parent)->rb_right);

		parent = rb_next(parent);
		if (!parent)
			break;

		xkey = rb_entry(parent, struct key, serial_node);
		if (key->serial < xkey->serial)
			goto insert_here;
	}

	/* we've found a suitable hole - arrange for this key to occupy it */
insert_here:
	rb_link_node(&key->serial_node, parent, p);
	rb_insert_color(&key->serial_node, &key_serial_tree);

	spin_unlock(&key_serial_lock);

} /* end key_alloc_serial() */

/*****************************************************************************/
/*
 * allocate a key of the specified type
 * - update the user's quota to reflect the existence of the key
 * - called from a key-type operation with key_types_sem read-locked by
 *   key_create_or_update()
 *   - this prevents unregistration of the key type
 * - upon return the key is as yet uninstantiated; the caller needs to either
 *   instantiate the key or discard it before returning
 */
struct key *key_alloc(struct key_type *type, const char *desc,
		      uid_t uid, gid_t gid, struct task_struct *ctx,
		      key_perm_t perm, unsigned long flags)
{
	struct key_user *user = NULL;
	struct key *key;
	size_t desclen, quotalen;
	int ret;

	key = ERR_PTR(-EINVAL);
	if (!desc || !*desc)
		goto error;

	desclen = strlen(desc) + 1;
	quotalen = desclen + type->def_datalen;

	/* get hold of the key tracking for this user */
	user = key_user_lookup(uid);
	if (!user)
		goto no_memory_1;

	/* check that the user's quota permits allocation of another key and
	 * its description */
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
			if (user->qnkeys + 1 >= KEYQUOTA_MAX_KEYS ||
			    user->qnbytes + quotalen >= KEYQUOTA_MAX_BYTES
			    )
				goto no_quota;
		}

		user->qnkeys++;
		user->qnbytes += quotalen;
		spin_unlock(&user->lock);
	}

	/* allocate and initialise the key and its description */
	key = kmem_cache_alloc(key_jar, SLAB_KERNEL);
	if (!key)
		goto no_memory_2;

	if (desc) {
		key->description = kmalloc(desclen, GFP_KERNEL);
		if (!key->description)
			goto no_memory_3;

		memcpy(key->description, desc, desclen);
	}

	atomic_set(&key->usage, 1);
	init_rwsem(&key->sem);
	key->type = type;
	key->user = user;
	key->quotalen = quotalen;
	key->datalen = type->def_datalen;
	key->uid = uid;
	key->gid = gid;
	key->perm = perm;
	key->flags = 0;
	key->expiry = 0;
	key->payload.data = NULL;
	key->security = NULL;

	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
		key->flags |= 1 << KEY_FLAG_IN_QUOTA;

	memset(&key->type_data, 0, sizeof(key->type_data));

#ifdef KEY_DEBUGGING
	key->magic = KEY_DEBUG_MAGIC;
#endif

	/* let the security module know about the key */
	ret = security_key_alloc(key, ctx, flags);
	if (ret < 0)
		goto security_error;

	/* publish the key by giving it a serial number */
	atomic_inc(&user->nkeys);
	key_alloc_serial(key);

error:
	return key;

security_error:
	kfree(key->description);
	kmem_cache_free(key_jar, key);
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		user->qnkeys--;
		user->qnbytes -= quotalen;
		spin_unlock(&user->lock);
	}
	key_user_put(user);
	key = ERR_PTR(ret);
	goto error;

no_memory_3:
	kmem_cache_free(key_jar, key);
no_memory_2:
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		user->qnkeys--;
		user->qnbytes -= quotalen;
		spin_unlock(&user->lock);
	}
	key_user_put(user);
no_memory_1:
	key = ERR_PTR(-ENOMEM);
	goto error;

no_quota:
	spin_unlock(&user->lock);
	key_user_put(user);
	key = ERR_PTR(-EDQUOT);
	goto error;

} /* end key_alloc() */

EXPORT_SYMBOL(key_alloc);
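
/* Example (illustrative sketch, not part of this file): a caller pairs
 * key_alloc() with either instantiation or disposal.  The key type
 * "my_key_type" and the payload/plen variables below are hypothetical:
 *
 *	struct key *key;
 *	int ret;
 *
 *	key = key_alloc(&my_key_type, "my:description",
 *			current->fsuid, current->fsgid, current,
 *			KEY_POS_ALL | KEY_USR_VIEW, KEY_ALLOC_IN_QUOTA);
 *	if (IS_ERR(key))
 *		return PTR_ERR(key);
 *
 *	ret = key_instantiate_and_link(key, payload, plen, NULL, NULL);
 *	if (ret < 0)
 *		key_put(key);
 *
 * The key_put() on failure discards the still-uninstantiated key, as the
 * comment above key_alloc() requires.
 */
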
/*****************************************************************************/
/*
 * reserve an amount of quota for the key's payload
 */
int key_payload_reserve(struct key *key, size_t datalen)
{
	int delta = (int) datalen - key->datalen;
	int ret = 0;

	key_check(key);

	/* contemplate the quota adjustment */
	if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		spin_lock(&key->user->lock);

		if (delta > 0 &&
		    key->user->qnbytes + delta > KEYQUOTA_MAX_BYTES
		    ) {
			ret = -EDQUOT;
		}
		else {
			key->user->qnbytes += delta;
			key->quotalen += delta;
		}
		spin_unlock(&key->user->lock);
	}

	/* change the recorded data length if that didn't generate an error */
	if (ret == 0)
		key->datalen = datalen;

	return ret;

} /* end key_payload_reserve() */

EXPORT_SYMBOL(key_payload_reserve);
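
/* Example (illustrative sketch): a key type's ->instantiate() or ->update()
 * operation would typically reserve quota for the incoming payload before
 * committing to it.  Here "datalen" stands for that payload's length:
 *
 *	ret = key_payload_reserve(key, datalen);
 *	if (ret < 0)
 *		return ret;
 *
 * On success the key's recorded data length is updated and the difference is
 * charged to (or refunded from) the owning user's qnbytes count; -EDQUOT is
 * returned if the growth would exceed the user's byte quota.
 */
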
/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 * - called with the target keyring's semaphore writelocked
 */
static int __key_instantiate_and_link(struct key *key,
				      const void *data,
				      size_t datalen,
				      struct key *keyring,
				      struct key *instkey)
{
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	down_write(&key_construction_sem);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* instantiate the key */
		ret = key->type->instantiate(key, data, datalen);

		if (ret == 0) {
			/* mark the key as being instantiated */
			atomic_inc(&key->user->nikeys);
			set_bit(KEY_FLAG_INSTANTIATED, &key->flags);

			if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
				awaken = 1;

			/* and link it into the destination keyring */
			if (keyring)
				ret = __key_link(keyring, key);

			/* disable the authorisation key */
			if (instkey)
				key_revoke(instkey);
		}
	}

	up_write(&key_construction_sem);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_all(&request_key_conswq);

	return ret;

} /* end __key_instantiate_and_link() */

/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 */
int key_instantiate_and_link(struct key *key,
			     const void *data,
			     size_t datalen,
			     struct key *keyring,
			     struct key *instkey)
{
	int ret;

	if (keyring)
		down_write(&keyring->sem);

	ret = __key_instantiate_and_link(key, data, datalen, keyring, instkey);

	if (keyring)
		up_write(&keyring->sem);

	return ret;

} /* end key_instantiate_and_link() */

EXPORT_SYMBOL(key_instantiate_and_link);
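
/* Example (illustrative sketch): a driver or filesystem that has just built
 * the payload for an uninstantiated key, for instance while servicing a
 * request_key() upcall, instantiates it and optionally links it in one step.
 * The payload, plen and dest_keyring variables are hypothetical:
 *
 *	ret = key_instantiate_and_link(key, payload, plen, dest_keyring, NULL);
 *
 * Passing a NULL keyring just instantiates the key without linking it; a
 * non-NULL instkey would additionally revoke that authorisation key.
 */
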
/*****************************************************************************/
/*
 * negatively instantiate a key and link it into the target keyring atomically
 */
int key_negate_and_link(struct key *key,
			unsigned timeout,
			struct key *keyring,
			struct key *instkey)
{
	struct timespec now;
	int ret, awaken;

	key_check(key);
	key_check(keyring);

	awaken = 0;
	ret = -EBUSY;

	if (keyring)
		down_write(&keyring->sem);

	down_write(&key_construction_sem);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* mark the key as being negatively instantiated */
		atomic_inc(&key->user->nikeys);
		set_bit(KEY_FLAG_NEGATIVE, &key->flags);
		set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
		now = current_kernel_time();
		key->expiry = now.tv_sec + timeout;

		if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
			awaken = 1;

		ret = 0;

		/* and link it into the destination keyring */
		if (keyring)
			ret = __key_link(keyring, key);

		/* disable the authorisation key */
		if (instkey)
			key_revoke(instkey);
	}

	up_write(&key_construction_sem);

	if (keyring)
		up_write(&keyring->sem);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_all(&request_key_conswq);

	return ret;

} /* end key_negate_and_link() */

EXPORT_SYMBOL(key_negate_and_link);
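
/* Example (illustrative sketch): when an upcall fails to construct a key, the
 * handler can negatively instantiate it for a while so that repeated lookups
 * fail quickly with ENOKEY instead of re-triggering the upcall.  The
 * 60-second timeout and dest_keyring below are arbitrary illustrations:
 *
 *	key_negate_and_link(key, 60, dest_keyring, NULL);
 */
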
/*****************************************************************************/
/*
 * do cleaning up in process context so that we don't have to disable
 * interrupts all over the place
 */
static void key_cleanup(void *data)
{
	struct rb_node *_n;
	struct key *key;

go_again:
	/* look for a dead key in the tree */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (atomic_read(&key->usage) == 0)
			goto found_dead_key;
	}

	spin_unlock(&key_serial_lock);
	return;

found_dead_key:
	/* we found a dead key - once we've removed it from the tree, we can
	 * drop the lock */
	rb_erase(&key->serial_node, &key_serial_tree);
	spin_unlock(&key_serial_lock);

	key_check(key);

	security_key_free(key);

	/* deal with the user's key tracking and quota */
	if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		spin_lock(&key->user->lock);
		key->user->qnkeys--;
		key->user->qnbytes -= key->quotalen;
		spin_unlock(&key->user->lock);
	}

	atomic_dec(&key->user->nkeys);
	if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
		atomic_dec(&key->user->nikeys);

	key_user_put(key->user);

	/* now throw away the key memory */
	if (key->type->destroy)
		key->type->destroy(key);

	kfree(key->description);

#ifdef KEY_DEBUGGING
	key->magic = KEY_DEBUG_MAGIC_X;
#endif
	kmem_cache_free(key_jar, key);

	/* there may, of course, be more than one key to destroy */
	goto go_again;

} /* end key_cleanup() */

/*****************************************************************************/
/*
 * dispose of a reference to a key
 * - when all the references are gone, we schedule the cleanup task to come and
 *   pull it out of the tree in definite process context
 */
void key_put(struct key *key)
{
	if (key) {
		key_check(key);

		if (atomic_dec_and_test(&key->usage))
			schedule_work(&key_cleanup_task);
	}

} /* end key_put() */

EXPORT_SYMBOL(key_put);
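
/* Example (illustrative sketch): every successful lookup or allocation that
 * returns a pinned key must be balanced by a key_put().  Note that key_put()
 * only drops the reference; the actual destruction happens later, in
 * key_cleanup() via the workqueue:
 *
 *	struct key *key = key_lookup(id);
 *
 *	if (!IS_ERR(key)) {
 *		... use the key ...
 *		key_put(key);
 *	}
 */
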
/*****************************************************************************/
/*
 * find a key by its serial number
 */
struct key *key_lookup(key_serial_t id)
{
	struct rb_node *n;
	struct key *key;

	spin_lock(&key_serial_lock);

	/* search the tree for the specified key */
	n = key_serial_tree.rb_node;
	while (n) {
		key = rb_entry(n, struct key, serial_node);

		if (id < key->serial)
			n = n->rb_left;
		else if (id > key->serial)
			n = n->rb_right;
		else
			goto found;
	}

not_found:
	key = ERR_PTR(-ENOKEY);
	goto error;

found:
	/* pretend it doesn't exist if it's dead */
	if (atomic_read(&key->usage) == 0 ||
	    test_bit(KEY_FLAG_DEAD, &key->flags) ||
	    key->type == &key_type_dead)
		goto not_found;

	/* this races with key_put(), but that doesn't matter since key_put()
	 * doesn't actually change the key
	 */
	atomic_inc(&key->usage);

error:
	spin_unlock(&key_serial_lock);
	return key;

} /* end key_lookup() */

/*****************************************************************************/
/*
 * find and lock the specified key type against removal
 * - we return with the sem readlocked
 */
struct key_type *key_type_lookup(const char *type)
{
	struct key_type *ktype;

	down_read(&key_types_sem);

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	list_for_each_entry(ktype, &key_types_list, link) {
		if (strcmp(ktype->name, type) == 0)
			goto found_kernel_type;
	}

	up_read(&key_types_sem);
	ktype = ERR_PTR(-ENOKEY);

found_kernel_type:
	return ktype;

} /* end key_type_lookup() */

/*****************************************************************************/
/*
 * unlock a key type locked by key_type_lookup()
 */
void key_type_put(struct key_type *ktype)
{
	up_read(&key_types_sem);

} /* end key_type_put() */

/*****************************************************************************/
/*
 * attempt to update an existing key
 * - the key has an incremented refcount
 * - we need to put the key if we get an error
 */
static inline key_ref_t __key_update(key_ref_t key_ref,
				     const void *payload, size_t plen)
{
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;

	/* need write permission on the key to update it */
	ret = key_permission(key_ref, KEY_WRITE);
	if (ret < 0)
		goto error;

	ret = -EEXIST;
	if (!key->type->update)
		goto error;

	down_write(&key->sem);

	ret = key->type->update(key, payload, plen);
	if (ret == 0)
		/* updating a negative key instantiates it */
		clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

	up_write(&key->sem);

	if (ret < 0)
		goto error;
out:
	return key_ref;

error:
	key_put(key);
	key_ref = ERR_PTR(ret);
	goto out;

} /* end __key_update() */

/*****************************************************************************/
/*
 * search the specified keyring for a key of the same description; if one is
 * found, update it, otherwise add a new one
 */
key_ref_t key_create_or_update(key_ref_t keyring_ref,
			       const char *type,
			       const char *description,
			       const void *payload,
			       size_t plen,
			       unsigned long flags)
{
	struct key_type *ktype;
	struct key *keyring, *key = NULL;
	key_perm_t perm;
	key_ref_t key_ref;
	int ret;

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	ktype = key_type_lookup(type);
	if (IS_ERR(ktype)) {
		key_ref = ERR_PTR(-ENODEV);
		goto error;
	}

	key_ref = ERR_PTR(-EINVAL);
	if (!ktype->match || !ktype->instantiate)
		goto error_2;

	keyring = key_ref_to_ptr(keyring_ref);

	key_check(keyring);

	key_ref = ERR_PTR(-ENOTDIR);
	if (keyring->type != &key_type_keyring)
		goto error_2;

	down_write(&keyring->sem);

	/* if we're going to allocate a new key, we're going to have
	 * to modify the keyring */
	ret = key_permission(keyring_ref, KEY_WRITE);
	if (ret < 0) {
		key_ref = ERR_PTR(ret);
		goto error_3;
	}

	/* if it's possible to update this type of key, search for an existing
	 * key of the same type and description in the destination keyring and
	 * update that instead if possible
	 */
	if (ktype->update) {
		key_ref = __keyring_search_one(keyring_ref, ktype, description,
					       0);
		if (!IS_ERR(key_ref))
			goto found_matching_key;
	}

	/* decide on the permissions we want */
	perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
	perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR;

	if (ktype->read)
		perm |= KEY_POS_READ | KEY_USR_READ;

	if (ktype == &key_type_keyring || ktype->update)
		perm |= KEY_USR_WRITE;

	/* allocate a new key */
	key = key_alloc(ktype, description, current->fsuid, current->fsgid,
			current, perm, flags);
	if (IS_ERR(key)) {
		key_ref = ERR_PTR(PTR_ERR(key));
		goto error_3;
	}

	/* instantiate it and link it into the target keyring */
	ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL);
	if (ret < 0) {
		key_put(key);
		key_ref = ERR_PTR(ret);
		goto error_3;
	}

	key_ref = make_key_ref(key, is_key_possessed(keyring_ref));

error_3:
	up_write(&keyring->sem);
error_2:
	key_type_put(ktype);
error:
	return key_ref;

found_matching_key:
	/* we found a matching key, so we're going to try to update it
	 * - we can drop the locks first as we have the key pinned
	 */
	up_write(&keyring->sem);
	key_type_put(ktype);

	key_ref = __key_update(key_ref, payload, plen);
	goto error;

} /* end key_create_or_update() */

EXPORT_SYMBOL(key_create_or_update);
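
/* Example (illustrative sketch): this is the backend of the add_key() system
 * call.  A kernel-side caller holding a reference to a destination keyring
 * could add or refresh a "user"-type key roughly like this; the payload,
 * plen and keyring_ref variables are hypothetical:
 *
 *	key_ref_t kref;
 *
 *	kref = key_create_or_update(keyring_ref, "user", "mykey:stuff",
 *				    payload, plen, KEY_ALLOC_IN_QUOTA);
 *	if (!IS_ERR(kref))
 *		key_ref_put(kref);
 */
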
/*****************************************************************************/
/*
 * update a key
 */
int key_update(key_ref_t key_ref, const void *payload, size_t plen)
{
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;

	key_check(key);

	/* the key must be writable */
	ret = key_permission(key_ref, KEY_WRITE);
	if (ret < 0)
		goto error;

	/* attempt to update it if supported */
	ret = -EOPNOTSUPP;
	if (key->type->update) {
		down_write(&key->sem);

		ret = key->type->update(key, payload, plen);
		if (ret == 0)
			/* updating a negative key instantiates it */
			clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

		up_write(&key->sem);
	}

error:
	return ret;

} /* end key_update() */

EXPORT_SYMBOL(key_update);
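
/* Example (illustrative sketch): given a writable key reference, the payload
 * can be replaced in place when the key type supplies an ->update()
 * operation.  The new_payload and new_plen variables are hypothetical:
 *
 *	ret = key_update(key_ref, new_payload, new_plen);
 *
 * Key types without an update operation make this return -EOPNOTSUPP.
 */
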
/*****************************************************************************/
/*
 * revoke a key
 */
void key_revoke(struct key *key)
{
	key_check(key);

	/* make sure no one's trying to change or use the key when we mark
	 * it */
	down_write(&key->sem);
	set_bit(KEY_FLAG_REVOKED, &key->flags);

	if (key->type->revoke)
		key->type->revoke(key);

	up_write(&key->sem);

} /* end key_revoke() */

EXPORT_SYMBOL(key_revoke);
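
/* Example (illustrative sketch): a subsystem discarding a credential it no
 * longer trusts revokes the key before dropping its own reference, so that
 * other holders see an error rather than stale data:
 *
 *	key_revoke(key);
 *	key_put(key);
 */
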
/*****************************************************************************/
/*
 * register a type of key
 */
int register_key_type(struct key_type *ktype)
{
	struct key_type *p;
	int ret;

	ret = -EEXIST;
	down_write(&key_types_sem);

	/* disallow key types with the same name */
	list_for_each_entry(p, &key_types_list, link) {
		if (strcmp(p->name, ktype->name) == 0)
			goto out;
	}

	/* store the type */
	list_add(&ktype->link, &key_types_list);
	ret = 0;

out:
	up_write(&key_types_sem);
	return ret;

} /* end register_key_type() */

EXPORT_SYMBOL(register_key_type);
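
/* Example (illustrative sketch): a module defines a struct key_type with its
 * operations and registers it on load.  The type and functions below are
 * entirely hypothetical:
 *
 *	static struct key_type key_type_example = {
 *		.name		= "example",
 *		.def_datalen	= 0,
 *		.instantiate	= example_instantiate,
 *		.match		= example_match,
 *		.destroy	= example_destroy,
 *		.describe	= example_describe,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return register_key_type(&key_type_example);
 *	}
 *
 * register_key_type() returns -EEXIST if a type of the same name is already
 * present in key_types_list.
 */
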
/*****************************************************************************/
/*
 * unregister a type of key
 */
void unregister_key_type(struct key_type *ktype)
{
	struct rb_node *_n;
	struct key *key;

	down_write(&key_types_sem);

	/* withdraw the key type */
	list_del_init(&ktype->link);

	/* mark all the keys of this type dead */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (key->type == ktype)
			key->type = &key_type_dead;
	}

	spin_unlock(&key_serial_lock);

	/* make sure everyone revalidates their keys */
	synchronize_rcu();

	/* we should now be able to destroy the payloads of all the keys of
	 * this type with impunity */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (key->type == ktype) {
			if (ktype->destroy)
				ktype->destroy(key);
			memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
		}
	}

	spin_unlock(&key_serial_lock);
	up_write(&key_types_sem);

} /* end unregister_key_type() */

EXPORT_SYMBOL(unregister_key_type);
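
/* Example (illustrative sketch): the module exit path mirrors registration;
 * after this returns, any remaining keys of the type have been re-typed to
 * key_type_dead and their payloads destroyed.  key_type_example is the
 * hypothetical type from the registration sketch above:
 *
 *	static void __exit example_exit(void)
 *	{
 *		unregister_key_type(&key_type_example);
 *	}
 */
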
/*****************************************************************************/
/*
 * initialise the key management stuff
 */
void __init key_init(void)
{
	/* allocate a slab in which we can store keys */
	key_jar = kmem_cache_create("key_jar", sizeof(struct key),
				    0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);

	/* add the special key types */
	list_add_tail(&key_type_keyring.link, &key_types_list);
	list_add_tail(&key_type_dead.link, &key_types_list);
	list_add_tail(&key_type_user.link, &key_types_list);

	/* record the root user tracking */
	rb_link_node(&root_key_user.node,
		     NULL,
		     &key_user_tree.rb_node);

	rb_insert_color(&root_key_user.node,
			&key_user_tree);

	/* record root's user standard keyrings */
	key_check(&root_user_keyring);
	key_check(&root_session_keyring);

	__key_insert_serial(&root_user_keyring);
	__key_insert_serial(&root_session_keyring);

	keyring_publish_name(&root_user_keyring);
	keyring_publish_name(&root_session_keyring);

	/* link the two root keyrings together */
	key_link(&root_session_keyring, &root_user_keyring);

} /* end key_init() */