/* Basic authentication token and access key management
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/security.h>
#include <linux/workqueue.h>
#include <linux/random.h>
#include <linux/err.h>
#include "internal.h"
static struct kmem_cache *key_jar;
struct rb_root key_serial_tree; /* tree of keys indexed by serial */
DEFINE_SPINLOCK(key_serial_lock);

struct rb_root key_user_tree; /* tree of quota records indexed by UID */
DEFINE_SPINLOCK(key_user_lock);

unsigned int key_quota_root_maxkeys = 200;	/* root's key count quota */
unsigned int key_quota_root_maxbytes = 20000;	/* root's key space quota */
unsigned int key_quota_maxkeys = 200;		/* general key count quota */
unsigned int key_quota_maxbytes = 20000;	/* general key space quota */

static LIST_HEAD(key_types_list);
static DECLARE_RWSEM(key_types_sem);

static void key_cleanup(struct work_struct *work);
static DECLARE_WORK(key_cleanup_task, key_cleanup);
/* we serialise key instantiation and link */
DEFINE_MUTEX(key_construction_mutex);

/* any key whose type gets unregistered will be re-typed to this */
static struct key_type key_type_dead = {
	.name = "dead",
};
#ifdef KEY_DEBUGGING
void __key_check(const struct key *key)
{
	printk("__key_check: key %p {%08x} should be {%08x}\n",
	       key, key->magic, KEY_DEBUG_MAGIC);
	BUG();
}
#endif
/*****************************************************************************/
/*
 * get the key quota record for a user, allocating a new record if one doesn't
 * already exist
 */
struct key_user *key_user_lookup(uid_t uid)
{
	struct key_user *candidate = NULL, *user;
	struct rb_node *parent = NULL;
	struct rb_node **p;

try_again:
	p = &key_user_tree.rb_node;
	spin_lock(&key_user_lock);
	/* search the tree for a user record with a matching UID */
	while (*p) {
		parent = *p;
		user = rb_entry(parent, struct key_user, node);

		if (uid < user->uid)
			p = &(*p)->rb_left;
		else if (uid > user->uid)
			p = &(*p)->rb_right;
		else
			goto found;
	}
	/* if we get here, we failed to find a match in the tree */
	if (!candidate) {
		/* allocate a candidate user record if we don't already have
		 * one */
		spin_unlock(&key_user_lock);

		user = NULL;
		candidate = kmalloc(sizeof(struct key_user), GFP_KERNEL);
		if (unlikely(!candidate))
			goto out;

		/* the allocation may have scheduled, so we need to repeat the
		 * search lest someone else added the record whilst we were
		 * asleep */
		goto try_again;
	}

	/* if we get here, then the user record still hadn't appeared on the
	 * second pass - so we use the candidate record */
	atomic_set(&candidate->usage, 1);
	atomic_set(&candidate->nkeys, 0);
	atomic_set(&candidate->nikeys, 0);
	candidate->uid = uid;
	candidate->qnkeys = 0;
	candidate->qnbytes = 0;
	spin_lock_init(&candidate->lock);
	mutex_init(&candidate->cons_lock);

	rb_link_node(&candidate->node, parent, p);
	rb_insert_color(&candidate->node, &key_user_tree);
	spin_unlock(&key_user_lock);
	user = candidate;
	goto out;

	/* okay - we found a user record for this UID */
found:
	atomic_inc(&user->usage);
	spin_unlock(&key_user_lock);
	kfree(candidate);
out:
	return user;

} /* end key_user_lookup() */
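
/*
 * Illustrative only: the function above is how key_alloc() pins the per-UID
 * quota record before creating a key.  Note the optimistic-allocation shape:
 * the lock is dropped around kmalloc() and the search is repeated, since
 * another task may have inserted the record while we slept.  A hypothetical
 * caller (the literal uid and the error handling are not part of this file):
 *
 *	struct key_user *user;
 *
 *	user = key_user_lookup(500);
 *	if (!user)
 *		return -ENOMEM;
 *	... consult user->qnkeys / user->qnbytes under user->lock ...
 *	key_user_put(user);
 */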
/*****************************************************************************/
/*
 * dispose of a user structure
 */
void key_user_put(struct key_user *user)
{
	if (atomic_dec_and_lock(&user->usage, &key_user_lock)) {
		rb_erase(&user->node, &key_user_tree);
		spin_unlock(&key_user_lock);

		kfree(user);
	}

} /* end key_user_put() */
/*****************************************************************************/
/*
 * assign a key the next unique serial number
 * - these are assigned randomly to avoid security issues through covert
 *   channel problems
 */
static inline void key_alloc_serial(struct key *key)
{
	struct rb_node *parent, **p;
	struct key *xkey;

	/* propose a random serial number and look for a hole for it in the
	 * serial number tree */
	do {
		get_random_bytes(&key->serial, sizeof(key->serial));

		key->serial >>= 1; /* negative numbers are not permitted */
	} while (key->serial < 3);
	spin_lock(&key_serial_lock);

attempt_insertion:
	parent = NULL;
	p = &key_serial_tree.rb_node;

	while (*p) {
		parent = *p;
		xkey = rb_entry(parent, struct key, serial_node);

		if (key->serial < xkey->serial)
			p = &(*p)->rb_left;
		else if (key->serial > xkey->serial)
			p = &(*p)->rb_right;
		else
			goto serial_exists;
	}

	/* we've found a suitable hole - arrange for this key to occupy it */
	rb_link_node(&key->serial_node, parent, p);
	rb_insert_color(&key->serial_node, &key_serial_tree);

	spin_unlock(&key_serial_lock);
	return;
	/* we found a key with the proposed serial number - walk the tree from
	 * that point looking for the next unused serial number */
serial_exists:
	for (;;) {
		key->serial++;
		if (key->serial < 3) {
			key->serial = 3;
			goto attempt_insertion;
		}

		parent = rb_next(parent);
		if (!parent)
			goto attempt_insertion;

		xkey = rb_entry(parent, struct key, serial_node);
		if (key->serial < xkey->serial)
			goto attempt_insertion;
	}

} /* end key_alloc_serial() */
/*****************************************************************************/
/*
 * allocate a key of the specified type
 * - update the user's quota to reflect the existence of the key
 * - called from a key-type operation with key_types_sem read-locked by
 *   key_create_or_update()
 *   - this prevents unregistration of the key type
 * - upon return the key is as yet uninstantiated; the caller needs to either
 *   instantiate the key or discard it before returning
 */
struct key *key_alloc(struct key_type *type, const char *desc,
		      uid_t uid, gid_t gid, const struct cred *cred,
		      key_perm_t perm, unsigned long flags)
{
	struct key_user *user = NULL;
	struct key *key;
	size_t desclen, quotalen;
	int ret;

	key = ERR_PTR(-EINVAL);
	if (!desc || !*desc)
		goto error;

	desclen = strlen(desc) + 1;
	quotalen = desclen + type->def_datalen;

	/* get hold of the key tracking for this user */
	user = key_user_lookup(uid);
	if (!user)
		goto no_memory_1;
	/* check that the user's quota permits allocation of another key and
	 * its description */
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		unsigned maxkeys = (uid == 0) ?
			key_quota_root_maxkeys : key_quota_maxkeys;
		unsigned maxbytes = (uid == 0) ?
			key_quota_root_maxbytes : key_quota_maxbytes;

		spin_lock(&user->lock);
		if (!(flags & KEY_ALLOC_QUOTA_OVERRUN)) {
			if (user->qnkeys + 1 >= maxkeys ||
			    user->qnbytes + quotalen >= maxbytes ||
			    user->qnbytes + quotalen < user->qnbytes)
				goto no_quota;
		}

		user->qnkeys++;
		user->qnbytes += quotalen;
		spin_unlock(&user->lock);
	}
	/* allocate and initialise the key and its description */
	key = kmem_cache_alloc(key_jar, GFP_KERNEL);
	if (!key)
		goto no_memory_2;

	key->description = kmemdup(desc, desclen, GFP_KERNEL);
	if (!key->description)
		goto no_memory_3;
	atomic_set(&key->usage, 1);
	init_rwsem(&key->sem);
	key->type = type;
	key->user = user;
	key->quotalen = quotalen;
	key->datalen = type->def_datalen;
	key->uid = uid;
	key->gid = gid;
	key->perm = perm;
	key->flags = 0;
	key->expiry = 0;
	key->payload.data = NULL;
	key->security = NULL;

	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA))
		key->flags |= 1 << KEY_FLAG_IN_QUOTA;

	memset(&key->type_data, 0, sizeof(key->type_data));

#ifdef KEY_DEBUGGING
	key->magic = KEY_DEBUG_MAGIC;
#endif
	/* let the security module know about the key */
	ret = security_key_alloc(key, cred, flags);
	if (ret < 0)
		goto security_error;

	/* publish the key by giving it a serial number */
	atomic_inc(&user->nkeys);
	key_alloc_serial(key);

error:
	return key;

security_error:
	kfree(key->description);
	kmem_cache_free(key_jar, key);
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		user->qnkeys--;
		user->qnbytes -= quotalen;
		spin_unlock(&user->lock);
	}
	key_user_put(user);
	key = ERR_PTR(ret);
	goto error;
no_memory_3:
	kmem_cache_free(key_jar, key);
no_memory_2:
	if (!(flags & KEY_ALLOC_NOT_IN_QUOTA)) {
		spin_lock(&user->lock);
		user->qnkeys--;
		user->qnbytes -= quotalen;
		spin_unlock(&user->lock);
	}
	key_user_put(user);
no_memory_1:
	key = ERR_PTR(-ENOMEM);
	goto error;

no_quota:
	spin_unlock(&user->lock);
	key_user_put(user);
	key = ERR_PTR(-EDQUOT);
	goto error;

} /* end key_alloc() */

EXPORT_SYMBOL(key_alloc);
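
/*
 * Illustrative only: a sketch of the allocate-then-instantiate contract
 * described above, as a caller might use it.  example_key_type, payload, plen
 * and dest_keyring are hypothetical names, not part of this file:
 *
 *	struct key *key;
 *	int ret;
 *
 *	key = key_alloc(&example_key_type, "example:desc",
 *			current_fsuid(), current_fsgid(), current_cred(),
 *			KEY_POS_ALL | KEY_USR_VIEW, KEY_ALLOC_IN_QUOTA);
 *	if (IS_ERR(key))
 *		return PTR_ERR(key);
 *
 *	ret = key_instantiate_and_link(key, payload, plen, dest_keyring, NULL);
 *	if (ret < 0)
 *		key_put(key);	the key was never instantiated, so discard it
 *	return ret;
 */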
/*****************************************************************************/
/*
 * reserve an amount of quota for the key's payload
 */
int key_payload_reserve(struct key *key, size_t datalen)
{
	int delta = (int) datalen - key->datalen;
	int ret = 0;

	key_check(key);

	/* contemplate the quota adjustment */
	if (delta != 0 && test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		unsigned maxbytes = (key->user->uid == 0) ?
			key_quota_root_maxbytes : key_quota_maxbytes;

		spin_lock(&key->user->lock);

		if (delta > 0 &&
		    (key->user->qnbytes + delta >= maxbytes ||
		     key->user->qnbytes + delta < key->user->qnbytes)) {
			ret = -EDQUOT;
		}
		else {
			key->user->qnbytes += delta;
			key->quotalen += delta;
		}

		spin_unlock(&key->user->lock);
	}

	/* change the recorded data length if that didn't generate an error */
	if (ret == 0)
		key->datalen = datalen;

	return ret;

} /* end key_payload_reserve() */
EXPORT_SYMBOL(key_payload_reserve);
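
/*
 * Illustrative only: a key type's ->update() operation would typically call
 * key_payload_reserve() before swapping in a larger payload, so that the quota
 * check happens before any allocation is committed.  example_update() and its
 * payload handling are hypothetical, not part of this file:
 *
 *	static int example_update(struct key *key, const void *data, size_t datalen)
 *	{
 *		int ret;
 *
 *		ret = key_payload_reserve(key, datalen);
 *		if (ret < 0)
 *			return ret;	the quota would be exceeded
 *
 *		... allocate and install the new payload ...
 *		return 0;
 *	}
 */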
/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 * - called with the target keyring's semaphore writelocked
 */
static int __key_instantiate_and_link(struct key *key,
				      const void *data,
				      size_t datalen,
				      struct key *keyring,
				      struct key *authkey)
{
	int ret, awaken;

	awaken = 0;
	ret = -EBUSY;

	mutex_lock(&key_construction_mutex);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* instantiate the key */
		ret = key->type->instantiate(key, data, datalen);

		if (ret == 0) {
			/* mark the key as being instantiated */
			atomic_inc(&key->user->nikeys);
			set_bit(KEY_FLAG_INSTANTIATED, &key->flags);

			if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
				awaken = 1;

			/* and link it into the destination keyring */
			if (keyring)
				ret = __key_link(keyring, key);

			/* disable the authorisation key */
			if (authkey)
				key_revoke(authkey);
		}
	}

	mutex_unlock(&key_construction_mutex);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

	return ret;

} /* end __key_instantiate_and_link() */
/*****************************************************************************/
/*
 * instantiate a key and link it into the target keyring atomically
 */
int key_instantiate_and_link(struct key *key,
			     const void *data,
			     size_t datalen,
			     struct key *keyring,
			     struct key *authkey)
{
	int ret;

	if (keyring)
		down_write(&keyring->sem);

	ret = __key_instantiate_and_link(key, data, datalen, keyring, authkey);

	if (keyring)
		up_write(&keyring->sem);

	return ret;

} /* end key_instantiate_and_link() */

EXPORT_SYMBOL(key_instantiate_and_link);
/*****************************************************************************/
/*
 * negatively instantiate a key and link it into the target keyring atomically
 */
int key_negate_and_link(struct key *key,
			unsigned timeout,
			struct key *keyring,
			struct key *authkey)
{
	struct timespec now;
	int ret, awaken;

	awaken = 0;
	ret = -EBUSY;

	if (keyring)
		down_write(&keyring->sem);

	mutex_lock(&key_construction_mutex);

	/* can't instantiate twice */
	if (!test_bit(KEY_FLAG_INSTANTIATED, &key->flags)) {
		/* mark the key as being negatively instantiated */
		atomic_inc(&key->user->nikeys);
		set_bit(KEY_FLAG_NEGATIVE, &key->flags);
		set_bit(KEY_FLAG_INSTANTIATED, &key->flags);
		now = current_kernel_time();
		key->expiry = now.tv_sec + timeout;

		if (test_and_clear_bit(KEY_FLAG_USER_CONSTRUCT, &key->flags))
			awaken = 1;

		ret = 0;

		/* and link it into the destination keyring */
		if (keyring)
			ret = __key_link(keyring, key);

		/* disable the authorisation key */
		if (authkey)
			key_revoke(authkey);
	}

	mutex_unlock(&key_construction_mutex);

	if (keyring)
		up_write(&keyring->sem);

	/* wake up anyone waiting for a key to be constructed */
	if (awaken)
		wake_up_bit(&key->flags, KEY_FLAG_USER_CONSTRUCT);

	return ret;

} /* end key_negate_and_link() */

EXPORT_SYMBOL(key_negate_and_link);
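
/*
 * Illustrative only: a construction path that fails to obtain a key can
 * negatively instantiate it so that repeated requests fail quickly with
 * -ENOKEY until the timeout passes.  The key, keyring and authorisation key
 * below are hypothetical:
 *
 *	if (upcall_failed)
 *		key_negate_and_link(key, 60, dest_keyring, authkey);
 *
 * Here 60 is a negative-cache lifetime in seconds, and authkey may be NULL if
 * there is no authorisation key to retire.
 */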
/*****************************************************************************/
/*
 * do cleaning up in process context so that we don't have to disable
 * interrupts all over the place
 */
static void key_cleanup(struct work_struct *work)
{
	struct rb_node *_n;
	struct key *key;

go_again:
	/* look for a dead key in the tree */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (atomic_read(&key->usage) == 0)
			goto found_dead_key;
	}

	spin_unlock(&key_serial_lock);
	return;
	/* we found a dead key - once we've removed it from the tree, we can
	 * drop the lock */
found_dead_key:
	rb_erase(&key->serial_node, &key_serial_tree);
	spin_unlock(&key_serial_lock);

	key_check(key);

	security_key_free(key);

	/* deal with the user's key tracking and quota */
	if (test_bit(KEY_FLAG_IN_QUOTA, &key->flags)) {
		spin_lock(&key->user->lock);
		key->user->qnkeys--;
		key->user->qnbytes -= key->quotalen;
		spin_unlock(&key->user->lock);
	}

	atomic_dec(&key->user->nkeys);
	if (test_bit(KEY_FLAG_INSTANTIATED, &key->flags))
		atomic_dec(&key->user->nikeys);

	key_user_put(key->user);
	/* now throw away the key memory */
	if (key->type->destroy)
		key->type->destroy(key);

	kfree(key->description);

#ifdef KEY_DEBUGGING
	key->magic = KEY_DEBUG_MAGIC_X;
#endif
	kmem_cache_free(key_jar, key);

	/* there may, of course, be more than one key to destroy */
	goto go_again;

} /* end key_cleanup() */
/*****************************************************************************/
/*
 * dispose of a reference to a key
 * - when all the references are gone, we schedule the cleanup task to come and
 *   pull it out of the tree in definite process context
 */
void key_put(struct key *key)
{
	if (key) {
		key_check(key);

		if (atomic_dec_and_test(&key->usage))
			schedule_work(&key_cleanup_task);
	}

} /* end key_put() */

EXPORT_SYMBOL(key_put);
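
/*
 * Illustrative only: every reference handed out (for example by request_key()
 * or taken with key_get()) must eventually be balanced by key_put(); the final
 * put merely schedules key_cleanup(), it never frees the key in-line.
 * Hypothetical caller, not part of this file:
 *
 *	struct key *key = request_key(&key_type_user, "example:desc", NULL);
 *
 *	if (!IS_ERR(key)) {
 *		... use the key, e.g. read the payload under key->sem ...
 *		key_put(key);
 *	}
 */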
/*****************************************************************************/
/*
 * find a key by its serial number
 */
struct key *key_lookup(key_serial_t id)
{
	struct rb_node *n;
	struct key *key;

	spin_lock(&key_serial_lock);

	/* search the tree for the specified key */
	n = key_serial_tree.rb_node;
	while (n) {
		key = rb_entry(n, struct key, serial_node);

		if (id < key->serial)
			n = n->rb_left;
		else if (id > key->serial)
			n = n->rb_right;
		else
			goto found;
	}

not_found:
	key = ERR_PTR(-ENOKEY);
	goto error;

found:
	/* pretend it doesn't exist if it's dead */
	if (atomic_read(&key->usage) == 0 ||
	    test_bit(KEY_FLAG_DEAD, &key->flags) ||
	    key->type == &key_type_dead)
		goto not_found;

	/* this races with key_put(), but that doesn't matter since key_put()
	 * doesn't actually change the key
	 */
	atomic_inc(&key->usage);

error:
	spin_unlock(&key_serial_lock);
	return key;

} /* end key_lookup() */
/*****************************************************************************/
/*
 * find and lock the specified key type against removal
 * - we return with the sem readlocked
 */
struct key_type *key_type_lookup(const char *type)
{
	struct key_type *ktype;

	down_read(&key_types_sem);

	/* look up the key type to see if it's one of the registered kernel
	 * types */
	list_for_each_entry(ktype, &key_types_list, link) {
		if (strcmp(ktype->name, type) == 0)
			goto found_kernel_type;
	}

	up_read(&key_types_sem);
	ktype = ERR_PTR(-ENOKEY);

found_kernel_type:
	return ktype;

} /* end key_type_lookup() */
/*****************************************************************************/
/*
 * unlock a key type
 */
void key_type_put(struct key_type *ktype)
{
	up_read(&key_types_sem);

} /* end key_type_put() */
/*****************************************************************************/
/*
 * attempt to update an existing key
 * - the key has an incremented refcount
 * - we need to put the key if we get an error
 */
static inline key_ref_t __key_update(key_ref_t key_ref,
				     const void *payload, size_t plen)
{
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;

	/* need write permission on the key to update it */
	ret = key_permission(key_ref, KEY_WRITE);
	if (ret < 0)
		goto error;

	ret = -EEXIST;
	if (!key->type->update)
		goto error;

	down_write(&key->sem);

	ret = key->type->update(key, payload, plen);
	if (ret == 0)
		/* updating a negative key instantiates it */
		clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

	up_write(&key->sem);

	if (ret < 0)
		goto error;
out:
	return key_ref;

error:
	key_put(key);
	key_ref = ERR_PTR(ret);
	goto out;

} /* end __key_update() */
/*****************************************************************************/
/*
 * search the specified keyring for a key of the same description; if one is
 * found, update it, otherwise add a new one
 */
key_ref_t key_create_or_update(key_ref_t keyring_ref,
			       const char *type,
			       const char *description,
			       const void *payload,
			       size_t plen,
			       key_perm_t perm,
			       unsigned long flags)
{
	const struct cred *cred = current_cred();
	struct key_type *ktype;
	struct key *keyring, *key = NULL;
	key_ref_t key_ref;
	int ret;
	/* look up the key type to see if it's one of the registered kernel
	 * types */
	ktype = key_type_lookup(type);
	if (IS_ERR(ktype)) {
		key_ref = ERR_PTR(-ENODEV);
		goto error;
	}

	key_ref = ERR_PTR(-EINVAL);
	if (!ktype->match || !ktype->instantiate)
		goto error_2;
	keyring = key_ref_to_ptr(keyring_ref);

	key_check(keyring);

	key_ref = ERR_PTR(-ENOTDIR);
	if (keyring->type != &key_type_keyring)
		goto error_2;

	down_write(&keyring->sem);

	/* if we're going to allocate a new key, we're going to have
	 * to modify the keyring */
	ret = key_permission(keyring_ref, KEY_WRITE);
	if (ret < 0) {
		key_ref = ERR_PTR(ret);
		goto error_3;
	}
	/* if it's possible to update this type of key, search for an existing
	 * key of the same type and description in the destination keyring and
	 * update that instead if possible
	 */
	if (ktype->update) {
		key_ref = __keyring_search_one(keyring_ref, ktype, description,
					       0);
		if (!IS_ERR(key_ref))
			goto found_matching_key;
	}
	/* if the client doesn't provide, decide on the permissions we want */
	if (perm == KEY_PERM_UNDEF) {
		perm = KEY_POS_VIEW | KEY_POS_SEARCH | KEY_POS_LINK | KEY_POS_SETATTR;
		perm |= KEY_USR_VIEW | KEY_USR_SEARCH | KEY_USR_LINK | KEY_USR_SETATTR;

		if (ktype->read)
			perm |= KEY_POS_READ | KEY_USR_READ;

		if (ktype == &key_type_keyring || ktype->update)
			perm |= KEY_USR_WRITE;
	}
	/* allocate a new key */
	key = key_alloc(ktype, description, cred->fsuid, cred->fsgid, cred,
			perm, flags);
	if (IS_ERR(key)) {
		key_ref = ERR_CAST(key);
		goto error_3;
	}

	/* instantiate it and link it into the target keyring */
	ret = __key_instantiate_and_link(key, payload, plen, keyring, NULL);
	if (ret < 0) {
		key_put(key);
		key_ref = ERR_PTR(ret);
		goto error_3;
	}

	key_ref = make_key_ref(key, is_key_possessed(keyring_ref));

error_3:
	up_write(&keyring->sem);
error_2:
	key_type_put(ktype);
error:
	return key_ref;
found_matching_key:
	/* we found a matching key, so we're going to try to update it
	 * - we can drop the locks first as we have the key pinned
	 */
	up_write(&keyring->sem);
	key_type_put(ktype);

	key_ref = __key_update(key_ref, payload, plen);
	goto error;

} /* end key_create_or_update() */

EXPORT_SYMBOL(key_create_or_update);
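
/*
 * Illustrative only: this is roughly what the add_key() system call does for a
 * user-defined key.  keyring_ref, payload and plen are hypothetical here:
 *
 *	key_ref_t kref;
 *
 *	kref = key_create_or_update(keyring_ref, "user", "example:desc",
 *				    payload, plen, KEY_PERM_UNDEF,
 *				    KEY_ALLOC_IN_QUOTA);
 *	if (IS_ERR(kref))
 *		return PTR_ERR(kref);
 *	key_ref_put(kref);	drop the reference returned to us
 */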
/*****************************************************************************/
/*
 * update a key
 */
int key_update(key_ref_t key_ref, const void *payload, size_t plen)
{
	struct key *key = key_ref_to_ptr(key_ref);
	int ret;

	key_check(key);

	/* the key must be writable */
	ret = key_permission(key_ref, KEY_WRITE);
	if (ret < 0)
		goto error;

	/* attempt to update it if supported */
	ret = -EOPNOTSUPP;
	if (key->type->update) {
		down_write(&key->sem);

		ret = key->type->update(key, payload, plen);
		if (ret == 0)
			/* updating a negative key instantiates it */
			clear_bit(KEY_FLAG_NEGATIVE, &key->flags);

		up_write(&key->sem);
	}

error:
	return ret;

} /* end key_update() */

EXPORT_SYMBOL(key_update);
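
/*
 * Illustrative only: updating a key the caller has already looked up and
 * permission-checked (for instance via the keyctl path).  key_ref, new_payload
 * and new_plen are hypothetical:
 *
 *	ret = key_update(key_ref, new_payload, new_plen);
 *	if (ret == -EOPNOTSUPP)
 *		... the key type does not support update ...
 */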
/*****************************************************************************/
/*
 * revoke a key
 */
void key_revoke(struct key *key)
{
	key_check(key);

	/* make sure no one's trying to change or use the key when we mark it
	 * - we tell lockdep that we might nest because we might be revoking an
	 *   authorisation key whilst holding the sem on a key we've just
	 *   instantiated
	 */
	down_write_nested(&key->sem, 1);
	if (!test_and_set_bit(KEY_FLAG_REVOKED, &key->flags) &&
	    key->type->revoke)
		key->type->revoke(key);

	up_write(&key->sem);

} /* end key_revoke() */

EXPORT_SYMBOL(key_revoke);
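
/*
 * Illustrative only: revocation leaves the key in place but makes subsequent
 * permission checks fail with -EKEYREVOKED.  It does not consume the caller's
 * reference, so a typical sequence (key is hypothetical) is:
 *
 *	key_revoke(key);
 *	key_put(key);
 */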
/*****************************************************************************/
/*
 * register a type of key
 */
int register_key_type(struct key_type *ktype)
{
	struct key_type *p;
	int ret;

	ret = -EEXIST;
	down_write(&key_types_sem);

	/* disallow key types with the same name */
	list_for_each_entry(p, &key_types_list, link) {
		if (strcmp(p->name, ktype->name) == 0)
			goto out;
	}

	/* store the type */
	list_add(&ktype->link, &key_types_list);
	ret = 0;

out:
	up_write(&key_types_sem);
	return ret;

} /* end register_key_type() */

EXPORT_SYMBOL(register_key_type);
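
/*
 * Illustrative only: how a module might define and register its own key type.
 * The type name, operations and init/exit hooks below are hypothetical; a real
 * type would also fill in ->match, ->describe and so on:
 *
 *	static struct key_type key_type_example = {
 *		.name		= "example",
 *		.instantiate	= example_instantiate,
 *		.destroy	= example_destroy,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return register_key_type(&key_type_example);
 *	}
 *
 *	static void __exit example_exit(void)
 *	{
 *		unregister_key_type(&key_type_example);
 *	}
 */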
/*****************************************************************************/
/*
 * unregister a type of key
 */
void unregister_key_type(struct key_type *ktype)
{
	struct rb_node *_n;
	struct key *key;

	down_write(&key_types_sem);

	/* withdraw the key type */
	list_del_init(&ktype->link);

	/* mark all the keys of this type dead */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (key->type == ktype)
			key->type = &key_type_dead;
	}

	spin_unlock(&key_serial_lock);

	/* make sure everyone revalidates their keys */
	synchronize_rcu();

	/* we should now be able to destroy the payloads of all the keys of
	 * this type with impunity */
	spin_lock(&key_serial_lock);

	for (_n = rb_first(&key_serial_tree); _n; _n = rb_next(_n)) {
		key = rb_entry(_n, struct key, serial_node);

		if (key->type == ktype) {
			if (ktype->destroy)
				ktype->destroy(key);
			memset(&key->payload, KEY_DESTROY, sizeof(key->payload));
		}
	}

	spin_unlock(&key_serial_lock);
	up_write(&key_types_sem);

} /* end unregister_key_type() */

EXPORT_SYMBOL(unregister_key_type);
/*****************************************************************************/
/*
 * initialise the key management stuff
 */
void __init key_init(void)
{
	/* allocate a slab in which we can store keys */
	key_jar = kmem_cache_create("key_jar", sizeof(struct key),
			0, SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);

	/* add the special key types */
	list_add_tail(&key_type_keyring.link, &key_types_list);
	list_add_tail(&key_type_dead.link, &key_types_list);
	list_add_tail(&key_type_user.link, &key_types_list);

	/* record the root user tracking */
	rb_link_node(&root_key_user.node,
		     NULL,
		     &key_user_tree.rb_node);

	rb_insert_color(&root_key_user.node,
			&key_user_tree);

} /* end key_init() */