/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2010 Nexenta Systems, Inc. All rights reserved.
 */

#ifndef _SYS_CRYPTO_SCHED_IMPL_H
#define	_SYS_CRYPTO_SCHED_IMPL_H

/*
 * Scheduler internal structures.
 */

#ifdef __cplusplus
extern "C" {
#endif

#include <sys/types.h>
#include <sys/mutex.h>
#include <sys/condvar.h>
#include <sys/door.h>
#include <sys/crypto/api.h>
#include <sys/crypto/spi.h>
#include <sys/crypto/impl.h>
#include <sys/crypto/common.h>
#include <sys/crypto/ops_impl.h>

typedef void (kcf_func_t)(void *, int);

typedef enum kcf_req_status {
	REQ_ALLOCATED = 1,
	REQ_WAITING,		/* At the framework level */
	REQ_INPROGRESS,		/* At the provider level */
	REQ_DONE,
	REQ_CANCELED
} kcf_req_status_t;

typedef enum kcf_call_type {
	CRYPTO_SYNCH = 1,
	CRYPTO_ASYNCH
} kcf_call_type_t;

#define	CHECK_FASTPATH(crq, pd)	(((crq) == NULL ||		\
	!((crq)->cr_flag & CRYPTO_ALWAYS_QUEUE)) &&		\
	(pd)->pd_prov_type == CRYPTO_SW_PROVIDER)

#define	KCF_KMFLAG(crq)	(((crq) == NULL) ? KM_SLEEP : KM_NOSLEEP)
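
/*
 * Illustrative sketch (an assumption, not code from this file): how a
 * dispatch routine might combine CHECK_FASTPATH() and KCF_KMFLAG(). The
 * function submit_op() and the variables crq, pd and len are hypothetical.
 *
 *	static int
 *	submit_op(crypto_call_req_t *crq, kcf_provider_desc_t *pd, size_t len)
 *	{
 *		if (CHECK_FASTPATH(crq, pd)) {
 *			// Adaptive path: run in the caller's context. With
 *			// crq != NULL we must not block, so KCF_KMFLAG()
 *			// yields KM_NOSLEEP and allocations can fail.
 *			void *buf = kmem_alloc(len, KCF_KMFLAG(crq));
 *			if (buf == NULL)
 *				return (CRYPTO_HOST_MEMORY);
 *			// ... perform the operation synchronously ...
 *		}
 *		return (CRYPTO_QUEUED);
 *	}
 */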

/*
 * The framework keeps an internal handle to use in the adaptive
 * asynchronous case. This is the case when a client has the
 * CRYPTO_ALWAYS_QUEUE bit clear and a software provider is used for
 * the request. The request is completed in the context of the calling
 * thread and kernel memory must be allocated with KM_NOSLEEP.
 *
 * The framework passes a pointer to the handle in the crypto_req_handle_t
 * argument when it calls the SPI of the software provider. The macros
 * KCF_RHNDL() and KCF_SWFP_RHNDL() are used to do this.
 *
 * When a provider asks the framework for the kmflag value via
 * crypto_kmflag(9F), we use the REQHNDL2_KMFLAG() macro.
 */
extern ulong_t kcf_swprov_hndl;

#define	KCF_RHNDL(kmflag)   (((kmflag) == KM_SLEEP) ? NULL : &kcf_swprov_hndl)
#define	KCF_SWFP_RHNDL(crq) (((crq) == NULL) ? NULL : &kcf_swprov_hndl)
#define	REQHNDL2_KMFLAG(rhndl) \
	(((rhndl) == &kcf_swprov_hndl) ? KM_NOSLEEP : KM_SLEEP)
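
/*
 * Illustrative sketch (assumed caller, not part of this header): the round
 * trip from call req to SPI handle to kmflag. The locals are hypothetical.
 *
 *	crypto_req_handle_t rhndl = KCF_SWFP_RHNDL(crq);
 *		// NULL when crq == NULL (synchronous, KM_SLEEP case);
 *		// &kcf_swprov_hndl otherwise.
 *	int kmflag = REQHNDL2_KMFLAG(rhndl);
 *		// Recovers KM_SLEEP/KM_NOSLEEP inside the provider, e.g.
 *		// when it services crypto_kmflag(9F).
 */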

/* Internal call_req flags. They start after the public ones in api.h */

#define	CRYPTO_SETDUAL	0x00001000 /* Set the 'cont' boolean before */
				   /* submitting the request */
#define	KCF_ISDUALREQ(crq)	\
	(((crq) == NULL) ? B_FALSE : ((crq)->cr_flag & CRYPTO_SETDUAL))

typedef struct kcf_prov_tried {
	kcf_provider_desc_t	*pt_pd;
	struct kcf_prov_tried	*pt_next;
} kcf_prov_tried_t;

/* Must be different from KM_SLEEP and KM_NOSLEEP */
#define	KCF_HOLD_PROV	0x1000

#define	IS_FG_SUPPORTED(mdesc, fg) \
	(((mdesc)->pm_mech_info.cm_func_group_mask & (fg)) != 0)

#define	IS_PROVIDER_TRIED(pd, tlist) \
	((tlist) != NULL && is_in_triedlist((pd), (tlist)))

#define	IS_RECOVERABLE(error) \
	((error) == CRYPTO_BUFFER_TOO_BIG || \
	(error) == CRYPTO_BUSY || \
	(error) == CRYPTO_DEVICE_ERROR || \
	(error) == CRYPTO_DEVICE_MEMORY || \
	(error) == CRYPTO_KEY_SIZE_RANGE || \
	(error) == CRYPTO_NO_PERMISSION)
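
/*
 * Illustrative sketch (an assumption about how the scheduler uses these
 * macros; see kcf_get_mech_provider() and kcf_insert_triedlist() below):
 * retry a failed request on another provider only for recoverable errors,
 * remembering which providers were already tried. mech_type, key, me and
 * fg are hypothetical locals.
 *
 *	kcf_prov_tried_t *tlist = NULL;
 *	kcf_provider_desc_t *pd;
 *	int rv;
 *
 *	for (;;) {
 *		pd = kcf_get_mech_provider(mech_type, key, &me, &rv,
 *		    tlist, fg, 0);
 *		if (pd == NULL)
 *			break;		// no untried provider left
 *		rv = ...;		// submit the request to pd
 *		if (rv == CRYPTO_SUCCESS || !IS_RECOVERABLE(rv))
 *			break;
 *		(void) kcf_insert_triedlist(&tlist, pd, KM_SLEEP);
 *	}
 *	if (tlist != NULL)
 *		kcf_free_triedlist(tlist);
 */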

#define	KCF_ATOMIC_INCR(x)	atomic_inc_32(&(x))
#define	KCF_ATOMIC_DECR(x)	atomic_dec_32(&(x))

/*
 * Node structure for synchronous requests.
 */
typedef struct kcf_sreq_node {
	/* Should always be the first field in this structure */
	kcf_call_type_t		sn_type;

	/*
	 * sn_cv and sn_lock are used to wait for the
	 * operation to complete. sn_lock also protects
	 * the sn_state field.
	 */
	kcondvar_t		sn_cv;
	kmutex_t		sn_lock;
	kcf_req_status_t	sn_state;

	/*
	 * Return value from the operation. This will be
	 * one of the CRYPTO_* errors defined in common.h.
	 */
	int			sn_rv;

	/*
	 * Parameters to call the SPI with. This can be
	 * a pointer, as we know the caller's context/stack stays intact.
	 */
	struct kcf_req_params	*sn_params;

	/* Internal context for this request */
	struct kcf_context	*sn_context;

	/* Provider handling this request */
	kcf_provider_desc_t	*sn_provider;

	kcf_prov_cpu_t		*sn_mp;
} kcf_sreq_node_t;
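
/*
 * Illustrative sketch (assumed, based on the fields above): how a
 * synchronous submitter waits for completion and how the completion
 * path, e.g. kcf_sop_done() declared below, would wake it.
 *
 *	// waiter side
 *	mutex_enter(&sreq->sn_lock);
 *	while (sreq->sn_state < REQ_DONE)
 *		cv_wait(&sreq->sn_cv, &sreq->sn_lock);
 *	rv = sreq->sn_rv;
 *	mutex_exit(&sreq->sn_lock);
 *
 *	// completion side
 *	mutex_enter(&sreq->sn_lock);
 *	sreq->sn_state = REQ_DONE;
 *	sreq->sn_rv = error;
 *	cv_signal(&sreq->sn_cv);
 *	mutex_exit(&sreq->sn_lock);
 */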

/*
 * Node structure for asynchronous requests. A node can be on
 * a chain of requests hanging off the internal context
 * structure and can be in the global software provider queue.
 */
typedef struct kcf_areq_node {
	/* Should always be the first field in this structure */
	kcf_call_type_t		an_type;

	/* an_lock protects the field an_state */
	kmutex_t		an_lock;
	kcf_req_status_t	an_state;
	crypto_call_req_t	an_reqarg;

	/*
	 * Parameters to call the SPI with. We need to
	 * save the params since the caller's stack can go away.
	 */
	struct kcf_req_params	an_params;

	/*
	 * The next two fields should be NULL for operations that
	 * don't need a context.
	 */
	/* Internal context for this request */
	struct kcf_context	*an_context;

	/* next in chain of requests for context */
	struct kcf_areq_node	*an_ctxchain_next;

	kcondvar_t		an_turn_cv;
	boolean_t		an_is_my_turn;
	boolean_t		an_isdual;	/* for internal reuse */

	/*
	 * Next and previous nodes in the global software
	 * queue. These fields are NULL for a hardware
	 * provider since we use a taskq there.
	 */
	struct kcf_areq_node	*an_next;
	struct kcf_areq_node	*an_prev;

	/* Provider handling this request */
	kcf_provider_desc_t	*an_provider;
	kcf_prov_cpu_t		*an_mp;
	kcf_prov_tried_t	*an_tried_plist;

	struct kcf_areq_node	*an_idnext;	/* Next in ID hash */
	struct kcf_areq_node	*an_idprev;	/* Prev in ID hash */
	kcondvar_t		an_done;	/* Signal request completion */
	uint_t			an_refcnt;
} kcf_areq_node_t;

#define	KCF_AREQ_REFHOLD(areq) {		\
	atomic_inc_32(&(areq)->an_refcnt);	\
	ASSERT((areq)->an_refcnt != 0);		\
}

#define	KCF_AREQ_REFRELE(areq) {				\
	ASSERT((areq)->an_refcnt != 0);				\
	membar_exit();						\
	if (atomic_dec_32_nv(&(areq)->an_refcnt) == 0)		\
		kcf_free_req(areq);				\
}
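
/*
 * Illustrative sketch (assumed usage): any code path that stores an areq
 * pointer, e.g. when linking it into the ID hash, must hold a reference
 * across the window in which another thread could complete the request.
 *
 *	KCF_AREQ_REFHOLD(areq);		// before publishing the pointer
 *	...				// areq may complete concurrently
 *	KCF_AREQ_REFRELE(areq);		// frees via kcf_free_req() at zero
 */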

#define	GET_REQ_TYPE(arg) *((kcf_call_type_t *)(arg))

#define	NOTIFY_CLIENT(areq, err) (*(areq)->an_reqarg.cr_callback_func)(\
	(areq)->an_reqarg.cr_callback_arg, err);

/* For internally generated call requests for dual operations */
typedef struct kcf_call_req {
	crypto_call_req_t	kr_callreq;	/* external client call req */
	kcf_req_params_t	kr_params;	/* Params saved for next call */
	kcf_areq_node_t		*kr_areq;	/* Use this areq */
	off_t			kr_saveoffset;
	size_t			kr_savelen;
} kcf_dual_req_t;

/*
 * The following are somewhat similar to macros in callo.h, which implement
 * callout tables.
 *
 * The lower four bits of the ID are used to encode the table ID to
 * index into. The REQID_COUNTER_HIGH bit is used to avoid any check for
 * wrap around when generating an ID. We assume that there won't be a
 * request which takes more time than 2^(8 * sizeof (long) - 5) other
 * requests submitted after it. This ensures there won't be any ID
 * collision.
 */
#define	REQID_COUNTER_HIGH	(1UL << (8 * sizeof (long) - 1))
#define	REQID_COUNTER_SHIFT	4
#define	REQID_COUNTER_LOW	(1 << REQID_COUNTER_SHIFT)
#define	REQID_TABLES		16
#define	REQID_TABLE_MASK	(REQID_TABLES - 1)

#define	REQID_BUCKETS		512
#define	REQID_BUCKET_MASK	(REQID_BUCKETS - 1)
#define	REQID_HASH(id)	(((id) >> REQID_COUNTER_SHIFT) & REQID_BUCKET_MASK)

#define	GET_REQID(areq) (areq)->an_reqarg.cr_reqid
#define	SET_REQID(areq, val)	GET_REQID(areq) = val

/*
 * Hash table for async requests.
 */
typedef struct kcf_reqid_table {
	kmutex_t	rt_lock;
	crypto_req_id_t	rt_curid;
	kcf_areq_node_t	*rt_idhash[REQID_BUCKETS];
} kcf_reqid_table_t;
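
/*
 * Illustrative sketch (assumed, derived from the REQID_* macros above):
 * how a request ID is decomposed when looking up an async request, e.g.
 * for crypto_cancel_req(9F). kcf_reqid_table is a hypothetical name for
 * the scheduler's array of kcf_reqid_table_t pointers.
 *
 *	kcf_reqid_table_t *rt = kcf_reqid_table[id & REQID_TABLE_MASK];
 *	kcf_areq_node_t *areq;
 *
 *	mutex_enter(&rt->rt_lock);
 *	for (areq = rt->rt_idhash[REQID_HASH(id)]; areq != NULL;
 *	    areq = areq->an_idnext) {
 *		if (GET_REQID(areq) == id)
 *			break;		// found the request
 *	}
 *	mutex_exit(&rt->rt_lock);
 */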

/*
 * Global software provider queue structure. Requests to be
 * handled by a software provider that have the CRYPTO_ALWAYS_QUEUE
 * flag set are queued here.
 */
typedef struct kcf_global_swq {
	/*
	 * gs_cv and gs_lock are used to wait for new requests.
	 * gs_lock protects the changes to the queue.
	 */
	kcondvar_t		gs_cv;
	kmutex_t		gs_lock;
	uint_t			gs_njobs;
	uint_t			gs_maxjobs;
	kcf_areq_node_t		*gs_first;
	kcf_areq_node_t		*gs_last;
} kcf_global_swq_t;
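
/*
 * Illustrative sketch (an assumption about the enqueue path): appending
 * an areq to the tail of the global software queue and waking one pool
 * thread sleeping on gs_cv.
 *
 *	mutex_enter(&gswq->gs_lock);
 *	if (gswq->gs_last == NULL) {
 *		gswq->gs_first = gswq->gs_last = areq;
 *	} else {
 *		areq->an_prev = gswq->gs_last;
 *		gswq->gs_last->an_next = areq;
 *		gswq->gs_last = areq;
 *	}
 *	gswq->gs_njobs++;
 *	cv_signal(&gswq->gs_cv);
 *	mutex_exit(&gswq->gs_lock);
 */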

/*
 * Internal representation of a canonical context. We embed the
 * crypto_ctx_t structure so that only one memory allocation is needed.
 * The SPI's ((crypto_ctx_t *)ctx)->cc_framework_private maps to this
 * structure.
 */
typedef struct kcf_context {
	crypto_ctx_t		kc_glbl_ctx;
	uint_t			kc_refcnt;
	kmutex_t		kc_in_use_lock;
	/*
	 * kc_req_chain_first and kc_req_chain_last are used to chain
	 * multiple async requests using the same context. They should be
	 * NULL for sync requests.
	 */
	kcf_areq_node_t		*kc_req_chain_first;
	kcf_areq_node_t		*kc_req_chain_last;
	kcf_provider_desc_t	*kc_prov_desc;		/* Prov. descriptor */
	kcf_provider_desc_t	*kc_sw_prov_desc;	/* Prov. descriptor */
	kcf_mech_entry_t	*kc_mech;
	struct kcf_context	*kc_secondctx;		/* for dual contexts */
} kcf_context_t;

/*
 * Bump up the reference count on the framework private context. A
 * global context or a request that references this structure should
 * hold a reference.
 */
#define	KCF_CONTEXT_REFHOLD(ictx) {		\
	atomic_inc_32(&(ictx)->kc_refcnt);	\
	ASSERT((ictx)->kc_refcnt != 0);		\
}

/*
 * Decrement the reference count on the framework private context.
 * When the last reference is released, the framework private
 * context structure is freed along with the global context.
 */
#define	KCF_CONTEXT_REFRELE(ictx) {				\
	ASSERT((ictx)->kc_refcnt != 0);				\
	membar_exit();						\
	if (atomic_dec_32_nv(&(ictx)->kc_refcnt) == 0)		\
		kcf_free_context(ictx);				\
}

/*
 * Check if we can release the context now. In the case of CRYPTO_QUEUED
 * we do not release it, as we can do so only after the provider notifies
 * us. In the case of CRYPTO_BUSY, the client can retry the request using
 * the context, so we do not release the context either.
 *
 * This macro should be called only from the final routine in
 * an init/update/final sequence. We do not release the context in case
 * of update operations. We require the consumer to free it
 * explicitly, in case it wants to abandon the operation. This is done
 * because there may be mechanisms in ECB mode that can continue even if
 * an operation on a block fails.
 */
#define	KCF_CONTEXT_COND_RELEASE(rv, kcf_ctx) {		\
	if (KCF_CONTEXT_DONE(rv))			\
		KCF_CONTEXT_REFRELE(kcf_ctx);		\
}

/*
 * This macro determines whether we're done with a context.
 */
#define	KCF_CONTEXT_DONE(rv)					\
	((rv) != CRYPTO_QUEUED && (rv) != CRYPTO_BUSY &&	\
	(rv) != CRYPTO_BUFFER_TOO_SMALL)
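
/*
 * Illustrative sketch (assumed usage in a final routine such as an
 * encrypt-final path; the provider call shown is hypothetical): release
 * the framework context only when the return value says we are done.
 *
 *	rv = ...;	// invoke the provider's final entry point
 *	KCF_CONTEXT_COND_RELEASE(rv, kcf_ctx);
 *		// kept alive for CRYPTO_QUEUED, CRYPTO_BUSY and
 *		// CRYPTO_BUFFER_TOO_SMALL; released otherwise
 */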

/*
 * A crypto_ctx_template_t is internally a pointer to this struct.
 */
typedef struct kcf_ctx_template {
	crypto_kcf_provider_handle_t	ct_prov_handle;	/* provider handle */
	uint_t				ct_generation;	/* generation # */
	size_t				ct_size;	/* for freeing */
	crypto_spi_ctx_template_t	ct_prov_tmpl;	/* context template */
							/* from the SW prov */
} kcf_ctx_template_t;

/*
 * Structure for the pool of threads working on the global software queue.
 */
typedef struct kcf_pool {
	uint32_t	kp_threads;		/* Number of threads in pool */
	uint32_t	kp_idlethreads;		/* Idle threads in pool */
	uint32_t	kp_blockedthreads;	/* Blocked threads in pool */

	/*
	 * cv & lock for the condition where more threads need to be created.
	 */
	kcondvar_t	kp_cv;		/* Creator cond. variable */
	kmutex_t	kp_lock;	/* Creator lock */
} kcf_pool_t;

/*
 * State of a crypto bufcall element.
 */
typedef enum cbuf_state {
	CBUF_FREE = 1,
	CBUF_WAITING,
	CBUF_RUNNING
} cbuf_state_t;

/*
 * Structure of a crypto bufcall element.
 */
typedef struct kcf_cbuf_elem {
	/*
	 * lock and cv to wait for CBUF_RUNNING to be done.
	 * kc_lock also protects kc_state.
	 */
	kmutex_t		kc_lock;
	kcondvar_t		kc_cv;
	cbuf_state_t		kc_state;

	struct kcf_cbuf_elem	*kc_next;
	struct kcf_cbuf_elem	*kc_prev;

	void			(*kc_func)(void *arg);
	void			*kc_arg;
} kcf_cbuf_elem_t;

/*
 * State of a notify element.
 */
typedef enum ntfy_elem_state {
	NTFY_WAITING = 1,
	NTFY_RUNNING
} ntfy_elem_state_t;

/*
 * Structure of a notify list element.
 */
typedef struct kcf_ntfy_elem {
	/*
	 * lock and cv to wait for NTFY_RUNNING to be done.
	 * kn_lock also protects kn_state.
	 */
	kmutex_t		kn_lock;
	kcondvar_t		kn_cv;
	ntfy_elem_state_t	kn_state;

	struct kcf_ntfy_elem	*kn_next;
	struct kcf_ntfy_elem	*kn_prev;

	crypto_notify_callback_t	kn_func;
	uint32_t		kn_event_mask;
} kcf_ntfy_elem_t;

/*
 * The following values are based on the assumption that it would
 * take around eight CPUs to load a hardware provider (this is true for
 * at least one product) and that a kernel client may come from different
 * low-priority interrupt levels. We will have CRYPTO_TASKQ_MIN cached
 * taskq entries. The CRYPTO_TASKQ_MAX number is based on
 * a throughput of 1GB/s using 512-byte buffers. These are just
 * reasonable estimates and might need to change in the future.
 */
#define	CRYPTO_TASKQ_THREADS	8
#define	CRYPTO_TASKQ_MIN	64
#define	CRYPTO_TASKQ_MAX	(2 * 1024 * 1024)

extern int crypto_taskq_threads;
extern int crypto_taskq_minalloc;
extern int crypto_taskq_maxalloc;
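
/*
 * Illustrative sketch (an assumption; taskq_create(9F) is the standard
 * interface): how a per-provider taskq might be created from the tunables
 * above when a hardware provider registers. The destination of tq is
 * provider-specific and not shown.
 *
 *	taskq_t *tq;
 *
 *	tq = taskq_create("kcf_taskq", crypto_taskq_threads, minclsyspri,
 *	    crypto_taskq_minalloc, crypto_taskq_maxalloc,
 *	    TASKQ_PREPOPULATE);
 */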

extern kcf_global_swq_t *gswq;
extern int kcf_maxthreads;
extern int kcf_minthreads;

/*
 * All pending crypto bufcalls are put on a list. cbuf_list_lock
 * protects changes to this list.
 */
extern kmutex_t cbuf_list_lock;
extern kcondvar_t cbuf_list_cv;

/*
 * All event subscribers are put on a list. ntfy_list_lock
 * protects changes to this list.
 */
extern kmutex_t ntfy_list_lock;
extern kcondvar_t ntfy_list_cv;

extern boolean_t kcf_get_next_logical_provider_member(kcf_provider_desc_t *,
    kcf_provider_desc_t *, kcf_provider_desc_t **);
extern int kcf_get_hardware_provider(crypto_mech_type_t, crypto_key_t *,
    crypto_mech_type_t, crypto_key_t *,
    kcf_provider_desc_t *, kcf_provider_desc_t **,
    crypto_func_group_t);
extern int kcf_get_hardware_provider_nomech(offset_t, offset_t,
    kcf_provider_desc_t *, kcf_provider_desc_t **);
extern void kcf_free_triedlist(kcf_prov_tried_t *);
extern kcf_prov_tried_t *kcf_insert_triedlist(kcf_prov_tried_t **,
    kcf_provider_desc_t *, int);
extern kcf_provider_desc_t *kcf_get_mech_provider(crypto_mech_type_t,
    crypto_key_t *, kcf_mech_entry_t **, int *, kcf_prov_tried_t *,
    crypto_func_group_t, size_t);
extern kcf_provider_desc_t *kcf_get_dual_provider(crypto_mechanism_t *,
    crypto_key_t *, crypto_mechanism_t *, crypto_key_t *,
    kcf_mech_entry_t **, crypto_mech_type_t *,
    crypto_mech_type_t *, int *, kcf_prov_tried_t *,
    crypto_func_group_t, crypto_func_group_t, size_t);
extern crypto_ctx_t *kcf_new_ctx(crypto_call_req_t *, kcf_provider_desc_t *,
    crypto_session_id_t);
extern int kcf_submit_request(kcf_provider_desc_t *, crypto_ctx_t *,
    crypto_call_req_t *, kcf_req_params_t *, boolean_t);
extern void kcf_sched_init(void);
extern void kcf_sched_start(void);
extern void kcf_sop_done(kcf_sreq_node_t *, int);
extern void kcf_aop_done(kcf_areq_node_t *, int);
extern int common_submit_request(kcf_provider_desc_t *,
    crypto_ctx_t *, kcf_req_params_t *, crypto_req_handle_t);
extern void kcf_free_context(kcf_context_t *);

extern struct modctl *kcf_get_modctl(crypto_provider_info_t *);
extern void kcf_free_req(kcf_areq_node_t *areq);
extern void crypto_bufcall_service(void);

extern void kcf_walk_ntfylist(uint32_t, void *);
extern void kcf_do_notify(kcf_provider_desc_t *, boolean_t);

extern kcf_dual_req_t *kcf_alloc_req(crypto_call_req_t *);
extern void kcf_next_req(void *, int);
extern void kcf_last_req(void *, int);

#ifdef __cplusplus
}
#endif

#endif /* _SYS_CRYPTO_SCHED_IMPL_H */