/*
 * Copyright (C) 2016 Pengutronix, Steffen Trumtrar <kernel@pengutronix.de>
 *
 * The driver is based on information gathered from
 * drivers/mxc/security/mxc_scc.c which can be found in
 * the Freescale linux-2.6-imx.git in the imx_2.6.35_maintain branch.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>

#include <crypto/algapi.h>
#include <crypto/des.h>

/* Secure Memory (SCM) registers */
#define SCC_SCM_RED_START		0x0000
#define SCC_SCM_BLACK_START		0x0004
#define SCC_SCM_LENGTH			0x0008
#define SCC_SCM_CTRL			0x000C
#define SCC_SCM_STATUS			0x0010
#define SCC_SCM_ERROR_STATUS		0x0014
#define SCC_SCM_INTR_CTRL		0x0018
#define SCC_SCM_CFG			0x001C
#define SCC_SCM_INIT_VECTOR_0		0x0020
#define SCC_SCM_INIT_VECTOR_1		0x0024
#define SCC_SCM_RED_MEMORY		0x0400
#define SCC_SCM_BLACK_MEMORY		0x0800

/* Security Monitor (SMN) Registers */
#define SCC_SMN_STATUS			0x1000
#define SCC_SMN_COMMAND			0x1004
#define SCC_SMN_SEQ_START		0x1008
#define SCC_SMN_SEQ_END			0x100C
#define SCC_SMN_SEQ_CHECK		0x1010
#define SCC_SMN_BIT_COUNT		0x1014
#define SCC_SMN_BITBANK_INC_SIZE	0x1018
#define SCC_SMN_BITBANK_DECREMENT	0x101C
#define SCC_SMN_COMPARE_SIZE		0x1020
#define SCC_SMN_PLAINTEXT_CHECK		0x1024
#define SCC_SMN_CIPHERTEXT_CHECK	0x1028
#define SCC_SMN_TIMER_IV		0x102C
#define SCC_SMN_TIMER_CONTROL		0x1030
#define SCC_SMN_DEBUG_DETECT_STAT	0x1034
#define SCC_SMN_TIMER			0x1038

#define SCC_SCM_CTRL_START_CIPHER	BIT(2)
#define SCC_SCM_CTRL_CBC_MODE		BIT(1)
#define SCC_SCM_CTRL_DECRYPT_MODE	BIT(0)

#define SCC_SCM_STATUS_LEN_ERR		BIT(12)
#define SCC_SCM_STATUS_SMN_UNBLOCKED	BIT(11)
#define SCC_SCM_STATUS_CIPHERING_DONE	BIT(10)
#define SCC_SCM_STATUS_ZEROIZING_DONE	BIT(9)
#define SCC_SCM_STATUS_INTR_STATUS	BIT(8)
#define SCC_SCM_STATUS_SEC_KEY		BIT(7)
#define SCC_SCM_STATUS_INTERNAL_ERR	BIT(6)
#define SCC_SCM_STATUS_BAD_SEC_KEY	BIT(5)
#define SCC_SCM_STATUS_ZEROIZE_FAIL	BIT(4)
#define SCC_SCM_STATUS_SMN_BLOCKED	BIT(3)
#define SCC_SCM_STATUS_CIPHERING	BIT(2)
#define SCC_SCM_STATUS_ZEROIZING	BIT(1)
#define SCC_SCM_STATUS_BUSY		BIT(0)

#define SCC_SMN_STATUS_STATE_MASK	0x0000001F
#define SCC_SMN_STATE_START		0x0
/* The SMN is zeroizing its RAM during reset */
#define SCC_SMN_STATE_ZEROIZE_RAM	0x5
/* SMN has passed internal checks */
#define SCC_SMN_STATE_HEALTH_CHECK	0x6
/* Fatal Security Violation. SMN is locked, SCM is inoperative. */
#define SCC_SMN_STATE_FAIL		0x9
/* SCC is in secure state. SCM is using secret key. */
#define SCC_SMN_STATE_SECURE		0xA
/* SCC is not secure. SCM is using default key. */
#define SCC_SMN_STATE_NON_SECURE	0xC

#define SCC_SCM_INTR_CTRL_ZEROIZE_MEM	BIT(2)
#define SCC_SCM_INTR_CTRL_CLR_INTR	BIT(1)
#define SCC_SCM_INTR_CTRL_MASK_INTR	BIT(0)

/* Size, in blocks, of Black memory. */
#define SCC_SCM_CFG_BLACK_SIZE_MASK	0x07fe0000
#define SCC_SCM_CFG_BLACK_SIZE_SHIFT	17
/* Size, in blocks, of Red memory. */
#define SCC_SCM_CFG_RED_SIZE_MASK	0x0001ff80
#define SCC_SCM_CFG_RED_SIZE_SHIFT	7
/* Number of bytes per block. */
#define SCC_SCM_CFG_BLOCK_SIZE_MASK	0x0000007f

#define SCC_SMN_COMMAND_TAMPER_LOCK	BIT(4)
#define SCC_SMN_COMMAND_CLR_INTR	BIT(3)
#define SCC_SMN_COMMAND_CLR_BIT_BANK	BIT(2)
#define SCC_SMN_COMMAND_EN_INTR		BIT(1)
#define SCC_SMN_COMMAND_SET_SOFTWARE_ALARM	BIT(0)

#define SCC_KEY_SLOTS			20
#define SCC_MAX_KEY_SIZE		32
#define SCC_KEY_SLOT_SIZE		32

#define SCC_CRC_CCITT_START		0xFFFF

/*
 * Offset into each RAM of the base of the area which is not
 * used for Stored Keys.
 */
#define SCC_NON_RESERVED_OFFSET	(SCC_KEY_SLOTS * SCC_KEY_SLOT_SIZE)

/* Fixed padding for appending to plaintext to fill out a block */
static char scc_block_padding[8] = { 0x80, 0, 0, 0, 0, 0, 0, 0 };

enum mxc_scc_state {
	SCC_STATE_OK,
	SCC_STATE_UNIMPLEMENTED,
	SCC_STATE_FAILED
};

struct mxc_scc {
	struct device		*dev;
	void __iomem		*base;
	struct clk		*clk;
	bool			hw_busy;
	spinlock_t		lock;
	struct crypto_queue	queue;
	struct crypto_async_request *req;
	int			block_size_bytes;
	int			black_ram_size_blocks;
	int			memory_size_bytes;
	int			bytes_remaining;

	void __iomem		*red_memory;
	void __iomem		*black_memory;
};

struct mxc_scc_ctx {
	struct mxc_scc		*scc;
	struct scatterlist	*sg_src;
	size_t			src_nents;
	struct scatterlist	*sg_dst;
	size_t			dst_nents;
	unsigned int		offset;
	unsigned int		size;
	unsigned int		ctrl;
};

struct mxc_scc_crypto_tmpl {
	struct mxc_scc *scc;
	struct crypto_alg alg;
};
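
/*
 * Copy the result of a finished cipher operation out of the SCC's internal
 * RAM into the request's destination scatterlist: decrypted data is fetched
 * from red memory, encrypted data from black memory. Returns -EINPROGRESS
 * while part of the request still remains to be processed.
 */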
static int mxc_scc_get_data(struct mxc_scc_ctx *ctx,
			    struct crypto_async_request *req)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mxc_scc *scc = ctx->scc;
	size_t len;
	void __iomem *from;

	if (ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE)
		from = scc->red_memory;
	else
		from = scc->black_memory;

	dev_dbg(scc->dev, "pcopy: from 0x%p %zu bytes\n", from,
		ctx->dst_nents * 8);
	len = sg_pcopy_from_buffer(ablkreq->dst, ctx->dst_nents,
				   from, ctx->size, ctx->offset);
	if (!len) {
		dev_err(scc->dev, "pcopy err from 0x%p (len=%zu)\n", from, len);
		return -EINVAL;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "red memory@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       scc->red_memory, ctx->size, 1);
	print_hex_dump(KERN_ERR,
		       "black memory@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       scc->black_memory, ctx->size, 1);
#endif

	ctx->offset += len;

	if (ctx->offset < ablkreq->nbytes)
		return -EINPROGRESS;

	return 0;
}
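
/*
 * Validate and count the scatterlist entries needed to cover req->nbytes on
 * both the source and destination side, and reset the per-request progress
 * counters before the first chunk is dispatched.
 */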
static int mxc_scc_ablkcipher_req_init(struct ablkcipher_request *req,
				       struct mxc_scc_ctx *ctx)
{
	struct mxc_scc *scc = ctx->scc;
	int nents;

	nents = sg_nents_for_len(req->src, req->nbytes);
	if (nents < 0) {
		dev_err(scc->dev, "Invalid number of src SG\n");
		return nents;
	}
	ctx->src_nents = nents;

	nents = sg_nents_for_len(req->dst, req->nbytes);
	if (nents < 0) {
		dev_err(scc->dev, "Invalid number of dst SG\n");
		return nents;
	}
	ctx->dst_nents = nents;

	ctx->size = 0;
	ctx->offset = 0;

	return 0;
}
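
/*
 * Finish a request: for CBC, hand the updated IV kept in the SCM init vector
 * registers back to the caller so chained requests can continue, then call
 * the request's completion callback and mark the hardware idle.
 */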
static int mxc_scc_ablkcipher_req_complete(struct crypto_async_request *req,
					   struct mxc_scc_ctx *ctx,
					   int result)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mxc_scc *scc = ctx->scc;

	scc->req = NULL;
	scc->bytes_remaining = scc->memory_size_bytes;

	if (ctx->ctrl & SCC_SCM_CTRL_CBC_MODE)
		memcpy(ablkreq->info, scc->base + SCC_SCM_INIT_VECTOR_0,
		       scc->block_size_bytes);

	req->complete(req, result);
	scc->hw_busy = false;

	return 0;
}
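
/*
 * Copy the next chunk of request data into the SCC's internal RAM (red
 * memory for encryption, black memory for decryption), load the IV for CBC
 * mode and append the fixed padding pattern when the chunk does not end on
 * a cipher block boundary.
 */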
static int mxc_scc_put_data(struct mxc_scc_ctx *ctx,
			    struct ablkcipher_request *req)
{
	u8 padding_buffer[sizeof(u16) + sizeof(scc_block_padding)];
	size_t len = min_t(size_t, req->nbytes - ctx->offset,
			   ctx->scc->bytes_remaining);
	unsigned int padding_byte_count = 0;
	struct mxc_scc *scc = ctx->scc;
	void __iomem *to;

	if (ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE)
		to = scc->black_memory;
	else
		to = scc->red_memory;

	if (ctx->ctrl & SCC_SCM_CTRL_CBC_MODE && req->info)
		memcpy(scc->base + SCC_SCM_INIT_VECTOR_0, req->info,
		       scc->block_size_bytes);

	len = sg_pcopy_to_buffer(req->src, ctx->src_nents,
				 to, len, ctx->offset);
	if (!len) {
		dev_err(scc->dev, "pcopy err to 0x%p (len=%zu)\n", to, len);
		return -EINVAL;
	}

	ctx->size = len;

#ifdef DEBUG
	dev_dbg(scc->dev, "copied %zu bytes to 0x%p\n", len, to);
	print_hex_dump(KERN_ERR,
		       "init vector0@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       scc->base + SCC_SCM_INIT_VECTOR_0, scc->block_size_bytes,
		       1);
	print_hex_dump(KERN_ERR,
		       "red memory@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       scc->red_memory, ctx->size, 1);
	print_hex_dump(KERN_ERR,
		       "black memory@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       scc->black_memory, ctx->size, 1);
#endif

	scc->bytes_remaining -= len;

	padding_byte_count = len % scc->block_size_bytes;

	if (padding_byte_count) {
		memcpy(padding_buffer, scc_block_padding, padding_byte_count);
		memcpy(to + len, padding_buffer, padding_byte_count);
		ctx->size += padding_byte_count;
	}

#ifdef DEBUG
	print_hex_dump(KERN_ERR,
		       "data to encrypt@"__stringify(__LINE__)": ",
		       DUMP_PREFIX_ADDRESS, 16, 4,
		       to, ctx->size, 1);
#endif

	return 0;
}
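
/*
 * Program and start one hardware operation: clear the error status, copy the
 * next chunk into the SCC RAM, acknowledge any pending interrupt, write the
 * number of blocks (minus one) to the SCM length register and start the
 * cipher by writing the control word assembled for this request.
 */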
static void mxc_scc_ablkcipher_next(struct mxc_scc_ctx *ctx,
				    struct crypto_async_request *req)
{
	struct ablkcipher_request *ablkreq = ablkcipher_request_cast(req);
	struct mxc_scc *scc = ctx->scc;
	int err;

	dev_dbg(scc->dev, "dispatch request (nbytes=%d, src=%p, dst=%p)\n",
		ablkreq->nbytes, ablkreq->src, ablkreq->dst);

	writel(0, scc->base + SCC_SCM_ERROR_STATUS);

	err = mxc_scc_put_data(ctx, ablkreq);
	if (err) {
		mxc_scc_ablkcipher_req_complete(req, ctx, err);
		return;
	}

	dev_dbg(scc->dev, "Start encryption (0x%p/0x%p)\n",
		(void *)readl(scc->base + SCC_SCM_RED_START),
		(void *)readl(scc->base + SCC_SCM_BLACK_START));

	/* clear interrupt control registers */
	writel(SCC_SCM_INTR_CTRL_CLR_INTR,
	       scc->base + SCC_SCM_INTR_CTRL);

	writel((ctx->size / ctx->scc->block_size_bytes) - 1,
	       scc->base + SCC_SCM_LENGTH);

	dev_dbg(scc->dev, "Process %d block(s) in 0x%p\n",
		ctx->size / ctx->scc->block_size_bytes,
		(ctx->ctrl & SCC_SCM_CTRL_DECRYPT_MODE) ? scc->black_memory :
		scc->red_memory);

	writel(ctx->ctrl, scc->base + SCC_SCM_CTRL);
}
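
/*
 * Interrupt handler (registered as the threaded handler in probe).
 * Acknowledge the SCM interrupt and, unless the engine is still busy, copy
 * the finished chunk back to the caller; the current request is then either
 * completed or its next chunk is dispatched.
 */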
static irqreturn_t mxc_scc_int(int irq, void *priv)
{
	struct crypto_async_request *req;
	struct mxc_scc_ctx *ctx;
	struct mxc_scc *scc = priv;
	int status;
	int ret;

	status = readl(scc->base + SCC_SCM_STATUS);

	/* clear interrupt control registers */
	writel(SCC_SCM_INTR_CTRL_CLR_INTR, scc->base + SCC_SCM_INTR_CTRL);

	if (status & SCC_SCM_STATUS_BUSY)
		return IRQ_NONE;

	req = scc->req;
	if (req) {
		ctx = crypto_tfm_ctx(req->tfm);
		ret = mxc_scc_get_data(ctx, req);
		if (ret != -EINPROGRESS)
			mxc_scc_ablkcipher_req_complete(req, ctx, ret);
		else
			mxc_scc_ablkcipher_next(ctx, req);
	}

	return IRQ_HANDLED;
}

static int mxc_scc_cra_init(struct crypto_tfm *tfm)
{
	struct mxc_scc_ctx *ctx = crypto_tfm_ctx(tfm);
	struct crypto_alg *alg = tfm->__crt_alg;
	struct mxc_scc_crypto_tmpl *algt;

	algt = container_of(alg, struct mxc_scc_crypto_tmpl, alg);

	ctx->scc = algt->scc;
	return 0;
}
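
/*
 * If the hardware is idle, pull the next request off the software queue
 * under the queue lock (notifying any backlogged request first) and hand it
 * to the hardware.
 */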
static void mxc_scc_dequeue_req_unlocked(struct mxc_scc_ctx *ctx)
{
	struct crypto_async_request *req, *backlog;

	if (ctx->scc->hw_busy)
		return;

	spin_lock_bh(&ctx->scc->lock);
	backlog = crypto_get_backlog(&ctx->scc->queue);
	req = crypto_dequeue_request(&ctx->scc->queue);
	ctx->scc->req = req;
	ctx->scc->hw_busy = true;
	spin_unlock_bh(&ctx->scc->lock);

	if (!req)
		return;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	mxc_scc_ablkcipher_next(ctx, req);
}

static int mxc_scc_queue_req(struct mxc_scc_ctx *ctx,
			     struct crypto_async_request *req)
{
	int ret;

	spin_lock_bh(&ctx->scc->lock);
	ret = crypto_enqueue_request(&ctx->scc->queue, req);
	spin_unlock_bh(&ctx->scc->lock);

	if (ret != -EINPROGRESS)
		return ret;

	mxc_scc_dequeue_req_unlocked(ctx);

	return -EINPROGRESS;
}

static int mxc_scc_des3_op(struct mxc_scc_ctx *ctx,
			   struct ablkcipher_request *req)
{
	int err;

	err = mxc_scc_ablkcipher_req_init(req, ctx);
	if (err)
		return err;

	return mxc_scc_queue_req(ctx, &req->base);
}

static int mxc_scc_ecb_des_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
	struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);

	ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;

	return mxc_scc_des3_op(ctx, req);
}

static int mxc_scc_ecb_des_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
	struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);

	ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
	ctx->ctrl |= SCC_SCM_CTRL_DECRYPT_MODE;

	return mxc_scc_des3_op(ctx, req);
}

static int mxc_scc_cbc_des_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
	struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);

	ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
	ctx->ctrl |= SCC_SCM_CTRL_CBC_MODE;

	return mxc_scc_des3_op(ctx, req);
}

static int mxc_scc_cbc_des_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *cipher = crypto_ablkcipher_reqtfm(req);
	struct mxc_scc_ctx *ctx = crypto_ablkcipher_ctx(cipher);

	ctx->ctrl = SCC_SCM_CTRL_START_CIPHER;
	ctx->ctrl |= SCC_SCM_CTRL_CBC_MODE;
	ctx->ctrl |= SCC_SCM_CTRL_DECRYPT_MODE;

	return mxc_scc_des3_op(ctx, req);
}
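
/*
 * Point the red and black working areas past the key-slot region reserved at
 * the start of each RAM and tell the hardware, via the RED_START and
 * BLACK_START registers, where cipher operations begin.
 */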
static void mxc_scc_hw_init(struct mxc_scc *scc)
{
	int offset;

	offset = SCC_NON_RESERVED_OFFSET / scc->block_size_bytes;

	/* Fill the RED_START register */
	writel(offset, scc->base + SCC_SCM_RED_START);

	/* Fill the BLACK_START register */
	writel(offset, scc->base + SCC_SCM_BLACK_START);

	scc->red_memory = scc->base + SCC_SCM_RED_MEMORY +
			  SCC_NON_RESERVED_OFFSET;

	scc->black_memory = scc->base + SCC_SCM_BLACK_MEMORY +
			    SCC_NON_RESERVED_OFFSET;

	scc->bytes_remaining = scc->memory_size_bytes;
}
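
/*
 * Read the SCM configuration register to learn the cipher block size and the
 * size of the black RAM, and derive from that how many bytes of partition
 * RAM remain usable per operation once the key slots are reserved.
 */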
static int mxc_scc_get_config(struct mxc_scc *scc)
{
	int config;

	config = readl(scc->base + SCC_SCM_CFG);

	scc->block_size_bytes = config & SCC_SCM_CFG_BLOCK_SIZE_MASK;

	scc->black_ram_size_blocks = (config & SCC_SCM_CFG_BLACK_SIZE_MASK) >>
				     SCC_SCM_CFG_BLACK_SIZE_SHIFT;

	scc->memory_size_bytes = (scc->block_size_bytes *
				  scc->black_ram_size_blocks) -
				  SCC_NON_RESERVED_OFFSET;

	return 0;
}
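
/*
 * Query the Security Monitor state. If the SMN is still in its health-check
 * state, feed the Algorithm Sequence Checker the expected pattern to bring
 * it to the secure state, then map the resulting state onto the driver's
 * coarse OK/failed/unimplemented classification.
 */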
static enum mxc_scc_state mxc_scc_get_state(struct mxc_scc *scc)
{
	enum mxc_scc_state state;
	int status;

	status = readl(scc->base + SCC_SMN_STATUS) &
		 SCC_SMN_STATUS_STATE_MASK;

	/* If in Health Check, try to bringup to secure state */
	if (status & SCC_SMN_STATE_HEALTH_CHECK) {
		/*
		 * Write a simple algorithm to the Algorithm Sequence
		 * Checker (ASC)
		 */
		writel(0xaaaa, scc->base + SCC_SMN_SEQ_START);
		writel(0x5555, scc->base + SCC_SMN_SEQ_END);
		writel(0x5555, scc->base + SCC_SMN_SEQ_CHECK);

		status = readl(scc->base + SCC_SMN_STATUS) &
			 SCC_SMN_STATUS_STATE_MASK;
	}

	switch (status) {
	case SCC_SMN_STATE_NON_SECURE:
	case SCC_SMN_STATE_SECURE:
		state = SCC_STATE_OK;
		break;
	case SCC_SMN_STATE_FAIL:
		state = SCC_STATE_FAILED;
		break;
	default:
		state = SCC_STATE_UNIMPLEMENTED;
		break;
	}

	return state;
}

static struct mxc_scc_crypto_tmpl scc_ecb_des = {
	.alg = {
		.cra_name = "ecb(des3_ede)",
		.cra_driver_name = "ecb-des3-scc",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mxc_scc_ctx),
		.cra_alignmask = 0,
		.cra_type = &crypto_ablkcipher_type,
		.cra_module = THIS_MODULE,
		.cra_init = mxc_scc_cra_init,
		.cra_u.ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.encrypt = mxc_scc_ecb_des_encrypt,
			.decrypt = mxc_scc_ecb_des_decrypt,
		}
	}
};

static struct mxc_scc_crypto_tmpl scc_cbc_des = {
	.alg = {
		.cra_name = "cbc(des3_ede)",
		.cra_driver_name = "cbc-des3-scc",
		.cra_priority = 300,
		.cra_flags = CRYPTO_ALG_TYPE_ABLKCIPHER,
		.cra_blocksize = DES3_EDE_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct mxc_scc_ctx),
		.cra_alignmask = 0,
		.cra_type = &crypto_ablkcipher_type,
		.cra_module = THIS_MODULE,
		.cra_init = mxc_scc_cra_init,
		.cra_u.ablkcipher = {
			.min_keysize = DES3_EDE_KEY_SIZE,
			.max_keysize = DES3_EDE_KEY_SIZE,
			.encrypt = mxc_scc_cbc_des_encrypt,
			.decrypt = mxc_scc_cbc_des_decrypt,
		}
	}
};

static struct mxc_scc_crypto_tmpl *scc_crypto_algs[] = {
	&scc_ecb_des,
	&scc_cbc_des,
};

static int mxc_scc_crypto_register(struct mxc_scc *scc)
{
	int i;
	int err = 0;

	for (i = 0; i < ARRAY_SIZE(scc_crypto_algs); i++) {
		scc_crypto_algs[i]->scc = scc;
		err = crypto_register_alg(&scc_crypto_algs[i]->alg);
		if (err)
			goto err_out;
	}

	return 0;

err_out:
	while (--i >= 0)
		crypto_unregister_alg(&scc_crypto_algs[i]->alg);

	return err;
}

static void mxc_scc_crypto_unregister(void)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(scc_crypto_algs); i++)
		crypto_unregister_alg(&scc_crypto_algs[i]->alg);
}

static int mxc_scc_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct resource *res;
	struct mxc_scc *scc;
	enum mxc_scc_state state;
	int irq;
	int ret;
	int i;

	scc = devm_kzalloc(dev, sizeof(*scc), GFP_KERNEL);
	if (!scc)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	scc->base = devm_ioremap_resource(dev, res);
	if (IS_ERR(scc->base))
		return PTR_ERR(scc->base);

	scc->clk = devm_clk_get(&pdev->dev, "ipg");
	if (IS_ERR(scc->clk)) {
		dev_err(dev, "Could not get ipg clock\n");
		return PTR_ERR(scc->clk);
	}

	ret = clk_prepare_enable(scc->clk);
	if (ret)
		return ret;

	/* clear error status register */
	writel(0x0, scc->base + SCC_SCM_ERROR_STATUS);

	/* clear interrupt control registers */
	writel(SCC_SCM_INTR_CTRL_CLR_INTR |
	       SCC_SCM_INTR_CTRL_MASK_INTR,
	       scc->base + SCC_SCM_INTR_CTRL);

	writel(SCC_SMN_COMMAND_CLR_INTR |
	       SCC_SMN_COMMAND_EN_INTR,
	       scc->base + SCC_SMN_COMMAND);

	scc->dev = dev;
	platform_set_drvdata(pdev, scc);

	ret = mxc_scc_get_config(scc);
	if (ret)
		goto err_out;

	state = mxc_scc_get_state(scc);

	if (state != SCC_STATE_OK) {
		dev_err(dev, "SCC in unusable state %d\n", state);
		ret = -EINVAL;
		goto err_out;
	}

	mxc_scc_hw_init(scc);

	spin_lock_init(&scc->lock);
	/* FIXME: calculate queue size from the RAM slots */
	crypto_init_queue(&scc->queue, 50);

	for (i = 0; i < 2; i++) {
		irq = platform_get_irq(pdev, i);
		if (irq < 0) {
			dev_err(dev, "failed to get irq resource\n");
			ret = -EINVAL;
			goto err_out;
		}

		ret = devm_request_threaded_irq(dev, irq, NULL, mxc_scc_int,
						IRQF_ONESHOT, dev_name(dev), scc);
		if (ret)
			goto err_out;
	}

	ret = mxc_scc_crypto_register(scc);
	if (ret) {
		dev_err(dev, "could not register algorithms\n");
		goto err_out;
	}

	dev_info(dev, "registered successfully.\n");

	return 0;

err_out:
	clk_disable_unprepare(scc->clk);

	return ret;
}

static int mxc_scc_remove(struct platform_device *pdev)
{
	struct mxc_scc *scc = platform_get_drvdata(pdev);

	mxc_scc_crypto_unregister();

	clk_disable_unprepare(scc->clk);

	return 0;
}

static const struct of_device_id mxc_scc_dt_ids[] = {
	{ .compatible = "fsl,imx25-scc", .data = NULL, },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mxc_scc_dt_ids);

static struct platform_driver mxc_scc_driver = {
	.probe	= mxc_scc_probe,
	.remove	= mxc_scc_remove,
	.driver	= {
		.name		= "mxc-scc",
		.of_match_table	= mxc_scc_dt_ids,
	},
};

module_platform_driver(mxc_scc_driver);
MODULE_AUTHOR("Steffen Trumtrar <kernel@pengutronix.de>");
MODULE_DESCRIPTION("Freescale i.MX25 SCC Crypto driver");
MODULE_LICENSE("GPL v2");