Staging: sep: clean up error checking in probe function
drivers/staging/sep/sep_driver.c
/*
 *
 *  sep_driver.c - Security Processor Driver main group of functions
 *
 *  Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
 *  Contributions(c) 2009,2010 Discretix. All rights reserved.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the Free
 *  Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 *  more details.
 *
 *  You should have received a copy of the GNU General Public License along with
 *  this program; if not, write to the Free Software Foundation, Inc., 59
 *  Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 *  CONTACTS:
 *
 *  Mark Allyn		mark.a.allyn@intel.com
 *  Jayant Mangalampalli jayant.mangalampalli@intel.com
 *
 *  CHANGES:
 *
 *  2009.06.26	Initial publish
 *  2010.09.14	Upgrade to Medfield
 */
#define DEBUG
#include <linux/init.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/kdev_t.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#include <asm/current.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <linux/delay.h>
#include <linux/rar_register.h>

#include "../memrar/memrar.h"

#include "sep_driver_hw_defs.h"
#include "sep_driver_config.h"
#include "sep_driver_api.h"
#include "sep_dev.h"
/*----------------------------------------
	DEFINES
-----------------------------------------*/

#define SEP_RAR_IO_MEM_REGION_SIZE 0x40000

/*--------------------------------------------
	GLOBAL variables
--------------------------------------------*/

/* Keep this a single static object for now to keep the conversion easy */

static struct sep_device *sep_dev;
/**
 * sep_load_firmware - copy firmware cache/resident
 * @sep: pointer to struct sep_device we are loading
 *
 * This function copies the cache and resident from their source
 * location into destination shared memory.
 */
static int sep_load_firmware(struct sep_device *sep)
{
	const struct firmware *fw;
	char *cache_name = "cache.image.bin";
	char *res_name = "resident.image.bin";
	char *extapp_name = "extapp.image.bin";
	int error;
	unsigned int work1, work2, work3;

	/* Set addresses and load resident */
	sep->resident_bus = sep->rar_bus;
	sep->resident_addr = sep->rar_addr;

	error = request_firmware(&fw, res_name, &sep->pdev->dev);
	if (error) {
		dev_warn(&sep->pdev->dev, "can't request resident fw\n");
		return error;
	}

	memcpy(sep->resident_addr, (void *)fw->data, fw->size);
	sep->resident_size = fw->size;
	release_firmware(fw);

	dev_dbg(&sep->pdev->dev, "resident virtual is %p\n",
		sep->resident_addr);
	dev_dbg(&sep->pdev->dev, "resident bus is %lx\n",
		(unsigned long)sep->resident_bus);
	dev_dbg(&sep->pdev->dev, "resident size is %08zx\n",
		sep->resident_size);

	/* Set addresses for dcache (no loading needed) */
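	/*
	 * The arithmetic below places the dcache just past the resident
	 * image: take the end of the resident area, add a 4KB pad, and
	 * mask with 0xfffff000 so the result lands on a 4KB boundary
	 * beyond the resident image. The same computation is done once
	 * for the bus address and once for the kernel virtual address.
	 */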
	work1 = (unsigned int)sep->resident_bus;
	work2 = (unsigned int)sep->resident_size;
	work3 = (work1 + work2 + (1024 * 4)) & 0xfffff000;
	sep->dcache_bus = (dma_addr_t)work3;

	work1 = (unsigned int)sep->resident_addr;
	work2 = (unsigned int)sep->resident_size;
	work3 = (work1 + work2 + (1024 * 4)) & 0xfffff000;
	sep->dcache_addr = (void *)work3;

	sep->dcache_size = 1024 * 128;

	/* Set addresses and load cache */
	sep->cache_bus = sep->dcache_bus + sep->dcache_size;
	sep->cache_addr = sep->dcache_addr + sep->dcache_size;

	error = request_firmware(&fw, cache_name, &sep->pdev->dev);
	if (error) {
		dev_warn(&sep->pdev->dev, "Unable to request cache firmware\n");
		return error;
	}

	memcpy(sep->cache_addr, (void *)fw->data, fw->size);
	sep->cache_size = fw->size;
	release_firmware(fw);

	dev_dbg(&sep->pdev->dev, "cache virtual is %p\n",
		sep->cache_addr);
	dev_dbg(&sep->pdev->dev, "cache bus is %08lx\n",
		(unsigned long)sep->cache_bus);
	dev_dbg(&sep->pdev->dev, "cache size is %08zx\n",
		sep->cache_size);

	/* Set addresses and load extapp */
	sep->extapp_bus = sep->cache_bus + (1024 * 370);
	sep->extapp_addr = sep->cache_addr + (1024 * 370);

	error = request_firmware(&fw, extapp_name, &sep->pdev->dev);
	if (error) {
		dev_warn(&sep->pdev->dev, "Unable to request extapp firmware\n");
		return error;
	}

	memcpy(sep->extapp_addr, (void *)fw->data, fw->size);
	sep->extapp_size = fw->size;
	release_firmware(fw);

	dev_dbg(&sep->pdev->dev, "extapp virtual is %p\n",
		sep->extapp_addr);
	dev_dbg(&sep->pdev->dev, "extapp bus is %08llx\n",
		(unsigned long long)sep->extapp_bus);
	dev_dbg(&sep->pdev->dev, "extapp size is %08zx\n",
		sep->extapp_size);

	return error;
}
MODULE_FIRMWARE("sep/cache.image.bin");
MODULE_FIRMWARE("sep/resident.image.bin");
MODULE_FIRMWARE("sep/extapp.image.bin");
/**
 * sep_dump_message - dump the message that is pending
 * @sep: SEP device
 */
static void sep_dump_message(struct sep_device *sep)
{
	int count;
	u32 *p = sep->shared_addr;
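	/*
	 * Note: p advances one 32-bit word per iteration while count
	 * advances in steps of 4, so the loop dumps the first 12 words
	 * of the message, labelling each one by its byte offset.
	 */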
	for (count = 0; count < 12 * 4; count += 4)
		dev_dbg(&sep->pdev->dev, "Word %d of the message is %x\n",
			count, *p++);
}
/**
 * sep_map_and_alloc_shared_area - allocate shared block
 * @sep: security processor
 *
 * Allocates sep->shared_size bytes for the shared area.
 */
static int sep_map_and_alloc_shared_area(struct sep_device *sep)
{
	sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
		sep->shared_size,
		&sep->shared_bus, GFP_KERNEL);

	if (!sep->shared_addr) {
		dev_warn(&sep->pdev->dev,
			"shared memory dma_alloc_coherent failed\n");
		return -ENOMEM;
	}
	dev_dbg(&sep->pdev->dev,
		"shared_addr %zx bytes @%p (bus %llx)\n",
		sep->shared_size, sep->shared_addr,
		(unsigned long long)sep->shared_bus);
	return 0;
}
/**
 * sep_unmap_and_free_shared_area - free shared block
 * @sep: security processor
 */
static void sep_unmap_and_free_shared_area(struct sep_device *sep)
{
	dev_dbg(&sep->pdev->dev, "shared area unmap and free\n");
	dma_free_coherent(&sep->pdev->dev, sep->shared_size,
		sep->shared_addr, sep->shared_bus);
}
/**
 * sep_shared_bus_to_virt - convert bus/virt addresses
 * @sep: pointer to struct sep_device
 * @bus_address: address to convert
 *
 * Returns virtual address inside the shared area according
 * to the bus address.
 */
static void *sep_shared_bus_to_virt(struct sep_device *sep,
	dma_addr_t bus_address)
{
	return sep->shared_addr + (bus_address - sep->shared_bus);
}
/**
 * sep_singleton_open - open function for the singleton driver
 * @inode_ptr: struct inode *
 * @file_ptr: struct file *
 *
 * Called when the user opens the singleton device interface
 */
static int sep_singleton_open(struct inode *inode_ptr, struct file *file_ptr)
{
	int error = 0;
	struct sep_device *sep;

	/*
	 * Get the SEP device structure and use it for the
	 * private_data field in filp for other methods
	 */
	sep = sep_dev;

	file_ptr->private_data = sep;

	dev_dbg(&sep->pdev->dev, "Singleton open for pid %d\n", current->pid);

	dev_dbg(&sep->pdev->dev, "calling test and set for singleton 0\n");
	if (test_and_set_bit(0, &sep->singleton_access_flag)) {
		error = -EBUSY;
		goto end_function;
	}

	dev_dbg(&sep->pdev->dev, "sep_singleton_open end\n");
end_function:
	return error;
}
/**
 * sep_open - device open method
 * @inode: inode of SEP device
 * @filp: file handle to SEP device
 *
 * Open method for the SEP device. Called when userspace opens
 * the SEP device node.
 *
 * Returns zero on success otherwise an error code.
 */
static int sep_open(struct inode *inode, struct file *filp)
{
	struct sep_device *sep;

	/*
	 * Get the SEP device structure and use it for the
	 * private_data field in filp for other methods
	 */
	sep = sep_dev;
	filp->private_data = sep;

	dev_dbg(&sep->pdev->dev, "Open for pid %d\n", current->pid);

	/* Anyone can open; locking takes place at transaction level */
	return 0;
}
/**
 * sep_singleton_release - close a SEP singleton device
 * @inode: inode of SEP device
 * @filp: file handle being closed
 *
 * Called on the final close of a SEP device. As the open protects against
 * multiple simultaneous opens that means this method is called when the
 * final reference to the open handle is dropped.
 */
static int sep_singleton_release(struct inode *inode, struct file *filp)
{
	struct sep_device *sep = filp->private_data;

	dev_dbg(&sep->pdev->dev, "Singleton release for pid %d\n",
		current->pid);
	clear_bit(0, &sep->singleton_access_flag);
	return 0;
}
/**
 * sep_request_daemon_open - request daemon open method
 * @inode: inode of SEP device
 * @filp: file handle to SEP device
 *
 * Open method for the SEP request daemon. Called when
 * request daemon in userspace opens the SEP device node.
 *
 * Returns zero on success otherwise an error code.
 */
static int sep_request_daemon_open(struct inode *inode, struct file *filp)
{
	struct sep_device *sep = sep_dev;
	int error = 0;

	filp->private_data = sep;

	dev_dbg(&sep->pdev->dev, "Request daemon open for pid %d\n",
		current->pid);

	/* There is supposed to be only one request daemon */
	dev_dbg(&sep->pdev->dev, "calling test and set for req_dmon open 0\n");
	if (test_and_set_bit(0, &sep->request_daemon_open))
		error = -EBUSY;
	return error;
}
/**
 * sep_request_daemon_release - close a SEP daemon
 * @inode: inode of SEP device
 * @filp: file handle being closed
 *
 * Called on the final close of a SEP daemon.
 */
static int sep_request_daemon_release(struct inode *inode, struct file *filp)
{
	struct sep_device *sep = filp->private_data;

	dev_dbg(&sep->pdev->dev, "Request daemon release for pid %d\n",
		current->pid);

	/* Clear the request_daemon_open flag */
	clear_bit(0, &sep->request_daemon_open);
	return 0;
}
/**
 * sep_req_daemon_send_reply_command_handler - poke the SEP
 * @sep: struct sep_device *
 *
 * This function raises an interrupt to the SEP that signals that it
 * has a new command from the HOST
 */
static int sep_req_daemon_send_reply_command_handler(struct sep_device *sep)
{
	unsigned long lck_flags;

	dev_dbg(&sep->pdev->dev,
		"sep_req_daemon_send_reply_command_handler start\n");

	sep_dump_message(sep);

	/* Counters are lockable region */
	spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
	sep->send_ct++;
	sep->reply_ct++;

	/* Send the interrupt to SEP */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct);
	sep->send_ct++;

	spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);

	dev_dbg(&sep->pdev->dev,
		"sep_req_daemon_send_reply send_ct %lx reply_ct %lx\n",
		sep->send_ct, sep->reply_ct);

	dev_dbg(&sep->pdev->dev,
		"sep_req_daemon_send_reply_command_handler end\n");

	return 0;
}
/**
 * sep_free_dma_table_data_handler - free DMA table
 * @sep: pointer to struct sep_device
 *
 * Handles the request to free DMA table for synchronic actions
 */
static int sep_free_dma_table_data_handler(struct sep_device *sep)
{
	int count;
	int dcb_counter;
	/* Pointer to the current dma_resource struct */
	struct sep_dma_resource *dma;

	dev_dbg(&sep->pdev->dev, "sep_free_dma_table_data_handler start\n");

	for (dcb_counter = 0; dcb_counter < sep->nr_dcb_creat; dcb_counter++) {
		dma = &sep->dma_res_arr[dcb_counter];

		/* Unmap and free input map array */
		if (dma->in_map_array) {
			for (count = 0; count < dma->in_num_pages; count++) {
				dma_unmap_page(&sep->pdev->dev,
					dma->in_map_array[count].dma_addr,
					dma->in_map_array[count].size,
					DMA_TO_DEVICE);
			}
			kfree(dma->in_map_array);
		}

		/* Unmap and free output map array */
		if (dma->out_map_array) {
			for (count = 0; count < dma->out_num_pages; count++) {
				dma_unmap_page(&sep->pdev->dev,
					dma->out_map_array[count].dma_addr,
					dma->out_map_array[count].size,
					DMA_FROM_DEVICE);
			}
			kfree(dma->out_map_array);
		}

		/* Free page cache for input */
		if (dma->in_page_array) {
			for (count = 0; count < dma->in_num_pages; count++) {
				flush_dcache_page(dma->in_page_array[count]);
				page_cache_release(dma->in_page_array[count]);
			}
			kfree(dma->in_page_array);
		}

		if (dma->out_page_array) {
			for (count = 0; count < dma->out_num_pages; count++) {
				if (!PageReserved(dma->out_page_array[count]))
					SetPageDirty(dma->out_page_array[count]);
				flush_dcache_page(dma->out_page_array[count]);
				page_cache_release(dma->out_page_array[count]);
			}
			kfree(dma->out_page_array);
		}

		/* Reset all the values */
		dma->in_page_array = NULL;
		dma->out_page_array = NULL;
		dma->in_num_pages = 0;
		dma->out_num_pages = 0;
		dma->in_map_array = NULL;
		dma->out_map_array = NULL;
		dma->in_map_num_entries = 0;
		dma->out_map_num_entries = 0;
	}

	sep->nr_dcb_creat = 0;
	sep->num_lli_tables_created = 0;

	dev_dbg(&sep->pdev->dev, "sep_free_dma_table_data_handler end\n");
	return 0;
}
/**
 * sep_request_daemon_mmap - maps the shared area to user space
 * @filp: pointer to struct file
 * @vma: pointer to vm_area_struct
 *
 * Called by the kernel when the daemon attempts an mmap() syscall
 * using our handle.
 */
static int sep_request_daemon_mmap(struct file *filp,
	struct vm_area_struct *vma)
{
	struct sep_device *sep = filp->private_data;
	dma_addr_t bus_address;
	int error = 0;

	dev_dbg(&sep->pdev->dev, "daemon mmap start\n");

	if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
		error = -EINVAL;
		goto end_function;
	}

	/* Get physical address */
	bus_address = sep->shared_bus;

	dev_dbg(&sep->pdev->dev, "bus_address is %08lx\n",
		(unsigned long)bus_address);

	if (remap_pfn_range(vma, vma->vm_start, bus_address >> PAGE_SHIFT,
		vma->vm_end - vma->vm_start, vma->vm_page_prot)) {

		dev_warn(&sep->pdev->dev, "remap_pfn_range failed\n");
		error = -EAGAIN;
		goto end_function;
	}

end_function:
	dev_dbg(&sep->pdev->dev, "daemon mmap end\n");
	return error;
}
/**
 * sep_request_daemon_poll - poll implementation
 * @sep: struct sep_device * for current SEP device
 * @filp: struct file * for open file
 * @wait: poll_table * for poll
 *
 * Called when our device is part of a poll() or select() syscall
 */
static unsigned int sep_request_daemon_poll(struct file *filp,
	poll_table *wait)
{
	u32 mask = 0;
	/* GPR2 register */
	u32 retval2;
	unsigned long lck_flags;
	struct sep_device *sep = filp->private_data;

	dev_dbg(&sep->pdev->dev, "daemon poll: start\n");

	poll_wait(filp, &sep->event_request_daemon, wait);

	dev_dbg(&sep->pdev->dev, "daemon poll: send_ct is %lx reply ct is %lx\n",
		sep->send_ct, sep->reply_ct);

	spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
	/* Check if the data is ready */
	if (sep->send_ct == sep->reply_ct) {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);

		retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		dev_dbg(&sep->pdev->dev,
			"daemon poll: data check (GPR2) is %x\n", retval2);
		/* Check if PRINT request */
		if ((retval2 >> 30) & 0x1) {
			dev_dbg(&sep->pdev->dev, "daemon poll: PRINTF request in\n");
			mask |= POLLIN;
			goto end_function;
		}

		/* Check if NVS request */
		if (retval2 >> 31) {
			dev_dbg(&sep->pdev->dev, "daemon poll: NVS request in\n");
			mask |= POLLPRI | POLLWRNORM;
		}
	} else {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
		dev_dbg(&sep->pdev->dev,
			"daemon poll: no reply received; returning 0\n");
		mask = 0;
	}
end_function:
	dev_dbg(&sep->pdev->dev, "daemon poll: exit\n");
	return mask;
}
/**
 * sep_release - close a SEP device
 * @inode: inode of SEP device
 * @filp: file handle being closed
 *
 * Called on the final close of a SEP device.
 */
static int sep_release(struct inode *inode, struct file *filp)
{
	struct sep_device *sep = filp->private_data;

	dev_dbg(&sep->pdev->dev, "Release for pid %d\n", current->pid);

	mutex_lock(&sep->sep_mutex);
	/* Is this the process that has a transaction open?
	 * If so, let's reset pid_doing_transaction to 0 and
	 * clear the in use flags, and then wake up sep_event
	 * so that other processes can do transactions
	 */
	dev_dbg(&sep->pdev->dev, "waking up event and mmap_event\n");
	if (sep->pid_doing_transaction == current->pid) {
		clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
		clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags);
		sep_free_dma_table_data_handler(sep);
		wake_up(&sep->event);
		sep->pid_doing_transaction = 0;
	}

	mutex_unlock(&sep->sep_mutex);
	return 0;
}
/**
 * sep_mmap - maps the shared area to user space
 * @filp: pointer to struct file
 * @vma: pointer to vm_area_struct
 *
 * Called on an mmap of our space via the normal SEP device
 */
static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
{
	dma_addr_t bus_addr;
	struct sep_device *sep = filp->private_data;
	unsigned long error = 0;

	dev_dbg(&sep->pdev->dev, "mmap start\n");

	/* Set the transaction busy (own the device) */
	wait_event_interruptible(sep->event,
		test_and_set_bit(SEP_MMAP_LOCK_BIT,
		&sep->in_use_flags) == 0);

	if (signal_pending(current)) {
		error = -EINTR;
		goto end_function_with_error;
	}
	/*
	 * The pid_doing_transaction indicates that this process
	 * now owns the facilities to perform a transaction with
	 * the SEP. While this process is performing a transaction,
	 * no other process who has the SEP device open can perform
	 * any transactions. This method allows more than one process
	 * to have the device open at any given time, which provides
	 * finer granularity for device utilization by multiple
	 * processes.
	 */
	mutex_lock(&sep->sep_mutex);
	sep->pid_doing_transaction = current->pid;
	mutex_unlock(&sep->sep_mutex);

	/* Zero the pools and the number of data pool allocation pointers */
	sep->data_pool_bytes_allocated = 0;
	sep->num_of_data_allocations = 0;

	/*
	 * Check that the size of the mapped range does not exceed the size
	 * of the message shared area
	 */
	if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
		error = -EINVAL;
		goto end_function_with_error;
	}

	dev_dbg(&sep->pdev->dev, "shared_addr is %p\n", sep->shared_addr);

	/* Get bus address */
	bus_addr = sep->shared_bus;

	dev_dbg(&sep->pdev->dev,
		"bus_address is %lx\n", (unsigned long)bus_addr);

	if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
		vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
		dev_warn(&sep->pdev->dev, "remap_pfn_range failed\n");
		error = -EAGAIN;
		goto end_function_with_error;
	}
	dev_dbg(&sep->pdev->dev, "mmap end\n");
	goto end_function;

end_function_with_error:
	/* Clear the bit */
	clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
	mutex_lock(&sep->sep_mutex);
	sep->pid_doing_transaction = 0;
	mutex_unlock(&sep->sep_mutex);

	/* Raise event for stuck contexts */
	dev_warn(&sep->pdev->dev, "mmap error - waking up event\n");
	wake_up(&sep->event);

end_function:
	return error;
}
/**
 * sep_poll - poll handler
 * @filp: pointer to struct file
 * @wait: pointer to poll_table
 *
 * Called by the OS when the kernel is asked to do a poll on
 * a SEP file handle.
 */
static unsigned int sep_poll(struct file *filp, poll_table *wait)
{
	u32 mask = 0;
	u32 retval = 0;
	u32 retval2 = 0;
	unsigned long lck_flags;

	struct sep_device *sep = filp->private_data;

	dev_dbg(&sep->pdev->dev, "poll: start\n");

	/* Am I the process that owns the transaction? */
	mutex_lock(&sep->sep_mutex);
	if (current->pid != sep->pid_doing_transaction) {
		dev_warn(&sep->pdev->dev, "poll; wrong pid\n");
		mask = POLLERR;
		mutex_unlock(&sep->sep_mutex);
		goto end_function;
	}
	mutex_unlock(&sep->sep_mutex);

	/* Check if send command or send_reply were activated previously */
	if (!test_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags)) {
		dev_warn(&sep->pdev->dev, "poll; send msg lock bit not set\n");
		mask = POLLERR;
		goto end_function;
	}

	/* Add the event to the polling wait table */
	dev_dbg(&sep->pdev->dev, "poll: calling wait sep_event\n");

	poll_wait(filp, &sep->event, wait);

	dev_dbg(&sep->pdev->dev, "poll: send_ct is %lx reply ct is %lx\n",
		sep->send_ct, sep->reply_ct);
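	/*
	 * The counters implement the request/reply handshake: send_ct is
	 * advanced when a command is sent to the SEP and reply_ct when a
	 * reply comes back, so the equality check below means the SEP has
	 * answered the last command and GPR2 can be inspected.
	 */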
	/* Check if error occurred during poll */
	retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
	if (retval2 != 0x0) {
		dev_warn(&sep->pdev->dev, "poll; poll error %x\n", retval2);
		mask |= POLLERR;
		goto end_function;
	}

	spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);

	if (sep->send_ct == sep->reply_ct) {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
		retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		dev_dbg(&sep->pdev->dev, "poll: data ready check (GPR2) %x\n",
			retval);

		/* Check if printf request */
		if ((retval >> 30) & 0x1) {
			dev_dbg(&sep->pdev->dev, "poll: SEP printf request\n");
			wake_up(&sep->event_request_daemon);
			goto end_function;
		}

		/* Check if this is a SEP reply or request */
		if (retval >> 31) {
			dev_dbg(&sep->pdev->dev, "poll: SEP request\n");
			wake_up(&sep->event_request_daemon);
		} else {
			dev_dbg(&sep->pdev->dev, "poll: normal return\n");
			/* In case it is set again by send_reply_command */
			clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags);
			sep_dump_message(sep);
			dev_dbg(&sep->pdev->dev,
				"poll; SEP reply POLLIN | POLLRDNORM\n");
			mask |= POLLIN | POLLRDNORM;
		}
	} else {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
		dev_dbg(&sep->pdev->dev,
			"poll; no reply received; returning mask of 0\n");
		mask = 0;
	}
end_function:
	dev_dbg(&sep->pdev->dev, "poll: end\n");
	return mask;
}
/**
 * sep_time_address - address in SEP memory of time
 * @sep: SEP device we want the address from
 *
 * Return the address of the two dwords in memory used for time
 * setting.
 */
static u32 *sep_time_address(struct sep_device *sep)
{
	return sep->shared_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
}
/**
 * sep_set_time - set the SEP time
 * @sep: the SEP we are setting the time for
 *
 * Calculates time and sets it at the predefined address.
 * Called with the SEP mutex held.
 */
static unsigned long sep_set_time(struct sep_device *sep)
{
	struct timeval time;
	u32 *time_addr;	/* Address of time as seen by the kernel */

	dev_dbg(&sep->pdev->dev, "sep_set_time start\n");

	do_gettimeofday(&time);

	/* Set value in the SYSTEM MEMORY offset */
	time_addr = sep_time_address(sep);

	time_addr[0] = SEP_TIME_VAL_TOKEN;
	time_addr[1] = time.tv_sec;

	dev_dbg(&sep->pdev->dev, "time.tv_sec is %lu\n", time.tv_sec);
	dev_dbg(&sep->pdev->dev, "time_addr is %p\n", time_addr);
	dev_dbg(&sep->pdev->dev, "sep->shared_addr is %p\n", sep->shared_addr);

	return time.tv_sec;
}
/**
 * sep_set_caller_id_handler - insert caller id entry
 * @sep: SEP device
 * @arg: pointer to struct caller_id_struct
 *
 * Inserts the data into the caller id table. Note that this function
 * falls under the ioctl lock
 */
static int sep_set_caller_id_handler(struct sep_device *sep, u32 arg)
{
	void __user *hash;
	int error = 0;
	int i;
	struct caller_id_struct command_args;

	dev_dbg(&sep->pdev->dev, "sep_set_caller_id_handler start\n");

	for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) {
		if (sep->caller_id_table[i].pid == 0)
			break;
	}

	if (i == SEP_CALLER_ID_TABLE_NUM_ENTRIES) {
		dev_warn(&sep->pdev->dev, "no more caller id entries left\n");
		dev_warn(&sep->pdev->dev, "maximum number is %d\n",
			SEP_CALLER_ID_TABLE_NUM_ENTRIES);
		error = -EUSERS;
		goto end_function;
	}

	/* Copy the data */
	if (copy_from_user(&command_args, (void __user *)arg,
		sizeof(command_args))) {
		error = -EFAULT;
		goto end_function;
	}

	hash = (void __user *)(unsigned long)command_args.callerIdAddress;

	if (!command_args.pid || !command_args.callerIdSizeInBytes) {
		error = -EINVAL;
		goto end_function;
	}

	dev_dbg(&sep->pdev->dev, "pid is %x\n", command_args.pid);
	dev_dbg(&sep->pdev->dev, "callerIdSizeInBytes is %x\n",
		command_args.callerIdSizeInBytes);

	if (command_args.callerIdSizeInBytes >
		SEP_CALLER_ID_HASH_SIZE_IN_BYTES) {
		error = -EMSGSIZE;
		goto end_function;
	}

	sep->caller_id_table[i].pid = command_args.pid;

	if (copy_from_user(sep->caller_id_table[i].callerIdHash,
		hash, command_args.callerIdSizeInBytes))
		error = -EFAULT;
end_function:
	dev_dbg(&sep->pdev->dev, "sep_set_caller_id_handler end\n");
	return error;
}
/**
 * sep_set_current_caller_id - set the caller id
 * @sep: pointer to struct sep_device
 *
 * Set the caller ID (if it exists) to the SEP. Note that this
 * function falls under the ioctl lock
 */
static int sep_set_current_caller_id(struct sep_device *sep)
{
	int i;

	dev_dbg(&sep->pdev->dev, "sep_set_current_caller_id start\n");
	dev_dbg(&sep->pdev->dev, "current process is %d\n", current->pid);

	/* Zero the previous value */
	memset(sep->shared_addr + SEP_CALLER_ID_OFFSET_BYTES,
		0, SEP_CALLER_ID_HASH_SIZE_IN_BYTES);

	for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) {
		if (sep->caller_id_table[i].pid == current->pid) {
			dev_dbg(&sep->pdev->dev, "Caller Id found\n");

			memcpy(sep->shared_addr + SEP_CALLER_ID_OFFSET_BYTES,
				(void *)(sep->caller_id_table[i].callerIdHash),
				SEP_CALLER_ID_HASH_SIZE_IN_BYTES);
			break;
		}
	}
	dev_dbg(&sep->pdev->dev, "sep_set_current_caller_id end\n");
	return 0;
}
/**
 * sep_send_command_handler - kick off a command
 * @sep: SEP being signalled
 *
 * This function raises an interrupt to the SEP that signals that it
 * has a new command from the host
 *
 * Note that this function does fall under the ioctl lock
 */
static int sep_send_command_handler(struct sep_device *sep)
{
	unsigned long lck_flags;
	int error = 0;

	dev_dbg(&sep->pdev->dev, "sep_send_command_handler start\n");

	if (test_and_set_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags)) {
		error = -EPROTO;
		goto end_function;
	}
	sep_set_time(sep);

	/* Only Medfield has caller id */
	if (sep->mrst == 0)
		sep_set_current_caller_id(sep);

	sep_dump_message(sep);

	/* Update counter */
	spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
	sep->send_ct++;
	spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);

	dev_dbg(&sep->pdev->dev,
		"sep_send_command_handler send_ct %lx reply_ct %lx\n",
		sep->send_ct, sep->reply_ct);

	/* Send interrupt to SEP */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);

end_function:
	dev_dbg(&sep->pdev->dev, "sep_send_command_handler end\n");
	return error;
}
/**
 * sep_allocate_data_pool_memory_handler - allocate pool memory
 * @sep: pointer to struct sep_device
 * @arg: pointer to struct alloc_struct
 *
 * This function handles the allocate data pool memory request
 * This function calculates the bus address of the allocated memory
 * and the offset of this area from the mapped address.
 * Therefore, the FVOs in user space can calculate the exact virtual
 * address of this allocated memory
 */
static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
	unsigned long arg)
{
	int error = 0;
	struct alloc_struct command_args;

	/* Holds the allocated buffer address in the system memory pool */
	u32 *token_addr;

	dev_dbg(&sep->pdev->dev,
		"sep_allocate_data_pool_memory_handler start\n");

	if (copy_from_user(&command_args, (void __user *)arg,
		sizeof(struct alloc_struct))) {
		error = -EFAULT;
		goto end_function;
	}

	/* Allocate memory */
	if ((sep->data_pool_bytes_allocated + command_args.num_bytes) >
		SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
		error = -ENOMEM;
		goto end_function;
	}

	dev_dbg(&sep->pdev->dev,
		"bytes_allocated: %x\n", (int)sep->data_pool_bytes_allocated);
	dev_dbg(&sep->pdev->dev,
		"offset: %x\n", SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES);
	/* Set the virtual and bus address */
	command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES +
		sep->data_pool_bytes_allocated;

	dev_dbg(&sep->pdev->dev,
		"command_args.offset: %x\n", command_args.offset);

	/* Place in the shared area that is known by the SEP */
	token_addr = (u32 *)(sep->shared_addr +
		SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES +
		(sep->num_of_data_allocations)*2*sizeof(u32));

	dev_dbg(&sep->pdev->dev, "allocation offset: %x\n",
		SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES);
	dev_dbg(&sep->pdev->dev, "data pool token addr is %p\n", token_addr);

	token_addr[0] = SEP_DATA_POOL_POINTERS_VAL_TOKEN;
	token_addr[1] = (u32)sep->shared_bus +
		SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES +
		sep->data_pool_bytes_allocated;

	dev_dbg(&sep->pdev->dev, "data pool token [0] %x\n", token_addr[0]);
	dev_dbg(&sep->pdev->dev, "data pool token [1] %x\n", token_addr[1]);

	/* Write the memory back to the user space */
	error = copy_to_user((void __user *)arg, (void *)&command_args,
		sizeof(struct alloc_struct));
	if (error) {
		error = -EFAULT;
		goto end_function;
	}

	/* Update the allocation */
	sep->data_pool_bytes_allocated += command_args.num_bytes;
	sep->num_of_data_allocations += 1;

	dev_dbg(&sep->pdev->dev, "data_allocations %d\n",
		sep->num_of_data_allocations);
	dev_dbg(&sep->pdev->dev, "bytes allocated %d\n",
		(int)sep->data_pool_bytes_allocated);

end_function:
	dev_dbg(&sep->pdev->dev, "sep_allocate_data_pool_memory_handler end\n");
	return error;
}
/**
 * sep_lock_kernel_pages - map kernel pages for DMA
 * @sep: pointer to struct sep_device
 * @kernel_virt_addr: address of data buffer in kernel
 * @data_size: size of data
 * @lli_array_ptr: lli array
 * @in_out_flag: input into device or output from device
 *
 * This function locks all the physical pages of the kernel virtual buffer
 * and constructs a basic lli array, where each entry holds the physical
 * page address and the size that application data holds in this page
 * This function is used only during kernel crypto mod calls from within
 * the kernel (when ioctl is not used)
 */
static int sep_lock_kernel_pages(struct sep_device *sep,
	u32 kernel_virt_addr,
	u32 data_size,
	struct sep_lli_entry **lli_array_ptr,
	int in_out_flag)
{
	int error = 0;
	/* Array of lli */
	struct sep_lli_entry *lli_array;
	/* Map array */
	struct sep_dma_map *map_array;

	dev_dbg(&sep->pdev->dev, "sep_lock_kernel_pages start\n");
	dev_dbg(&sep->pdev->dev, "kernel_virt_addr is %08x\n",
		kernel_virt_addr);
	dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size);

	lli_array = kmalloc(sizeof(struct sep_lli_entry), GFP_ATOMIC);
	if (!lli_array) {
		error = -ENOMEM;
		goto end_function;
	}
	map_array = kmalloc(sizeof(struct sep_dma_map), GFP_ATOMIC);
	if (!map_array) {
		error = -ENOMEM;
		goto end_function_with_error;
	}

	map_array[0].dma_addr =
		dma_map_single(&sep->pdev->dev, (void *)kernel_virt_addr,
		data_size, DMA_BIDIRECTIONAL);
	map_array[0].size = data_size;

	/*
	 * Set the start address of the first page - app data may not start
	 * at the beginning of the page
	 */
	lli_array[0].bus_address = (u32)map_array[0].dma_addr;
	lli_array[0].block_size = map_array[0].size;

	dev_dbg(&sep->pdev->dev,
		"lli_array[0].bus_address is %08lx, lli_array[0].block_size is %x\n",
		(unsigned long)lli_array[0].bus_address,
		lli_array[0].block_size);

	/* Set the output parameters */
	if (in_out_flag == SEP_DRIVER_IN_FLAG) {
		*lli_array_ptr = lli_array;
		sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = 1;
		sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
		sep->dma_res_arr[sep->nr_dcb_creat].in_map_array = map_array;
		sep->dma_res_arr[sep->nr_dcb_creat].in_map_num_entries = 1;
	} else {
		*lli_array_ptr = lli_array;
		sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages = 1;
		sep->dma_res_arr[sep->nr_dcb_creat].out_page_array = NULL;
		sep->dma_res_arr[sep->nr_dcb_creat].out_map_array = map_array;
		sep->dma_res_arr[sep->nr_dcb_creat].out_map_num_entries = 1;
	}
	goto end_function;

end_function_with_error:
	kfree(lli_array);

end_function:
	dev_dbg(&sep->pdev->dev, "sep_lock_kernel_pages end\n");
	return error;
}
/**
 * sep_lock_user_pages - lock and map user pages for DMA
 * @sep: pointer to struct sep_device
 * @app_virt_addr: user memory data buffer
 * @data_size: size of data buffer
 * @lli_array_ptr: lli array
 * @in_out_flag: input or output to device
 *
 * This function locks all the physical pages of the application
 * virtual buffer and constructs a basic lli array, where each entry
 * holds the physical page address and the size that application
 * data holds in this physical page
 */
static int sep_lock_user_pages(struct sep_device *sep,
	u32 app_virt_addr,
	u32 data_size,
	struct sep_lli_entry **lli_array_ptr,
	int in_out_flag)
{
	int error = 0;
	u32 count;
	int result;
	/* The page of the end address of the user space buffer */
	u32 end_page;
	/* The page of the start address of the user space buffer */
	u32 start_page;
	/* The range in pages */
	u32 num_pages;
	/* Array of pointers to page */
	struct page **page_array;
	/* Array of lli */
	struct sep_lli_entry *lli_array;
	/* Map array */
	struct sep_dma_map *map_array;
	/* Direction of the DMA mapping for locked pages */
	enum dma_data_direction dir;

	dev_dbg(&sep->pdev->dev, "sep_lock_user_pages start\n");

	/* Set start and end pages and num pages */
	end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
	start_page = app_virt_addr >> PAGE_SHIFT;
	num_pages = end_page - start_page + 1;
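	/*
	 * Example: with 4KB pages, a buffer starting 0x100 bytes into a
	 * page with data_size 0x2000 spans three pages, since it ends
	 * 0x100 bytes into the third page.
	 */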
	dev_dbg(&sep->pdev->dev, "app_virt_addr is %x\n", app_virt_addr);
	dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size);
	dev_dbg(&sep->pdev->dev, "start_page is %x\n", start_page);
	dev_dbg(&sep->pdev->dev, "end_page is %x\n", end_page);
	dev_dbg(&sep->pdev->dev, "num_pages is %x\n", num_pages);

	dev_dbg(&sep->pdev->dev, "starting page_array malloc\n");

	/* Allocate array of pages structure pointers */
	page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
	if (!page_array) {
		error = -ENOMEM;
		goto end_function;
	}
	map_array = kmalloc(sizeof(struct sep_dma_map) * num_pages, GFP_ATOMIC);
	if (!map_array) {
		dev_warn(&sep->pdev->dev, "kmalloc for map_array failed\n");
		error = -ENOMEM;
		goto end_function_with_error1;
	}

	lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages,
		GFP_ATOMIC);

	if (!lli_array) {
		dev_warn(&sep->pdev->dev, "kmalloc for lli_array failed\n");
		error = -ENOMEM;
		goto end_function_with_error2;
	}

	dev_dbg(&sep->pdev->dev, "starting get_user_pages\n");

	/* Convert the application virtual address into a set of physical */
	down_read(&current->mm->mmap_sem);
	result = get_user_pages(current, current->mm, app_virt_addr,
		num_pages,
		((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1),
		0, page_array, NULL);

	up_read(&current->mm->mmap_sem);

	/* Check the number of pages locked - if not all then exit with error */
	if (result != num_pages) {
		dev_warn(&sep->pdev->dev,
			"not all pages locked by get_user_pages\n");
		error = -ENOMEM;
		goto end_function_with_error3;
	}

	dev_dbg(&sep->pdev->dev, "get_user_pages succeeded\n");

	/* Set direction */
	if (in_out_flag == SEP_DRIVER_IN_FLAG)
		dir = DMA_TO_DEVICE;
	else
		dir = DMA_FROM_DEVICE;

	/*
	 * Fill the array using page array data and
	 * map the pages - this action will also flush the cache as needed
	 */
	for (count = 0; count < num_pages; count++) {
		/* Fill the map array */
		map_array[count].dma_addr =
			dma_map_page(&sep->pdev->dev, page_array[count],
			0, PAGE_SIZE, /*dir*/DMA_BIDIRECTIONAL);

		map_array[count].size = PAGE_SIZE;

		/* Fill the lli array entry */
		lli_array[count].bus_address = (u32)map_array[count].dma_addr;
		lli_array[count].block_size = PAGE_SIZE;

		dev_warn(&sep->pdev->dev, "lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is %x\n",
			count, (unsigned long)lli_array[count].bus_address,
			count, lli_array[count].block_size);
	}

	/* Check the offset for the first page */
	lli_array[0].bus_address =
		lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));

	/* Check that not all the data is in the first page only */
	if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
		lli_array[0].block_size = data_size;
	else
		lli_array[0].block_size =
			PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));

	dev_dbg(&sep->pdev->dev,
		"lli_array[0].bus_address is %08lx, lli_array[0].block_size is %x\n",
		(unsigned long)lli_array[0].bus_address,
		lli_array[0].block_size);

	/* Check the size of the last page */
	if (num_pages > 1) {
		lli_array[num_pages - 1].block_size =
			(app_virt_addr + data_size) & (~PAGE_MASK);

		dev_warn(&sep->pdev->dev,
			"lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is %x\n",
			num_pages - 1,
			(unsigned long)lli_array[num_pages - 1].bus_address,
			num_pages - 1,
			lli_array[num_pages - 1].block_size);
	}
	/* Set output params according to the in_out flag */
	if (in_out_flag == SEP_DRIVER_IN_FLAG) {
		*lli_array_ptr = lli_array;
		sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = num_pages;
		sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = page_array;
		sep->dma_res_arr[sep->nr_dcb_creat].in_map_array = map_array;
		sep->dma_res_arr[sep->nr_dcb_creat].in_map_num_entries =
			num_pages;
	} else {
		*lli_array_ptr = lli_array;
		sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages = num_pages;
		sep->dma_res_arr[sep->nr_dcb_creat].out_page_array =
			page_array;
		sep->dma_res_arr[sep->nr_dcb_creat].out_map_array = map_array;
		sep->dma_res_arr[sep->nr_dcb_creat].out_map_num_entries =
			num_pages;
	}
	goto end_function;

end_function_with_error3:
	/* Free lli array */
	kfree(lli_array);

end_function_with_error2:
	kfree(map_array);

end_function_with_error1:
	/* Free page array */
	kfree(page_array);

end_function:
	dev_dbg(&sep->pdev->dev, "sep_lock_user_pages end\n");
	return error;
}
/**
 * sep_calculate_lli_table_max_size - size the LLI table
 * @sep: pointer to struct sep_device
 * @lli_in_array_ptr: pointer to the lli array
 * @num_array_entries: number of entries in the lli array
 * @last_table_flag: set when this is the last table to build
 *
 * This function calculates the size of data that can be inserted into
 * the lli table from this array, such that either the table is full
 * (all entries are entered), or there are no more entries in the
 * lli array
 */
static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
	struct sep_lli_entry *lli_in_array_ptr,
	u32 num_array_entries,
	u32 *last_table_flag)
{
	u32 counter;
	/* Table data size */
	u32 table_data_size = 0;
	/* Data size for the next table */
	u32 next_table_data_size;

	*last_table_flag = 0;

	/*
	 * Calculate the data in the out lli table till we fill the whole
	 * table or till the data has ended
	 */
	for (counter = 0;
		(counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
		(counter < num_array_entries); counter++)
		table_data_size += lli_in_array_ptr[counter].block_size;

	/*
	 * Check if we reached the last entry,
	 * meaning this is the last table to build,
	 * and no need to check the block alignment
	 */
	if (counter == num_array_entries) {
		/* Set the last table flag */
		*last_table_flag = 1;
		goto end_function;
	}

	/*
	 * Calculate the data size of the next table.
	 * Stop if no entries left or if data size is more than the DMA
	 * restriction
	 */
	next_table_data_size = 0;
	for (; counter < num_array_entries; counter++) {
		next_table_data_size += lli_in_array_ptr[counter].block_size;
		if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
			break;
	}

	/*
	 * Check if the next table data size is less than the DMA restriction.
	 * If it is - recalculate the current table size, so that the next
	 * table data size will be adequate for DMA
	 */
	if (next_table_data_size &&
		next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)

		table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
			next_table_data_size);
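	/*
	 * Example: with a 4KB DMA minimum, if this table would leave only
	 * 1KB of data for the next one, 3KB is pushed back out of this
	 * table so that the next table still meets the restriction.
	 */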
	dev_dbg(&sep->pdev->dev, "table data size is %x\n",
		table_data_size);
end_function:
	return table_data_size;
}
/**
 * sep_build_lli_table - build an lli array for the given table
 * @sep: pointer to struct sep_device
 * @lli_array_ptr: pointer to lli array
 * @lli_table_ptr: pointer to lli table
 * @num_processed_entries_ptr: pointer to number of entries
 * @num_table_entries_ptr: pointer to number of tables
 * @table_data_size: total data size
 *
 * Builds an lli table from the lli_array according to
 * the given size of data
 */
static void sep_build_lli_table(struct sep_device *sep,
	struct sep_lli_entry *lli_array_ptr,
	struct sep_lli_entry *lli_table_ptr,
	u32 *num_processed_entries_ptr,
	u32 *num_table_entries_ptr,
	u32 table_data_size)
{
	/* Current table data size */
	u32 curr_table_data_size;
	/* Counter of lli array entry */
	u32 array_counter;

	dev_dbg(&sep->pdev->dev, "sep_build_lli_table start\n");

	/* Init current table data size and lli array entry counter */
	curr_table_data_size = 0;
	array_counter = 0;
	*num_table_entries_ptr = 1;

	dev_dbg(&sep->pdev->dev, "table_data_size is %x\n", table_data_size);

	/* Fill the table till table size reaches the needed amount */
	while (curr_table_data_size < table_data_size) {
		/* Update the number of entries in table */
		(*num_table_entries_ptr)++;

		lli_table_ptr->bus_address =
			cpu_to_le32(lli_array_ptr[array_counter].bus_address);

		lli_table_ptr->block_size =
			cpu_to_le32(lli_array_ptr[array_counter].block_size);

		curr_table_data_size += lli_array_ptr[array_counter].block_size;

		dev_dbg(&sep->pdev->dev, "lli_table_ptr is %p\n",
			lli_table_ptr);
		dev_dbg(&sep->pdev->dev, "lli_table_ptr->bus_address is %08lx\n",
			(unsigned long)lli_table_ptr->bus_address);
		dev_dbg(&sep->pdev->dev, "lli_table_ptr->block_size is %x\n",
			lli_table_ptr->block_size);

		/* Check for overflow of the table data */
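		/*
		 * If the last entry pulled in more data than this table
		 * should carry, the entry is split: the table entry keeps
		 * only the bytes it needs, and the lli array entry is
		 * advanced past them so the remainder goes into the next
		 * table.
		 */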
		if (curr_table_data_size > table_data_size) {
			dev_dbg(&sep->pdev->dev,
				"curr_table_data_size too large\n");

			/* Update the size of block in the table */
			lli_table_ptr->block_size -=
				cpu_to_le32((curr_table_data_size - table_data_size));

			/* Update the physical address in the lli array */
			lli_array_ptr[array_counter].bus_address +=
				cpu_to_le32(lli_table_ptr->block_size);

			/* Update the block size left in the lli array */
			lli_array_ptr[array_counter].block_size =
				(curr_table_data_size - table_data_size);
		} else
			/* Advance to the next entry in the lli_array */
			array_counter++;

		dev_dbg(&sep->pdev->dev,
			"lli_table_ptr->bus_address is %08lx\n",
			(unsigned long)lli_table_ptr->bus_address);
		dev_dbg(&sep->pdev->dev,
			"lli_table_ptr->block_size is %x\n",
			lli_table_ptr->block_size);

		/* Move to the next entry in table */
		lli_table_ptr++;
	}

	/* Set the info entry to default */
	lli_table_ptr->bus_address = 0xffffffff;
	lli_table_ptr->block_size = 0;

	dev_dbg(&sep->pdev->dev, "lli_table_ptr is %p\n", lli_table_ptr);
	dev_dbg(&sep->pdev->dev, "lli_table_ptr->bus_address is %08lx\n",
		(unsigned long)lli_table_ptr->bus_address);
	dev_dbg(&sep->pdev->dev, "lli_table_ptr->block_size is %x\n",
		lli_table_ptr->block_size);

	/* Set the output parameter */
	*num_processed_entries_ptr += array_counter;

	dev_dbg(&sep->pdev->dev, "num_processed_entries_ptr is %x\n",
		*num_processed_entries_ptr);

	dev_dbg(&sep->pdev->dev, "sep_build_lli_table end\n");
}
/**
 * sep_shared_area_virt_to_bus - map shared area to bus address
 * @sep: pointer to struct sep_device
 * @virt_address: virtual address to convert
 *
 * This function returns the physical address inside shared area according
 * to the virtual address. It can be either on the external RAM device
 * (ioremapped), or on the system RAM
 * This implementation is for the external RAM
 */
static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
	void *virt_address)
{
	dev_dbg(&sep->pdev->dev, "sh virt to phys v %p\n", virt_address);
	dev_dbg(&sep->pdev->dev, "sh virt to phys p %08lx\n",
		(unsigned long)
		sep->shared_bus + (virt_address - sep->shared_addr));

	return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
}
/**
 * sep_shared_area_bus_to_virt - map shared area bus address to kernel
 * @sep: pointer to struct sep_device
 * @bus_address: bus address to convert
 *
 * This function returns the virtual address inside shared area
 * according to the physical address. It can be either on the
 * external RAM device (ioremapped), or on the system RAM
 * This implementation is for the external RAM
 */
static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
	dma_addr_t bus_address)
{
	dev_dbg(&sep->pdev->dev, "shared bus to virt b=%x v=%x\n",
		(u32)bus_address, (u32)(sep->shared_addr +
		(size_t)(bus_address - sep->shared_bus)));

	return sep->shared_addr + (size_t)(bus_address - sep->shared_bus);
}
/**
 * sep_debug_print_lli_tables - dump LLI table
 * @sep: pointer to struct sep_device
 * @lli_table_ptr: pointer to sep_lli_entry
 * @num_table_entries: number of entries
 * @table_data_size: total data size
 *
 * Walk the list of the created tables and print all the data
 */
static void sep_debug_print_lli_tables(struct sep_device *sep,
	struct sep_lli_entry *lli_table_ptr,
	unsigned long num_table_entries,
	unsigned long table_data_size)
{
	unsigned long table_count = 1;
	unsigned long entries_count = 0;

	dev_dbg(&sep->pdev->dev, "sep_debug_print_lli_tables start\n");

	while ((unsigned long) lli_table_ptr != 0xffffffff) {
		dev_dbg(&sep->pdev->dev,
			"lli table %08lx, table_data_size is %lu\n",
			table_count, table_data_size);
		dev_dbg(&sep->pdev->dev, "num_table_entries is %lu\n",
			num_table_entries);

		/* Print entries of the table (without info entry) */
		for (entries_count = 0; entries_count < num_table_entries;
			entries_count++, lli_table_ptr++) {

			dev_dbg(&sep->pdev->dev,
				"lli_table_ptr address is %08lx\n",
				(unsigned long) lli_table_ptr);

			dev_dbg(&sep->pdev->dev,
				"phys address is %08lx block size is %x\n",
				(unsigned long)lli_table_ptr->bus_address,
				lli_table_ptr->block_size);
		}

		/* Point to the info entry */
		lli_table_ptr--;

		dev_dbg(&sep->pdev->dev,
			"phys lli_table_ptr->block_size is %x\n",
			lli_table_ptr->block_size);

		dev_dbg(&sep->pdev->dev,
			"phys lli_table_ptr->physical_address is %08lu\n",
			(unsigned long)lli_table_ptr->bus_address);

		table_data_size = lli_table_ptr->block_size & 0xffffff;
		num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
		lli_table_ptr = (struct sep_lli_entry *)
			(lli_table_ptr->bus_address);

		dev_dbg(&sep->pdev->dev,
			"phys table_data_size is %lu num_table_entries is"
			" %lu lli_table_ptr is %lu\n", table_data_size,
			num_table_entries, (unsigned long)lli_table_ptr);

		if ((unsigned long)lli_table_ptr != 0xffffffff)
			lli_table_ptr = (struct sep_lli_entry *)
				sep_shared_bus_to_virt(sep,
				(unsigned long)lli_table_ptr);

		table_count++;
	}
	dev_dbg(&sep->pdev->dev, "sep_debug_print_lli_tables end\n");
}
/**
 * sep_prepare_empty_lli_table - create a blank LLI table
 * @sep: pointer to struct sep_device
 * @lli_table_addr_ptr: pointer to lli table
 * @num_entries_ptr: pointer to number of entries
 * @table_data_size_ptr: point to table data size
 *
 * This function creates empty lli tables when there is no data
 */
static void sep_prepare_empty_lli_table(struct sep_device *sep,
	dma_addr_t *lli_table_addr_ptr,
	u32 *num_entries_ptr,
	u32 *table_data_size_ptr)
{
	struct sep_lli_entry *lli_table_ptr;

	dev_dbg(&sep->pdev->dev, "sep_prepare_empty_lli_table start\n");

	/* Find the area for new table */
	lli_table_ptr =
		(struct sep_lli_entry *)(sep->shared_addr +
		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
		sep->num_lli_tables_created * sizeof(struct sep_lli_entry) *
		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);

	lli_table_ptr->bus_address = 0;
	lli_table_ptr->block_size = 0;

	lli_table_ptr++;
	lli_table_ptr->bus_address = 0xFFFFFFFF;
	lli_table_ptr->block_size = 0;

	/* Set the output parameter value */
	*lli_table_addr_ptr = sep->shared_bus +
		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
		sep->num_lli_tables_created *
		sizeof(struct sep_lli_entry) *
		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

	/* Set the num of entries and table data size for empty table */
	*num_entries_ptr = 2;
	*table_data_size_ptr = 0;

	/* Update the number of created tables */
	sep->num_lli_tables_created++;

	dev_dbg(&sep->pdev->dev, "sep_prepare_empty_lli_table end\n");
}
/**
 * sep_prepare_input_dma_table - prepare input DMA mappings
 * @sep: pointer to struct sep_device
 * @app_virt_addr: virtual address of the input buffer
 * @data_size: size of the data
 * @block_size: block size of the algorithm
 * @lli_table_ptr: returned bus address of the first input lli table
 * @num_entries_ptr: returned number of entries in the first table
 * @table_data_size_ptr: returned data size of the first table
 * @is_kva: set for kernel data (kernel crypto call)
 *
 * This function prepares only input DMA table for synchronic symmetric
 * operations (HASH)
 * Note that all bus addresses that are passed to the SEP
 * are in 32 bit format; the SEP is a 32 bit device
 */
static int sep_prepare_input_dma_table(struct sep_device *sep,
	unsigned long app_virt_addr,
	u32 data_size,
	u32 block_size,
	dma_addr_t *lli_table_ptr,
	u32 *num_entries_ptr,
	u32 *table_data_size_ptr,
	bool is_kva)
{
	int error = 0;
	/* Pointer to the info entry of the table - the last entry */
	struct sep_lli_entry *info_entry_ptr;
	/* Array of pointers to page */
	struct sep_lli_entry *lli_array_ptr;
	/* Points to the first entry to be processed in the lli_in_array */
	u32 current_entry = 0;
	/* Num entries in the virtual buffer */
	u32 sep_lli_entries = 0;
	/* Lli table pointer */
	struct sep_lli_entry *in_lli_table_ptr;
	/* The total data in one table */
	u32 table_data_size = 0;
	/* Flag for last table */
	u32 last_table_flag = 0;
	/* Number of entries in lli table */
	u32 num_entries_in_table = 0;
	/* Next table address */
	u32 lli_table_alloc_addr = 0;

	dev_dbg(&sep->pdev->dev, "sep_prepare_input_dma_table start\n");
	dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size);
	dev_dbg(&sep->pdev->dev, "block_size is %x\n", block_size);

	/* Initialize the pages pointers */
	sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
	sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = 0;

	/* Set the kernel address for first table to be allocated */
	lli_table_alloc_addr = (u32)(sep->shared_addr +
		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
		sep->num_lli_tables_created * sizeof(struct sep_lli_entry) *
		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);

	if (data_size == 0) {
		/* Special case - create empty table - 2 entries, zero data */
		sep_prepare_empty_lli_table(sep, lli_table_ptr,
			num_entries_ptr, table_data_size_ptr);
		goto update_dcb_counter;
	}

	/* Check if the pages are in Kernel Virtual Address layout */
	if (is_kva == true)
		/* Lock the pages in the kernel */
		error = sep_lock_kernel_pages(sep, app_virt_addr,
			data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG);
	else
		/*
		 * Lock the pages of the user buffer
		 * and translate them to pages
		 */
		error = sep_lock_user_pages(sep, app_virt_addr,
			data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG);

	if (error)
		goto end_function;

	dev_dbg(&sep->pdev->dev, "output sep_in_num_pages is %x\n",
		sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages);

	current_entry = 0;
	info_entry_ptr = NULL;

	sep_lli_entries = sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages;

	/* Loop till all the entries in the in array are processed */
	while (current_entry < sep_lli_entries) {

		/* Set the new input and output tables */
		in_lli_table_ptr =
			(struct sep_lli_entry *)lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		if (lli_table_alloc_addr >
			((u32)sep->shared_addr +
			SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
			SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {

			error = -ENOMEM;
			goto end_function_error;
		}

		/* Update the number of created tables */
		sep->num_lli_tables_created++;

		/* Calculate the maximum size of data for input table */
		table_data_size = sep_calculate_lli_table_max_size(sep,
			&lli_array_ptr[current_entry],
			(sep_lli_entries - current_entry),
			&last_table_flag);

		/*
		 * If this is not the last table -
		 * then align it to the block size
		 */
		if (!last_table_flag)
			table_data_size =
				(table_data_size / block_size) * block_size;

		dev_dbg(&sep->pdev->dev, "output table_data_size is %x\n",
			table_data_size);

		/* Construct input lli table */
		sep_build_lli_table(sep, &lli_array_ptr[current_entry],
			in_lli_table_ptr,
			&current_entry, &num_entries_in_table, table_data_size);

		if (info_entry_ptr == NULL) {

			/* Set the output parameters to physical addresses */
			*lli_table_ptr = sep_shared_area_virt_to_bus(sep,
				in_lli_table_ptr);
			*num_entries_ptr = num_entries_in_table;
			*table_data_size_ptr = table_data_size;

			dev_dbg(&sep->pdev->dev,
				"output lli_table_in_ptr is %08lx\n",
				(unsigned long)*lli_table_ptr);

		} else {
			/* Update the info entry of the previous in table */
			info_entry_ptr->bus_address =
				sep_shared_area_virt_to_bus(sep,
				in_lli_table_ptr);
			info_entry_ptr->block_size =
				((num_entries_in_table) << 24) |
				(table_data_size);
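			/*
			 * The info entry chains the tables: its bus_address
			 * points at the next table, and block_size packs the
			 * next table's entry count into the top 8 bits and
			 * its data size into the low 24 bits - the same
			 * layout that sep_debug_print_lli_tables decodes.
			 */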
		}

		/* Save the pointer to the info entry of the current tables */
		info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
	}

	/* Print input tables */
	sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)
		sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
		*num_entries_ptr, *table_data_size_ptr);

	/* The array of the pages */
	kfree(lli_array_ptr);

update_dcb_counter:
	/* Update DCB counter */
	sep->nr_dcb_creat++;
	goto end_function;

end_function_error:
	/* Free all the allocated resources */
	kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_map_array);
	kfree(lli_array_ptr);
	kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_page_array);

end_function:
	dev_dbg(&sep->pdev->dev, "sep_prepare_input_dma_table end\n");
	return error;
}
1844 /**
1845 * sep_construct_dma_tables_from_lli - prepare AES/DES mappings
1846 * @sep: pointer to struct sep_device
1847 * @lli_in_array: array of input DMA LLI entries
1848 * @sep_in_lli_entries: number of entries in lli_in_array
1849 * @lli_out_array: array of output DMA LLI entries
1850 * @sep_out_lli_entries: number of entries in lli_out_array
1851 * @block_size: block size of the cipher operation
1852 * @lli_table_in_ptr: bus address of the first input DMA table
1853 * @lli_table_out_ptr: bus address of the first output DMA table
1854 * @in_num_entries_ptr: number of entries in the first input table
1855 * @out_num_entries_ptr: number of entries in the first output table
1856 * @table_data_size_ptr: data size of the first input/output table pair
1858 * This function creates the input and output DMA tables for
1859 * symmetric operations (AES/DES) according to the block
1860 * size from LLI arrays
1861 * Note that all bus addresses that are passed to the SEP
1862 * are in 32 bit format; the SEP is a 32 bit device
1863 */
1864 static int sep_construct_dma_tables_from_lli(
1865 struct sep_device *sep,
1866 struct sep_lli_entry *lli_in_array,
1867 u32 sep_in_lli_entries,
1868 struct sep_lli_entry *lli_out_array,
1869 u32 sep_out_lli_entries,
1870 u32 block_size,
1871 dma_addr_t *lli_table_in_ptr,
1872 dma_addr_t *lli_table_out_ptr,
1873 u32 *in_num_entries_ptr,
1874 u32 *out_num_entries_ptr,
1875 u32 *table_data_size_ptr)
1876 {
1877 /* Points to the area where next lli table can be allocated */
1878 u32 lli_table_alloc_addr = 0;
1879 /* Input lli table */
1880 struct sep_lli_entry *in_lli_table_ptr = NULL;
1881 /* Output lli table */
1882 struct sep_lli_entry *out_lli_table_ptr = NULL;
1883 /* Pointer to the info entry of the table - the last entry */
1884 struct sep_lli_entry *info_in_entry_ptr = NULL;
1885 /* Pointer to the info entry of the table - the last entry */
1886 struct sep_lli_entry *info_out_entry_ptr = NULL;
1887 /* Points to the first entry to be processed in the lli_in_array */
1888 u32 current_in_entry = 0;
1889 /* Points to the first entry to be processed in the lli_out_array */
1890 u32 current_out_entry = 0;
1891 /* Max size of the input table */
1892 u32 in_table_data_size = 0;
1893 /* Max size of the output table */
1894 u32 out_table_data_size = 0;
1895 /* Flag that signifies if this is the last table built */
1896 u32 last_table_flag = 0;
1897 /* The data size that should be in table */
1898 u32 table_data_size = 0;
1899 /* Number of entries in the input table */
1900 u32 num_entries_in_table = 0;
1901 /* Number of entries in the output table */
1902 u32 num_entries_out_table = 0;
1904 dev_dbg(&sep->pdev->dev, "sep_construct_dma_tables_from_lli start\n");
1906 /* Initiate to point after the message area */
1907 lli_table_alloc_addr = (u32)(sep->shared_addr +
1908 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1909 (sep->num_lli_tables_created *
1910 (sizeof(struct sep_lli_entry) *
1911 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
1913 /* Loop until all the entries in the input array are processed */
1914 while (current_in_entry < sep_in_lli_entries) {
1915 /* Set the new input and output tables */
1916 in_lli_table_ptr =
1917 (struct sep_lli_entry *)lli_table_alloc_addr;
1919 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
1920 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1922 /* Set the first output tables */
1923 out_lli_table_ptr =
1924 (struct sep_lli_entry *)lli_table_alloc_addr;
1926 /* Check if the DMA table area limit was overrun */
1927 if ((lli_table_alloc_addr + sizeof(struct sep_lli_entry) *
1928 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) >
1929 ((u32)sep->shared_addr +
1930 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1931 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
1933 dev_warn(&sep->pdev->dev, "dma table limit overrun\n");
1934 return -ENOMEM;
1937 /* Update the number of the lli tables created */
1938 sep->num_lli_tables_created += 2;
1940 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
1941 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1943 /* Calculate the maximum size of data for input table */
1944 in_table_data_size =
1945 sep_calculate_lli_table_max_size(sep,
1946 &lli_in_array[current_in_entry],
1947 (sep_in_lli_entries - current_in_entry),
1948 &last_table_flag);
1950 /* Calculate the maximum size of data for output table */
1951 out_table_data_size =
1952 sep_calculate_lli_table_max_size(sep,
1953 &lli_out_array[current_out_entry],
1954 (sep_out_lli_entries - current_out_entry),
1955 &last_table_flag);
1957 dev_dbg(&sep->pdev->dev,
1958 "in_table_data_size is %x\n",
1959 in_table_data_size);
1961 dev_dbg(&sep->pdev->dev,
1962 "out_table_data_size is %x\n",
1963 out_table_data_size);
1965 table_data_size = in_table_data_size;
1967 if (!last_table_flag) {
1968 /*
1969 * If this is not the last table,
1970 * then we must check where the data is smallest
1971 * and then align it to the block size
1972 */
1973 if (table_data_size > out_table_data_size)
1974 table_data_size = out_table_data_size;
1976 /*
1977 * Now calculate the table size so that
1978 * it will be a multiple of the block size
1979 */
1980 table_data_size = (table_data_size / block_size) *
1981 block_size;
1984 dev_dbg(&sep->pdev->dev, "table_data_size is %x\n",
1985 table_data_size);
1987 /* Construct input lli table */
1988 sep_build_lli_table(sep, &lli_in_array[current_in_entry],
1989 in_lli_table_ptr,
1990 &current_in_entry,
1991 &num_entries_in_table,
1992 table_data_size);
1994 /* Construct output lli table */
1995 sep_build_lli_table(sep, &lli_out_array[current_out_entry],
1996 out_lli_table_ptr,
1997 &current_out_entry,
1998 &num_entries_out_table,
1999 table_data_size);
2001 /* If info entry is null - this is the first table built */
2002 if (info_in_entry_ptr == NULL) {
2003 /* Set the output parameters to physical addresses */
2004 *lli_table_in_ptr =
2005 sep_shared_area_virt_to_bus(sep, in_lli_table_ptr);
2007 *in_num_entries_ptr = num_entries_in_table;
2009 *lli_table_out_ptr =
2010 sep_shared_area_virt_to_bus(sep,
2011 out_lli_table_ptr);
2013 *out_num_entries_ptr = num_entries_out_table;
2014 *table_data_size_ptr = table_data_size;
2016 dev_dbg(&sep->pdev->dev,
2017 "output lli_table_in_ptr is %08lx\n",
2018 (unsigned long)*lli_table_in_ptr);
2019 dev_dbg(&sep->pdev->dev,
2020 "output lli_table_out_ptr is %08lx\n",
2021 (unsigned long)*lli_table_out_ptr);
2022 } else {
2023 /* Update the info entry of the previous in table */
2024 info_in_entry_ptr->bus_address =
2025 sep_shared_area_virt_to_bus(sep,
2026 in_lli_table_ptr);
2028 info_in_entry_ptr->block_size =
2029 ((num_entries_in_table) << 24) |
2030 (table_data_size);
2032 /* Update the info entry of the previous out table */
2033 info_out_entry_ptr->bus_address =
2034 sep_shared_area_virt_to_bus(sep,
2035 out_lli_table_ptr);
2037 info_out_entry_ptr->block_size =
2038 ((num_entries_out_table) << 24) |
2039 (table_data_size);
2041 dev_dbg(&sep->pdev->dev,
2042 "output lli_table_in_ptr:%08lx %08x\n",
2043 (unsigned long)info_in_entry_ptr->bus_address,
2044 info_in_entry_ptr->block_size);
2046 dev_dbg(&sep->pdev->dev,
2047 "output lli_table_out_ptr:%08lx %08x\n",
2048 (unsigned long)info_out_entry_ptr->bus_address,
2049 info_out_entry_ptr->block_size);
2052 /* Save the pointer to the info entry of the current tables */
2053 info_in_entry_ptr = in_lli_table_ptr +
2054 num_entries_in_table - 1;
2055 info_out_entry_ptr = out_lli_table_ptr +
2056 num_entries_out_table - 1;
2058 dev_dbg(&sep->pdev->dev,
2059 "output num_entries_out_table is %x\n",
2060 (u32)num_entries_out_table);
2061 dev_dbg(&sep->pdev->dev,
2062 "output info_in_entry_ptr is %lx\n",
2063 (unsigned long)info_in_entry_ptr);
2064 dev_dbg(&sep->pdev->dev,
2065 "output info_out_entry_ptr is %lx\n",
2066 (unsigned long)info_out_entry_ptr);
2069 /* Print input tables */
2070 sep_debug_print_lli_tables(sep,
2071 (struct sep_lli_entry *)
2072 sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr),
2073 *in_num_entries_ptr,
2074 *table_data_size_ptr);
2076 /* Print output tables */
2077 sep_debug_print_lli_tables(sep,
2078 (struct sep_lli_entry *)
2079 sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr),
2080 *out_num_entries_ptr,
2081 *table_data_size_ptr);
2083 dev_dbg(&sep->pdev->dev, "sep_construct_dma_tables_from_lli end\n");
2084 return 0;
2085 }
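/*
 * Sketch of how the chained tables are linked (a reading aid, not
 * driver code): the last ("info") entry of each table points at the
 * next table, with the entry count packed into the top byte of the
 * block_size field, as done by the builders above:
 *
 *	u32 word = (num_entries << 24) | table_data_size;
 *	u32 entries = word >> 24;      // entries in the next table
 *	u32 size = word & 0x00FFFFFF;  // data size, at most 16 MB - 1
 */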
2087 /**
2088 * sep_prepare_input_output_dma_table - prepare DMA I/O table
2089 * @app_virt_in_addr: virtual address of the input buffer
2090 * @app_virt_out_addr: virtual address of the output buffer
2091 * @data_size: size of the data
2092 * @block_size: block size of the cipher operation
2093 * @lli_table_in_ptr: bus address of the first input DMA table
2094 * @lli_table_out_ptr: bus address of the first output DMA table
2095 * @in_num_entries_ptr: number of entries in the first input table
2096 * @out_num_entries_ptr: number of entries in the first output table
2097 * @table_data_size_ptr: data size of the first input/output table pair
2098 * @is_kva: set for kernel data; used only for kernel crypto module
2100 * This function builds input and output DMA tables for synchronic
2101 * symmetric operations (AES, DES, HASH). It also checks that each table
2102 * is of the modular block size
2103 * Note that all bus addresses that are passed to the SEP
2104 * are in 32 bit format; the SEP is a 32 bit device
2105 */
2106 static int sep_prepare_input_output_dma_table(struct sep_device *sep,
2107 unsigned long app_virt_in_addr,
2108 unsigned long app_virt_out_addr,
2109 u32 data_size,
2110 u32 block_size,
2111 dma_addr_t *lli_table_in_ptr,
2112 dma_addr_t *lli_table_out_ptr,
2113 u32 *in_num_entries_ptr,
2114 u32 *out_num_entries_ptr,
2115 u32 *table_data_size_ptr,
2116 bool is_kva)
2117 {
2119 int error = 0;
2120 /* Array of pointers of page */
2121 struct sep_lli_entry *lli_in_array;
2122 /* Array of pointers of page */
2123 struct sep_lli_entry *lli_out_array;
2125 dev_dbg(&sep->pdev->dev, "sep_prepare_input_output_dma_table start\n");
2127 if (data_size == 0) {
2128 /* Prepare empty table for input and output */
2129 sep_prepare_empty_lli_table(sep, lli_table_in_ptr,
2130 in_num_entries_ptr, table_data_size_ptr);
2132 sep_prepare_empty_lli_table(sep, lli_table_out_ptr,
2133 out_num_entries_ptr, table_data_size_ptr);
2135 goto update_dcb_counter;
2138 /* Initialize the pages pointers */
2139 sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
2140 sep->dma_res_arr[sep->nr_dcb_creat].out_page_array = NULL;
2142 /* Lock the pages of the buffer and translate them to pages */
2143 if (is_kva == true) {
2144 error = sep_lock_kernel_pages(sep, app_virt_in_addr,
2145 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG);
2147 if (error) {
2148 dev_warn(&sep->pdev->dev,
2149 "lock kernel for in failed\n");
2150 goto end_function;
2151 }
2153 error = sep_lock_kernel_pages(sep, app_virt_out_addr,
2154 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG);
2156 if (error) {
2157 dev_warn(&sep->pdev->dev,
2158 "lock kernel for out failed\n");
2159 goto end_function_free_lli_in;
2160 }
2161 } else {
2164 error = sep_lock_user_pages(sep, app_virt_in_addr,
2165 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG);
2166 if (error) {
2167 dev_warn(&sep->pdev->dev,
2168 "sep_lock_user_pages for input virtual buffer failed\n");
2169 goto end_function;
2170 }
2172 error = sep_lock_user_pages(sep, app_virt_out_addr,
2173 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG);
2175 if (error) {
2176 dev_warn(&sep->pdev->dev,
2177 "sep_lock_user_pages for output virtual buffer failed\n");
2178 goto end_function_free_lli_in;
2179 }
2180 }
2182 dev_dbg(&sep->pdev->dev, "sep_in_num_pages is %x\n",
2183 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages);
2184 dev_dbg(&sep->pdev->dev, "sep_out_num_pages is %x\n",
2185 sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages);
2186 dev_dbg(&sep->pdev->dev, "SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n",
2187 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
2189 /* Call the function that creates tables from the lli arrays */
2190 error = sep_construct_dma_tables_from_lli(sep, lli_in_array,
2191 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages,
2192 lli_out_array,
2193 sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages,
2194 block_size, lli_table_in_ptr, lli_table_out_ptr,
2195 in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
2197 if (error) {
2198 dev_warn(&sep->pdev->dev,
2199 "sep_construct_dma_tables_from_lli failed\n");
2200 goto end_function_with_error;
2203 kfree(lli_out_array);
2204 kfree(lli_in_array);
2206 update_dcb_counter:
2207 /* Update DCB counter */
2208 sep->nr_dcb_creat++;
2209 /* Fall through - free the lli entry arrays */
2210 dev_dbg(&sep->pdev->dev, "in_num_entries_ptr is %08x\n",
2211 *in_num_entries_ptr);
2212 dev_dbg(&sep->pdev->dev, "out_num_entries_ptr is %08x\n",
2213 *out_num_entries_ptr);
2214 dev_dbg(&sep->pdev->dev, "table_data_size_ptr is %08x\n",
2215 *table_data_size_ptr);
2217 goto end_function;
2219 end_function_with_error:
2220 kfree(sep->dma_res_arr[sep->nr_dcb_creat].out_map_array);
2221 kfree(sep->dma_res_arr[sep->nr_dcb_creat].out_page_array);
2222 kfree(lli_out_array);
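/*
 * Deliberate fall-through: after the output-side arrays are released
 * here, control continues into end_function_free_lli_in below to
 * release the input-side arrays as well.
 */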
2225 end_function_free_lli_in:
2226 kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_map_array);
2227 kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_page_array);
2228 kfree(lli_in_array);
2230 end_function:
2231 dev_dbg(&sep->pdev->dev,
2232 "sep_prepare_input_output_dma_table end result = %d\n", error);
2234 return error;
2235 }
2238 /**
2239 * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
2240 * @app_in_address: unsigned long; for data buffer in (user space)
2241 * @app_out_address: unsigned long; for data buffer out (user space)
2242 * @data_in_size: u32; for size of data
2243 * @block_size: u32; for block size
2244 * @tail_block_size: u32; for size of tail block
2245 * @isapplet: bool; to indicate external app
2246 * @is_kva: bool; kernel buffer; only used for kernel crypto module
2248 * This function prepares the linked DMA tables and puts the
2249 * address for the linked list of tables into a DCB (data control
2250 * block), the address of which is known by the SEP hardware
2251 * Note that all bus addresses that are passed to the SEP
2252 * are in 32 bit format; the SEP is a 32 bit device
2253 */
2254 static int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
2255 u32 app_in_address,
2256 u32 app_out_address,
2257 u32 data_in_size,
2258 u32 block_size,
2259 u32 tail_block_size,
2260 bool isapplet,
2261 bool is_kva)
2262 {
2263 int error = 0;
2264 /* Size of tail */
2265 u32 tail_size = 0;
2266 /* Address of the created DCB table */
2267 struct sep_dcblock *dcb_table_ptr = NULL;
2268 /* The physical address of the first input DMA table */
2269 dma_addr_t in_first_mlli_address = 0;
2270 /* Number of entries in the first input DMA table */
2271 u32 in_first_num_entries = 0;
2272 /* The physical address of the first output DMA table */
2273 dma_addr_t out_first_mlli_address = 0;
2274 /* Number of entries in the first output DMA table */
2275 u32 out_first_num_entries = 0;
2276 /* Data in the first input/output table */
2277 u32 first_data_size = 0;
2279 dev_dbg(&sep->pdev->dev, "prepare_input_output_dma_table_in_dcb start\n");
2281 if (sep->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) {
2282 /* No more DCBs to allocate */
2283 dev_warn(&sep->pdev->dev, "no more DCBs available\n");
2284 error = -ENOSPC;
2285 goto end_function;
2288 /* Allocate new DCB */
2289 dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr +
2290 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES +
2291 (sep->nr_dcb_creat * sizeof(struct sep_dcblock)));
2293 /* Set the default values in the DCB */
2294 dcb_table_ptr->input_mlli_address = 0;
2295 dcb_table_ptr->input_mlli_num_entries = 0;
2296 dcb_table_ptr->input_mlli_data_size = 0;
2297 dcb_table_ptr->output_mlli_address = 0;
2298 dcb_table_ptr->output_mlli_num_entries = 0;
2299 dcb_table_ptr->output_mlli_data_size = 0;
2300 dcb_table_ptr->tail_data_size = 0;
2301 dcb_table_ptr->out_vr_tail_pt = 0;
2303 if (isapplet == true) {
2304 tail_size = data_in_size % block_size;
2305 if (tail_size) {
2306 if (data_in_size < tail_block_size) {
2307 dev_warn(&sep->pdev->dev, "data in size smaller than tail block size\n");
2308 error = -ENOSPC;
2309 goto end_function;
2310 }
2311 if (tail_block_size)
2312 /*
2313 * In this case the tail size should be
2314 * bigger than the real block size
2315 */
2316 tail_size = tail_block_size +
2317 ((data_in_size -
2318 tail_block_size) % block_size);
2319 }
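/*
 * Worked example of the tail computation above, with hypothetical
 * numbers: data_in_size = 100, block_size = 16, tail_block_size = 24.
 * The raw remainder is 100 % 16 = 4 bytes, so a tail exists; since a
 * minimum tail block is requested, tail_size becomes
 * 24 + ((100 - 24) % 16) = 24 + 12 = 36 bytes, leaving the remaining
 * 64 bytes as four whole cipher blocks for the DMA tables.
 */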
2321 /* Check if there is enough data for DMA operation */
2322 if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) {
2323 if (is_kva == true) {
2324 memcpy(dcb_table_ptr->tail_data,
2325 (void *)app_in_address, data_in_size);
2326 } else {
2327 if (copy_from_user(dcb_table_ptr->tail_data,
2328 (void __user *)app_in_address,
2329 data_in_size)) {
2330 error = -EFAULT;
2331 goto end_function;
2335 dcb_table_ptr->tail_data_size = data_in_size;
2337 /* Set the output user-space address for mem2mem op */
2338 if (app_out_address)
2339 dcb_table_ptr->out_vr_tail_pt =
2340 (u32)app_out_address;
2342 /*
2343 * Update both data length parameters in order to avoid
2344 * second data copy and allow building of empty mlli
2345 * tables
2346 */
2347 tail_size = 0x0;
2348 data_in_size = 0x0;
2350 if (tail_size) {
2351 if (is_kva == true) {
2352 memcpy(dcb_table_ptr->tail_data,
2353 (void *)(app_in_address + data_in_size -
2354 tail_size), tail_size);
2355 } else {
2356 /* We have tail data - copy it to DCB */
2357 if (copy_from_user(dcb_table_ptr->tail_data,
2358 (void *)(app_in_address +
2359 data_in_size - tail_size), tail_size)) {
2360 error = -EFAULT;
2361 goto end_function;
2362 }
2363 }
2364 if (app_out_address)
2365 /*
2366 * Calculate the output address
2367 * according to tail data size
2368 */
2369 dcb_table_ptr->out_vr_tail_pt =
2370 app_out_address + data_in_size
2371 - tail_size;
2373 /* Save the real tail data size */
2374 dcb_table_ptr->tail_data_size = tail_size;
2375 /*
2376 * Update the data size without the tail
2377 * data size, i.e. the data for the DMA
2378 */
2379 data_in_size = (data_in_size - tail_size);
2380 }
2381 }
2382 /* Check if we need to build only input table or input/output */
2383 if (app_out_address) {
2384 /* Prepare input/output tables */
2385 error = sep_prepare_input_output_dma_table(sep,
2386 app_in_address,
2387 app_out_address,
2388 data_in_size,
2389 block_size,
2390 &in_first_mlli_address,
2391 &out_first_mlli_address,
2392 &in_first_num_entries,
2393 &out_first_num_entries,
2394 &first_data_size,
2395 is_kva);
2396 } else {
2397 /* Prepare input tables */
2398 error = sep_prepare_input_dma_table(sep,
2399 app_in_address,
2400 data_in_size,
2401 block_size,
2402 &in_first_mlli_address,
2403 &in_first_num_entries,
2404 &first_data_size,
2405 is_kva);
2408 if (error) {
2409 dev_warn(&sep->pdev->dev, "prepare DMA table call failed from prepare DCB call\n");
2410 goto end_function;
2413 /* Set the DCB values */
2414 dcb_table_ptr->input_mlli_address = in_first_mlli_address;
2415 dcb_table_ptr->input_mlli_num_entries = in_first_num_entries;
2416 dcb_table_ptr->input_mlli_data_size = first_data_size;
2417 dcb_table_ptr->output_mlli_address = out_first_mlli_address;
2418 dcb_table_ptr->output_mlli_num_entries = out_first_num_entries;
2419 dcb_table_ptr->output_mlli_data_size = first_data_size;
2421 end_function:
2422 dev_dbg(&sep->pdev->dev,
2423 "sep_prepare_input_output_dma_table_in_dcb end\n");
2424 return error;
2430 * sep_create_sync_dma_tables_handler - create sync DMA tables
2431 * @sep: pointer to struct sep_device
2432 * @arg: pointer to struct bld_syn_tab_struct
2434 * Handle the request for creation of the DMA tables for the synchronic
2435 * symmetric operations (AES,DES). Note that all bus addresses that are
2436 * passed to the SEP are in 32 bit format; the SEP is a 32 bit device
2438 static int sep_create_sync_dma_tables_handler(struct sep_device *sep,
2439 unsigned long arg)
2441 int error = 0;
2443 /* Command arguments */
2444 struct bld_syn_tab_struct command_args;
2446 dev_dbg(&sep->pdev->dev,
2447 "sep_create_sync_dma_tables_handler start\n");
2449 if (copy_from_user(&command_args, (void __user *)arg,
2450 sizeof(struct bld_syn_tab_struct))) {
2451 error = -EFAULT;
2452 goto end_function;
2455 dev_dbg(&sep->pdev->dev, "app_in_address is %08llx\n",
2456 command_args.app_in_address);
2457 dev_dbg(&sep->pdev->dev, "app_out_address is %08llx\n",
2458 command_args.app_out_address);
2459 dev_dbg(&sep->pdev->dev, "data_size is %u\n",
2460 command_args.data_in_size);
2461 dev_dbg(&sep->pdev->dev, "block_size is %u\n",
2462 command_args.block_size);
2464 /* Validate user parameters */
2465 if (!command_args.app_in_address) {
2466 error = -EINVAL;
2467 goto end_function;
2470 error = sep_prepare_input_output_dma_table_in_dcb(sep,
2471 command_args.app_in_address,
2472 command_args.app_out_address,
2473 command_args.data_in_size,
2474 command_args.block_size,
2475 0x0,
2476 false,
2477 false);
2479 end_function:
2480 dev_dbg(&sep->pdev->dev, "sep_create_sync_dma_tables_handler end\n");
2481 return error;
2485 * sep_free_dma_tables_and_dcb - free DMA tables and DCBs
2486 * @sep: pointer to struct sep_device
2487 * @isapplet: indicates external application (used for kernel access)
2488 * @is_kva: indicates kernel addresses (only used for kernel crypto)
2490 * This function frees the DMA tables and DCB
2492 static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
2493 bool is_kva)
2495 int i = 0;
2496 int error = 0;
2497 int error_temp = 0;
2498 struct sep_dcblock *dcb_table_ptr;
2500 dev_dbg(&sep->pdev->dev, "sep_free_dma_tables_and_dcb start\n");
2502 if (isapplet == true) {
2503 /* Set pointer to first DCB table */
2504 dcb_table_ptr = (struct sep_dcblock *)
2505 (sep->shared_addr +
2506 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES);
2508 /* Go over each DCB and see if tail pointer must be updated */
2509 for (i = 0; i < sep->nr_dcb_creat; i++, dcb_table_ptr++) {
2510 if (dcb_table_ptr->out_vr_tail_pt) {
2511 if (is_kva == true) {
2512 memcpy((void *)dcb_table_ptr->out_vr_tail_pt,
2513 dcb_table_ptr->tail_data,
2514 dcb_table_ptr->tail_data_size);
2515 } else {
2516 error_temp = copy_to_user(
2517 (void *)dcb_table_ptr->out_vr_tail_pt,
2518 dcb_table_ptr->tail_data,
2519 dcb_table_ptr->tail_data_size);
2521 if (error_temp) {
2522 /* Release the DMA resource */
2523 error = -EFAULT;
2524 break;
2529 /* Free the output pages, if any */
2530 sep_free_dma_table_data_handler(sep);
2532 dev_dbg(&sep->pdev->dev, "sep_free_dma_tables_and_dcb end\n");
2533 return error;
2537 * sep_get_static_pool_addr_handler - get static pool address
2538 * @sep: pointer to struct sep_device
2541 * This function sets the bus and virtual addresses of the static pool
2542 * and returns the virtual address
2544 static int sep_get_static_pool_addr_handler(struct sep_device *sep)
2546 u32 *static_pool_addr = NULL;
2548 dev_dbg(&sep->pdev->dev, "sep_get_static_pool_addr_handler start\n");
2550 static_pool_addr = (u32 *)(sep->shared_addr +
2551 SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES);
2553 static_pool_addr[0] = SEP_STATIC_POOL_VAL_TOKEN;
2554 static_pool_addr[1] = (u32)sep->shared_bus +
2555 SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
2557 dev_dbg(&sep->pdev->dev, "static pool: physical %x\n",
2558 (u32)static_pool_addr[1]);
2560 dev_dbg(&sep->pdev->dev, "sep_get_static_pool_addr_handler end\n");
2562 return 0;
2566 * sep_start_handler - start device
2567 * @sep: pointer to struct sep_device
2569 static int sep_start_handler(struct sep_device *sep)
2571 unsigned long reg_val;
2572 unsigned long error = 0;
2574 dev_dbg(&sep->pdev->dev, "sep_start_handler start\n");
2576 /* Wait in polling for message from SEP */
2577 do {
2578 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2579 } while (!reg_val);
2581 /* Check the value */
2582 if (reg_val == 0x1)
2583 /* Fatal error - read error status from GPR0 */
2584 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2585 dev_dbg(&sep->pdev->dev, "sep_start_handler end\n");
2586 return error;
2587 }
2589 /**
2590 * sep_check_sum_calc - checksum messages
2591 * @data: buffer to checksum
2592 * @length: buffer size
2594 * This function performs a checksum for messages that are sent
2595 * to the SEP.
2596 */
2597 static u32 sep_check_sum_calc(u8 *data, u32 length)
2598 {
2599 u32 sum = 0;
2600 u16 *Tdata = (u16 *)data;
2602 while (length > 1) {
2603 /* This is the inner loop */
2604 sum += *Tdata++;
2605 length -= 2;
2608 /* Add left-over byte, if any */
2609 if (length > 0)
2610 sum += *(u8 *)Tdata;
2612 /* Fold 32-bit sum to 16 bits */
2613 while (sum >> 16)
2614 sum = (sum & 0xffff) + (sum >> 16);
2616 return ~sum & 0xFFFF;
2617 }
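/*
 * Worked example of the checksum fold above (assuming a little-endian
 * host; the bytes are hypothetical): for data {0x01, 0x02, 0x03} and
 * length 3, the 16-bit read yields 0x0201, the leftover byte adds
 * 0x03, giving sum = 0x0204 with nothing to fold, and the function
 * returns ~0x0204 & 0xFFFF = 0xFDFB.
 */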
2619 /**
2620 * sep_init_handler - initialize the SEP device
2621 * @sep: pointer to struct sep_device
2622 * @arg: parameters from user space application
2624 * Handles the request for SEP initialization
2625 * Note that this will go away for Medfield once the SCU
2626 * SEP initialization is complete
2627 * Also note that the message to the SEP has components
2628 * from user space as well as components written by the driver
2629 * This is because the portions of the message that pertain to
2630 * physical addresses must be set by the driver after the message
2631 * leaves custody of the user space application for security
2632 * reasons.
2633 */
2634 static int sep_init_handler(struct sep_device *sep, unsigned long arg)
2635 {
2636 u32 message_buff[14];
2637 u32 counter;
2638 int error = 0;
2639 u32 reg_val;
2640 dma_addr_t new_base_addr;
2641 unsigned long addr_hold;
2642 struct init_struct command_args;
2644 dev_dbg(&sep->pdev->dev, "sep_init_handler start\n");
2646 /* Make sure that we have not initialized already */
2647 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2649 if (reg_val != 0x2) {
2650 error = SEP_ALREADY_INITIALIZED_ERR;
2651 dev_warn(&sep->pdev->dev, "init; device already initialized\n");
2652 goto end_function;
2655 /* Only root can initialize */
2656 if (!capable(CAP_SYS_ADMIN)) {
2657 error = -EACCES;
2658 goto end_function;
2661 /* Copy in the parameters */
2662 error = copy_from_user(&command_args, (void __user *)arg,
2663 sizeof(struct init_struct));
2665 if (error) {
2666 error = -EFAULT;
2667 goto end_function;
2670 /* Validate parameters */
2671 if (!command_args.message_addr || !command_args.sep_sram_addr ||
2672 command_args.message_size_in_words > 14) {
2673 error = -EINVAL;
2674 goto end_function;
2677 /* Copy in the SEP init message */
2678 addr_hold = (unsigned long)command_args.message_addr;
2679 error = copy_from_user(message_buff,
2680 (void __user *)addr_hold,
2681 command_args.message_size_in_words*sizeof(u32));
2683 if (error) {
2684 error = -EFAULT;
2685 goto end_function;
2688 /* Load resident, cache, and extapp firmware */
2689 error = sep_load_firmware(sep);
2691 if (error) {
2692 dev_warn(&sep->pdev->dev,
2693 "init; copy SEP init message failed %x\n", error);
2694 goto end_function;
2697 /* Compute the base address */
2698 new_base_addr = sep->shared_bus;
2700 if (sep->resident_bus < new_base_addr)
2701 new_base_addr = sep->resident_bus;
2703 if (sep->cache_bus < new_base_addr)
2704 new_base_addr = sep->cache_bus;
2706 if (sep->dcache_bus < new_base_addr)
2707 new_base_addr = sep->dcache_bus;
2709 /* Put physical addresses in SEP message */
2710 message_buff[3] = (u32)new_base_addr;
2711 message_buff[4] = (u32)sep->shared_bus;
2712 message_buff[6] = (u32)sep->resident_bus;
2713 message_buff[7] = (u32)sep->cache_bus;
2714 message_buff[8] = (u32)sep->dcache_bus;
2716 message_buff[command_args.message_size_in_words - 1] = 0x0;
2717 message_buff[command_args.message_size_in_words - 1] =
2718 sep_check_sum_calc((u8 *)message_buff,
2719 command_args.message_size_in_words*sizeof(u32));
2721 /* Debug print of message */
2722 for (counter = 0; counter < command_args.message_size_in_words;
2723 counter++)
2724 dev_dbg(&sep->pdev->dev, "init; SEP message word %d is %x\n",
2725 counter, message_buff[counter]);
2727 /* Tell the SEP the sram address */
2728 sep_write_reg(sep, HW_SRAM_ADDR_REG_ADDR, command_args.sep_sram_addr);
2730 /* Push the message to the SEP */
2731 for (counter = 0; counter < command_args.message_size_in_words;
2732 counter++) {
2733 sep_write_reg(sep, HW_SRAM_DATA_REG_ADDR,
2734 message_buff[counter]);
2735 sep_wait_sram_write(sep);
2738 /* Signal SEP that message is ready and to init */
2739 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1);
2741 /* Wait for acknowledge */
2742 dev_dbg(&sep->pdev->dev, "init; waiting for msg response\n");
2744 do {
2745 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2746 } while (!(reg_val & 0xFFFFFFFD));
2748 if (reg_val == 0x1) {
2749 dev_warn(&sep->pdev->dev, "init; device int failed\n");
2750 error = sep_read_reg(sep, 0x8060);
2751 dev_warn(&sep->pdev->dev, "init; sw monitor is %x\n", error);
2752 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2753 dev_warn(&sep->pdev->dev, "init; error is %x\n", error);
2754 goto end_function;
2756 dev_dbg(&sep->pdev->dev, "init; end CC INIT, reg_val is %x\n", reg_val);
2758 /* Signal SEP to zero the GPR3 */
2759 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x10);
2761 /* Wait for response */
2762 dev_dbg(&sep->pdev->dev, "init; waiting for zero set response\n");
2764 do {
2765 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2766 } while (reg_val != 0);
2768 end_function:
2769 dev_dbg(&sep->pdev->dev, "init is done\n");
2770 return error;
2771 }
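/*
 * Summary of the GPR handshake coded above (register semantics beyond
 * this are firmware-defined, so treat it as a reading aid): GPR3 ==
 * 0x2 means the SEP is still awaiting initialization, while GPR3 ==
 * 0x1 after the handshake signals a fatal error; the host writes 0x1
 * to GPR0 to mark the init message ready, and 0x10 to ask the SEP to
 * clear GPR3 again.
 */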
2774 * sep_end_transaction_handler - end transaction
2775 * @sep: pointer to struct sep_device
2777 * This API handles the end transaction request
2779 static int sep_end_transaction_handler(struct sep_device *sep)
2781 dev_dbg(&sep->pdev->dev, "sep_end_transaction_handler start\n");
2783 /* Clear the data pool pointers Token */
2784 memset((void *)(sep->shared_addr +
2785 SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES),
2786 0, sep->num_of_data_allocations*2*sizeof(u32));
2788 /* Check that all the DMA resources were freed */
2789 sep_free_dma_table_data_handler(sep);
2791 clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
2793 /*
2794 * We are now through with the transaction. Let's
2795 * allow other processes who have the device open
2796 * to perform transactions
2797 */
2798 mutex_lock(&sep->sep_mutex);
2799 sep->pid_doing_transaction = 0;
2800 mutex_unlock(&sep->sep_mutex);
2801 /* Raise event for stuck contexts */
2802 wake_up(&sep->event);
2804 dev_dbg(&sep->pdev->dev, "waking up event\n");
2805 dev_dbg(&sep->pdev->dev, "sep_end_transaction_handler end\n");
2807 return 0;
2811 * sep_prepare_dcb_handler - prepare a control block
2812 * @sep: pointer to struct sep_device
2813 * @arg: pointer to user parameters
2815 * This function retrieves the command arguments from user space
2816 * and prepares the DCB and DMA tables for the given user buffers.
2817 */
2818 static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg)
2820 int error;
2821 /* Command arguments */
2822 struct build_dcb_struct command_args;
2824 dev_dbg(&sep->pdev->dev, "sep_prepare_dcb_handler start\n");
2826 /* Get the command arguments */
2827 if (copy_from_user(&command_args, (void __user *)arg,
2828 sizeof(struct build_dcb_struct))) {
2829 error = -EFAULT;
2830 goto end_function;
2833 dev_dbg(&sep->pdev->dev, "app_in_address is %08llx\n",
2834 command_args.app_in_address);
2835 dev_dbg(&sep->pdev->dev, "app_out_address is %08llx\n",
2836 command_args.app_out_address);
2837 dev_dbg(&sep->pdev->dev, "data_size is %x\n",
2838 command_args.data_in_size);
2839 dev_dbg(&sep->pdev->dev, "block_size is %x\n",
2840 command_args.block_size);
2841 dev_dbg(&sep->pdev->dev, "tail block_size is %x\n",
2842 command_args.tail_block_size);
2844 error = sep_prepare_input_output_dma_table_in_dcb(sep,
2845 command_args.app_in_address, command_args.app_out_address,
2846 command_args.data_in_size, command_args.block_size,
2847 command_args.tail_block_size, true, false);
2849 end_function:
2850 dev_dbg(&sep->pdev->dev, "sep_prepare_dcb_handler end\n");
2851 return error;
2856 * sep_free_dcb_handler - free control block resources
2857 * @sep: pointer to struct sep_device
2859 * This function frees the DCB resources and updates the needed
2860 * user-space buffers.
2862 static int sep_free_dcb_handler(struct sep_device *sep)
2864 int error;
2866 dev_dbg(&sep->pdev->dev, "sep_free_dcb_handler start\n");
2867 dev_dbg(&sep->pdev->dev, "num of DCBs %x\n", sep->nr_dcb_creat);
2869 error = sep_free_dma_tables_and_dcb(sep, false, false);
2871 dev_dbg(&sep->pdev->dev, "sep_free_dcb_handler end\n");
2872 return error;
2876 * sep_rar_prepare_output_msg_handler - prepare an output message
2877 * @sep: pointer to struct sep_device
2878 * @arg: pointer to user parameters
2880 * This function will retrieve the RAR buffer physical addresses, type
2881 * & size corresponding to the RAR handles provided in the buffers vector.
2883 static int sep_rar_prepare_output_msg_handler(struct sep_device *sep,
2884 unsigned long arg)
2886 int error = 0;
2887 /* Command args */
2888 struct rar_hndl_to_bus_struct command_args;
2889 struct RAR_buffer rar_buf;
2890 /* Bus address */
2891 dma_addr_t rar_bus = 0;
2892 /* Holds the RAR address in the system memory offset */
2893 u32 *rar_addr;
2895 dev_dbg(&sep->pdev->dev, "sep_rar_prepare_output_msg_handler start\n");
2897 /* Copy the data */
2898 if (copy_from_user(&command_args, (void __user *)arg,
2899 sizeof(command_args))) {
2900 error = -EFAULT;
2901 goto end_function;
2904 /* Call to translation function only if user handle is not NULL */
2905 if (command_args.rar_handle) {
2906 memset(&rar_buf, 0, sizeof(rar_buf));
2907 rar_buf.info.handle = (u32)command_args.rar_handle;
2909 if (rar_handle_to_bus(&rar_buf, 1) != 1) {
2910 dev_dbg(&sep->pdev->dev, "rar_handle_to_bus failure\n");
2911 error = -EFAULT;
2912 goto end_function;
2913 }
2914 rar_bus = rar_buf.bus_address;
2915 }
2916 dev_dbg(&sep->pdev->dev, "rar msg; rar_addr_bus = %x\n", (u32)rar_bus);
2918 /* Set value in the SYSTEM MEMORY offset */
2919 rar_addr = (u32 *)(sep->shared_addr +
2920 SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES);
2922 /* Copy the physical address to the System Area for the SEP */
2923 rar_addr[0] = SEP_RAR_VAL_TOKEN;
2924 rar_addr[1] = rar_bus;
2926 end_function:
2927 dev_dbg(&sep->pdev->dev, "sep_rar_prepare_output_msg_handler end\n");
2928 return error;
2929 }
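/*
 * Layout written above (the same token/value convention the static
 * pool handler uses): word 0 carries SEP_RAR_VAL_TOKEN so the SEP can
 * validate the slot, and word 1 carries the 32-bit RAR bus address.
 */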
2932 * sep_realloc_ext_cache_handler - report location of extcache
2933 * @sep: pointer to struct sep_device
2934 * @arg: pointer to user parameters
2936 * This function tells the SEP where the extapp is located
2938 static int sep_realloc_ext_cache_handler(struct sep_device *sep,
2939 unsigned long arg)
2941 /* Holds the new ext cache address in the system memory offset */
2942 u32 *system_addr;
2944 /* Set value in the SYSTEM MEMORY offset */
2945 system_addr = (u32 *)(sep->shared_addr +
2946 SEP_DRIVER_SYSTEM_EXT_CACHE_ADDR_OFFSET_IN_BYTES);
2948 /* Copy the physical address to the System Area for the SEP */
2949 system_addr[0] = SEP_EXT_CACHE_ADDR_VAL_TOKEN;
2950 dev_dbg(&sep->pdev->dev, "ext cache init; system addr 0 is %x\n",
2951 system_addr[0]);
2952 system_addr[1] = sep->extapp_bus;
2953 dev_dbg(&sep->pdev->dev, "ext cache init; system addr 1 is %x\n",
2954 system_addr[1]);
2956 return 0;
2960 * sep_ioctl - ioctl api
2961 * @filp: pointer to struct file
2962 * @cmd: command
2963 * @arg: pointer to argument structure
2965 * Implement the ioctl methods available on the SEP device.
2967 static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2969 int error = 0;
2970 struct sep_device *sep = filp->private_data;
2972 dev_dbg(&sep->pdev->dev, "ioctl start\n");
2974 dev_dbg(&sep->pdev->dev, "cmd is %x\n", cmd);
2975 dev_dbg(&sep->pdev->dev,
2976 "SEP_IOCSENDSEPCOMMAND is %x\n", SEP_IOCSENDSEPCOMMAND);
2977 dev_dbg(&sep->pdev->dev,
2978 "SEP_IOCALLOCDATAPOLL is %x\n", SEP_IOCALLOCDATAPOLL);
2979 dev_dbg(&sep->pdev->dev,
2980 "SEP_IOCCREATESYMDMATABLE is %x\n", SEP_IOCCREATESYMDMATABLE);
2981 dev_dbg(&sep->pdev->dev,
2982 "SEP_IOCFREEDMATABLEDATA is %x\n", SEP_IOCFREEDMATABLEDATA);
2983 dev_dbg(&sep->pdev->dev,
2984 "SEP_IOCSEPSTART is %x\n", SEP_IOCSEPSTART);
2985 dev_dbg(&sep->pdev->dev,
2986 "SEP_IOCSEPINIT is %x\n", SEP_IOCSEPINIT);
2987 dev_dbg(&sep->pdev->dev,
2988 "SEP_IOCGETSTATICPOOLADDR is %x\n", SEP_IOCGETSTATICPOOLADDR);
2989 dev_dbg(&sep->pdev->dev,
2990 "SEP_IOCENDTRANSACTION is %x\n", SEP_IOCENDTRANSACTION);
2991 dev_dbg(&sep->pdev->dev,
2992 "SEP_IOCREALLOCEXTCACHE is %x\n", SEP_IOCREALLOCEXTCACHE);
2993 dev_dbg(&sep->pdev->dev,
2994 "SEP_IOCRARPREPAREMESSAGE is %x\n", SEP_IOCRARPREPAREMESSAGE);
2995 dev_dbg(&sep->pdev->dev,
2996 "SEP_IOCPREPAREDCB is %x\n", SEP_IOCPREPAREDCB);
2997 dev_dbg(&sep->pdev->dev,
2998 "SEP_IOCFREEDCB is %x\n", SEP_IOCFREEDCB);
3000 /* Make sure we own this device */
3001 mutex_lock(&sep->sep_mutex);
3002 if ((current->pid != sep->pid_doing_transaction) &&
3003 (sep->pid_doing_transaction != 0)) {
3004 dev_dbg(&sep->pdev->dev, "ioctl pid is not owner\n");
3005 mutex_unlock(&sep->sep_mutex);
3006 error = -EACCES;
3007 goto end_function;
3010 mutex_unlock(&sep->sep_mutex);
3012 /* Check that the command is for SEP device */
3013 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
3014 error = -ENOTTY;
3015 goto end_function;
3018 /* Lock to prevent the daemon to interfere with operation */
3019 mutex_lock(&sep->ioctl_mutex);
3021 switch (cmd) {
3022 case SEP_IOCSENDSEPCOMMAND:
3023 /* Send command to SEP */
3024 error = sep_send_command_handler(sep);
3025 break;
3026 case SEP_IOCALLOCDATAPOLL:
3027 /* Allocate data pool */
3028 error = sep_allocate_data_pool_memory_handler(sep, arg);
3029 break;
3030 case SEP_IOCCREATESYMDMATABLE:
3031 /* Create DMA table for synchronic operation */
3032 error = sep_create_sync_dma_tables_handler(sep, arg);
3033 break;
3034 case SEP_IOCFREEDMATABLEDATA:
3035 /* Free the pages */
3036 error = sep_free_dma_table_data_handler(sep);
3037 break;
3038 case SEP_IOCSEPSTART:
3039 /* Start command to SEP */
3040 if (sep->pdev->revision == 0) /* Only for old chip */
3041 error = sep_start_handler(sep);
3042 else
3043 error = -EPERM; /* Not permitted on new chip */
3044 break;
3045 case SEP_IOCSEPINIT:
3046 /* Init command to SEP */
3047 if (sep->pdev->revision == 0) /* Only for old chip */
3048 error = sep_init_handler(sep, arg);
3049 else
3050 error = -EPERM; /* Not permitted on new chip */
3051 break;
3052 case SEP_IOCGETSTATICPOOLADDR:
3053 /* Get the physical and virtual addresses of the static pool */
3054 error = sep_get_static_pool_addr_handler(sep);
3055 break;
3056 case SEP_IOCENDTRANSACTION:
3057 error = sep_end_transaction_handler(sep);
3058 break;
3059 case SEP_IOCREALLOCEXTCACHE:
3060 if (sep->mrst)
3061 error = -ENODEV;
3062 else if (sep->pdev->revision == 0) /* Only for old chip */
3063 error = sep_realloc_ext_cache_handler(sep, arg);
3064 else
3065 error = -EPERM; /* Not permitted on new chip */
3066 break;
3067 case SEP_IOCRARPREPAREMESSAGE:
3068 error = sep_rar_prepare_output_msg_handler(sep, arg);
3069 break;
3070 case SEP_IOCPREPAREDCB:
3071 error = sep_prepare_dcb_handler(sep, arg);
3072 break;
3073 case SEP_IOCFREEDCB:
3074 error = sep_free_dcb_handler(sep);
3075 break;
3076 default:
3077 dev_dbg(&sep->pdev->dev, "invalid ioctl %x\n", cmd);
3078 error = -ENOTTY;
3079 break;
3081 mutex_unlock(&sep->ioctl_mutex);
3083 end_function:
3084 dev_dbg(&sep->pdev->dev, "ioctl end\n");
3085 return error;
3086 }
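/*
 * Illustrative user-space call sequence (a sketch only; the device
 * node path is assumed and error handling is omitted):
 *
 *	int fd = open("/dev/sep", O_RDWR);
 *	struct build_dcb_struct dcb_args = { ... };
 *	ioctl(fd, SEP_IOCPREPAREDCB, &dcb_args);
 *	ioctl(fd, SEP_IOCSENDSEPCOMMAND, 0);
 *	ioctl(fd, SEP_IOCENDTRANSACTION, 0);
 *	close(fd);
 */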
3089 * sep_singleton_ioctl - ioctl api for singleton interface
3090 * @filp: pointer to struct file
3091 * @cmd: command
3092 * @arg: pointer to argument structure
3094 * Implement the additional ioctls for the singleton device
3096 static long sep_singleton_ioctl(struct file *filp, u32 cmd, unsigned long arg)
3098 long error = 0;
3099 struct sep_device *sep = filp->private_data;
3101 dev_dbg(&sep->pdev->dev, "singleton_ioctl start\n");
3102 dev_dbg(&sep->pdev->dev, "cmd is %x\n", cmd);
3104 /* Check that the command is for the SEP device */
3105 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
3106 error = -ENOTTY;
3107 goto end_function;
3110 /* Make sure we own this device */
3111 mutex_lock(&sep->sep_mutex);
3112 if ((current->pid != sep->pid_doing_transaction) &&
3113 (sep->pid_doing_transaction != 0)) {
3114 dev_dbg(&sep->pdev->dev, "singleton ioctl pid is not owner\n");
3115 mutex_unlock(&sep->sep_mutex);
3116 error = -EACCES;
3117 goto end_function;
3120 mutex_unlock(&sep->sep_mutex);
3122 switch (cmd) {
3123 case SEP_IOCTLSETCALLERID:
3124 mutex_lock(&sep->ioctl_mutex);
3125 error = sep_set_caller_id_handler(sep, arg);
3126 mutex_unlock(&sep->ioctl_mutex);
3127 break;
3128 default:
3129 error = sep_ioctl(filp, cmd, arg);
3130 break;
3133 end_function:
3134 dev_dbg(&sep->pdev->dev, "singleton ioctl end\n");
3135 return error;
3139 * sep_request_daemon_ioctl - ioctl for daemon
3140 * @filp: pointer to struct file
3141 * @cmd: command
3142 * @arg: pointer to argument structure
3144 * Called by the request daemon to perform ioctls on the daemon device
3146 static long sep_request_daemon_ioctl(struct file *filp, u32 cmd,
3147 unsigned long arg)
3150 long error;
3151 struct sep_device *sep = filp->private_data;
3153 dev_dbg(&sep->pdev->dev, "daemon ioctl: start\n");
3154 dev_dbg(&sep->pdev->dev, "daemon ioctl: cmd is %x\n", cmd);
3156 /* Check that the command is for SEP device */
3157 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
3158 error = -ENOTTY;
3159 goto end_function;
3162 /* Only one process can access ioctl at any given time */
3163 mutex_lock(&sep->ioctl_mutex);
3165 switch (cmd) {
3166 case SEP_IOCSENDSEPRPLYCOMMAND:
3167 /* Send reply command to SEP */
3168 error = sep_req_daemon_send_reply_command_handler(sep);
3169 break;
3170 case SEP_IOCENDTRANSACTION:
3171 /*
3172 * End req daemon transaction; do nothing.
3173 * Will be removed upon update in middleware
3174 * API library
3175 */
3176 error = 0;
3177 break;
3178 default:
3179 dev_dbg(&sep->pdev->dev, "daemon ioctl: no such IOCTL\n");
3180 error = -ENOTTY;
3182 mutex_unlock(&sep->ioctl_mutex);
3184 end_function:
3185 dev_dbg(&sep->pdev->dev, "daemon ioctl: end\n");
3186 return error;
3191 * sep_inthandler - interrupt handler
3192 * @irq: interrupt
3193 * @dev_id: device id
3195 static irqreturn_t sep_inthandler(int irq, void *dev_id)
3197 irqreturn_t int_error = IRQ_HANDLED;
3198 unsigned long lck_flags;
3199 u32 reg_val, reg_val2 = 0;
3200 struct sep_device *sep = dev_id;
3202 /* Read the IRR register to check if this is SEP interrupt */
3203 reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
3204 dev_dbg(&sep->pdev->dev, "SEP Interrupt - reg is %08x\n", reg_val);
3206 if (reg_val & (0x1 << 13)) {
3207 /* Lock and update the counter of reply messages */
3208 spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
3209 sep->reply_ct++;
3210 spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
3212 dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
3213 sep->send_ct, sep->reply_ct);
3215 /* Is this printf or daemon request? */
3216 reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
3217 dev_dbg(&sep->pdev->dev,
3218 "SEP Interrupt - reg2 is %08x\n", reg_val2);
3220 if ((reg_val2 >> 30) & 0x1) {
3221 dev_dbg(&sep->pdev->dev, "int: printf request\n");
3222 wake_up(&sep->event_request_daemon);
3223 } else if (reg_val2 >> 31) {
3224 dev_dbg(&sep->pdev->dev, "int: daemon request\n");
3225 wake_up(&sep->event_request_daemon);
3226 } else {
3227 dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
3228 wake_up(&sep->event);
3230 } else {
3231 dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
3232 int_error = IRQ_NONE;
3233 }
3234 if (int_error == IRQ_HANDLED)
3235 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
3237 return int_error;
3238 }
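/*
 * Decode used by the handler above: IRR bit 13 marks a SEP-to-host
 * interrupt; within GPR2, bit 30 flags a printf request and bit 31 a
 * daemon request, while anything else is a reply to a pending
 * command, so the matching wait queue is woken.
 */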
3241 * sep_callback - RAR callback
3242 * @sep_context_pointer: pointer to struct sep_device
3244 * Function that is called by rar_register when it is ready with
3245 * a region (only for Moorestown)
3247 static int sep_callback(unsigned long sep_context_pointer)
3249 int error;
3250 struct sep_device *sep = (struct sep_device *)sep_context_pointer;
3251 dma_addr_t rar_end_address;
3253 dev_dbg(&sep->pdev->dev, "callback start\n");
3255 error = rar_get_address(RAR_TYPE_IMAGE, &sep->rar_bus,
3256 &rar_end_address);
3258 if (error) {
3259 dev_warn(&sep->pdev->dev, "mrst can't get rar region\n");
3260 goto end_function;
3263 sep->rar_size = (size_t)(rar_end_address - sep->rar_bus + 1);
3265 if (!request_mem_region(sep->rar_bus, sep->rar_size,
3266 "sep_sec_driver")) {
3267 dev_warn(&sep->pdev->dev,
3268 "request mem region for mrst failed\n");
3269 error = -EBUSY;
3270 goto end_function;
3273 sep->rar_addr = ioremap_nocache(sep->rar_bus, sep->rar_size);
3274 if (!sep->rar_addr) {
3275 dev_warn(&sep->pdev->dev,
3276 "ioremap nocache for mrst rar failed\n");
3277 error = -ENOMEM;
3278 goto end_function;
3280 dev_dbg(&sep->pdev->dev, "rar start is %p, phy is %llx, size is %zx\n",
3281 sep->rar_addr, (unsigned long long)sep->rar_bus,
3282 sep->rar_size);
3284 end_function:
3285 dev_dbg(&sep->pdev->dev, "callback end\n");
3286 return error;
3290 * sep_reconfig_shared_area - reconfigure shared area
3291 * @sep: pointer to struct sep_device
3293 * Reconfig the shared area between HOST and SEP - needed in case
3294 * the DX_CC_Init function was called before OS loading.
3296 static int sep_reconfig_shared_area(struct sep_device *sep)
3298 int ret_val;
3300 dev_dbg(&sep->pdev->dev, "reconfig shared area start\n");
3302 /* Send the new SHARED MESSAGE AREA to the SEP */
3303 dev_dbg(&sep->pdev->dev, "sending %08llx to sep\n",
3304 (unsigned long long)sep->shared_bus);
3306 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
3308 /* Poll for SEP response */
3309 ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
3311 while (ret_val != 0xffffffff && ret_val != sep->shared_bus)
3312 ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
3314 /* Check the return value (register) */
3315 if (ret_val != sep->shared_bus) {
3316 dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
3317 dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
3318 ret_val = -ENOMEM;
3319 } else
3320 ret_val = 0;
3322 dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");
3323 return ret_val;
3326 /* File operation for singleton SEP operations */
3327 static const struct file_operations singleton_file_operations = {
3328 .owner = THIS_MODULE,
3329 .unlocked_ioctl = sep_singleton_ioctl,
3330 .poll = sep_poll,
3331 .open = sep_singleton_open,
3332 .release = sep_singleton_release,
3333 .mmap = sep_mmap,
3334 };
3336 /* File operation for daemon operations */
3337 static const struct file_operations daemon_file_operations = {
3338 .owner = THIS_MODULE,
3339 .unlocked_ioctl = sep_request_daemon_ioctl,
3340 .poll = sep_request_daemon_poll,
3341 .open = sep_request_daemon_open,
3342 .release = sep_request_daemon_release,
3343 .mmap = sep_request_daemon_mmap,
3344 };
3346 /* The files operations structure of the driver */
3347 static const struct file_operations sep_file_operations = {
3348 .owner = THIS_MODULE,
3349 .unlocked_ioctl = sep_ioctl,
3350 .poll = sep_poll,
3351 .open = sep_open,
3352 .release = sep_release,
3353 .mmap = sep_mmap,
3354 };
3357 * sep_register_driver_with_fs - register misc devices
3358 * @sep: pointer to struct sep_device
3360 * This function registers the driver with the file system
3362 static int sep_register_driver_with_fs(struct sep_device *sep)
3364 int ret_val;
3366 sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR;
3367 sep->miscdev_sep.name = SEP_DEV_NAME;
3368 sep->miscdev_sep.fops = &sep_file_operations;
3370 sep->miscdev_singleton.minor = MISC_DYNAMIC_MINOR;
3371 sep->miscdev_singleton.name = SEP_DEV_SINGLETON;
3372 sep->miscdev_singleton.fops = &singleton_file_operations;
3374 sep->miscdev_daemon.minor = MISC_DYNAMIC_MINOR;
3375 sep->miscdev_daemon.name = SEP_DEV_DAEMON;
3376 sep->miscdev_daemon.fops = &daemon_file_operations;
3378 ret_val = misc_register(&sep->miscdev_sep);
3379 if (ret_val) {
3380 dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n",
3381 ret_val);
3382 return ret_val;
3385 ret_val = misc_register(&sep->miscdev_singleton);
3386 if (ret_val) {
3387 dev_warn(&sep->pdev->dev, "misc reg fails for sing %x\n",
3388 ret_val);
3389 misc_deregister(&sep->miscdev_sep);
3390 return ret_val;
3393 if (!sep->mrst) {
3394 ret_val = misc_register(&sep->miscdev_daemon);
3395 if (ret_val) {
3396 dev_warn(&sep->pdev->dev, "misc reg fails for dmn %x\n",
3397 ret_val);
3398 misc_deregister(&sep->miscdev_sep);
3399 misc_deregister(&sep->miscdev_singleton);
3401 return ret_val;
3404 return ret_val;
3409 * sep_probe - probe a matching PCI device
3410 * @pdev: pci_device
3411 * @ent: pci_device_id
3413 * Attempt to set up and configure a SEP device that has been
3414 * discovered by the PCI layer.
3416 static int __devinit sep_probe(struct pci_dev *pdev,
3417 const struct pci_device_id *ent)
3419 int error = 0;
3420 struct sep_device *sep;
3422 pr_debug("SEP pci probe starting\n");
3423 if (sep_dev != NULL) {
3424 dev_warn(&pdev->dev, "only one SEP supported.\n");
3425 return -EBUSY;
3428 /* Enable the device */
3429 error = pci_enable_device(pdev);
3430 if (error) {
3431 dev_warn(&pdev->dev, "error enabling pci device\n");
3432 goto end_function;
3435 /* Allocate the sep_device structure for this device */
3436 sep_dev = kzalloc(sizeof(struct sep_device), GFP_KERNEL);
3437 if (sep_dev == NULL) {
3438 dev_warn(&pdev->dev,
3439 "can't kmalloc the sep_device structure\n");
3440 error = -ENOMEM;
3441 goto end_function_disable_device;
3445 * We're going to use another variable for actually
3446 * working with the device; this way, if we have
3447 * multiple devices in the future, it would be easier
3448 * to make appropriate changes
3450 sep = sep_dev;
3452 sep->pdev = pci_dev_get(pdev);
3454 init_waitqueue_head(&sep->event);
3455 init_waitqueue_head(&sep->event_request_daemon);
3456 spin_lock_init(&sep->snd_rply_lck);
3457 mutex_init(&sep->sep_mutex);
3458 mutex_init(&sep->ioctl_mutex);
3460 if (pdev->device == MRST_PCI_DEVICE_ID)
3461 sep->mrst = 1;
3463 dev_dbg(&sep->pdev->dev, "PCI obtained, device being prepared\n");
3464 dev_dbg(&sep->pdev->dev, "revision is %d\n", sep->pdev->revision);
3466 /* Set up our register area */
3467 sep->reg_physical_addr = pci_resource_start(sep->pdev, 0);
3468 if (!sep->reg_physical_addr) {
3469 dev_warn(&sep->pdev->dev, "Error getting register start\n");
3470 error = -ENODEV;
3471 goto end_function_free_sep_dev;
3474 sep->reg_physical_end = pci_resource_end(sep->pdev, 0);
3475 if (!sep->reg_physical_end) {
3476 dev_warn(&sep->pdev->dev, "Error getting register end\n");
3477 error = -ENODEV;
3478 goto end_function_free_sep_dev;
3481 sep->reg_addr = ioremap_nocache(sep->reg_physical_addr,
3482 (size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1));
3483 if (!sep->reg_addr) {
3484 dev_warn(&sep->pdev->dev, "Error getting register virtual\n");
3485 error = -ENODEV;
3486 goto end_function_free_sep_dev;
3489 dev_dbg(&sep->pdev->dev,
3490 "Register area start %llx end %llx virtual %p\n",
3491 (unsigned long long)sep->reg_physical_addr,
3492 (unsigned long long)sep->reg_physical_end,
3493 sep->reg_addr);
3495 /* Allocate the shared area */
3496 sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
3497 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES +
3498 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
3499 SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
3500 SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
3502 if (sep_map_and_alloc_shared_area(sep)) {
3503 error = -ENOMEM;
3504 /* Allocation failed */
3505 goto end_function_error;
3508 /* The next section depends on type of unit */
3509 if (sep->mrst) {
3510 error = register_rar(RAR_TYPE_IMAGE, &sep_callback,
3511 (unsigned long)sep);
3512 if (error) {
3513 dev_dbg(&sep->pdev->dev,
3514 "error register_rar\n");
3515 goto end_function_deallocate_sep_shared_area;
3517 } else {
3518 sep->rar_size = FAKE_RAR_SIZE;
3519 sep->rar_addr = dma_alloc_coherent(NULL,
3520 sep->rar_size, &sep->rar_bus, GFP_KERNEL);
3521 if (sep->rar_addr == NULL) {
3522 dev_warn(&sep->pdev->dev, "can't allocate mfld rar\n");
3523 error = -ENOMEM;
3524 goto end_function_deallocate_sep_shared_area;
3527 dev_dbg(&sep->pdev->dev, "rar start is %p, phy is %llx,"
3528 " size is %zx\n", sep->rar_addr,
3529 (unsigned long long)sep->rar_bus,
3530 sep->rar_size);
3533 dev_dbg(&sep->pdev->dev, "about to write IMR and ICR REG_ADDR\n");
3535 /* Clear ICR register */
3536 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
3538 /* Set the IMR register - open only GPR 2 */
3539 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
3541 dev_dbg(&sep->pdev->dev, "about to call request_irq\n");
3542 /* Get the interrupt line */
3543 error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED,
3544 "sep_driver", sep);
3546 if (error)
3547 goto end_function_dealloc_rar;
3549 /* The new chip requires a shared area reconfigure */
3550 if (sep->pdev->revision == 4) { /* Only for new chip */
3551 error = sep_reconfig_shared_area(sep);
3552 if (error)
3553 goto end_function_free_irq;
3554 }
3555 /* Finally magic up the device nodes */
3556 /* Register driver with the fs */
3557 error = sep_register_driver_with_fs(sep);
3558 if (error == 0)
3559 /* Success */
3560 return 0;
3562 end_function_free_irq:
3563 free_irq(pdev->irq, sep);
3565 end_function_dealloc_rar:
3566 if (sep->rar_addr)
3567 dma_free_coherent(&sep->pdev->dev, sep->rar_size,
3568 sep->rar_addr, sep->rar_bus);
3569 goto end_function;
3571 end_function_deallocate_sep_shared_area:
3572 /* De-allocate shared area */
3573 sep_unmap_and_free_shared_area(sep);
3575 end_function_error:
3576 iounmap(sep->reg_addr);
3578 end_function_free_sep_dev:
3579 pci_dev_put(sep_dev->pdev);
3580 kfree(sep_dev);
3581 sep_dev = NULL;
3583 end_function_disable_device:
3584 pci_disable_device(pdev);
3586 end_function:
3587 return error;
3588 }
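/*
 * Note on the unwind above: the later labels fall through so each
 * failure point releases everything acquired after the resources
 * below it (shared area, register mapping, sep_device allocation,
 * PCI enable), while the irq/RAR failure path frees the RAR buffer
 * and then jumps directly to end_function.
 */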
3590 static void sep_remove(struct pci_dev *pdev)
3592 struct sep_device *sep = sep_dev;
3594 /* Unregister from fs */
3595 misc_deregister(&sep->miscdev_sep);
3596 misc_deregister(&sep->miscdev_singleton);
3597 misc_deregister(&sep->miscdev_daemon);
3599 /* Free the irq */
3600 free_irq(sep->pdev->irq, sep);
3602 /* Free the shared area */
3603 sep_unmap_and_free_shared_area(sep_dev);
3604 iounmap((void *) sep_dev->reg_addr);
3605 }
3607 static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = {
3608 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MRST_PCI_DEVICE_ID)},
3609 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MFLD_PCI_DEVICE_ID)},
3610 {0}
3611 };
3613 MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
3615 /* Field for registering driver to PCI device */
3616 static struct pci_driver sep_pci_driver = {
3617 .name = "sep_sec_driver",
3618 .id_table = sep_pci_id_tbl,
3619 .probe = sep_probe,
3620 .remove = sep_remove
3621 };
3625 * sep_init - init function
3627 * Module load time. Register the PCI device driver.
3629 static int __init sep_init(void)
3631 return pci_register_driver(&sep_pci_driver);
3636 * sep_exit - called to unload driver
3638 * Drop the misc devices then remove and unmap the various resources
3639 * that are not released by the driver remove method.
3641 static void __exit sep_exit(void)
3643 pci_unregister_driver(&sep_pci_driver);
3647 module_init(sep_init);
3648 module_exit(sep_exit);
3650 MODULE_LICENSE("GPL");