Staging: sep: cant is an angular inclination
[linux-2.6/linux-acpi-2.6/ibm-acpi-2.6.git] / drivers / staging / sep / sep_driver.c
blob ebe7a90aec0ec5634aa03f503af841df49d9f18e
/*
 *
 *  sep_driver.c - Security Processor Driver main group of functions
 *
 *  Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
 *  Contributions(c) 2009,2010 Discretix. All rights reserved.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the Free
 *  Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 *  more details.
 *
 *  You should have received a copy of the GNU General Public License along with
 *  this program; if not, write to the Free Software Foundation, Inc., 59
 *  Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 *  CONTACTS:
 *
 *  Mark Allyn		mark.a.allyn@intel.com
 *  Jayant Mangalampalli jayant.mangalampalli@intel.com
 *
 *  CHANGES:
 *
 *  2009.06.26	Initial publish
 *  2010.09.14  Upgrade to Medfield
 *
 */
#define DEBUG
#include <linux/init.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/kdev_t.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#include <asm/current.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/rar_register.h>

#include "../memrar/memrar.h"

#include "sep_driver_hw_defs.h"
#include "sep_driver_config.h"
#include "sep_driver_api.h"
#include "sep_dev.h"
/*----------------------------------------
	DEFINES
-----------------------------------------*/

#define SEP_RAR_IO_MEM_REGION_SIZE 0x40000

/*--------------------------------------------
	GLOBAL variables
--------------------------------------------*/

/* Keep this a single static object for now to keep the conversion easy */

static struct sep_device *sep_dev;
/**
 * sep_load_firmware - copy firmware cache/resident
 * @sep: pointer to struct sep_device we are loading
 *
 * This function copies the cache and resident from their source
 * location into destination shared memory.
 */
static int sep_load_firmware(struct sep_device *sep)
{
	const struct firmware *fw;
	char *cache_name = "cache.image.bin";
	char *res_name = "resident.image.bin";
	char *extapp_name = "extapp.image.bin";
	int error;
	unsigned int work1, work2, work3;

	/* Set addresses and load resident */
	sep->resident_bus = sep->rar_bus;
	sep->resident_addr = sep->rar_addr;

	error = request_firmware(&fw, res_name, &sep->pdev->dev);
	if (error) {
		dev_warn(&sep->pdev->dev, "can't request resident fw\n");
		return error;
	}

	memcpy(sep->resident_addr, (void *)fw->data, fw->size);
	sep->resident_size = fw->size;
	release_firmware(fw);

	dev_dbg(&sep->pdev->dev, "resident virtual is %p\n",
		sep->resident_addr);
	dev_dbg(&sep->pdev->dev, "resident bus is %lx\n",
		(unsigned long)sep->resident_bus);
	dev_dbg(&sep->pdev->dev, "resident size is %08x\n",
		sep->resident_size);

	/* Set addresses for dcache (no loading needed) */
	work1 = (unsigned int)sep->resident_bus;
	work2 = (unsigned int)sep->resident_size;
	work3 = (work1 + work2 + (1024 * 4)) & 0xfffff000;
	sep->dcache_bus = (dma_addr_t)work3;

	work1 = (unsigned int)sep->resident_addr;
	work2 = (unsigned int)sep->resident_size;
	work3 = (work1 + work2 + (1024 * 4)) & 0xfffff000;
	sep->dcache_addr = (void *)work3;

	sep->dcache_size = 1024 * 128;

	/* Set addresses and load cache */
	sep->cache_bus = sep->dcache_bus + sep->dcache_size;
	sep->cache_addr = sep->dcache_addr + sep->dcache_size;

	error = request_firmware(&fw, cache_name, &sep->pdev->dev);
	if (error) {
		dev_warn(&sep->pdev->dev, "Unable to request cache firmware\n");
		return error;
	}

	memcpy(sep->cache_addr, (void *)fw->data, fw->size);
	sep->cache_size = fw->size;
	release_firmware(fw);

	dev_dbg(&sep->pdev->dev, "cache virtual is %p\n",
		sep->cache_addr);
	dev_dbg(&sep->pdev->dev, "cache bus is %08lx\n",
		(unsigned long)sep->cache_bus);
	dev_dbg(&sep->pdev->dev, "cache size is %08x\n",
		sep->cache_size);

	/* Set addresses and load extapp */
	sep->extapp_bus = sep->cache_bus + (1024 * 370);
	sep->extapp_addr = sep->cache_addr + (1024 * 370);

	error = request_firmware(&fw, extapp_name, &sep->pdev->dev);
	if (error) {
		dev_warn(&sep->pdev->dev, "Unable to request extapp firmware\n");
		return error;
	}

	memcpy(sep->extapp_addr, (void *)fw->data, fw->size);
	sep->extapp_size = fw->size;
	release_firmware(fw);

	dev_dbg(&sep->pdev->dev, "extapp virtual is %p\n",
		sep->extapp_addr);
	dev_dbg(&sep->pdev->dev, "extapp bus is %08llx\n",
		(unsigned long long)sep->extapp_bus);
	dev_dbg(&sep->pdev->dev, "extapp size is %08x\n",
		sep->extapp_size);

	return error;
}
MODULE_FIRMWARE("sep/cache.image.bin");
MODULE_FIRMWARE("sep/resident.image.bin");
MODULE_FIRMWARE("sep/extapp.image.bin");
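/*
 * Illustrative note (not from the original source): the image names
 * requested in sep_load_firmware() are resolved by the standard kernel
 * firmware loader, so the files are expected under the firmware search
 * path, typically /lib/firmware/.
 */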
/**
 * sep_dump_message - dump the message that is pending
 * @sep: SEP device
 */
static void sep_dump_message(struct sep_device *sep)
{
	int count;
	u32 *p = sep->shared_addr;

	for (count = 0; count < 12 * 4; count += 4)
		dev_dbg(&sep->pdev->dev, "Word %d of the message is %x\n",
			count, *p++);
}
/**
 * sep_map_and_alloc_shared_area - allocate shared block
 * @sep: security processor
 * @size: size of shared area
 */
static int sep_map_and_alloc_shared_area(struct sep_device *sep)
{
	sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
		sep->shared_size,
		&sep->shared_bus, GFP_KERNEL);

	if (!sep->shared_addr) {
		dev_warn(&sep->pdev->dev,
			"shared memory dma_alloc_coherent failed\n");
		return -ENOMEM;
	}
	dev_dbg(&sep->pdev->dev,
		"shared_addr %x bytes @%p (bus %llx)\n",
		sep->shared_size, sep->shared_addr,
		(unsigned long long)sep->shared_bus);
	return 0;
}
/**
 * sep_unmap_and_free_shared_area - free shared block
 * @sep: security processor
 */
static void sep_unmap_and_free_shared_area(struct sep_device *sep)
{
	dev_dbg(&sep->pdev->dev, "shared area unmap and free\n");
	dma_free_coherent(&sep->pdev->dev, sep->shared_size,
		sep->shared_addr, sep->shared_bus);
}
/**
 * sep_shared_bus_to_virt - convert bus/virt addresses
 * @sep: pointer to struct sep_device
 * @bus_address: address to convert
 *
 * Returns virtual address inside the shared area according
 * to the bus address.
 */
static void *sep_shared_bus_to_virt(struct sep_device *sep,
	dma_addr_t bus_address)
{
	return sep->shared_addr + (bus_address - sep->shared_bus);
}
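/*
 * Example (illustrative, made-up numbers): if the shared area was
 * allocated with shared_bus == 0x10000000 and shared_addr == 0xf0000000,
 * a SEP-supplied bus address of 0x10003000 converts to the kernel
 * virtual address 0xf0003000, i.e. shared_addr + 0x3000.
 */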
/**
 * sep_singleton_open - open function for the singleton driver
 * @inode_ptr: struct inode *
 * @file_ptr: struct file *
 *
 * Called when the user opens the singleton device interface
 */
static int sep_singleton_open(struct inode *inode_ptr, struct file *file_ptr)
{
	int error = 0;
	struct sep_device *sep;

	/*
	 * Get the SEP device structure and use it for the
	 * private_data field in filp for other methods
	 */
	sep = sep_dev;

	file_ptr->private_data = sep;

	dev_dbg(&sep->pdev->dev, "Singleton open for pid %d\n",
		current->pid);

	dev_dbg(&sep->pdev->dev, "calling test and set for singleton 0\n");
	if (test_and_set_bit(0, &sep->singleton_access_flag)) {
		error = -EBUSY;
		goto end_function;
	}

	dev_dbg(&sep->pdev->dev, "sep_singleton_open end\n");
end_function:
	return error;
}
/**
 * sep_open - device open method
 * @inode: inode of SEP device
 * @filp: file handle to SEP device
 *
 * Open method for the SEP device. Called when userspace opens
 * the SEP device node.
 *
 * Returns zero on success otherwise an error code.
 */
static int sep_open(struct inode *inode, struct file *filp)
{
	struct sep_device *sep;

	/*
	 * Get the SEP device structure and use it for the
	 * private_data field in filp for other methods
	 */
	sep = sep_dev;
	filp->private_data = sep;

	dev_dbg(&sep->pdev->dev, "Open for pid %d\n", current->pid);

	/* Anyone can open; locking takes place at transaction level */
	return 0;
}
/**
 * sep_singleton_release - close a SEP singleton device
 * @inode: inode of SEP device
 * @filp: file handle being closed
 *
 * Called on the final close of a SEP device. As the open protects against
 * multiple simultaneous opens that means this method is called when the
 * final reference to the open handle is dropped.
 */
static int sep_singleton_release(struct inode *inode, struct file *filp)
{
	struct sep_device *sep = filp->private_data;

	dev_dbg(&sep->pdev->dev, "Singleton release for pid %d\n",
		current->pid);
	clear_bit(0, &sep->singleton_access_flag);
	return 0;
}
/**
 * sep_request_daemon_open - request daemon open method
 * @inode: inode of SEP device
 * @filp: file handle to SEP device
 *
 * Open method for the SEP request daemon. Called when the
 * request daemon in userspace opens the SEP device node.
 *
 * Returns zero on success otherwise an error code.
 */
static int sep_request_daemon_open(struct inode *inode, struct file *filp)
{
	struct sep_device *sep = sep_dev;
	int error = 0;

	filp->private_data = sep;

	dev_dbg(&sep->pdev->dev, "Request daemon open for pid %d\n",
		current->pid);

	/* There is supposed to be only one request daemon */
	dev_dbg(&sep->pdev->dev, "calling test and set for req_dmon open 0\n");
	if (test_and_set_bit(0, &sep->request_daemon_open))
		error = -EBUSY;
	return error;
}
/**
 * sep_request_daemon_release - close a SEP daemon
 * @inode: inode of SEP device
 * @filp: file handle being closed
 *
 * Called on the final close of a SEP daemon.
 */
static int sep_request_daemon_release(struct inode *inode, struct file *filp)
{
	struct sep_device *sep = filp->private_data;

	dev_dbg(&sep->pdev->dev, "Request daemon release for pid %d\n",
		current->pid);

	/* Clear the request_daemon_open flag */
	clear_bit(0, &sep->request_daemon_open);
	return 0;
}
/**
 * sep_req_daemon_send_reply_command_handler - poke the SEP
 * @sep: struct sep_device *
 *
 * This function raises an interrupt to the SEP that signals that it has
 * a new command from the host
 */
static int sep_req_daemon_send_reply_command_handler(struct sep_device *sep)
{
	unsigned long lck_flags;

	dev_dbg(&sep->pdev->dev,
		"sep_req_daemon_send_reply_command_handler start\n");

	sep_dump_message(sep);

	/* Counters are lockable region */
	spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
	sep->send_ct++;
	sep->reply_ct++;

	/* Send the interrupt to SEP */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR,
		sep->send_ct);

	sep->send_ct++;

	spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);

	dev_dbg(&sep->pdev->dev,
		"sep_req_daemon_send_reply send_ct %lx reply_ct %lx\n",
		sep->send_ct, sep->reply_ct);

	dev_dbg(&sep->pdev->dev,
		"sep_req_daemon_send_reply_command_handler end\n");

	return 0;
}
/**
 * sep_free_dma_table_data_handler - free DMA table
 * @sep: pointer to struct sep_device
 *
 * Handles the request to free DMA tables for synchronic actions
 */
static int sep_free_dma_table_data_handler(struct sep_device *sep)
{
	int count;
	int dcb_counter;
	/* Pointer to the current dma_resource struct */
	struct sep_dma_resource *dma;

	dev_dbg(&sep->pdev->dev, "sep_free_dma_table_data_handler start\n");

	for (dcb_counter = 0; dcb_counter < sep->nr_dcb_creat; dcb_counter++) {
		dma = &sep->dma_res_arr[dcb_counter];

		/* Unmap and free input map array */
		if (dma->in_map_array) {
			for (count = 0; count < dma->in_num_pages; count++) {
				dma_unmap_page(&sep->pdev->dev,
					dma->in_map_array[count].dma_addr,
					dma->in_map_array[count].size,
					DMA_TO_DEVICE);
			}
			kfree(dma->in_map_array);
		}

		/* Unmap and free output map array */
		if (dma->out_map_array) {
			for (count = 0; count < dma->out_num_pages; count++) {
				dma_unmap_page(&sep->pdev->dev,
					dma->out_map_array[count].dma_addr,
					dma->out_map_array[count].size,
					DMA_FROM_DEVICE);
			}
			kfree(dma->out_map_array);
		}

		/* Free page cache for input */
		if (dma->in_page_array) {
			for (count = 0; count < dma->in_num_pages; count++) {
				flush_dcache_page(dma->in_page_array[count]);
				page_cache_release(dma->in_page_array[count]);
			}
			kfree(dma->in_page_array);
		}

		if (dma->out_page_array) {
			for (count = 0; count < dma->out_num_pages; count++) {
				if (!PageReserved(dma->out_page_array[count]))
					SetPageDirty(dma->out_page_array[count]);
				flush_dcache_page(dma->out_page_array[count]);
				page_cache_release(dma->out_page_array[count]);
			}
			kfree(dma->out_page_array);
		}

		/* Reset all the values */
		dma->in_page_array = 0;
		dma->out_page_array = 0;
		dma->in_num_pages = 0;
		dma->out_num_pages = 0;
		dma->in_map_array = 0;
		dma->out_map_array = 0;
		dma->in_map_num_entries = 0;
		dma->out_map_num_entries = 0;
	}

	sep->nr_dcb_creat = 0;
	sep->num_lli_tables_created = 0;

	dev_dbg(&sep->pdev->dev, "sep_free_dma_table_data_handler end\n");
	return 0;
}
/**
 * sep_request_daemon_mmap - maps the shared area to user space
 * @filp: pointer to struct file
 * @vma: pointer to vm_area_struct
 *
 * Called by the kernel when the daemon attempts an mmap() syscall
 * using our handle.
 */
static int sep_request_daemon_mmap(struct file *filp,
	struct vm_area_struct *vma)
{
	struct sep_device *sep = filp->private_data;
	dma_addr_t bus_address;
	int error = 0;

	dev_dbg(&sep->pdev->dev, "daemon mmap start\n");

	if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
		error = -EINVAL;
		goto end_function;
	}

	/* Get physical address */
	bus_address = sep->shared_bus;

	dev_dbg(&sep->pdev->dev, "bus_address is %08lx\n",
		(unsigned long)bus_address);

	if (remap_pfn_range(vma, vma->vm_start, bus_address >> PAGE_SHIFT,
		vma->vm_end - vma->vm_start, vma->vm_page_prot)) {

		dev_warn(&sep->pdev->dev, "remap_pfn_range failed\n");
		error = -EAGAIN;
		goto end_function;
	}

end_function:
	dev_dbg(&sep->pdev->dev, "daemon mmap end\n");
	return error;
}
/**
 * sep_request_daemon_poll - poll implementation
 * @sep: struct sep_device * for current SEP device
 * @filp: struct file * for open file
 * @wait: poll_table * for poll
 *
 * Called when our device is part of a poll() or select() syscall
 */
static unsigned int sep_request_daemon_poll(struct file *filp,
	poll_table *wait)
{
	u32 mask = 0;
	/* GPR2 register */
	u32 retval2;
	unsigned long lck_flags;
	struct sep_device *sep = filp->private_data;

	dev_dbg(&sep->pdev->dev, "daemon poll: start\n");

	poll_wait(filp, &sep->event_request_daemon, wait);

	dev_dbg(&sep->pdev->dev, "daemon poll: send_ct is %lx reply ct is %lx\n",
		sep->send_ct, sep->reply_ct);

	spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
	/* Check if the data is ready */
	if (sep->send_ct == sep->reply_ct) {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);

		retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		dev_dbg(&sep->pdev->dev,
			"daemon poll: data check (GPR2) is %x\n", retval2);

		/* Check if PRINT request */
		if ((retval2 >> 30) & 0x1) {
			dev_dbg(&sep->pdev->dev, "daemon poll: PRINTF request in\n");
			mask |= POLLIN;
			goto end_function;
		}

		/* Check if NVS request */
		if (retval2 >> 31) {
			dev_dbg(&sep->pdev->dev, "daemon poll: NVS request in\n");
			mask |= POLLPRI | POLLWRNORM;
		}
	} else {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
		dev_dbg(&sep->pdev->dev,
			"daemon poll: no reply received; returning 0\n");
		mask = 0;
	}
end_function:
	dev_dbg(&sep->pdev->dev, "daemon poll: exit\n");
	return mask;
}
/**
 * sep_release - close a SEP device
 * @inode: inode of SEP device
 * @filp: file handle being closed
 *
 * Called on the final close of a SEP device.
 */
static int sep_release(struct inode *inode, struct file *filp)
{
	struct sep_device *sep = filp->private_data;

	dev_dbg(&sep->pdev->dev, "Release for pid %d\n", current->pid);

	mutex_lock(&sep->sep_mutex);
	/* Is this the process that has a transaction open?
	 * If so, let's reset pid_doing_transaction to 0 and
	 * clear the in use flags, and then wake up sep_event
	 * so that other processes can do transactions
	 */
	dev_dbg(&sep->pdev->dev, "waking up event and mmap_event\n");
	if (sep->pid_doing_transaction == current->pid) {
		clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
		clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags);
		sep_free_dma_table_data_handler(sep);
		wake_up(&sep->event);
		sep->pid_doing_transaction = 0;
	}

	mutex_unlock(&sep->sep_mutex);
	return 0;
}
/**
 * sep_mmap - maps the shared area to user space
 * @filp: pointer to struct file
 * @vma: pointer to vm_area_struct
 *
 * Called on an mmap of our space via the normal SEP device
 */
static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
{
	dma_addr_t bus_addr;
	struct sep_device *sep = filp->private_data;
	int error = 0;

	dev_dbg(&sep->pdev->dev, "mmap start\n");

	/* Set the transaction busy (own the device) */
	wait_event_interruptible(sep->event,
		test_and_set_bit(SEP_MMAP_LOCK_BIT,
		&sep->in_use_flags) == 0);

	if (signal_pending(current)) {
		error = -EINTR;
		goto end_function_with_error;
	}
	/*
	 * The pid_doing_transaction indicates that this process
	 * now owns the facilities to perform a transaction with
	 * the SEP. While this process is performing a transaction,
	 * no other process who has the SEP device open can perform
	 * any transactions. This method allows more than one process
	 * to have the device open at any given time, which provides
	 * finer granularity for device utilization by multiple
	 * processes.
	 */
	mutex_lock(&sep->sep_mutex);
	sep->pid_doing_transaction = current->pid;
	mutex_unlock(&sep->sep_mutex);

	/* Zero the pools and the number of data pool allocation pointers */
	sep->data_pool_bytes_allocated = 0;
	sep->num_of_data_allocations = 0;

	/*
	 * Check that the size of the mapped range matches the size of the
	 * message shared area
	 */
	if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
		error = -EINVAL;
		goto end_function_with_error;
	}

	dev_dbg(&sep->pdev->dev, "shared_addr is %p\n", sep->shared_addr);

	/* Get bus address */
	bus_addr = sep->shared_bus;

	dev_dbg(&sep->pdev->dev,
		"bus_address is %lx\n", (unsigned long)bus_addr);

	if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
		vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
		dev_warn(&sep->pdev->dev, "remap_pfn_range failed\n");
		error = -EAGAIN;
		goto end_function_with_error;
	}
	dev_dbg(&sep->pdev->dev, "mmap end\n");
	goto end_function;

end_function_with_error:
	/* Clear the bit */
	clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
	mutex_lock(&sep->sep_mutex);
	sep->pid_doing_transaction = 0;
	mutex_unlock(&sep->sep_mutex);

	/* Raise event for stuck contexts */
	dev_warn(&sep->pdev->dev, "mmap error - waking up event\n");
	wake_up(&sep->event);

end_function:
	return error;
}
/**
 * sep_poll - poll handler
 * @filp: pointer to struct file
 * @wait: pointer to poll_table
 *
 * Called by the OS when the kernel is asked to do a poll on
 * a SEP file handle.
 */
static unsigned int sep_poll(struct file *filp, poll_table *wait)
{
	u32 mask = 0;
	u32 retval = 0;
	u32 retval2 = 0;
	unsigned long lck_flags;

	struct sep_device *sep = filp->private_data;

	dev_dbg(&sep->pdev->dev, "poll: start\n");

	/* Am I the process that owns the transaction? */
	mutex_lock(&sep->sep_mutex);
	if (current->pid != sep->pid_doing_transaction) {
		dev_warn(&sep->pdev->dev, "poll; wrong pid\n");
		mask = POLLERR;
		mutex_unlock(&sep->sep_mutex);
		goto end_function;
	}
	mutex_unlock(&sep->sep_mutex);

	/* Check if send command or send_reply were activated previously */
	if (!test_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags)) {
		dev_warn(&sep->pdev->dev, "poll; send msg lock bit not set\n");
		mask = POLLERR;
		goto end_function;
	}

	/* Add the event to the polling wait table */
	dev_dbg(&sep->pdev->dev, "poll: calling wait sep_event\n");

	poll_wait(filp, &sep->event, wait);

	dev_dbg(&sep->pdev->dev, "poll: send_ct is %lx reply ct is %lx\n",
		sep->send_ct, sep->reply_ct);

	/* Check if error occurred during poll */
	retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
	if (retval2 != 0x0) {
		dev_warn(&sep->pdev->dev, "poll; poll error %x\n", retval2);
		mask |= POLLERR;
		goto end_function;
	}

	spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);

	if (sep->send_ct == sep->reply_ct) {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
		retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		dev_dbg(&sep->pdev->dev, "poll: data ready check (GPR2) %x\n",
			retval);

		/* Check if printf request */
		if ((retval >> 30) & 0x1) {
			dev_dbg(&sep->pdev->dev, "poll: SEP printf request\n");
			wake_up(&sep->event_request_daemon);
			goto end_function;
		}

		/* Check if this is a SEP reply or request */
		if (retval >> 31) {
			dev_dbg(&sep->pdev->dev, "poll: SEP request\n");
			wake_up(&sep->event_request_daemon);
		} else {
			dev_dbg(&sep->pdev->dev, "poll: normal return\n");
			/* In case it is again by send_reply_command */
			clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags);
			sep_dump_message(sep);
			dev_dbg(&sep->pdev->dev,
				"poll; SEP reply POLLIN | POLLRDNORM\n");
			mask |= POLLIN | POLLRDNORM;
		}
	} else {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
		dev_dbg(&sep->pdev->dev,
			"poll; no reply received; returning mask of 0\n");
		mask = 0;
	}

end_function:
	dev_dbg(&sep->pdev->dev, "poll: end\n");
	return mask;
}
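/*
 * Illustrative userspace sketch (not part of the driver): a transaction
 * is expected to look roughly like the following, assuming the device
 * node name and the ioctl names exported by sep_driver_api.h:
 *
 *	int fd = open("/dev/sep", O_RDWR);
 *	void *msg = mmap(NULL, SEP_DRIVER_MMMAP_AREA_SIZE,
 *			 PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	// ...build the request message in msg...
 *	ioctl(fd, SEP_IOCSENDSEPCOMMAND, 0);	// -> sep_send_command_handler
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);			// -> sep_poll
 *	// on POLLIN | POLLRDNORM the SEP reply is back in msg
 */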
/**
 * sep_time_address - address in SEP memory of time
 * @sep: SEP device we want the address from
 *
 * Return the address of the two dwords in memory used for time
 * setting.
 */
static u32 *sep_time_address(struct sep_device *sep)
{
	return sep->shared_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
}
/**
 * sep_set_time - set the SEP time
 * @sep: the SEP we are setting the time for
 *
 * Calculates time and sets it at the predefined address.
 * Called with the SEP mutex held.
 */
static unsigned long sep_set_time(struct sep_device *sep)
{
	struct timeval time;
	u32 *time_addr;	/* Address of time as seen by the kernel */

	dev_dbg(&sep->pdev->dev, "sep_set_time start\n");

	do_gettimeofday(&time);

	/* Set value in the SYSTEM MEMORY offset */
	time_addr = sep_time_address(sep);

	time_addr[0] = SEP_TIME_VAL_TOKEN;
	time_addr[1] = time.tv_sec;

	dev_dbg(&sep->pdev->dev, "time.tv_sec is %lu\n", time.tv_sec);
	dev_dbg(&sep->pdev->dev, "time_addr is %p\n", time_addr);
	dev_dbg(&sep->pdev->dev, "sep->shared_addr is %p\n", sep->shared_addr);

	return time.tv_sec;
}
/**
 * sep_set_caller_id_handler - insert caller id entry
 * @sep: SEP device
 * @arg: pointer to struct caller_id_struct
 *
 * Inserts the data into the caller id table. Note that this function
 * falls under the ioctl lock
 */
static int sep_set_caller_id_handler(struct sep_device *sep, u32 arg)
{
	void __user *hash;
	int error = 0;
	int i;
	struct caller_id_struct command_args;

	dev_dbg(&sep->pdev->dev, "sep_set_caller_id_handler start\n");

	for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) {
		if (sep->caller_id_table[i].pid == 0)
			break;
	}

	if (i == SEP_CALLER_ID_TABLE_NUM_ENTRIES) {
		dev_warn(&sep->pdev->dev, "no more caller id entries left\n");
		dev_warn(&sep->pdev->dev, "maximum number is %d\n",
			SEP_CALLER_ID_TABLE_NUM_ENTRIES);
		error = -EUSERS;
		goto end_function;
	}

	/* Copy the data */
	if (copy_from_user(&command_args, (void __user *)arg,
		sizeof(command_args))) {
		error = -EFAULT;
		goto end_function;
	}

	hash = (void __user *)(unsigned long)command_args.callerIdAddress;

	if (!command_args.pid || !command_args.callerIdSizeInBytes) {
		error = -EINVAL;
		goto end_function;
	}

	dev_dbg(&sep->pdev->dev, "pid is %x\n", command_args.pid);
	dev_dbg(&sep->pdev->dev, "callerIdSizeInBytes is %x\n",
		command_args.callerIdSizeInBytes);

	if (command_args.callerIdSizeInBytes >
		SEP_CALLER_ID_HASH_SIZE_IN_BYTES) {
		error = -EMSGSIZE;
		goto end_function;
	}

	sep->caller_id_table[i].pid = command_args.pid;

	if (copy_from_user(sep->caller_id_table[i].callerIdHash,
		hash, command_args.callerIdSizeInBytes))
		error = -EFAULT;
end_function:
	dev_dbg(&sep->pdev->dev, "sep_set_caller_id_handler end\n");
	return error;
}
/**
 * sep_set_current_caller_id - set the caller id
 * @sep: pointer to struct sep_device
 *
 * Set the caller ID (if it exists) to the SEP. Note that this
 * function falls under the ioctl lock
 */
static int sep_set_current_caller_id(struct sep_device *sep)
{
	int i;

	dev_dbg(&sep->pdev->dev, "sep_set_current_caller_id start\n");
	dev_dbg(&sep->pdev->dev, "current process is %d\n", current->pid);

	/* Zero the previous value */
	memset(sep->shared_addr + SEP_CALLER_ID_OFFSET_BYTES,
		0, SEP_CALLER_ID_HASH_SIZE_IN_BYTES);

	for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) {
		if (sep->caller_id_table[i].pid == current->pid) {
			dev_dbg(&sep->pdev->dev, "Caller Id found\n");

			memcpy(sep->shared_addr + SEP_CALLER_ID_OFFSET_BYTES,
				(void *)(sep->caller_id_table[i].callerIdHash),
				SEP_CALLER_ID_HASH_SIZE_IN_BYTES);
			break;
		}
	}
	dev_dbg(&sep->pdev->dev, "sep_set_current_caller_id end\n");
	return 0;
}
/**
 * sep_send_command_handler - kick off a command
 * @sep: SEP being signalled
 *
 * This function raises an interrupt to the SEP that signals that it has
 * a new command from the host
 *
 * Note that this function does fall under the ioctl lock
 */
static int sep_send_command_handler(struct sep_device *sep)
{
	unsigned long lck_flags;
	int error = 0;

	dev_dbg(&sep->pdev->dev, "sep_send_command_handler start\n");

	if (test_and_set_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags)) {
		error = -EPROTO;
		goto end_function;
	}
	sep_set_time(sep);

	/* Only Medfield has caller id */
	if (sep->mrst == 0)
		sep_set_current_caller_id(sep);

	sep_dump_message(sep);

	/* Update counter */
	spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
	sep->send_ct++;
	spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);

	dev_dbg(&sep->pdev->dev,
		"sep_send_command_handler send_ct %lx reply_ct %lx\n",
		sep->send_ct, sep->reply_ct);

	/* Send interrupt to SEP */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);

end_function:
	dev_dbg(&sep->pdev->dev, "sep_send_command_handler end\n");
	return error;
}
/**
 * sep_allocate_data_pool_memory_handler - allocate pool memory
 * @sep: pointer to struct sep_device
 * @arg: pointer to struct alloc_struct
 *
 * This function handles the allocate data pool memory request.
 * It calculates the bus address of the allocated memory, and the
 * offset of this area from the mapped address. Therefore, the FVOs
 * in user space can calculate the exact virtual address of this
 * allocated memory
 */
static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
	unsigned long arg)
{
	int error = 0;
	struct alloc_struct command_args;

	/* Holds the allocated buffer address in the system memory pool */
	u32 *token_addr;

	dev_dbg(&sep->pdev->dev,
		"sep_allocate_data_pool_memory_handler start\n");

	if (copy_from_user(&command_args, (void __user *)arg,
		sizeof(struct alloc_struct))) {
		error = -EFAULT;
		goto end_function;
	}

	/* Allocate memory */
	if ((sep->data_pool_bytes_allocated + command_args.num_bytes) >
		SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
		error = -ENOMEM;
		goto end_function;
	}

	dev_dbg(&sep->pdev->dev,
		"bytes_allocated: %x\n", (int)sep->data_pool_bytes_allocated);
	dev_dbg(&sep->pdev->dev,
		"offset: %x\n", SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES);
	/* Set the virtual and bus address */
	command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES +
		sep->data_pool_bytes_allocated;

	dev_dbg(&sep->pdev->dev,
		"command_args.offset: %x\n", command_args.offset);

	/* Place in the shared area that is known by the SEP */
	token_addr = (u32 *)(sep->shared_addr +
		SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES +
		(sep->num_of_data_allocations)*2*sizeof(u32));

	dev_dbg(&sep->pdev->dev, "allocation offset: %x\n",
		SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES);
	dev_dbg(&sep->pdev->dev, "data pool token addr is %p\n", token_addr);

	token_addr[0] = SEP_DATA_POOL_POINTERS_VAL_TOKEN;
	token_addr[1] = (u32)sep->shared_bus +
		SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES +
		sep->data_pool_bytes_allocated;

	dev_dbg(&sep->pdev->dev, "data pool token [0] %x\n", token_addr[0]);
	dev_dbg(&sep->pdev->dev, "data pool token [1] %x\n", token_addr[1]);

	/* Write the memory back to the user space */
	error = copy_to_user((void *)arg, (void *)&command_args,
		sizeof(struct alloc_struct));
	if (error) {
		error = -EFAULT;
		dev_warn(&sep->pdev->dev,
			"allocate data pool copy to user error\n");
		goto end_function;
	}

	/* Update the allocation */
	sep->data_pool_bytes_allocated += command_args.num_bytes;
	sep->num_of_data_allocations += 1;

	dev_dbg(&sep->pdev->dev, "data_allocations %d\n",
		sep->num_of_data_allocations);
	dev_dbg(&sep->pdev->dev, "bytes allocated %d\n",
		(int)sep->data_pool_bytes_allocated);

end_function:
	dev_dbg(&sep->pdev->dev, "sep_allocate_data_pool_memory_handler end\n");
	return error;
}
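/*
 * Illustrative sketch (hypothetical variable and ioctl names): userspace
 * turns the returned offset back into a pointer by adding it to the base
 * of its own mmap() of the shared area:
 *
 *	struct alloc_struct args = { .num_bytes = len };
 *	ioctl(fd, SEP_IOCALLOCDATAPOLL, &args);	// assumed ioctl name
 *	void *buf = (char *)mapped_base + args.offset;
 */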
/**
 * sep_lock_kernel_pages - map kernel pages for DMA
 * @sep: pointer to struct sep_device
 * @kernel_virt_addr: address of data buffer in kernel
 * @data_size: size of data
 * @lli_array_ptr: lli array
 * @in_out_flag: input into device or output from device
 *
 * This function locks all the physical pages of the kernel virtual buffer
 * and constructs a basic lli array, where each entry holds the physical
 * page address and the size that application data holds in this page.
 * This function is used only during kernel crypto mod calls from within
 * the kernel (when ioctl is not used)
 */
static int sep_lock_kernel_pages(struct sep_device *sep,
	u32 kernel_virt_addr,
	u32 data_size,
	struct sep_lli_entry **lli_array_ptr,
	int in_out_flag)
{
	int error = 0;
	/* Array of lli */
	struct sep_lli_entry *lli_array;
	/* Map array */
	struct sep_dma_map *map_array;

	dev_dbg(&sep->pdev->dev, "sep_lock_kernel_pages start\n");
	dev_dbg(&sep->pdev->dev, "kernel_virt_addr is %08x\n",
		kernel_virt_addr);
	dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size);

	lli_array = kmalloc(sizeof(struct sep_lli_entry), GFP_ATOMIC);
	if (!lli_array) {
		error = -ENOMEM;
		goto end_function;
	}
	map_array = kmalloc(sizeof(struct sep_dma_map), GFP_ATOMIC);
	if (!map_array) {
		error = -ENOMEM;
		goto end_function_with_error;
	}

	map_array[0].dma_addr =
		dma_map_single(&sep->pdev->dev, (void *)kernel_virt_addr,
		data_size, DMA_BIDIRECTIONAL);
	map_array[0].size = data_size;

	/*
	 * Set the start address of the first page - app data may start not at
	 * the beginning of the page
	 */
	lli_array[0].bus_address = (u32)map_array[0].dma_addr;
	lli_array[0].block_size = map_array[0].size;

	dev_dbg(&sep->pdev->dev,
		"lli_array[0].bus_address is %08lx, lli_array[0].block_size is %x\n",
		(unsigned long)lli_array[0].bus_address,
		lli_array[0].block_size);

	/* Set the output parameters */
	if (in_out_flag == SEP_DRIVER_IN_FLAG) {
		*lli_array_ptr = lli_array;
		sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = 1;
		sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = 0;
		sep->dma_res_arr[sep->nr_dcb_creat].in_map_array = map_array;
		sep->dma_res_arr[sep->nr_dcb_creat].in_map_num_entries = 1;
	} else {
		*lli_array_ptr = lli_array;
		sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages = 1;
		sep->dma_res_arr[sep->nr_dcb_creat].out_page_array = 0;
		sep->dma_res_arr[sep->nr_dcb_creat].out_map_array = map_array;
		sep->dma_res_arr[sep->nr_dcb_creat].out_map_num_entries = 1;
	}
	goto end_function;

end_function_with_error:
	kfree(lli_array);

end_function:
	dev_dbg(&sep->pdev->dev, "sep_lock_kernel_pages end\n");
	return error;
}
/**
 * sep_lock_user_pages - lock and map user pages for DMA
 * @sep: pointer to struct sep_device
 * @app_virt_addr: user memory data buffer
 * @data_size: size of data buffer
 * @lli_array_ptr: lli array
 * @in_out_flag: input or output to device
 *
 * This function locks all the physical pages of the application
 * virtual buffer and constructs a basic lli array, where each entry
 * holds the physical page address and the size that application
 * data holds in this physical page
 */
static int sep_lock_user_pages(struct sep_device *sep,
	u32 app_virt_addr,
	u32 data_size,
	struct sep_lli_entry **lli_array_ptr,
	int in_out_flag)
{
	int error = 0;
	u32 count;
	int result;
	/* The page of the end address of the user space buffer */
	u32 end_page;
	/* The page of the start address of the user space buffer */
	u32 start_page;
	/* The range in pages */
	u32 num_pages;
	/* Array of pointers to page */
	struct page **page_array;
	/* Array of lli */
	struct sep_lli_entry *lli_array;
	/* Map array */
	struct sep_dma_map *map_array;
	/* Direction of the DMA mapping for locked pages */
	enum dma_data_direction dir;

	dev_dbg(&sep->pdev->dev, "sep_lock_user_pages start\n");

	/* Set start and end pages and num pages */
	end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
	start_page = app_virt_addr >> PAGE_SHIFT;
	num_pages = end_page - start_page + 1;

	dev_dbg(&sep->pdev->dev, "app_virt_addr is %x\n", app_virt_addr);
	dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size);
	dev_dbg(&sep->pdev->dev, "start_page is %x\n", start_page);
	dev_dbg(&sep->pdev->dev, "end_page is %x\n", end_page);
	dev_dbg(&sep->pdev->dev, "num_pages is %x\n", num_pages);

	dev_dbg(&sep->pdev->dev, "starting page_array malloc\n");

	/* Allocate array of pages structure pointers */
	page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
	if (!page_array) {
		error = -ENOMEM;
		goto end_function;
	}
	map_array = kmalloc(sizeof(struct sep_dma_map) * num_pages, GFP_ATOMIC);
	if (!map_array) {
		dev_warn(&sep->pdev->dev, "kmalloc for map_array failed\n");
		error = -ENOMEM;
		goto end_function_with_error1;
	}

	lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages,
		GFP_ATOMIC);

	if (!lli_array) {
		dev_warn(&sep->pdev->dev, "kmalloc for lli_array failed\n");
		error = -ENOMEM;
		goto end_function_with_error2;
	}

	dev_dbg(&sep->pdev->dev, "starting get_user_pages\n");

	/* Convert the application virtual address into a set of physical */
	down_read(&current->mm->mmap_sem);
	result = get_user_pages(current, current->mm, app_virt_addr,
		num_pages,
		((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1),
		0, page_array, 0);

	up_read(&current->mm->mmap_sem);

	/* Check the number of pages locked - if not all then exit with error */
	if (result != num_pages) {
		dev_warn(&sep->pdev->dev,
			"not all pages locked by get_user_pages\n");
		error = -ENOMEM;
		goto end_function_with_error3;
	}

	dev_dbg(&sep->pdev->dev, "get_user_pages succeeded\n");

	/* Set direction */
	if (in_out_flag == SEP_DRIVER_IN_FLAG)
		dir = DMA_TO_DEVICE;
	else
		dir = DMA_FROM_DEVICE;

	/*
	 * Fill the array using page array data and
	 * map the pages - this action will also flush the cache as needed
	 */
	for (count = 0; count < num_pages; count++) {
		/* Fill the map array */
		map_array[count].dma_addr =
			dma_map_page(&sep->pdev->dev, page_array[count],
			0, PAGE_SIZE, /*dir*/DMA_BIDIRECTIONAL);

		map_array[count].size = PAGE_SIZE;

		/* Fill the lli array entry */
		lli_array[count].bus_address = (u32)map_array[count].dma_addr;
		lli_array[count].block_size = PAGE_SIZE;

		dev_warn(&sep->pdev->dev, "lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is %x\n",
			count, (unsigned long)lli_array[count].bus_address,
			count, lli_array[count].block_size);
	}

	/* Check the offset for the first page */
	lli_array[0].bus_address =
		lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));

	/* Check that not all the data is in the first page only */
	if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
		lli_array[0].block_size = data_size;
	else
		lli_array[0].block_size =
			PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));

	dev_dbg(&sep->pdev->dev,
		"lli_array[0].bus_address is %08lx, lli_array[0].block_size is %x\n",
		(unsigned long)lli_array[0].bus_address,
		lli_array[0].block_size);

	/* Check the size of the last page */
	if (num_pages > 1) {
		lli_array[num_pages - 1].block_size =
			(app_virt_addr + data_size) & (~PAGE_MASK);

		dev_warn(&sep->pdev->dev,
			"lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is %x\n",
			num_pages - 1,
			(unsigned long)lli_array[num_pages - 1].bus_address,
			num_pages - 1,
			lli_array[num_pages - 1].block_size);
	}

	/* Set output params according to the in_out flag */
	if (in_out_flag == SEP_DRIVER_IN_FLAG) {
		*lli_array_ptr = lli_array;
		sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = num_pages;
		sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = page_array;
		sep->dma_res_arr[sep->nr_dcb_creat].in_map_array = map_array;
		sep->dma_res_arr[sep->nr_dcb_creat].in_map_num_entries =
			num_pages;
	} else {
		*lli_array_ptr = lli_array;
		sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages = num_pages;
		sep->dma_res_arr[sep->nr_dcb_creat].out_page_array =
			page_array;
		sep->dma_res_arr[sep->nr_dcb_creat].out_map_array = map_array;
		sep->dma_res_arr[sep->nr_dcb_creat].out_map_num_entries =
			num_pages;
	}
	goto end_function;

end_function_with_error3:
	/* Free lli array */
	kfree(lli_array);

end_function_with_error2:
	kfree(map_array);

end_function_with_error1:
	/* Free page array */
	kfree(page_array);

end_function:
	dev_dbg(&sep->pdev->dev, "sep_lock_user_pages end\n");
	return error;
}
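/*
 * Worked example (illustrative numbers, 4K pages): for app_virt_addr
 * 0x1234 and data_size 0x3000, start_page = 1, end_page = 4, so
 * num_pages = 4. The first entry's bus address is advanced by the
 * in-page offset (0x234) and its block size trimmed to
 * PAGE_SIZE - 0x234 = 0xdcc bytes; the last entry is trimmed to
 * (0x1234 + 0x3000) & ~PAGE_MASK = 0x234 bytes.
 */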
/**
 * sep_calculate_lli_table_max_size - size the LLI table
 * @sep: pointer to struct sep_device
 * @lli_in_array_ptr
 * @num_array_entries
 * @last_table_flag
 *
 * This function calculates the size of data that can be inserted into
 * the lli table from this array, such that either the table is full
 * (all entries are entered), or there are no more entries in the
 * lli array
 */
static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
	struct sep_lli_entry *lli_in_array_ptr,
	u32 num_array_entries,
	u32 *last_table_flag)
{
	u32 counter;
	/* Table data size */
	u32 table_data_size = 0;
	/* Data size for the next table */
	u32 next_table_data_size;

	*last_table_flag = 0;

	/*
	 * Calculate the data in the out lli table till we fill the whole
	 * table or till the data has ended
	 */
	for (counter = 0;
		(counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
			(counter < num_array_entries); counter++)
		table_data_size += lli_in_array_ptr[counter].block_size;

	/*
	 * Check if we reached the last entry,
	 * meaning this is the last table to build,
	 * and no need to check the block alignment
	 */
	if (counter == num_array_entries) {
		/* Set the last table flag */
		*last_table_flag = 1;
		goto end_function;
	}

	/*
	 * Calculate the data size of the next table.
	 * Stop if no entries left or if data size is more the DMA restriction
	 */
	next_table_data_size = 0;
	for (; counter < num_array_entries; counter++) {
		next_table_data_size += lli_in_array_ptr[counter].block_size;
		if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
			break;
	}

	/*
	 * Check if the next table data size is less than the DMA restriction.
	 * If it is - recalculate the current table size, so that the next
	 * table data size will be adequate for DMA
	 */
	if (next_table_data_size &&
		next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)

		table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
			next_table_data_size);

	dev_dbg(&sep->pdev->dev, "table data size is %x\n",
		table_data_size);
end_function:
	return table_data_size;
}
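/*
 * Worked example (illustrative; the real limits come from
 * sep_driver_config.h): suppose a table holds up to 31 data entries and
 * the DMA minimum per table is 16 bytes. An array with 33 entries left
 * fills 31 into this table and leaves 2 for the next; if those 2
 * entries held only 10 bytes in total, 16 - 10 = 6 bytes are handed
 * back from this table so the next table still meets the minimum.
 */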
/**
 * sep_build_lli_table - build an lli array for the given table
 * @sep: pointer to struct sep_device
 * @lli_array_ptr: pointer to lli array
 * @lli_table_ptr: pointer to lli table
 * @num_processed_entries_ptr: pointer to number of entries
 * @num_table_entries_ptr: pointer to number of tables
 * @table_data_size: total data size
 *
 * Builds an lli table from the lli_array according to
 * the given size of data
 */
static void sep_build_lli_table(struct sep_device *sep,
	struct sep_lli_entry *lli_array_ptr,
	struct sep_lli_entry *lli_table_ptr,
	u32 *num_processed_entries_ptr,
	u32 *num_table_entries_ptr,
	u32 table_data_size)
{
	/* Current table data size */
	u32 curr_table_data_size;
	/* Counter of lli array entry */
	u32 array_counter;

	dev_dbg(&sep->pdev->dev, "sep_build_lli_table start\n");

	/* Init current table data size and lli array entry counter */
	curr_table_data_size = 0;
	array_counter = 0;
	*num_table_entries_ptr = 1;

	dev_dbg(&sep->pdev->dev, "table_data_size is %x\n", table_data_size);

	/* Fill the table till table size reaches the needed amount */
	while (curr_table_data_size < table_data_size) {
		/* Update the number of entries in table */
		(*num_table_entries_ptr)++;

		lli_table_ptr->bus_address =
			cpu_to_le32(lli_array_ptr[array_counter].bus_address);

		lli_table_ptr->block_size =
			cpu_to_le32(lli_array_ptr[array_counter].block_size);

		curr_table_data_size += lli_array_ptr[array_counter].block_size;

		dev_dbg(&sep->pdev->dev, "lli_table_ptr is %p\n",
			lli_table_ptr);
		dev_dbg(&sep->pdev->dev, "lli_table_ptr->bus_address is %08lx\n",
			(unsigned long)lli_table_ptr->bus_address);
		dev_dbg(&sep->pdev->dev, "lli_table_ptr->block_size is %x\n",
			lli_table_ptr->block_size);

		/* Check for overflow of the table data */
		if (curr_table_data_size > table_data_size) {
			dev_dbg(&sep->pdev->dev,
				"curr_table_data_size too large\n");

			/* Update the size of block in the table */
			lli_table_ptr->block_size -=
				cpu_to_le32((curr_table_data_size - table_data_size));

			/* Update the physical address in the lli array */
			lli_array_ptr[array_counter].bus_address +=
				cpu_to_le32(lli_table_ptr->block_size);

			/* Update the block size left in the lli array */
			lli_array_ptr[array_counter].block_size =
				(curr_table_data_size - table_data_size);
		} else
			/* Advance to the next entry in the lli_array */
			array_counter++;

		dev_dbg(&sep->pdev->dev,
			"lli_table_ptr->bus_address is %08lx\n",
			(unsigned long)lli_table_ptr->bus_address);
		dev_dbg(&sep->pdev->dev,
			"lli_table_ptr->block_size is %x\n",
			lli_table_ptr->block_size);

		/* Move to the next entry in table */
		lli_table_ptr++;
	}

	/* Set the info entry to default */
	lli_table_ptr->bus_address = 0xffffffff;
	lli_table_ptr->block_size = 0;

	dev_dbg(&sep->pdev->dev, "lli_table_ptr is %p\n", lli_table_ptr);
	dev_dbg(&sep->pdev->dev, "lli_table_ptr->bus_address is %08lx\n",
		(unsigned long)lli_table_ptr->bus_address);
	dev_dbg(&sep->pdev->dev, "lli_table_ptr->block_size is %x\n",
		lli_table_ptr->block_size);

	/* Set the output parameter */
	*num_processed_entries_ptr += array_counter;

	dev_dbg(&sep->pdev->dev, "num_processed_entries_ptr is %x\n",
		*num_processed_entries_ptr);

	dev_dbg(&sep->pdev->dev, "sep_build_lli_table end\n");
}
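/*
 * Illustrative layout note: the info entry written above is what chains
 * tables together. sep_prepare_input_dma_table() later overwrites it
 * with bus_address = <next table> and block_size = (entries << 24) | size,
 * so a follow-on table of 5 entries covering 0x3000 bytes of data is
 * encoded as block_size 0x05003000; sep_debug_print_lli_tables() decodes
 * it the same way.
 */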
/**
 * sep_shared_area_virt_to_bus - map shared area to bus address
 * @sep: pointer to struct sep_device
 * @virt_address: virtual address to convert
 *
 * This function returns the physical address inside shared area according
 * to the virtual address. It can be either on the external RAM device
 * (ioremapped), or on the system RAM
 * This implementation is for the external RAM
 */
static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
	void *virt_address)
{
	dev_dbg(&sep->pdev->dev, "sh virt to phys v %p\n", virt_address);
	dev_dbg(&sep->pdev->dev, "sh virt to phys p %08lx\n",
		(unsigned long)
		(sep->shared_bus + (virt_address - sep->shared_addr)));

	return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
}
/**
 * sep_shared_area_bus_to_virt - map shared area bus address to kernel
 * @sep: pointer to struct sep_device
 * @bus_address: bus address to convert
 *
 * This function returns the virtual address inside shared area
 * according to the physical address. It can be either on the
 * external RAM device (ioremapped), or on the system RAM
 * This implementation is for the external RAM
 */
static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
	dma_addr_t bus_address)
{
	dev_dbg(&sep->pdev->dev, "shared bus to virt b=%x v=%x\n",
		(u32)bus_address, (u32)(sep->shared_addr +
		(size_t)(bus_address - sep->shared_bus)));

	return sep->shared_addr + (size_t)(bus_address - sep->shared_bus);
}
/**
 * sep_debug_print_lli_tables - dump LLI table
 * @sep: pointer to struct sep_device
 * @lli_table_ptr: pointer to sep_lli_entry
 * @num_table_entries: number of entries
 * @table_data_size: total data size
 *
 * Walk the list of created tables and print all the data
 */
static void sep_debug_print_lli_tables(struct sep_device *sep,
	struct sep_lli_entry *lli_table_ptr,
	unsigned long num_table_entries,
	unsigned long table_data_size)
{
	unsigned long table_count = 1;
	unsigned long entries_count = 0;

	dev_dbg(&sep->pdev->dev, "sep_debug_print_lli_tables start\n");

	while ((unsigned long) lli_table_ptr != 0xffffffff) {
		dev_dbg(&sep->pdev->dev,
			"lli table %08lx, table_data_size is %lu\n",
			table_count, table_data_size);
		dev_dbg(&sep->pdev->dev, "num_table_entries is %lu\n",
			num_table_entries);

		/* Print entries of the table (without info entry) */
		for (entries_count = 0; entries_count < num_table_entries;
			entries_count++, lli_table_ptr++) {

			dev_dbg(&sep->pdev->dev,
				"lli_table_ptr address is %08lx\n",
				(unsigned long) lli_table_ptr);

			dev_dbg(&sep->pdev->dev,
				"phys address is %08lx block size is %x\n",
				(unsigned long)lli_table_ptr->bus_address,
				lli_table_ptr->block_size);
		}
		/* Point to the info entry */
		lli_table_ptr--;

		dev_dbg(&sep->pdev->dev,
			"phys lli_table_ptr->block_size is %x\n",
			lli_table_ptr->block_size);

		dev_dbg(&sep->pdev->dev,
			"phys lli_table_ptr->physical_address is %08lu\n",
			(unsigned long)lli_table_ptr->bus_address);

		table_data_size = lli_table_ptr->block_size & 0xffffff;
		num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
		lli_table_ptr = (struct sep_lli_entry *)
			(lli_table_ptr->bus_address);

		dev_dbg(&sep->pdev->dev,
			"phys table_data_size is %lu num_table_entries is"
			" %lu lli_table_ptr is %lu\n", table_data_size,
			num_table_entries, (unsigned long)lli_table_ptr);

		if ((unsigned long)lli_table_ptr != 0xffffffff)
			lli_table_ptr = (struct sep_lli_entry *)
				sep_shared_bus_to_virt(sep,
				(unsigned long)lli_table_ptr);

		table_count++;
	}
	dev_dbg(&sep->pdev->dev, "sep_debug_print_lli_tables end\n");
}
/**
 * sep_prepare_empty_lli_table - create a blank LLI table
 * @sep: pointer to struct sep_device
 * @lli_table_addr_ptr: pointer to lli table
 * @num_entries_ptr: pointer to number of entries
 * @table_data_size_ptr: point to table data size
 *
 * This function creates empty lli tables when there is no data
 */
static void sep_prepare_empty_lli_table(struct sep_device *sep,
	dma_addr_t *lli_table_addr_ptr,
	u32 *num_entries_ptr,
	u32 *table_data_size_ptr)
{
	struct sep_lli_entry *lli_table_ptr;

	dev_dbg(&sep->pdev->dev, "sep_prepare_empty_lli_table start\n");

	/* Find the area for new table */
	lli_table_ptr =
		(struct sep_lli_entry *)(sep->shared_addr +
		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
		sep->num_lli_tables_created * sizeof(struct sep_lli_entry) *
		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);

	lli_table_ptr->bus_address = 0;
	lli_table_ptr->block_size = 0;

	lli_table_ptr++;
	lli_table_ptr->bus_address = 0xFFFFFFFF;
	lli_table_ptr->block_size = 0;

	/* Set the output parameter value */
	*lli_table_addr_ptr = sep->shared_bus +
		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
		sep->num_lli_tables_created *
		sizeof(struct sep_lli_entry) *
		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

	/* Set the num of entries and table data size for empty table */
	*num_entries_ptr = 2;
	*table_data_size_ptr = 0;

	/* Update the number of created tables */
	sep->num_lli_tables_created++;

	dev_dbg(&sep->pdev->dev, "sep_prepare_empty_lli_table end\n");
}
/**
 * sep_prepare_input_dma_table - prepare input DMA mappings
 * @sep: pointer to struct sep_device
 * @data_size:
 * @block_size:
 * @lli_table_ptr:
 * @num_entries_ptr:
 * @table_data_size_ptr:
 * @is_kva: set for kernel data (kernel crypto call)
 *
 * This function prepares only an input DMA table for synchronic symmetric
 * operations (HASH)
 * Note that all bus addresses that are passed to the SEP
 * are in 32 bit format; the SEP is a 32 bit device
 */
static int sep_prepare_input_dma_table(struct sep_device *sep,
	unsigned long app_virt_addr,
	u32 data_size,
	u32 block_size,
	dma_addr_t *lli_table_ptr,
	u32 *num_entries_ptr,
	u32 *table_data_size_ptr,
	bool is_kva)
{
	int error = 0;
	/* Pointer to the info entry of the table - the last entry */
	struct sep_lli_entry *info_entry_ptr;
	/* Array of pointers to page */
	struct sep_lli_entry *lli_array_ptr;
	/* Points to the first entry to be processed in the lli_in_array */
	u32 current_entry = 0;
	/* Num entries in the virtual buffer */
	u32 sep_lli_entries = 0;
	/* Lli table pointer */
	struct sep_lli_entry *in_lli_table_ptr;
	/* The total data in one table */
	u32 table_data_size = 0;
	/* Flag for last table */
	u32 last_table_flag = 0;
	/* Number of entries in lli table */
	u32 num_entries_in_table = 0;
	/* Next table address */
	u32 lli_table_alloc_addr = 0;

	dev_dbg(&sep->pdev->dev, "sep_prepare_input_dma_table start\n");
	dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size);
	dev_dbg(&sep->pdev->dev, "block_size is %x\n", block_size);

	/* Initialize the pages pointers */
	sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = 0;
	sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = 0;

	/* Set the kernel address for first table to be allocated */
	lli_table_alloc_addr = (u32)(sep->shared_addr +
		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
		sep->num_lli_tables_created * sizeof(struct sep_lli_entry) *
		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);

	if (data_size == 0) {
		/* Special case - create empty table - 2 entries, zero data */
		sep_prepare_empty_lli_table(sep, lli_table_ptr,
			num_entries_ptr, table_data_size_ptr);
		goto update_dcb_counter;
	}

	/* Check if the pages are in Kernel Virtual Address layout */
	if (is_kva == true)
		/* Lock the pages in the kernel */
		error = sep_lock_kernel_pages(sep, app_virt_addr,
			data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG);
	else
		/*
		 * Lock the pages of the user buffer
		 * and translate them to pages
		 */
		error = sep_lock_user_pages(sep, app_virt_addr,
			data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG);

	if (error)
		goto end_function;

	dev_dbg(&sep->pdev->dev, "output sep_in_num_pages is %x\n",
		sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages);

	current_entry = 0;
	info_entry_ptr = 0;

	sep_lli_entries = sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages;

	/* Loop till all the entries in the in array are processed */
	while (current_entry < sep_lli_entries) {

		/* Set the new input and output tables */
		in_lli_table_ptr =
			(struct sep_lli_entry *)lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		if (lli_table_alloc_addr >
			((u32)sep->shared_addr +
			SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
			SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {

			error = -ENOMEM;
			goto end_function_error;
		}

		/* Update the number of created tables */
		sep->num_lli_tables_created++;

		/* Calculate the maximum size of data for input table */
		table_data_size = sep_calculate_lli_table_max_size(sep,
			&lli_array_ptr[current_entry],
			(sep_lli_entries - current_entry),
			&last_table_flag);

		/*
		 * If this is not the last table -
		 * then align it to the block size
		 */
		if (!last_table_flag)
			table_data_size =
				(table_data_size / block_size) * block_size;

		dev_dbg(&sep->pdev->dev, "output table_data_size is %x\n",
			table_data_size);

		/* Construct input lli table */
		sep_build_lli_table(sep, &lli_array_ptr[current_entry],
			in_lli_table_ptr,
			&current_entry, &num_entries_in_table, table_data_size);

		if (info_entry_ptr == 0) {

			/* Set the output parameters to physical addresses */
			*lli_table_ptr = sep_shared_area_virt_to_bus(sep,
				in_lli_table_ptr);
			*num_entries_ptr = num_entries_in_table;
			*table_data_size_ptr = table_data_size;

			dev_dbg(&sep->pdev->dev,
				"output lli_table_in_ptr is %08lx\n",
				(unsigned long)*lli_table_ptr);

		} else {
			/* Update the info entry of the previous in table */
			info_entry_ptr->bus_address =
				sep_shared_area_virt_to_bus(sep,
				in_lli_table_ptr);
			info_entry_ptr->block_size =
				((num_entries_in_table) << 24) |
				(table_data_size);
		}

		/* Save the pointer to the info entry of the current tables */
		info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
	}

	/* Print input tables */
	sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)
		sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
		*num_entries_ptr, *table_data_size_ptr);

	/* The array of the pages */
	kfree(lli_array_ptr);

update_dcb_counter:
	/* Update DCB counter */
	sep->nr_dcb_creat++;
	goto end_function;

end_function_error:
	/* Free all the allocated resources */
	kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_map_array);
	kfree(lli_array_ptr);
	kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_page_array);

end_function:
	dev_dbg(&sep->pdev->dev, "sep_prepare_input_dma_table end\n");
	return error;
}
1856 * sep_construct_dma_tables_from_lli - prepare AES/DES mappings
1857 * @sep: pointer to struct sep_device
1858 * @lli_in_array:
1859 * @sep_in_lli_entries:
1860 * @lli_out_array:
1861 * @sep_out_lli_entries
1862 * @block_size
1863 * @lli_table_in_ptr
1864 * @lli_table_out_ptr
1865 * @in_num_entries_ptr
1866 * @out_num_entries_ptr
1867 * @table_data_size_ptr
1869 * This function creates the input and output DMA tables for
1870 * symmetric operations (AES/DES) according to the block
1871 * size from LLI arrays
1872 * Note that all bus addresses that are passed to the SEP
1873 * are in 32 bit format; the SEP is a 32 bit device
1874 */
1875 static int sep_construct_dma_tables_from_lli(
1876 struct sep_device *sep,
1877 struct sep_lli_entry *lli_in_array,
1878 u32 sep_in_lli_entries,
1879 struct sep_lli_entry *lli_out_array,
1880 u32 sep_out_lli_entries,
1881 u32 block_size,
1882 dma_addr_t *lli_table_in_ptr,
1883 dma_addr_t *lli_table_out_ptr,
1884 u32 *in_num_entries_ptr,
1885 u32 *out_num_entries_ptr,
1886 u32 *table_data_size_ptr)
1887 {
1888 /* Points to the area where next lli table can be allocated */
1889 u32 lli_table_alloc_addr = 0;
1890 /* Input lli table */
1891 struct sep_lli_entry *in_lli_table_ptr = 0;
1892 /* Output lli table */
1893 struct sep_lli_entry *out_lli_table_ptr = 0;
1894 /* Pointer to the info entry of the input table - the last entry */
1895 struct sep_lli_entry *info_in_entry_ptr = 0;
1896 /* Pointer to the info entry of the output table - the last entry */
1897 struct sep_lli_entry *info_out_entry_ptr = 0;
1898 /* Points to the first entry to be processed in the lli_in_array */
1899 u32 current_in_entry = 0;
1900 /* Points to the first entry to be processed in the lli_out_array */
1901 u32 current_out_entry = 0;
1902 /* Max size of the input table */
1903 u32 in_table_data_size = 0;
1904 /* Max size of the output table */
1905 u32 out_table_data_size = 0;
1906 /* Flag that signifies if this is the last table built */
1907 u32 last_table_flag = 0;
1908 /* The data size that should be in table */
1909 u32 table_data_size = 0;
1910 /* Number of entries in the input table */
1911 u32 num_entries_in_table = 0;
1912 /* Number of entries in the output table */
1913 u32 num_entries_out_table = 0;
1915 dev_dbg(&sep->pdev->dev, "sep_construct_dma_tables_from_lli start\n");
1917 /* Initialize to point after the message area */
1918 lli_table_alloc_addr = (u32)(sep->shared_addr +
1919 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1920 (sep->num_lli_tables_created *
1921 (sizeof(struct sep_lli_entry) *
1922 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
1924 /* Loop until all entries in the input array are processed */
1925 while (current_in_entry < sep_in_lli_entries) {
1926 /* Set the new input and output tables */
1927 in_lli_table_ptr =
1928 (struct sep_lli_entry *)lli_table_alloc_addr;
1930 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
1931 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1933 /* Set the first output tables */
1934 out_lli_table_ptr =
1935 (struct sep_lli_entry *)lli_table_alloc_addr;
1937 /* Check if the DMA table area limit was overrun */
1938 if ((lli_table_alloc_addr + sizeof(struct sep_lli_entry) *
1939 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) >
1940 ((u32)sep->shared_addr +
1941 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1942 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
1944 dev_warn(&sep->pdev->dev, "dma table limit overrun\n");
1945 return -ENOMEM;
1946 }
1948 /* Update the number of the lli tables created */
1949 sep->num_lli_tables_created += 2;
1951 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
1952 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1954 /* Calculate the maximum size of data for input table */
1955 in_table_data_size =
1956 sep_calculate_lli_table_max_size(sep,
1957 &lli_in_array[current_in_entry],
1958 (sep_in_lli_entries - current_in_entry),
1959 &last_table_flag);
1961 /* Calculate the maximum size of data for output table */
1962 out_table_data_size =
1963 sep_calculate_lli_table_max_size(sep,
1964 &lli_out_array[current_out_entry],
1965 (sep_out_lli_entries - current_out_entry),
1966 &last_table_flag);
1968 dev_dbg(&sep->pdev->dev,
1969 "in_table_data_size is %x\n",
1970 in_table_data_size);
1972 dev_dbg(&sep->pdev->dev,
1973 "out_table_data_size is %x\n",
1974 out_table_data_size);
1976 table_data_size = in_table_data_size;
1978 if (!last_table_flag) {
1979 /*
1980 * If this is not the last table,
1981 * then use the smaller of the two sizes
1982 * and align it to the block size
1983 */
1984 if (table_data_size > out_table_data_size)
1985 table_data_size = out_table_data_size;
1987 /*
1988 * Now calculate the table size so that
1989 * it will be a multiple of the block size
1990 */
1991 table_data_size = (table_data_size / block_size) *
1992 block_size;
1993 }
1995 dev_dbg(&sep->pdev->dev, "table_data_size is %x\n",
1996 table_data_size);
1998 /* Construct input lli table */
1999 sep_build_lli_table(sep, &lli_in_array[current_in_entry],
2000 in_lli_table_ptr,
2001 &current_in_entry,
2002 &num_entries_in_table,
2003 table_data_size);
2005 /* Construct output lli table */
2006 sep_build_lli_table(sep, &lli_out_array[current_out_entry],
2007 out_lli_table_ptr,
2008 &current_out_entry,
2009 &num_entries_out_table,
2010 table_data_size);
2012 /* If info entry is null - this is the first table built */
2013 if (info_in_entry_ptr == 0) {
2014 /* Set the output parameters to physical addresses */
2015 *lli_table_in_ptr =
2016 sep_shared_area_virt_to_bus(sep, in_lli_table_ptr);
2018 *in_num_entries_ptr = num_entries_in_table;
2020 *lli_table_out_ptr =
2021 sep_shared_area_virt_to_bus(sep,
2022 out_lli_table_ptr);
2024 *out_num_entries_ptr = num_entries_out_table;
2025 *table_data_size_ptr = table_data_size;
2027 dev_dbg(&sep->pdev->dev,
2028 "output lli_table_in_ptr is %08lx\n",
2029 (unsigned long)*lli_table_in_ptr);
2030 dev_dbg(&sep->pdev->dev,
2031 "output lli_table_out_ptr is %08lx\n",
2032 (unsigned long)*lli_table_out_ptr);
2033 } else {
2034 /* Update the info entry of the previous in table */
2035 info_in_entry_ptr->bus_address =
2036 sep_shared_area_virt_to_bus(sep,
2037 in_lli_table_ptr);
2039 info_in_entry_ptr->block_size =
2040 ((num_entries_in_table) << 24) |
2041 (table_data_size);
2043 /* Update the info entry of the previous in table */
2044 info_out_entry_ptr->bus_address =
2045 sep_shared_area_virt_to_bus(sep,
2046 out_lli_table_ptr);
2048 info_out_entry_ptr->block_size =
2049 ((num_entries_out_table) << 24) |
2050 (table_data_size);
2052 dev_dbg(&sep->pdev->dev,
2053 "output lli_table_in_ptr:%08lx %08x\n",
2054 (unsigned long)info_in_entry_ptr->bus_address,
2055 info_in_entry_ptr->block_size);
2057 dev_dbg(&sep->pdev->dev,
2058 "output lli_table_out_ptr:%08lx %08x\n",
2059 (unsigned long)info_out_entry_ptr->bus_address,
2060 info_out_entry_ptr->block_size);
2061 }
2063 /* Save the pointer to the info entry of the current tables */
2064 info_in_entry_ptr = in_lli_table_ptr +
2065 num_entries_in_table - 1;
2066 info_out_entry_ptr = out_lli_table_ptr +
2067 num_entries_out_table - 1;
2069 dev_dbg(&sep->pdev->dev,
2070 "output num_entries_out_table is %x\n",
2071 (u32)num_entries_out_table);
2072 dev_dbg(&sep->pdev->dev,
2073 "output info_in_entry_ptr is %lx\n",
2074 (unsigned long)info_in_entry_ptr);
2075 dev_dbg(&sep->pdev->dev,
2076 "output info_out_entry_ptr is %lx\n",
2077 (unsigned long)info_out_entry_ptr);
2078 }
2080 /* Print input tables */
2081 sep_debug_print_lli_tables(sep,
2082 (struct sep_lli_entry *)
2083 sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr),
2084 *in_num_entries_ptr,
2085 *table_data_size_ptr);
2087 /* Print output tables */
2088 sep_debug_print_lli_tables(sep,
2089 (struct sep_lli_entry *)
2090 sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr),
2091 *out_num_entries_ptr,
2092 *table_data_size_ptr);
2094 dev_dbg(&sep->pdev->dev, "sep_construct_dma_tables_from_lli end\n");
2095 return 0;
2096 }
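/*
 * A worked example of the size negotiation above, with illustrative
 * numbers: block_size = 16, an input run of 100 bytes and an output
 * run of 90 bytes. table_data_size starts as the input size (100), is
 * reduced to the smaller side (90) and is then rounded down to a
 * block multiple:
 *
 *	table_data_size = (90 / 16) * 16;	// = 80
 *
 * so this input/output table pair describes exactly 80 bytes and the
 * remainder is carried into the next pair; only the last pair is
 * exempt from the rounding.
 */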
2098 /**
2099 * sep_prepare_input_output_dma_table - prepare DMA I/O table
2100 * @app_virt_in_addr: virtual address of the input buffer
2101 * @app_virt_out_addr: virtual address of the output buffer
2102 * @data_size: size of the data
2103 * @block_size: block size of the symmetric operation
2104 * @lli_table_in_ptr: returned bus address of the first input table
2105 * @lli_table_out_ptr: returned bus address of the first output table
2106 * @in_num_entries_ptr: returned number of entries in the first input table
2107 * @out_num_entries_ptr: returned number of entries in the first output table
2108 * @table_data_size_ptr: returned data size of the first table
2109 * @is_kva: set for kernel data; used only for kernel crypto module
2111 * This function builds input and output DMA tables for synchronic
2112 * symmetric operations (AES, DES, HASH). It also checks that each table
2113 * is a multiple of the block size
2114 * Note that all bus addresses that are passed to the SEP
2115 * are in 32 bit format; the SEP is a 32 bit device
2116 */
2117 static int sep_prepare_input_output_dma_table(struct sep_device *sep,
2118 unsigned long app_virt_in_addr,
2119 unsigned long app_virt_out_addr,
2120 u32 data_size,
2121 u32 block_size,
2122 dma_addr_t *lli_table_in_ptr,
2123 dma_addr_t *lli_table_out_ptr,
2124 u32 *in_num_entries_ptr,
2125 u32 *out_num_entries_ptr,
2126 u32 *table_data_size_ptr,
2127 bool is_kva)
2128 {
2130 int error = 0;
2131 /* Array of pointers of page */
2132 struct sep_lli_entry *lli_in_array;
2133 /* Array of pointers of page */
2134 struct sep_lli_entry *lli_out_array;
2136 dev_dbg(&sep->pdev->dev, "sep_prepare_input_output_dma_table start\n");
2138 if (data_size == 0) {
2139 /* Prepare empty table for input and output */
2140 sep_prepare_empty_lli_table(sep, lli_table_in_ptr,
2141 in_num_entries_ptr, table_data_size_ptr);
2143 sep_prepare_empty_lli_table(sep, lli_table_out_ptr,
2144 out_num_entries_ptr, table_data_size_ptr);
2146 goto update_dcb_counter;
2147 }
2149 /* Initialize the pages pointers */
2150 sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = 0;
2151 sep->dma_res_arr[sep->nr_dcb_creat].out_page_array = 0;
2153 /* Lock the pages of the buffer and translate them to pages */
2154 if (is_kva == true) {
2155 error = sep_lock_kernel_pages(sep, app_virt_in_addr,
2156 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG);
2158 if (error) {
2159 dev_warn(&sep->pdev->dev,
2160 "lock kernel for in failed\n");
2161 goto end_function;
2162 }
2164 error = sep_lock_kernel_pages(sep, app_virt_out_addr,
2165 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG);
2167 if (error) {
2168 dev_warn(&sep->pdev->dev,
2169 "lock kernel for out failed\n");
2170 goto end_function;
2171 }
2172 }
2174 else {
2175 error = sep_lock_user_pages(sep, app_virt_in_addr,
2176 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG);
2177 if (error) {
2178 dev_warn(&sep->pdev->dev,
2179 "sep_lock_user_pages for input virtual buffer failed\n");
2180 goto end_function;
2181 }
2183 error = sep_lock_user_pages(sep, app_virt_out_addr,
2184 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG);
2186 if (error) {
2187 dev_warn(&sep->pdev->dev,
2188 "sep_lock_user_pages for output virtual buffer failed\n");
2189 goto end_function_free_lli_in;
2190 }
2191 }
2193 dev_dbg(&sep->pdev->dev, "sep_in_num_pages is %x\n",
2194 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages);
2195 dev_dbg(&sep->pdev->dev, "sep_out_num_pages is %x\n",
2196 sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages);
2197 dev_dbg(&sep->pdev->dev, "SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n",
2198 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
2200 /* Call the function that creates the tables from the lli arrays */
2201 error = sep_construct_dma_tables_from_lli(sep, lli_in_array,
2202 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages,
2203 lli_out_array,
2204 sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages,
2205 block_size, lli_table_in_ptr, lli_table_out_ptr,
2206 in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
2208 if (error) {
2209 dev_warn(&sep->pdev->dev,
2210 "sep_construct_dma_tables_from_lli failed\n");
2211 goto end_function_with_error;
2212 }
2214 kfree(lli_out_array);
2215 kfree(lli_in_array);
2217 update_dcb_counter:
2218 /* Update DCB counter */
2219 sep->nr_dcb_creat++;
2220 /* Fall through - free the lli entry arrays */
2221 dev_dbg(&sep->pdev->dev, "in_num_entries_ptr is %08x\n",
2222 *in_num_entries_ptr);
2223 dev_dbg(&sep->pdev->dev, "out_num_entries_ptr is %08x\n",
2224 *out_num_entries_ptr);
2225 dev_dbg(&sep->pdev->dev, "table_data_size_ptr is %08x\n",
2226 *table_data_size_ptr);
2228 goto end_function;
2230 end_function_with_error:
2231 kfree(sep->dma_res_arr[sep->nr_dcb_creat].out_map_array);
2232 kfree(sep->dma_res_arr[sep->nr_dcb_creat].out_page_array);
2233 kfree(lli_out_array);
2236 end_function_free_lli_in:
2237 kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_map_array);
2238 kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_page_array);
2239 kfree(lli_in_array);
2241 end_function:
2242 dev_dbg(&sep->pdev->dev,
2243 "sep_prepare_input_output_dma_table end result = %d\n", error);
2245 return error;
2246 }
2249 /**
2250 * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
2251 * @app_in_address: unsigned long; for data buffer in (user space)
2252 * @app_out_address: unsigned long; for data buffer out (user space)
2253 * @data_in_size: u32; for size of data
2254 * @block_size: u32; for block size
2255 * @tail_block_size: u32; for size of tail block
2256 * @isapplet: bool; to indicate external app
2257 * @is_kva: bool; kernel buffer; only used for kernel crypto module
2259 * This function prepares the linked DMA tables and puts the
2260 * address of the linked list of tables into a DCB (data control
2261 * block), the address of which is known by the SEP hardware
2262 * Note that all bus addresses that are passed to the SEP
2263 * are in 32 bit format; the SEP is a 32 bit device
2264 */
2265 static int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
2266 u32 app_in_address,
2267 u32 app_out_address,
2268 u32 data_in_size,
2269 u32 block_size,
2270 u32 tail_block_size,
2271 bool isapplet,
2272 bool is_kva)
2273 {
2274 int error = 0;
2275 /* Size of tail */
2276 u32 tail_size = 0;
2277 /* Address of the created DCB table */
2278 struct sep_dcblock *dcb_table_ptr = 0;
2279 /* The physical address of the first input DMA table */
2280 dma_addr_t in_first_mlli_address = 0;
2281 /* Number of entries in the first input DMA table */
2282 u32 in_first_num_entries = 0;
2283 /* The physical address of the first output DMA table */
2284 dma_addr_t out_first_mlli_address = 0;
2285 /* Number of entries in the first output DMA table */
2286 u32 out_first_num_entries = 0;
2287 /* Data in the first input/output table */
2288 u32 first_data_size = 0;
2290 dev_dbg(&sep->pdev->dev, "prepare_input_output_dma_table_in_dcb start\n");
2292 if (sep->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) {
2293 /* No more DCBs to allocate */
2294 dev_warn(&sep->pdev->dev, "no more DCBs available\n");
2295 error = -ENOSPC;
2296 goto end_function;
2297 }
2299 /* Allocate new DCB */
2300 dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr +
2301 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES +
2302 (sep->nr_dcb_creat * sizeof(struct sep_dcblock)));
2304 /* Set the default values in the DCB */
2305 dcb_table_ptr->input_mlli_address = 0;
2306 dcb_table_ptr->input_mlli_num_entries = 0;
2307 dcb_table_ptr->input_mlli_data_size = 0;
2308 dcb_table_ptr->output_mlli_address = 0;
2309 dcb_table_ptr->output_mlli_num_entries = 0;
2310 dcb_table_ptr->output_mlli_data_size = 0;
2311 dcb_table_ptr->tail_data_size = 0;
2312 dcb_table_ptr->out_vr_tail_pt = 0;
2314 if (isapplet == true) {
2315 tail_size = data_in_size % block_size;
2316 if (tail_size) {
2317 if (data_in_size < tail_block_size) {
2318 dev_warn(&sep->pdev->dev, "data in size smaller than tail block size\n");
2319 error = -ENOSPC;
2320 goto end_function;
2321 }
2322 if (tail_block_size)
2323 /*
2324 * In this case the tail size should be
2325 * bigger than the real block size
2326 */
2327 tail_size = tail_block_size +
2328 ((data_in_size -
2329 tail_block_size) % block_size);
2330 }
2332 /* Check if there is enough data for DMA operation */
2333 if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) {
2334 if (is_kva == true) {
2335 memcpy(dcb_table_ptr->tail_data,
2336 (void *)app_in_address, data_in_size);
2337 } else {
2338 if (copy_from_user(dcb_table_ptr->tail_data,
2339 (void __user *)app_in_address,
2340 data_in_size)) {
2341 error = -EFAULT;
2342 goto end_function;
2343 }
2344 }
2346 dcb_table_ptr->tail_data_size = data_in_size;
2348 /* Set the output user-space address for mem2mem op */
2349 if (app_out_address)
2350 dcb_table_ptr->out_vr_tail_pt =
2351 (u32)app_out_address;
2353 /*
2354 * Update both data length parameters in order to avoid
2355 * second data copy and allow building of empty mlli
2356 * tables
2357 */
2358 tail_size = 0x0;
2359 data_in_size = 0x0;
2360 }
2361 if (tail_size) {
2362 if (is_kva == true) {
2363 memcpy(dcb_table_ptr->tail_data,
2364 (void *)(app_in_address + data_in_size -
2365 tail_size), tail_size);
2366 } else {
2367 /* We have tail data - copy it to DCB */
2368 if (copy_from_user(dcb_table_ptr->tail_data,
2369 (void *)(app_in_address +
2370 data_in_size - tail_size), tail_size)) {
2371 error = -EFAULT;
2372 goto end_function;
2373 }
2374 }
2375 if (app_out_address)
2376 /*
2377 * Calculate the output address
2378 * according to tail data size
2379 */
2380 dcb_table_ptr->out_vr_tail_pt =
2381 app_out_address + data_in_size
2382 - tail_size;
2384 /* Save the real tail data size */
2385 dcb_table_ptr->tail_data_size = tail_size;
2386 /*
2387 * Update the data size without the tail
2388 * data size, i.e. the data for the DMA
2389 */
2390 data_in_size = (data_in_size - tail_size);
2391 }
2392 }
2393 /* Check if we need to build only input table or input/output */
2394 if (app_out_address) {
2395 /* Prepare input/output tables */
2396 error = sep_prepare_input_output_dma_table(sep,
2397 app_in_address,
2398 app_out_address,
2399 data_in_size,
2400 block_size,
2401 &in_first_mlli_address,
2402 &out_first_mlli_address,
2403 &in_first_num_entries,
2404 &out_first_num_entries,
2405 &first_data_size,
2406 is_kva);
2407 } else {
2408 /* Prepare input tables */
2409 error = sep_prepare_input_dma_table(sep,
2410 app_in_address,
2411 data_in_size,
2412 block_size,
2413 &in_first_mlli_address,
2414 &in_first_num_entries,
2415 &first_data_size,
2416 is_kva);
2417 }
2419 if (error) {
2420 dev_warn(&sep->pdev->dev, "prepare DMA table call failed from prepare DCB call\n");
2421 goto end_function;
2422 }
2424 /* Set the DCB values */
2425 dcb_table_ptr->input_mlli_address = in_first_mlli_address;
2426 dcb_table_ptr->input_mlli_num_entries = in_first_num_entries;
2427 dcb_table_ptr->input_mlli_data_size = first_data_size;
2428 dcb_table_ptr->output_mlli_address = out_first_mlli_address;
2429 dcb_table_ptr->output_mlli_num_entries = out_first_num_entries;
2430 dcb_table_ptr->output_mlli_data_size = first_data_size;
2432 end_function:
2433 dev_dbg(&sep->pdev->dev,
2434 "sep_prepare_input_output_dma_table_in_dcb end\n");
2435 return error;
2436 }
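/*
 * A worked example of the applet tail handling above, with
 * illustrative numbers: block_size = 16, tail_block_size = 20 and
 * data_in_size = 100. Since 100 % 16 = 4 there is a tail, and because
 * a tail block size was given the tail grows to cover it:
 *
 *	tail_size = 20 + ((100 - 20) % 16);	// = 20 + 0 = 20
 *
 * The 20 tail bytes are copied into dcb_table_ptr->tail_data and
 * handled by the host, while the remaining 80 bytes (a block
 * multiple) go through the DMA tables.
 */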
2440 /**
2441 * sep_create_sync_dma_tables_handler - create sync DMA tables
2442 * @sep: pointer to struct sep_device
2443 * @arg: pointer to struct bld_syn_tab_struct
2445 * Handle the request for creation of the DMA tables for the synchronic
2446 * symmetric operations (AES,DES). Note that all bus addresses that are
2447 * passed to the SEP are in 32 bit format; the SEP is a 32 bit device
2448 */
2449 static int sep_create_sync_dma_tables_handler(struct sep_device *sep,
2450 unsigned long arg)
2451 {
2452 int error = 0;
2454 /* Command arguments */
2455 struct bld_syn_tab_struct command_args;
2457 dev_dbg(&sep->pdev->dev,
2458 "sep_create_sync_dma_tables_handler start\n");
2460 if (copy_from_user(&command_args, (void __user *)arg,
2461 sizeof(struct bld_syn_tab_struct))) {
2462 error = -EFAULT;
2463 goto end_function;
2464 }
2466 dev_dbg(&sep->pdev->dev, "app_in_address is %08llx\n",
2467 command_args.app_in_address);
2468 dev_dbg(&sep->pdev->dev, "app_out_address is %08llx\n",
2469 command_args.app_out_address);
2470 dev_dbg(&sep->pdev->dev, "data_size is %u\n",
2471 command_args.data_in_size);
2472 dev_dbg(&sep->pdev->dev, "block_size is %u\n",
2473 command_args.block_size);
2475 /* Validate user parameters */
2476 if (!command_args.app_in_address) {
2477 error = -EINVAL;
2478 goto end_function;
2479 }
2481 error = sep_prepare_input_output_dma_table_in_dcb(sep,
2482 command_args.app_in_address,
2483 command_args.app_out_address,
2484 command_args.data_in_size,
2485 command_args.block_size,
2486 0x0,
2487 false,
2488 false);
2490 end_function:
2491 dev_dbg(&sep->pdev->dev, "sep_create_sync_dma_tables_handler end\n");
2492 return error;
2493 }
2495 /**
2496 * sep_free_dma_tables_and_dcb - free DMA tables and DCBs
2497 * @sep: pointer to struct sep_device
2498 * @isapplet: indicates external application (used for kernel access)
2499 * @is_kva: indicates kernel addresses (only used for kernel crypto)
2501 * This function frees the DMA tables and DCB
2502 */
2503 static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
2504 bool is_kva)
2505 {
2506 int i = 0;
2507 int error = 0;
2508 int error_temp = 0;
2509 struct sep_dcblock *dcb_table_ptr;
2511 dev_dbg(&sep->pdev->dev, "sep_free_dma_tables_and_dcb start\n");
2513 if (isapplet == true) {
2514 /* Set pointer to first DCB table */
2515 dcb_table_ptr = (struct sep_dcblock *)
2516 (sep->shared_addr +
2517 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES);
2519 /* Go over each DCB and see if tail pointer must be updated */
2520 for (i = 0; i < sep->nr_dcb_creat; i++, dcb_table_ptr++) {
2521 if (dcb_table_ptr->out_vr_tail_pt) {
2522 if (is_kva == true) {
2523 memcpy((void *)dcb_table_ptr->out_vr_tail_pt,
2524 dcb_table_ptr->tail_data,
2525 dcb_table_ptr->tail_data_size);
2526 } else {
2527 error_temp = copy_to_user(
2528 (void *)dcb_table_ptr->out_vr_tail_pt,
2529 dcb_table_ptr->tail_data,
2530 dcb_table_ptr->tail_data_size);
2531 }
2532 if (error_temp) {
2533 /* Release the DMA resource */
2534 error = -EFAULT;
2535 break;
2536 }
2537 }
2538 }
2539 }
2540 /* Free the output pages, if any */
2541 sep_free_dma_table_data_handler(sep);
2543 dev_dbg(&sep->pdev->dev, "sep_free_dma_tables_and_dcb end\n");
2544 return error;
2545 }
2547 /**
2548 * sep_get_static_pool_addr_handler - get static pool address
2549 * @sep: pointer to struct sep_device
2550 * @arg: parameters from user space application
2552 * This function sets the bus and virtual addresses of the static pool
2553 * and returns the virtual address
2554 */
2555 static int sep_get_static_pool_addr_handler(struct sep_device *sep,
2556 unsigned long arg)
2557 {
2558 struct stat_pool_addr_struct command_args;
2559 u32 *static_pool_addr = 0;
2560 unsigned long addr_hold;
2562 dev_dbg(&sep->pdev->dev, "sep_get_static_pool_addr_handler start\n");
2564 static_pool_addr = (u32 *)(sep->shared_addr +
2565 SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES);
2567 static_pool_addr[0] = SEP_STATIC_POOL_VAL_TOKEN;
2568 static_pool_addr[1] = sep->shared_bus +
2569 SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
2571 addr_hold = (unsigned long)
2572 (sep->shared_addr + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES);
2573 command_args.static_virt_address = (aligned_u64)addr_hold;
2575 dev_dbg(&sep->pdev->dev, "static pool: physical %x virtual %x\n",
2576 (u32)static_pool_addr[1],
2577 (u32)command_args.static_virt_address);
2579 /* Send the parameters to user application */
2580 if (copy_to_user((void __user *) arg, &command_args,
2581 sizeof(struct stat_pool_addr_struct)))
2582 return -EFAULT;
2584 dev_dbg(&sep->pdev->dev, "sep_get_static_pool_addr_handler end\n");
2586 return 0;
2587 }
2589 /**
2590 * sep_start_handler - start device
2591 * @sep: pointer to struct sep_device
2592 */
2593 static int sep_start_handler(struct sep_device *sep)
2594 {
2595 unsigned long reg_val;
2596 unsigned long error = 0;
2598 dev_dbg(&sep->pdev->dev, "sep_start_handler start\n");
2600 /* Wait in polling for message from SEP */
2601 do
2602 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2603 while (!reg_val);
2605 /* Check the value */
2606 if (reg_val == 0x1)
2607 /* Fatal error - read error status from GPR0 */
2608 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2609 dev_dbg(&sep->pdev->dev, "sep_start_handler end\n");
2610 return error;
2611 }
2613 /**
2614 * sep_check_sum_calc - checksum messages
2615 * @data: buffer to checksum
2616 * @length: buffer size
2618 * This function performs a checksum for messages that are sent
2619 * to the SEP.
2620 */
2621 static u32 sep_check_sum_calc(u8 *data, u32 length)
2622 {
2623 u32 sum = 0;
2624 u16 *Tdata = (u16 *)data;
2626 while (length > 1) {
2627 /* This is the inner loop */
2628 sum += *Tdata++;
2629 length -= 2;
2630 }
2632 /* Add left-over byte, if any */
2633 if (length > 0)
2634 sum += *(u8 *)Tdata;
2636 /* Fold 32-bit sum to 16 bits */
2637 while (sum >> 16)
2638 sum = (sum & 0xffff) + (sum >> 16);
2640 return ~sum & 0xFFFF;
2641 }
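/*
 * This is the usual 16-bit one's-complement (RFC 1071 style) sum. A
 * user-space sketch that yields the same value for a message buffer,
 * assuming only that "data" and "length" describe the same bytes:
 *
 *	static uint16_t sep_sum(const uint8_t *data, uint32_t length)
 *	{
 *		uint32_t sum = 0;
 *		const uint16_t *p = (const uint16_t *)data;
 *
 *		for (; length > 1; length -= 2)
 *			sum += *p++;
 *		if (length)
 *			sum += *(const uint8_t *)p;
 *		while (sum >> 16)
 *			sum = (sum & 0xffff) + (sum >> 16);
 *		return (uint16_t)~sum;
 *	}
 */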
2643 /**
2644 * sep_init_handler - handle the SEP initialization request
2645 * @sep: pointer to struct sep_device
2646 * @arg: parameters from user space application
2648 * Handles the request for SEP initialization
2649 * Note that this will go away for Medfield once the SCU
2650 * SEP initialization is complete
2651 * Also note that the message to the SEP has components
2652 * from user space as well as components written by the driver
2653 * This is because the portions of the message that pertain to
2654 * physical addresses must be set by the driver after the message
2655 * leaves custody of the user space application for security
2656 * reasons.
2657 */
2658 static int sep_init_handler(struct sep_device *sep, unsigned long arg)
2659 {
2660 u32 message_buff[14];
2661 u32 counter;
2662 int error = 0;
2663 u32 reg_val;
2664 dma_addr_t new_base_addr;
2665 unsigned long addr_hold;
2666 struct init_struct command_args;
2668 dev_dbg(&sep->pdev->dev, "sep_init_handler start\n");
2670 /* Make sure that we have not initialized already */
2671 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2673 if (reg_val != 0x2) {
2674 error = SEP_ALREADY_INITIALIZED_ERR;
2675 dev_warn(&sep->pdev->dev, "init; device already initialized\n");
2676 goto end_function;
2677 }
2679 /* Only root can initialize */
2680 if (!capable(CAP_SYS_ADMIN)) {
2681 error = -EACCES;
2682 goto end_function;
2683 }
2685 /* Copy in the parameters */
2686 error = copy_from_user(&command_args, (void __user *)arg,
2687 sizeof(struct init_struct));
2689 if (error) {
2690 error = -EFAULT;
2691 goto end_function;
2692 }
2694 /* Validate parameters */
2695 if (!command_args.message_addr || !command_args.sep_sram_addr ||
2696 command_args.message_size_in_words > 14) {
2697 error = -EINVAL;
2698 goto end_function;
2699 }
2701 /* Copy in the SEP init message */
2702 addr_hold = (unsigned long)command_args.message_addr;
2703 error = copy_from_user(message_buff,
2704 (void __user *)addr_hold,
2705 command_args.message_size_in_words*sizeof(u32));
2707 if (error) {
2708 error = -EFAULT;
2709 goto end_function;
2710 }
2712 /* Load resident, cache, and extapp firmware */
2713 error = sep_load_firmware(sep);
2715 if (error) {
2716 dev_warn(&sep->pdev->dev,
2717 "init; firmware load failed %x\n", error);
2718 goto end_function;
2719 }
2721 /* Compute the base address */
2722 new_base_addr = sep->shared_bus;
2724 if (sep->resident_bus < new_base_addr)
2725 new_base_addr = sep->resident_bus;
2727 if (sep->cache_bus < new_base_addr)
2728 new_base_addr = sep->cache_bus;
2730 if (sep->dcache_bus < new_base_addr)
2731 new_base_addr = sep->dcache_bus;
2733 /* Put physical addresses in SEP message */
2734 message_buff[3] = (u32)new_base_addr;
2735 message_buff[4] = (u32)sep->shared_bus;
2736 message_buff[6] = (u32)sep->resident_bus;
2737 message_buff[7] = (u32)sep->cache_bus;
2738 message_buff[8] = (u32)sep->dcache_bus;
2740 message_buff[command_args.message_size_in_words - 1] = 0x0;
2741 message_buff[command_args.message_size_in_words - 1] =
2742 sep_check_sum_calc((u8 *)message_buff,
2743 command_args.message_size_in_words*sizeof(u32));
2745 /* Debug print of message */
2746 for (counter = 0; counter < command_args.message_size_in_words;
2747 counter++)
2748 dev_dbg(&sep->pdev->dev, "init; SEP message word %d is %x\n",
2749 counter, message_buff[counter]);
2751 /* Tell the SEP the sram address */
2752 sep_write_reg(sep, HW_SRAM_ADDR_REG_ADDR, command_args.sep_sram_addr);
2754 /* Push the message to the SEP */
2755 for (counter = 0; counter < command_args.message_size_in_words;
2756 counter++) {
2757 sep_write_reg(sep, HW_SRAM_DATA_REG_ADDR,
2758 message_buff[counter]);
2759 sep_wait_sram_write(sep);
2760 }
2762 /* Signal SEP that message is ready and to init */
2763 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1);
2765 /* Wait for acknowledge */
2766 dev_dbg(&sep->pdev->dev, "init; waiting for msg response\n");
2768 do
2769 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2770 while (!(reg_val & 0xFFFFFFFD));
2772 if (reg_val == 0x1) {
2773 dev_warn(&sep->pdev->dev, "init; device int failed\n");
2774 error = sep_read_reg(sep, 0x8060);
2775 dev_warn(&sep->pdev->dev, "init; sw monitor is %x\n", error);
2776 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2777 dev_warn(&sep->pdev->dev, "init; error is %x\n", error);
2778 goto end_function;
2779 }
2780 dev_dbg(&sep->pdev->dev, "init; end CC INIT, reg_val is %x\n", reg_val);
2782 /* Signal SEP to zero the GPR3 */
2783 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x10);
2785 /* Wait for response */
2786 dev_dbg(&sep->pdev->dev, "init; waiting for zero set response\n");
2788 do
2789 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2790 while (reg_val != 0);
2792 end_function:
2793 dev_dbg(&sep->pdev->dev, "init is done\n");
2794 return error;
2795 }
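/*
 * For reference, the init message as patched above (u32 word indexes;
 * every other word comes from user space unmodified):
 *
 *	word 3: lowest of the shared/resident/cache/dcache bus addresses
 *	word 4: shared area bus address
 *	word 6: resident firmware bus address
 *	word 7: cache firmware bus address
 *	word 8: dcache bus address
 *	last word: 16-bit checksum of the whole message
 */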
2797 /**
2798 * sep_end_transaction_handler - end transaction
2799 * @sep: pointer to struct sep_device
2801 * This API handles the end transaction request
2802 */
2803 static int sep_end_transaction_handler(struct sep_device *sep)
2804 {
2805 dev_dbg(&sep->pdev->dev, "sep_end_transaction_handler start\n");
2807 /* Clear the data pool pointers Token */
2808 memset((void *)(sep->shared_addr +
2809 SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES),
2810 0, sep->num_of_data_allocations*2*sizeof(u32));
2812 /* Check that all the DMA resources were freed */
2813 sep_free_dma_table_data_handler(sep);
2815 clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
2817 /*
2818 * We are now through with the transaction. Let's
2819 * allow other processes that have the device open
2820 * to perform transactions
2821 */
2822 mutex_lock(&sep->sep_mutex);
2823 sep->pid_doing_transaction = 0;
2824 mutex_unlock(&sep->sep_mutex);
2825 /* Raise event for stuck contexts */
2826 wake_up(&sep->event);
2828 dev_dbg(&sep->pdev->dev, "waking up event\n");
2829 dev_dbg(&sep->pdev->dev, "sep_end_transaction_handler end\n");
2831 return 0;
2832 }
2834 /**
2835 * sep_prepare_dcb_handler - prepare a control block
2836 * @sep: pointer to struct sep_device
2837 * @arg: pointer to user parameters
2839 * This function copies the DCB parameters from user space and
2840 * builds the DMA tables and the DCB for the transaction.
2841 */
2842 static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg)
2843 {
2844 int error;
2845 /* Command arguments */
2846 struct build_dcb_struct command_args;
2848 dev_dbg(&sep->pdev->dev, "sep_prepare_dcb_handler start\n");
2850 /* Get the command arguments */
2851 if (copy_from_user(&command_args, (void __user *)arg,
2852 sizeof(struct build_dcb_struct))) {
2853 error = -EFAULT;
2854 goto end_function;
2855 }
2857 dev_dbg(&sep->pdev->dev, "app_in_address is %08llx\n",
2858 command_args.app_in_address);
2859 dev_dbg(&sep->pdev->dev, "app_out_address is %08llx\n",
2860 command_args.app_out_address);
2861 dev_dbg(&sep->pdev->dev, "data_size is %x\n",
2862 command_args.data_in_size);
2863 dev_dbg(&sep->pdev->dev, "block_size is %x\n",
2864 command_args.block_size);
2865 dev_dbg(&sep->pdev->dev, "tail block_size is %x\n",
2866 command_args.tail_block_size);
2868 error = sep_prepare_input_output_dma_table_in_dcb(sep,
2869 command_args.app_in_address, command_args.app_out_address,
2870 command_args.data_in_size, command_args.block_size,
2871 command_args.tail_block_size, true, false);
2873 end_function:
2874 dev_dbg(&sep->pdev->dev, "sep_prepare_dcb_handler end\n");
2875 return error;
2876 }
2879 /**
2880 * sep_free_dcb_handler - free control block resources
2881 * @sep: pointer to struct sep_device
2883 * This function frees the DCB resources and updates the needed
2884 * user-space buffers.
2885 */
2886 static int sep_free_dcb_handler(struct sep_device *sep)
2887 {
2888 int error;
2890 dev_dbg(&sep->pdev->dev, "sep_free_dcb_handler start\n");
2891 dev_dbg(&sep->pdev->dev, "num of DCBs %x\n", sep->nr_dcb_creat);
2893 error = sep_free_dma_tables_and_dcb(sep, false, false);
2895 dev_dbg(&sep->pdev->dev, "sep_free_dcb_handler end\n");
2896 return error;
2897 }
2899 /**
2900 * sep_rar_prepare_output_msg_handler - prepare an output message
2901 * @sep: pointer to struct sep_device
2902 * @arg: pointer to user parameters
2904 * This function will retrieve the RAR buffer physical addresses, type
2905 * & size corresponding to the RAR handles provided in the buffers vector.
2906 */
2907 static int sep_rar_prepare_output_msg_handler(struct sep_device *sep,
2908 unsigned long arg)
2909 {
2910 int error = 0;
2911 /* Command args */
2912 struct rar_hndl_to_bus_struct command_args;
2913 struct RAR_buffer rar_buf;
2914 /* Bus address */
2915 dma_addr_t rar_bus = 0;
2916 /* Holds the RAR address in the system memory offset */
2917 u32 *rar_addr;
2919 dev_dbg(&sep->pdev->dev, "sep_rar_prepare_output_msg_handler start\n");
2921 /* Copy the data */
2922 if (copy_from_user(&command_args, (void __user *)arg,
2923 sizeof(command_args))) {
2924 error = -EFAULT;
2925 goto end_function;
2926 }
2928 /* Call to translation function only if user handle is not NULL */
2929 if (command_args.rar_handle) {
2930 memset(&rar_buf, 0, sizeof(rar_buf));
2931 rar_buf.info.handle = (u32)command_args.rar_handle;
2933 if (rar_handle_to_bus(&rar_buf, 1) != 1) {
2934 dev_dbg(&sep->pdev->dev, "rar_handle_to_bus failure\n");
2935 error = -EFAULT;
2936 goto end_function;
2937 }
2938 rar_bus = rar_buf.bus_address;
2939 }
2940 dev_dbg(&sep->pdev->dev, "rar msg; rar_addr_bus = %x\n", (u32)rar_bus);
2942 /* Set value in the SYSTEM MEMORY offset */
2943 rar_addr = (u32 *)(sep->shared_addr +
2944 SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES);
2946 /* Copy the physical address to the System Area for the SEP */
2947 rar_addr[0] = SEP_RAR_VAL_TOKEN;
2948 rar_addr[1] = rar_bus;
2950 end_function:
2951 dev_dbg(&sep->pdev->dev, "sep_rar_prepare_output_msg_handler end\n");
2952 return error;
2953 }
2955 /**
2956 * sep_realloc_ext_cache_handler - report location of extcache
2957 * @sep: pointer to struct sep_device
2958 * @arg: pointer to user parameters
2960 * This function tells the SEP where the extapp is located
2961 */
2962 static int sep_realloc_ext_cache_handler(struct sep_device *sep,
2963 unsigned long arg)
2964 {
2965 /* Holds the new ext cache address in the system memory offset */
2966 u32 *system_addr;
2968 /* Set value in the SYSTEM MEMORY offset */
2969 system_addr = (u32 *)(sep->shared_addr +
2970 SEP_DRIVER_SYSTEM_EXT_CACHE_ADDR_OFFSET_IN_BYTES);
2972 /* Copy the physical address to the System Area for the SEP */
2973 system_addr[0] = SEP_EXT_CACHE_ADDR_VAL_TOKEN;
2974 dev_dbg(&sep->pdev->dev, "ext cache init; system addr 0 is %x\n",
2975 system_addr[0]);
2976 system_addr[1] = sep->extapp_bus;
2977 dev_dbg(&sep->pdev->dev, "ext cache init; system addr 1 is %x\n",
2978 system_addr[1]);
2980 return 0;
2981 }
2983 /**
2984 * sep_ioctl - ioctl api
2985 * @filp: pointer to struct file
2986 * @cmd: command
2987 * @arg: pointer to argument structure
2989 * Implement the ioctl methods available on the SEP device.
2990 */
2991 static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2992 {
2993 int error = 0;
2994 struct sep_device *sep = filp->private_data;
2996 dev_dbg(&sep->pdev->dev, "ioctl start\n");
2998 dev_dbg(&sep->pdev->dev, "cmd is %x\n", cmd);
2999 dev_dbg(&sep->pdev->dev,
3000 "SEP_IOCSENDSEPCOMMAND is %x\n", SEP_IOCSENDSEPCOMMAND);
3001 dev_dbg(&sep->pdev->dev,
3002 "SEP_IOCALLOCDATAPOLL is %x\n", SEP_IOCALLOCDATAPOLL);
3003 dev_dbg(&sep->pdev->dev,
3004 "SEP_IOCCREATESYMDMATABLE is %x\n", SEP_IOCCREATESYMDMATABLE);
3005 dev_dbg(&sep->pdev->dev,
3006 "SEP_IOCFREEDMATABLEDATA is %x\n", SEP_IOCFREEDMATABLEDATA);
3007 dev_dbg(&sep->pdev->dev,
3008 "SEP_IOCSEPSTART is %x\n", SEP_IOCSEPSTART);
3009 dev_dbg(&sep->pdev->dev,
3010 "SEP_IOCSEPINIT is %x\n", SEP_IOCSEPINIT);
3011 dev_dbg(&sep->pdev->dev,
3012 "SEP_IOCGETSTATICPOOLADDR is %x\n", SEP_IOCGETSTATICPOOLADDR);
3013 dev_dbg(&sep->pdev->dev,
3014 "SEP_IOCENDTRANSACTION is %x\n", SEP_IOCENDTRANSACTION);
3015 dev_dbg(&sep->pdev->dev,
3016 "SEP_IOCREALLOCEXTCACHE is %x\n", SEP_IOCREALLOCEXTCACHE);
3017 dev_dbg(&sep->pdev->dev,
3018 "SEP_IOCRARPREPAREMESSAGE is %x\n", SEP_IOCRARPREPAREMESSAGE);
3019 dev_dbg(&sep->pdev->dev,
3020 "SEP_IOCPREPAREDCB is %x\n", SEP_IOCPREPAREDCB);
3021 dev_dbg(&sep->pdev->dev,
3022 "SEP_IOCFREEDCB is %x\n", SEP_IOCFREEDCB);
3024 /* Make sure we own this device */
3025 mutex_lock(&sep->sep_mutex);
3026 if ((current->pid != sep->pid_doing_transaction) &&
3027 (sep->pid_doing_transaction != 0)) {
3028 dev_dbg(&sep->pdev->dev, "ioctl pid is not owner\n");
3029 mutex_unlock(&sep->sep_mutex);
3030 error = -EACCES;
3031 goto end_function;
3032 }
3034 mutex_unlock(&sep->sep_mutex);
3036 /* Check that the command is for SEP device */
3037 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
3038 error = -ENOTTY;
3039 goto end_function;
3040 }
3042 /* Lock to prevent the daemon from interfering with the operation */
3043 mutex_lock(&sep->ioctl_mutex);
3045 switch (cmd) {
3046 case SEP_IOCSENDSEPCOMMAND:
3047 /* Send command to SEP */
3048 error = sep_send_command_handler(sep);
3049 break;
3050 case SEP_IOCALLOCDATAPOLL:
3051 /* Allocate data pool */
3052 error = sep_allocate_data_pool_memory_handler(sep, arg);
3053 break;
3054 case SEP_IOCCREATESYMDMATABLE:
3055 /* Create DMA tables for synchronic operation */
3056 error = sep_create_sync_dma_tables_handler(sep, arg);
3057 break;
3058 case SEP_IOCFREEDMATABLEDATA:
3059 /* Free the pages */
3060 error = sep_free_dma_table_data_handler(sep);
3061 break;
3062 case SEP_IOCSEPSTART:
3063 /* Start command to SEP */
3064 if (sep->pdev->revision == 0) /* Only for old chip */
3065 error = sep_start_handler(sep);
3066 else
3067 error = -EPERM; /* Not permitted on new chip */
3068 break;
3069 case SEP_IOCSEPINIT:
3070 /* Init command to SEP */
3071 if (sep->pdev->revision == 0) /* Only for old chip */
3072 error = sep_init_handler(sep, arg);
3073 else
3074 error = -EPERM; /* Not permitted on new chip */
3075 break;
3076 case SEP_IOCGETSTATICPOOLADDR:
3077 /* Get the physical and virtual addresses of the static pool */
3078 error = sep_get_static_pool_addr_handler(sep, arg);
3079 break;
3080 case SEP_IOCENDTRANSACTION:
3081 error = sep_end_transaction_handler(sep);
3082 break;
3083 case SEP_IOCREALLOCEXTCACHE:
3084 if (sep->mrst)
3085 error = -ENODEV;
3086 if (sep->pdev->revision == 0) /* Only for old chip */
3087 error = sep_realloc_ext_cache_handler(sep, arg);
3088 else
3089 error = -EPERM; /* Not permitted on new chip */
3090 break;
3091 case SEP_IOCRARPREPAREMESSAGE:
3092 error = sep_rar_prepare_output_msg_handler(sep, arg);
3093 break;
3094 case SEP_IOCPREPAREDCB:
3095 error = sep_prepare_dcb_handler(sep, arg);
3096 break;
3097 case SEP_IOCFREEDCB:
3098 error = sep_free_dcb_handler(sep);
3099 break;
3100 default:
3101 dev_dbg(&sep->pdev->dev, "invalid ioctl %x\n", cmd);
3102 error = -ENOTTY;
3103 break;
3104 }
3105 mutex_unlock(&sep->ioctl_mutex);
3107 end_function:
3108 dev_dbg(&sep->pdev->dev, "ioctl end\n");
3109 return error;
3110 }
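/*
 * A minimal user-space sketch of a transaction through this ioctl
 * interface. Illustrative only: the device node is registered as
 * SEP_DEV_NAME and the "/dev/sep" path shown here is an assumption.
 *
 *	int fd = open("/dev/sep", O_RDWR);
 *	if (fd < 0)
 *		return -1;
 *	// mmap() the shared area and build the message in it
 *	if (ioctl(fd, SEP_IOCSENDSEPCOMMAND) == 0) {
 *		// poll() until the reply interrupt arrives, then
 *		// read the reply out of the shared area
 *	}
 *	ioctl(fd, SEP_IOCENDTRANSACTION);
 *	close(fd);
 *
 * Because of the owner check above, all of these calls must come from
 * the process that started the transaction.
 */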
3112 /**
3113 * sep_singleton_ioctl - ioctl api for singleton interface
3114 * @filp: pointer to struct file
3115 * @cmd: command
3116 * @arg: pointer to argument structure
3118 * Implement the additional ioctls for the singleton device
3119 */
3120 static long sep_singleton_ioctl(struct file *filp, u32 cmd, unsigned long arg)
3121 {
3122 long error = 0;
3123 struct sep_device *sep = filp->private_data;
3125 dev_dbg(&sep->pdev->dev, "singleton_ioctl start\n");
3126 dev_dbg(&sep->pdev->dev, "cmd is %x\n", cmd);
3128 /* Check that the command is for the SEP device */
3129 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
3130 error = -ENOTTY;
3131 goto end_function;
3132 }
3134 /* Make sure we own this device */
3135 mutex_lock(&sep->sep_mutex);
3136 if ((current->pid != sep->pid_doing_transaction) &&
3137 (sep->pid_doing_transaction != 0)) {
3138 dev_dbg(&sep->pdev->dev, "singleton ioctl pid is not owner\n");
3139 mutex_unlock(&sep->sep_mutex);
3140 error = -EACCES;
3141 goto end_function;
3142 }
3144 mutex_unlock(&sep->sep_mutex);
3146 switch (cmd) {
3147 case SEP_IOCTLSETCALLERID:
3148 mutex_lock(&sep->ioctl_mutex);
3149 error = sep_set_caller_id_handler(sep, arg);
3150 mutex_unlock(&sep->ioctl_mutex);
3151 break;
3152 default:
3153 error = sep_ioctl(filp, cmd, arg);
3154 break;
3155 }
3157 end_function:
3158 dev_dbg(&sep->pdev->dev, "singleton ioctl end\n");
3159 return error;
3160 }
3162 /**
3163 * sep_request_daemon_ioctl - ioctl for daemon
3164 * @filp: pointer to struct file
3165 * @cmd: command
3166 * @arg: pointer to argument structure
3168 * Called by the request daemon to perform ioctls on the daemon device
3169 */
3170 static long sep_request_daemon_ioctl(struct file *filp, u32 cmd,
3171 unsigned long arg)
3172 {
3174 long error;
3175 struct sep_device *sep = filp->private_data;
3177 dev_dbg(&sep->pdev->dev, "daemon ioctl: start\n");
3178 dev_dbg(&sep->pdev->dev, "daemon ioctl: cmd is %x\n", cmd);
3180 /* Check that the command is for SEP device */
3181 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
3182 error = -ENOTTY;
3183 goto end_function;
3184 }
3186 /* Only one process can access ioctl at any given time */
3187 mutex_lock(&sep->ioctl_mutex);
3189 switch (cmd) {
3190 case SEP_IOCSENDSEPRPLYCOMMAND:
3191 /* Send reply command to SEP */
3192 error = sep_req_daemon_send_reply_command_handler(sep);
3193 break;
3194 case SEP_IOCENDTRANSACTION:
3195 /*
3196 * End req daemon transaction; do nothing.
3197 * This will be removed upon update in middleware
3198 * API library
3199 */
3200 error = 0;
3201 break;
3202 default:
3203 dev_dbg(&sep->pdev->dev, "daemon ioctl: no such IOCTL\n");
3204 error = -ENOTTY;
3205 }
3206 mutex_unlock(&sep->ioctl_mutex);
3208 end_function:
3209 dev_dbg(&sep->pdev->dev, "daemon ioctl: end\n");
3210 return error;
3211 }
3214 /**
3215 * sep_inthandler - interrupt handler
3216 * @irq: interrupt
3217 * @dev_id: device id
3218 */
3219 static irqreturn_t sep_inthandler(int irq, void *dev_id)
3220 {
3221 irqreturn_t int_error = IRQ_HANDLED;
3222 unsigned long lck_flags;
3223 u32 reg_val, reg_val2 = 0;
3224 struct sep_device *sep = dev_id;
3226 /* Read the IRR register to check if this is SEP interrupt */
3227 reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
3228 dev_dbg(&sep->pdev->dev, "SEP Interrupt - reg is %08x\n", reg_val);
3230 if (reg_val & (0x1 << 13)) {
3231 /* Lock and update the counter of reply messages */
3232 spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
3233 sep->reply_ct++;
3234 spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
3236 dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
3237 sep->send_ct, sep->reply_ct);
3239 /* Is this a printf or daemon request? */
3240 reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
3241 dev_dbg(&sep->pdev->dev,
3242 "SEP Interrupt - reg2 is %08x\n", reg_val2);
3244 if ((reg_val2 >> 30) & 0x1) {
3245 dev_dbg(&sep->pdev->dev, "int: printf request\n");
3246 wake_up(&sep->event_request_daemon);
3247 } else if (reg_val2 >> 31) {
3248 dev_dbg(&sep->pdev->dev, "int: daemon request\n");
3249 wake_up(&sep->event_request_daemon);
3250 } else {
3251 dev_dbg(&sep->pdev->dev, "int: SEP reply\n");
3252 wake_up(&sep->event);
3253 }
3254 } else {
3255 dev_dbg(&sep->pdev->dev, "int: not SEP interrupt\n");
3256 int_error = IRQ_NONE;
3257 }
3258 if (int_error == IRQ_HANDLED)
3259 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
3261 return int_error;
3262 }
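/*
 * Interrupt routing summary, restating the tests above: IRR bit 13
 * marks a SEP-originated interrupt; GPR2 bit 30 flags a printf
 * request and bit 31 a daemon request, both of which wake
 * event_request_daemon, while anything else is a normal reply and
 * wakes sep->event. A handled interrupt is acknowledged by writing
 * the IRR value back to the ICR register.
 */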
3264 /**
3265 * sep_callback - RAR callback
3266 * @sep_context_pointer: pointer to struct sep_device
3268 * Function that is called by rar_register when it is ready with
3269 * a region (only for Moorestown)
3270 */
3271 static int sep_callback(unsigned long sep_context_pointer)
3272 {
3273 int error;
3274 struct sep_device *sep = (struct sep_device *)sep_context_pointer;
3275 dma_addr_t rar_end_address;
3277 dev_dbg(&sep->pdev->dev, "callback start\n");
3279 error = rar_get_address(RAR_TYPE_IMAGE, &sep->rar_bus,
3280 &rar_end_address);
3282 if (error) {
3283 dev_warn(&sep->pdev->dev, "mrst can't get rar region\n");
3284 goto end_function;
3285 }
3287 sep->rar_size = (size_t)(rar_end_address - sep->rar_bus + 1);
3289 if (!request_mem_region(sep->rar_bus, sep->rar_size,
3290 "sep_sec_driver")) {
3291 dev_warn(&sep->pdev->dev,
3292 "request mem region for mrst failed\n");
3293 error = -1;
3294 goto end_function;
3295 }
3297 sep->rar_addr = ioremap_nocache(sep->rar_bus, sep->rar_size);
3298 if (!sep->rar_addr) {
3299 dev_warn(&sep->pdev->dev,
3300 "ioremap nocache for mrst rar failed\n");
3301 error = -ENOMEM;
3302 goto end_function;
3303 }
3304 dev_dbg(&sep->pdev->dev, "rar start is %p, phy is %llx, size is %x\n",
3305 sep->rar_addr, (unsigned long long)sep->rar_bus,
3306 sep->rar_size);
3308 end_function:
3309 dev_dbg(&sep->pdev->dev, "callback end\n");
3310 return error;
3311 }
3313 /**
3314 * sep_probe - probe a matching PCI device
3315 * @pdev: pci_device
3316 * @ent: pci_device_id
3318 * Attempt to set up and configure a SEP device that has been
3319 * discovered by the PCI layer.
3320 */
3321 static int __devinit sep_probe(struct pci_dev *pdev,
3322 const struct pci_device_id *ent)
3323 {
3324 int error = 0;
3325 struct sep_device *sep;
3327 pr_debug("SEP pci probe starting\n");
3328 if (sep_dev != NULL) {
3329 dev_warn(&pdev->dev, "only one SEP supported.\n");
3330 return -EBUSY;
3331 }
3333 /* Enable the device */
3334 error = pci_enable_device(pdev);
3335 if (error) {
3336 dev_warn(&pdev->dev, "error enabling pci device\n");
3337 goto end_function;
3338 }
3340 /* Allocate the sep_device structure for this device */
3341 sep_dev = kzalloc(sizeof(struct sep_device), GFP_ATOMIC);
3342 if (sep_dev == NULL) {
3343 dev_warn(&pdev->dev,
3344 "can't kmalloc the sep_device structure\n");
3345 return -ENOMEM;
3346 }
3348 /*
3349 * We're going to use another variable for actually
3350 * working with the device; this way, if we have
3351 * multiple devices in the future, it would be easier
3352 * to make appropriate changes
3353 */
3354 sep = sep_dev;
3356 sep->pdev = pdev;
3358 if (pdev->device == MRST_PCI_DEVICE_ID)
3359 sep->mrst = 1;
3361 dev_dbg(&sep->pdev->dev, "PCI obtained, device being prepared\n");
3362 dev_dbg(&sep->pdev->dev, "revision is %d\n", sep->pdev->revision);
3364 /* Set up our register area */
3365 sep->reg_physical_addr = pci_resource_start(sep->pdev, 0);
3366 if (!sep->reg_physical_addr) {
3367 dev_warn(&sep->pdev->dev, "Error getting register start\n");
3368 pci_dev_put(sep->pdev);
3369 return -ENODEV;
3370 }
3372 sep->reg_physical_end = pci_resource_end(sep->pdev, 0);
3373 if (!sep->reg_physical_end) {
3374 dev_warn(&sep->pdev->dev, "Error getting register end\n");
3375 pci_dev_put(sep->pdev);
3376 return -ENODEV;
3377 }
3379 sep->reg_addr = ioremap_nocache(sep->reg_physical_addr,
3380 (size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1));
3381 if (!sep->reg_addr) {
3382 dev_warn(&sep->pdev->dev, "Error getting register virtual\n");
3383 pci_dev_put(sep->pdev);
3384 return -ENODEV;
3385 }
3387 dev_dbg(&sep->pdev->dev,
3388 "Register area start %llx end %llx virtual %p\n",
3389 (unsigned long long)sep->reg_physical_addr,
3390 (unsigned long long)sep->reg_physical_end,
3391 sep->reg_addr);
3393 /* Allocate the shared area */
3394 sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
3395 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES +
3396 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
3397 SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
3398 SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
3400 if (sep_map_and_alloc_shared_area(sep)) {
3401 error = -ENOMEM;
3402 /* Allocation failed */
3403 goto end_function_error;
3404 }
3406 /* The next section depends on type of unit */
3407 if (sep->mrst) {
3408 error = register_rar(RAR_TYPE_IMAGE, &sep_callback,
3409 (unsigned long)sep);
3410 if (error) {
3411 dev_dbg(&sep->pdev->dev,
3412 "error register_rar\n");
3413 goto end_function_deallocate_sep_shared_area;
3414 }
3415 } else {
3416 sep->rar_size = FAKE_RAR_SIZE;
3417 sep->rar_addr = dma_alloc_coherent(NULL,
3418 sep->rar_size, &sep->rar_bus, GFP_KERNEL);
3419 if (sep->rar_addr == NULL) {
3420 dev_warn(&sep->pdev->dev, "can't allocate mfld rar\n");
3421 error = -ENOMEM;
3422 goto end_function_deallocate_sep_shared_area;
3423 }
3425 dev_dbg(&sep->pdev->dev, "rar start is %p, phy is %llx,"
3426 " size is %x\n", sep->rar_addr,
3427 (unsigned long long)sep->rar_bus,
3428 sep->rar_size);
3429 }
3431 dev_dbg(&sep->pdev->dev, "about to write IMR and ICR REG_ADDR\n");
3433 /* Clear ICR register */
3434 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
3436 /* Set the IMR register - open only GPR 2 */
3437 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
3439 dev_dbg(&sep->pdev->dev, "about to call request_irq\n");
3440 /* Get the interrupt line */
3441 error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED,
3442 "sep_driver", sep);
3444 if (!error)
3445 goto end_function;
3447 if (sep->rar_addr)
3448 dma_free_coherent(&sep->pdev->dev, sep->rar_size,
3449 sep->rar_addr, sep->rar_bus);
3450 goto end_function;
3452 end_function_deallocate_sep_shared_area:
3453 /* De-allocate shared area */
3454 sep_unmap_and_free_shared_area(sep);
3456 end_function_error:
3457 iounmap(sep->reg_addr);
3458 kfree(sep_dev);
3459 sep_dev = NULL;
3461 end_function:
3462 return error;
3463 }
3465 static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = {
3466 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MRST_PCI_DEVICE_ID)},
3467 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MFLD_PCI_DEVICE_ID)},
3468 {0}
3469 };
3471 MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
3473 /* Field for registering driver to PCI device */
3474 static struct pci_driver sep_pci_driver = {
3475 .name = "sep_sec_driver",
3476 .id_table = sep_pci_id_tbl,
3477 .probe = sep_probe
3478 /* FIXME: remove handler */
3479 };
3481 /* File operation for singleton SEP operations */
3482 static const struct file_operations singleton_file_operations = {
3483 .owner = THIS_MODULE,
3484 .unlocked_ioctl = sep_singleton_ioctl,
3485 .poll = sep_poll,
3486 .open = sep_singleton_open,
3487 .release = sep_singleton_release,
3488 .mmap = sep_mmap,
3489 };
3491 /* File operation for daemon operations */
3492 static const struct file_operations daemon_file_operations = {
3493 .owner = THIS_MODULE,
3494 .unlocked_ioctl = sep_request_daemon_ioctl,
3495 .poll = sep_request_daemon_poll,
3496 .open = sep_request_daemon_open,
3497 .release = sep_request_daemon_release,
3498 .mmap = sep_request_daemon_mmap,
3499 };
3501 /* The file operations structure of the driver */
3502 static const struct file_operations sep_file_operations = {
3503 .owner = THIS_MODULE,
3504 .unlocked_ioctl = sep_ioctl,
3505 .poll = sep_poll,
3506 .open = sep_open,
3507 .release = sep_release,
3508 .mmap = sep_mmap,
3509 };
3511 /**
3512 * sep_reconfig_shared_area - reconfigure shared area
3513 * @sep: pointer to struct sep_device
3515 * Reconfig the shared area between HOST and SEP - needed in case
3516 * the DX_CC_Init function was called before OS loading.
3517 */
3518 static int sep_reconfig_shared_area(struct sep_device *sep)
3519 {
3520 int ret_val;
3522 dev_dbg(&sep->pdev->dev, "reconfig shared area start\n");
3524 /* Send the new SHARED MESSAGE AREA to the SEP */
3525 dev_dbg(&sep->pdev->dev, "sending %08llx to sep\n",
3526 (unsigned long long)sep->shared_bus);
3528 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
3530 /* Poll for SEP response */
3531 ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
3533 while (ret_val != 0xffffffff && ret_val != sep->shared_bus)
3534 ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
3536 /* Check the return value (register) */
3537 if (ret_val != sep->shared_bus) {
3538 dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
3539 dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
3540 ret_val = -ENOMEM;
3541 } else
3542 ret_val = 0;
3544 dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");
3545 return ret_val;
3546 }
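/*
 * The poll above spins until the SEP either echoes the address or
 * reports 0xffffffff. If a bounded wait were wanted, a sketch (the
 * retry count is arbitrary, for illustration only):
 *
 *	int retries = 1000;
 *	do {
 *		ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
 *	} while (--retries && ret_val != 0xffffffff &&
 *		ret_val != sep->shared_bus);
 */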
3548 /**
3549 * sep_register_driver_to_fs - register misc devices
3550 * @sep: pointer to struct sep_device
3552 * This function registers the driver to the file system
3553 */
3554 static int sep_register_driver_to_fs(struct sep_device *sep)
3555 {
3556 int ret_val;
3558 sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR;
3559 sep->miscdev_sep.name = SEP_DEV_NAME;
3560 sep->miscdev_sep.fops = &sep_file_operations;
3562 sep->miscdev_singleton.minor = MISC_DYNAMIC_MINOR;
3563 sep->miscdev_singleton.name = SEP_DEV_SINGLETON;
3564 sep->miscdev_singleton.fops = &singleton_file_operations;
3566 sep->miscdev_daemon.minor = MISC_DYNAMIC_MINOR;
3567 sep->miscdev_daemon.name = SEP_DEV_DAEMON;
3568 sep->miscdev_daemon.fops = &daemon_file_operations;
3570 ret_val = misc_register(&sep->miscdev_sep);
3571 if (ret_val) {
3572 dev_warn(&sep->pdev->dev, "misc reg fails for SEP %x\n",
3573 ret_val);
3574 return ret_val;
3575 }
3577 ret_val = misc_register(&sep->miscdev_singleton);
3578 if (ret_val) {
3579 dev_warn(&sep->pdev->dev, "misc reg fails for sing %x\n",
3580 ret_val);
3581 misc_deregister(&sep->miscdev_sep);
3582 return ret_val;
3583 }
3585 if (!sep->mrst) {
3586 ret_val = misc_register(&sep->miscdev_daemon);
3587 if (ret_val) {
3588 dev_warn(&sep->pdev->dev, "misc reg fails for dmn %x\n",
3589 ret_val);
3590 misc_deregister(&sep->miscdev_sep);
3591 misc_deregister(&sep->miscdev_singleton);
3593 return ret_val;
3594 }
3595 }
3596 return ret_val;
3597 }
3599 /**
3600 * sep_init - init function
3602 * Module load time. Register the PCI device driver.
3603 */
3604 static int __init sep_init(void)
3605 {
3606 int ret_val = 0;
3607 struct sep_device *sep = NULL;
3609 pr_debug("SEP driver: Init start\n");
3611 ret_val = pci_register_driver(&sep_pci_driver);
3612 if (ret_val) {
3613 pr_debug("sep_driver:sep_driver_to_device failed, ret_val is %d\n",
3614 ret_val);
3615 goto end_function;
3616 }
3618 sep = sep_dev;
3620 init_waitqueue_head(&sep->event);
3621 init_waitqueue_head(&sep->event_request_daemon);
3622 spin_lock_init(&sep->snd_rply_lck);
3623 mutex_init(&sep->sep_mutex);
3624 mutex_init(&sep->ioctl_mutex);
3626 /* The new chip requires a shared area reconfigure */
3627 if (sep->pdev->revision == 4) { /* Only for new chip */
3628 ret_val = sep_reconfig_shared_area(sep);
3629 if (ret_val)
3630 goto end_function_unregister_pci;
3631 }
3633 /* Register driver to fs */
3634 ret_val = sep_register_driver_to_fs(sep);
3635 if (ret_val) {
3636 dev_warn(&sep->pdev->dev, "error registering device to file\n");
3637 goto end_function_unregister_pci;
3638 }
3639 goto end_function;
3641 end_function_unregister_pci:
3642 pci_unregister_driver(&sep_pci_driver);
3644 end_function:
3645 pr_debug("SEP driver: Init end\n");
3646 return ret_val;
3647 }
3650 /**
3651 * sep_exit - called to unload driver
3653 * Drop the misc devices then remove and unmap the various resources
3654 * that are not released by the driver remove method.
3655 */
3656 static void __exit sep_exit(void)
3657 {
3658 struct sep_device *sep;
3660 sep = sep_dev;
3661 pr_debug("Exit start\n");
3663 /* Unregister from fs */
3664 misc_deregister(&sep->miscdev_sep);
3665 misc_deregister(&sep->miscdev_singleton);
3666 misc_deregister(&sep->miscdev_daemon);
3668 /* Free the irq */
3669 free_irq(sep->pdev->irq, sep);
3671 /* Unregister the driver */
3672 pci_unregister_driver(&sep_pci_driver);
3674 /* Free the shared area */
3675 if (sep_dev) {
3676 sep_unmap_and_free_shared_area(sep_dev);
3677 dev_dbg(&sep->pdev->dev,
3678 "free pages SEP SHARED AREA\n");
3679 iounmap((void *) sep_dev->reg_addr);
3680 dev_dbg(&sep->pdev->dev,
3681 "iounmap\n");
3683 pr_debug("release_mem_region\n");
3684 pr_debug("Exit end\n");
3688 module_init(sep_init);
3689 module_exit(sep_exit);
3691 MODULE_LICENSE("GPL");