Staging: sep: Use kzalloc when needed
drivers/staging/sep/sep_driver.c
/*
 *
 *  sep_driver.c - Security Processor Driver main group of functions
 *
 *  Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
 *  Contributions(c) 2009,2010 Discretix. All rights reserved.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the Free
 *  Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 *  more details.
 *
 *  You should have received a copy of the GNU General Public License along with
 *  this program; if not, write to the Free Software Foundation, Inc., 59
 *  Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 *  CONTACTS:
 *
 *  Mark Allyn		mark.a.allyn@intel.com
 *  Jayant Mangalampalli jayant.mangalampalli@intel.com
 *
 *  CHANGES:
 *
 *  2009.06.26	Initial publish
 *  2010.09.14	Upgrade to Medfield
 *
 */
#define DEBUG
#include <linux/init.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/kdev_t.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/ioctl.h>
#include <asm/current.h>
#include <linux/ioport.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include <linux/delay.h>
#include <linux/rar_register.h>

#include "../memrar/memrar.h"

#include "sep_driver_hw_defs.h"
#include "sep_driver_config.h"
#include "sep_driver_api.h"
#include "sep_dev.h"
/*----------------------------------------
	DEFINES
-----------------------------------------*/

#define SEP_RAR_IO_MEM_REGION_SIZE 0x40000

/*--------------------------------------------
	GLOBAL variables
--------------------------------------------*/

/* Keep this a single static object for now to keep the conversion easy */

static struct sep_device *sep_dev;
/**
 *	sep_load_firmware - copy firmware cache/resident
 *	@sep: pointer to struct sep_device we are loading
 *
 *	This function copies the cache and resident images from their
 *	source location into the destination shared memory.
 */
static int sep_load_firmware(struct sep_device *sep)
{
	const struct firmware *fw;
	char *cache_name = "cache.image.bin";
	char *res_name = "resident.image.bin";
	char *extapp_name = "extapp.image.bin";
	int error;
	unsigned int work1, work2, work3;

	/* set addresses and load resident */
	sep->resident_bus = sep->rar_bus;
	sep->resident_addr = sep->rar_addr;

	error = request_firmware(&fw, res_name, &sep->pdev->dev);
	if (error) {
		dev_warn(&sep->pdev->dev, "can't request resident firmware\n");
		return error;
	}

	memcpy(sep->resident_addr, (void *)fw->data, fw->size);
	sep->resident_size = fw->size;
	release_firmware(fw);

	dev_dbg(&sep->pdev->dev, "resident virtual is %p\n",
		sep->resident_addr);
	dev_dbg(&sep->pdev->dev, "resident bus is %lx\n",
		(unsigned long)sep->resident_bus);
	dev_dbg(&sep->pdev->dev, "resident size is %08x\n",
		sep->resident_size);
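
	/*
	 * Note on the address arithmetic below: the dcache image sits in
	 * the RAR directly after the resident image. (end + 4K) & ~0xfff
	 * rounds the end of the resident up to a 4 KB boundary (leaving up
	 * to one page of pad); the same sum is applied to the bus and the
	 * virtual address so the two views stay in step.
	 */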
	/* set addresses for dcache (no loading needed) */
	work1 = (unsigned int)sep->resident_bus;
	work2 = (unsigned int)sep->resident_size;
	work3 = (work1 + work2 + (1024 * 4)) & 0xfffff000;
	sep->dcache_bus = (dma_addr_t)work3;

	work1 = (unsigned int)sep->resident_addr;
	work2 = (unsigned int)sep->resident_size;
	work3 = (work1 + work2 + (1024 * 4)) & 0xfffff000;
	sep->dcache_addr = (void *)work3;

	sep->dcache_size = 1024 * 128;

	/* set addresses and load cache */
	sep->cache_bus = sep->dcache_bus + sep->dcache_size;
	sep->cache_addr = sep->dcache_addr + sep->dcache_size;

	error = request_firmware(&fw, cache_name, &sep->pdev->dev);
	if (error) {
		dev_warn(&sep->pdev->dev, "Unable to request cache firmware\n");
		return error;
	}

	memcpy(sep->cache_addr, (void *)fw->data, fw->size);
	sep->cache_size = fw->size;
	release_firmware(fw);

	dev_dbg(&sep->pdev->dev, "cache virtual is %p\n",
		sep->cache_addr);
	dev_dbg(&sep->pdev->dev, "cache bus is %08lx\n",
		(unsigned long)sep->cache_bus);
	dev_dbg(&sep->pdev->dev, "cache size is %08x\n",
		sep->cache_size);
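
	/*
	 * The extapp image is placed a fixed 370 KB past the start of the
	 * cache image; presumably 370 KB is the size reserved for the
	 * cache area in the RAR layout.
	 */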
	/* set addresses and load extapp */
	sep->extapp_bus = sep->cache_bus + (1024 * 370);
	sep->extapp_addr = sep->cache_addr + (1024 * 370);

	error = request_firmware(&fw, extapp_name, &sep->pdev->dev);
	if (error) {
		dev_warn(&sep->pdev->dev, "Unable to request extapp firmware\n");
		return error;
	}

	memcpy(sep->extapp_addr, (void *)fw->data, fw->size);
	sep->extapp_size = fw->size;
	release_firmware(fw);

	dev_dbg(&sep->pdev->dev, "extapp virtual is %p\n",
		sep->extapp_addr);
	dev_dbg(&sep->pdev->dev, "extapp bus is %08llx\n",
		(unsigned long long)sep->extapp_bus);
	dev_dbg(&sep->pdev->dev, "extapp size is %08x\n",
		sep->extapp_size);

	return error;
}
MODULE_FIRMWARE("sep/cache.image.bin");
MODULE_FIRMWARE("sep/resident.image.bin");
MODULE_FIRMWARE("sep/extapp.image.bin");
/**
 *	sep_dump_message - dump the message that is pending
 *	@sep: sep device
 */
static void sep_dump_message(struct sep_device *sep)
{
	int count;
	u32 *p = sep->shared_addr;

	for (count = 0; count < 12 * 4; count += 4)
		dev_dbg(&sep->pdev->dev, "Word %d of the message is %x\n",
			count, *p++);
}
/**
 *	sep_map_and_alloc_shared_area - allocate shared block
 *	@sep: security processor
 *
 *	Allocates sep->shared_size bytes of coherent memory for the
 *	shared area and records both its kernel and bus addresses.
 */
static int sep_map_and_alloc_shared_area(struct sep_device *sep)
{
	sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev,
		sep->shared_size,
		&sep->shared_bus, GFP_KERNEL);

	if (!sep->shared_addr) {
		dev_warn(&sep->pdev->dev,
			"shared memory dma_alloc_coherent failed\n");
		return -ENOMEM;
	}
	dev_dbg(&sep->pdev->dev,
		"sep: shared_addr %x bytes @%p (bus %llx)\n",
		sep->shared_size, sep->shared_addr,
		(unsigned long long)sep->shared_bus);
	return 0;
}
/**
 *	sep_unmap_and_free_shared_area - free shared block
 *	@sep: security processor
 */
static void sep_unmap_and_free_shared_area(struct sep_device *sep)
{
	dev_dbg(&sep->pdev->dev, "shared area unmap and free\n");
	dma_free_coherent(&sep->pdev->dev, sep->shared_size,
		sep->shared_addr, sep->shared_bus);
}
/**
 *	sep_shared_bus_to_virt - convert bus/virt addresses
 *	@sep: pointer to struct sep_device
 *	@bus_address: address to convert
 *
 *	Returns the virtual address inside the shared area that
 *	corresponds to the given bus address.
 */
static void *sep_shared_bus_to_virt(struct sep_device *sep,
	dma_addr_t bus_address)
{
	return sep->shared_addr + (bus_address - sep->shared_bus);
}
/**
 *	sep_singleton_open - open function for the singleton driver
 *	@inode_ptr: struct inode *
 *	@file_ptr: struct file *
 *
 *	Called when the user opens the singleton device interface
 */
static int sep_singleton_open(struct inode *inode_ptr, struct file *file_ptr)
{
	int error = 0;
	struct sep_device *sep;

	/*
	 * Get the sep device structure and use it for the
	 * private_data field in filp for other methods
	 */
	sep = sep_dev;

	file_ptr->private_data = sep;

	dev_dbg(&sep->pdev->dev, "Singleton open for pid %d\n",
		current->pid);

	dev_dbg(&sep->pdev->dev, "calling test and set for singleton 0\n");
	if (test_and_set_bit(0, &sep->singleton_access_flag)) {
		error = -EBUSY;
		goto end_function;
	}

	dev_dbg(&sep->pdev->dev, "sep_singleton_open end\n");
end_function:
	return error;
}
/**
 *	sep_open - device open method
 *	@inode: inode of sep device
 *	@filp: file handle to sep device
 *
 *	Open method for the SEP device. Called when userspace opens
 *	the SEP device node.
 *
 *	Returns zero on success otherwise an error code.
 */
static int sep_open(struct inode *inode, struct file *filp)
{
	struct sep_device *sep;

	/*
	 * Get the sep device structure and use it for the
	 * private_data field in filp for other methods
	 */
	sep = sep_dev;
	filp->private_data = sep;

	dev_dbg(&sep->pdev->dev, "Open for pid %d\n", current->pid);

	/* Anyone can open; locking takes place at transaction level */
	return 0;
}
/**
 *	sep_singleton_release - close a SEP singleton device
 *	@inode: inode of SEP device
 *	@filp: file handle being closed
 *
 *	Called on the final close of a SEP device. As the open protects against
 *	multiple simultaneous opens, this method is called when the
 *	final reference to the open handle is dropped.
 */
static int sep_singleton_release(struct inode *inode, struct file *filp)
{
	struct sep_device *sep = filp->private_data;

	dev_dbg(&sep->pdev->dev, "Singleton release for pid %d\n",
		current->pid);
	clear_bit(0, &sep->singleton_access_flag);
	return 0;
}
/**
 *	sep_request_daemon_open - request daemon open method
 *	@inode: inode of sep device
 *	@filp: file handle to sep device
 *
 *	Open method for the SEP request daemon. Called when the
 *	request daemon in userspace opens the SEP device node.
 *
 *	Returns zero on success otherwise an error code.
 */
static int sep_request_daemon_open(struct inode *inode, struct file *filp)
{
	struct sep_device *sep = sep_dev;
	int error = 0;

	filp->private_data = sep;

	dev_dbg(&sep->pdev->dev, "Request daemon open for pid %d\n",
		current->pid);

	/* There is supposed to be only one request daemon */
	dev_dbg(&sep->pdev->dev, "calling test and set for req_dmon open 0\n");
	if (test_and_set_bit(0, &sep->request_daemon_open))
		error = -EBUSY;
	return error;
}
/**
 *	sep_request_daemon_release - close a SEP daemon
 *	@inode: inode of SEP device
 *	@filp: file handle being closed
 *
 *	Called on the final close of a SEP daemon.
 */
static int sep_request_daemon_release(struct inode *inode, struct file *filp)
{
	struct sep_device *sep = filp->private_data;

	dev_dbg(&sep->pdev->dev, "Request daemon release for pid %d\n",
		current->pid);

	/* clear the request_daemon_open flag */
	clear_bit(0, &sep->request_daemon_open);
	return 0;
}
/**
 *	sep_req_daemon_send_reply_command_handler - poke the SEP
 *	@sep: struct sep_device *
 *
 *	This function raises an interrupt to the SEP that signals that it
 *	has a new command from the host.
 */
static int sep_req_daemon_send_reply_command_handler(struct sep_device *sep)
{
	unsigned long lck_flags;

	dev_dbg(&sep->pdev->dev,
		"sep_req_daemon_send_reply_command_handler start\n");

	sep_dump_message(sep);
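
	/*
	 * send_ct/reply_ct form a simple handshake with the poll methods:
	 * send_ct != reply_ct means a message is outstanding. Both are
	 * advanced together here because the daemon is replying rather
	 * than starting a new transaction; the second send_ct increment
	 * after the GPR2 write presumably accounts for the reply message
	 * itself.
	 */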
	/* counters are lockable region */
	spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
	sep->send_ct++;
	sep->reply_ct++;

	/* send the interrupt to SEP */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR,
		sep->send_ct);

	sep->send_ct++;

	spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);

	dev_dbg(&sep->pdev->dev,
		"sep_req_daemon_send_reply send_ct %lx reply_ct %lx\n",
		sep->send_ct, sep->reply_ct);

	dev_dbg(&sep->pdev->dev,
		"sep_req_daemon_send_reply_command_handler end\n");

	return 0;
}
/**
 *	sep_free_dma_table_data_handler - free DMA table
 *	@sep: pointer to struct sep_device
 *
 *	Handles the request to free the dma table for synchronic actions
 */
static int sep_free_dma_table_data_handler(struct sep_device *sep)
{
	int count;
	int dcb_counter;
	/* pointer to the current dma_resource struct */
	struct sep_dma_resource *dma;

	dev_dbg(&sep->pdev->dev, "sep_free_dma_table_data_handler start\n");

	for (dcb_counter = 0; dcb_counter < sep->nr_dcb_creat; dcb_counter++) {
		dma = &sep->dma_res_arr[dcb_counter];

		/* unmap and free input map array */
		if (dma->in_map_array) {
			for (count = 0; count < dma->in_num_pages; count++) {
				dma_unmap_page(&sep->pdev->dev,
					dma->in_map_array[count].dma_addr,
					dma->in_map_array[count].size,
					DMA_TO_DEVICE);
			}
			kfree(dma->in_map_array);
		}

		/* unmap and free output map array */
		if (dma->out_map_array) {
			for (count = 0; count < dma->out_num_pages; count++) {
				dma_unmap_page(&sep->pdev->dev,
					dma->out_map_array[count].dma_addr,
					dma->out_map_array[count].size,
					DMA_FROM_DEVICE);
			}
			kfree(dma->out_map_array);
		}

		/* free page cache for input */
		if (dma->in_page_array) {
			for (count = 0; count < dma->in_num_pages; count++) {
				flush_dcache_page(dma->in_page_array[count]);
				page_cache_release(dma->in_page_array[count]);
			}
			kfree(dma->in_page_array);
		}

		if (dma->out_page_array) {
			for (count = 0; count < dma->out_num_pages; count++) {
				if (!PageReserved(dma->out_page_array[count]))
					SetPageDirty(dma->out_page_array[count]);
				flush_dcache_page(dma->out_page_array[count]);
				page_cache_release(dma->out_page_array[count]);
			}
			kfree(dma->out_page_array);
		}

		/* reset all the values */
		dma->in_page_array = NULL;
		dma->out_page_array = NULL;
		dma->in_num_pages = 0;
		dma->out_num_pages = 0;
		dma->in_map_array = NULL;
		dma->out_map_array = NULL;
		dma->in_map_num_entries = 0;
		dma->out_map_num_entries = 0;
	}

	sep->nr_dcb_creat = 0;
	sep->num_lli_tables_created = 0;

	dev_dbg(&sep->pdev->dev, "sep_free_dma_table_data_handler end\n");
	return 0;
}
/**
 *	sep_request_daemon_mmap - maps the shared area to user space
 *	@filp: pointer to struct file
 *	@vma: pointer to vm_area_struct
 *
 *	Called by the kernel when the daemon attempts an mmap() syscall
 *	using our handle.
 */
static int sep_request_daemon_mmap(struct file *filp,
	struct vm_area_struct *vma)
{
	struct sep_device *sep = filp->private_data;
	dma_addr_t bus_address;
	int error = 0;

	dev_dbg(&sep->pdev->dev, "daemon mmap start\n");

	if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
		error = -EINVAL;
		goto end_function;
	}

	/* get physical address */
	bus_address = sep->shared_bus;

	dev_dbg(&sep->pdev->dev, "bus_address is %08lx\n",
		(unsigned long)bus_address);

	if (remap_pfn_range(vma, vma->vm_start, bus_address >> PAGE_SHIFT,
		vma->vm_end - vma->vm_start, vma->vm_page_prot)) {

		dev_warn(&sep->pdev->dev, "remap_pfn_range failed\n");
		error = -EAGAIN;
		goto end_function;
	}

end_function:
	dev_dbg(&sep->pdev->dev, "daemon mmap end\n");
	return error;
}
/**
 *	sep_request_daemon_poll - poll implementation
 *	@filp: struct file * for open file
 *	@wait: poll_table * for poll
 *
 *	Called when our device is part of a poll() or select() syscall
 */
static unsigned int sep_request_daemon_poll(struct file *filp,
	poll_table *wait)
{
	u32 mask = 0;
	/* GPR2 register */
	u32 retval2;
	unsigned long lck_flags;
	struct sep_device *sep = filp->private_data;

	dev_dbg(&sep->pdev->dev, "daemon poll: start\n");

	poll_wait(filp, &sep->event_request_daemon, wait);

	dev_dbg(&sep->pdev->dev, "daemon poll: send_ct is %lx reply ct is %lx\n",
		sep->send_ct, sep->reply_ct);

	spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
	/* check if the data is ready */
	if (sep->send_ct == sep->reply_ct) {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);

		retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		dev_dbg(&sep->pdev->dev,
			"daemon poll: data check (GPR2) is %x\n", retval2);
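
		/*
		 * GPR2 encodes what the SEP wants of the daemon; judging by
		 * the checks below, bit 30 flags a printf request and bit 31
		 * an NVS request. Anything else means no work is pending.
		 */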
		/* check if PRINT request */
		if ((retval2 >> 30) & 0x1) {
			dev_dbg(&sep->pdev->dev, "daemon poll: PRINTF request in\n");
			mask |= POLLIN;
			goto end_function;
		}
		/* check if NVS request */
		if (retval2 >> 31) {
			dev_dbg(&sep->pdev->dev, "daemon poll: NVS request in\n");
			mask |= POLLPRI | POLLWRNORM;
		}
	} else {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
		dev_dbg(&sep->pdev->dev,
			"daemon poll: no reply received; returning 0\n");
		mask = 0;
	}
end_function:
	dev_dbg(&sep->pdev->dev, "daemon poll: exit\n");
	return mask;
}
/**
 *	sep_release - close a SEP device
 *	@inode: inode of SEP device
 *	@filp: file handle being closed
 *
 *	Called on the final close of a SEP device.
 */
static int sep_release(struct inode *inode, struct file *filp)
{
	struct sep_device *sep = filp->private_data;

	dev_dbg(&sep->pdev->dev, "Release for pid %d\n", current->pid);

	mutex_lock(&sep->sep_mutex);
	/* is this the process that has a transaction open?
	 * If so, reset pid_doing_transaction to 0 and
	 * clear the in use flags, then wake up sep_event
	 * so that other processes can do transactions
	 */
	dev_dbg(&sep->pdev->dev, "waking up event and mmap_event\n");
	if (sep->pid_doing_transaction == current->pid) {
		clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
		clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags);
		sep_free_dma_table_data_handler(sep);
		wake_up(&sep->event);
		sep->pid_doing_transaction = 0;
	}

	mutex_unlock(&sep->sep_mutex);
	return 0;
}
/**
 *	sep_mmap - maps the shared area to user space
 *	@filp: pointer to struct file
 *	@vma: pointer to vm_area_struct
 *
 *	Called on an mmap of our space via the normal sep device
 */
static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
{
	dma_addr_t bus_addr;
	struct sep_device *sep = filp->private_data;
	unsigned long error = 0;

	dev_dbg(&sep->pdev->dev, "mmap start\n");

	/* Set the transaction busy (own the device) */
	wait_event_interruptible(sep->event,
		test_and_set_bit(SEP_MMAP_LOCK_BIT,
		&sep->in_use_flags) == 0);

	if (signal_pending(current)) {
		error = -EINTR;
		goto end_function_with_error;
	}
	/*
	 * The pid_doing_transaction indicates that this process
	 * now owns the facilities to perform a transaction with
	 * the sep. While this process is performing a transaction,
	 * no other process who has the sep device open can perform
	 * any transactions. This method allows more than one process
	 * to have the device open at any given time, which provides
	 * finer granularity for device utilization by multiple
	 * processes.
	 */
	mutex_lock(&sep->sep_mutex);
	sep->pid_doing_transaction = current->pid;
	mutex_unlock(&sep->sep_mutex);

	/* zero the pools and the number of data pool allocation pointers */
	sep->data_pool_bytes_allocated = 0;
	sep->num_of_data_allocations = 0;

	/*
	 * check that the size of the mapped range is as the size of the message
	 * shared area
	 */
	if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
		error = -EINVAL;
		goto end_function_with_error;
	}

	dev_dbg(&sep->pdev->dev, "shared_addr is %p\n", sep->shared_addr);

	/* get bus address */
	bus_addr = sep->shared_bus;

	dev_dbg(&sep->pdev->dev,
		"bus_address is %lx\n", (unsigned long)bus_addr);

	if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT,
		vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
		dev_warn(&sep->pdev->dev, "remap_pfn_range failed\n");
		error = -EAGAIN;
		goto end_function_with_error;
	}
	dev_dbg(&sep->pdev->dev, "mmap end\n");
	goto end_function;

end_function_with_error:
	/* clear the bit */
	clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
	mutex_lock(&sep->sep_mutex);
	sep->pid_doing_transaction = 0;
	mutex_unlock(&sep->sep_mutex);

	/* raise event for stuck contexts */
	dev_warn(&sep->pdev->dev, "mmap error - waking up event\n");
	wake_up(&sep->event);

end_function:
	return error;
}
/**
 *	sep_poll - poll handler
 *	@filp: pointer to struct file
 *	@wait: pointer to poll_table
 *
 *	Called by the OS when the kernel is asked to do a poll on
 *	a SEP file handle.
 */
static unsigned int sep_poll(struct file *filp, poll_table *wait)
{
	u32 mask = 0;
	u32 retval = 0;
	u32 retval2 = 0;
	unsigned long lck_flags;

	struct sep_device *sep = filp->private_data;

	dev_dbg(&sep->pdev->dev, "poll: start\n");

	/* Am I the process that owns the transaction? */
	mutex_lock(&sep->sep_mutex);
	if (current->pid != sep->pid_doing_transaction) {
		dev_warn(&sep->pdev->dev, "poll; wrong pid\n");
		mask = POLLERR;
		mutex_unlock(&sep->sep_mutex);
		goto end_function;
	}
	mutex_unlock(&sep->sep_mutex);

	/* check if send command or send_reply were activated previously */
	if (!test_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags)) {
		dev_warn(&sep->pdev->dev, "poll; send msg lock bit not set\n");
		mask = POLLERR;
		goto end_function;
	}

	/* add the event to the polling wait table */
	dev_dbg(&sep->pdev->dev, "poll: calling wait sep_event\n");

	poll_wait(filp, &sep->event, wait);

	dev_dbg(&sep->pdev->dev, "poll: send_ct is %lx reply ct is %lx\n",
		sep->send_ct, sep->reply_ct);

	/* check if error occurred during poll */
	retval2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
	if (retval2 != 0x0) {
		dev_warn(&sep->pdev->dev, "poll; poll error %x\n", retval2);
		mask |= POLLERR;
		goto end_function;
	}

	spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);

	if (sep->send_ct == sep->reply_ct) {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
		retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		dev_dbg(&sep->pdev->dev, "poll: data ready check (GPR2) %x\n",
			retval);
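
		/*
		 * Same GPR2 convention as in the daemon poll: bit 30 is a
		 * SEP printf request and bit 31 a request for the daemon,
		 * both of which wake the request daemon. With neither bit
		 * set, this is an ordinary reply for the caller.
		 */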
		/* check if printf request */
		if ((retval >> 30) & 0x1) {
			dev_dbg(&sep->pdev->dev, "poll: sep printf request\n");
			wake_up(&sep->event_request_daemon);
			goto end_function;
		}

		/* check if this is a sep reply or request */
		if (retval >> 31) {
			dev_dbg(&sep->pdev->dev, "poll: sep request\n");
			wake_up(&sep->event_request_daemon);
		} else {
			dev_dbg(&sep->pdev->dev, "poll: normal return\n");
			/* in case it is again by send_reply_command */
			clear_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags);
			sep_dump_message(sep);
			dev_dbg(&sep->pdev->dev,
				"poll; sep reply POLLIN | POLLRDNORM\n");
			mask |= POLLIN | POLLRDNORM;
		}
	} else {
		spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
		dev_dbg(&sep->pdev->dev,
			"poll; no reply received; returning mask of 0\n");
		mask = 0;
	}

end_function:
	dev_dbg(&sep->pdev->dev, "poll: end\n");
	return mask;
}
/**
 *	sep_time_address - address in SEP memory of time
 *	@sep: SEP device we want the address from
 *
 *	Return the address of the two dwords in memory used for time
 *	setting.
 */
static u32 *sep_time_address(struct sep_device *sep)
{
	return sep->shared_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
}
/**
 *	sep_set_time - set the SEP time
 *	@sep: the SEP we are setting the time for
 *
 *	Calculates time and sets it at the predefined address.
 *	Called with the sep mutex held.
 */
static unsigned long sep_set_time(struct sep_device *sep)
{
	struct timeval time;
	u32 *time_addr;	/* address of time as seen by the kernel */

	dev_dbg(&sep->pdev->dev, "sep:sep_set_time start\n");

	do_gettimeofday(&time);

	/* set value in the SYSTEM MEMORY offset */
	time_addr = sep_time_address(sep);
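
	/*
	 * The time is published as a two word record: word 0 carries the
	 * SEP_TIME_VAL_TOKEN marker, word 1 the time in seconds; one
	 * second resolution is apparently all the SEP needs.
	 */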
	time_addr[0] = SEP_TIME_VAL_TOKEN;
	time_addr[1] = time.tv_sec;

	dev_dbg(&sep->pdev->dev, "time.tv_sec is %lu\n", time.tv_sec);
	dev_dbg(&sep->pdev->dev, "time_addr is %p\n", time_addr);
	dev_dbg(&sep->pdev->dev, "sep->shared_addr is %p\n", sep->shared_addr);

	return time.tv_sec;
}
/**
 *	sep_set_caller_id_handler - insert caller id entry
 *	@sep: sep device
 *	@arg: pointer to struct caller_id_struct
 *
 *	Inserts the data into the caller id table. Note that this function
 *	falls under the ioctl lock
 */
static int sep_set_caller_id_handler(struct sep_device *sep, u32 arg)
{
	void __user *hash;
	int error = 0;
	int i;
	struct caller_id_struct command_args;

	dev_dbg(&sep->pdev->dev, "sep_set_caller_id_handler start\n");

	for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) {
		if (sep->caller_id_table[i].pid == 0)
			break;
	}

	if (i == SEP_CALLER_ID_TABLE_NUM_ENTRIES) {
		dev_warn(&sep->pdev->dev, "no more caller id entries left\n");
		dev_warn(&sep->pdev->dev, "maximum number is %d\n",
			SEP_CALLER_ID_TABLE_NUM_ENTRIES);
		error = -EUSERS;
		goto end_function;
	}

	/* copy the data */
	if (copy_from_user(&command_args, (void __user *)arg,
		sizeof(command_args))) {
		error = -EFAULT;
		goto end_function;
	}

	hash = (void __user *)(unsigned long)command_args.callerIdAddress;

	if (!command_args.pid || !command_args.callerIdSizeInBytes) {
		error = -EINVAL;
		goto end_function;
	}

	dev_dbg(&sep->pdev->dev, "pid is %x\n", command_args.pid);
	dev_dbg(&sep->pdev->dev, "callerIdSizeInBytes is %x\n",
		command_args.callerIdSizeInBytes);

	if (command_args.callerIdSizeInBytes >
		SEP_CALLER_ID_HASH_SIZE_IN_BYTES) {
		error = -EMSGSIZE;
		goto end_function;
	}

	sep->caller_id_table[i].pid = command_args.pid;

	if (copy_from_user(sep->caller_id_table[i].callerIdHash,
		hash, command_args.callerIdSizeInBytes))
		error = -EFAULT;
end_function:
	dev_dbg(&sep->pdev->dev, "sep_set_caller_id_handler end\n");
	return error;
}
/**
 *	sep_set_current_caller_id - set the caller id
 *	@sep: pointer to struct sep_device
 *
 *	Set the caller ID (if it exists) to the sep. Note that this
 *	function falls under the ioctl lock
 */
static int sep_set_current_caller_id(struct sep_device *sep)
{
	int i;

	dev_dbg(&sep->pdev->dev, "sep_set_current_caller_id start\n");
	dev_dbg(&sep->pdev->dev, "current process is %d\n", current->pid);

	/* zero the previous value */
	memset(sep->shared_addr + SEP_CALLER_ID_OFFSET_BYTES,
		0, SEP_CALLER_ID_HASH_SIZE_IN_BYTES);

	for (i = 0; i < SEP_CALLER_ID_TABLE_NUM_ENTRIES; i++) {
		if (sep->caller_id_table[i].pid == current->pid) {
			dev_dbg(&sep->pdev->dev, "Caller Id found\n");

			memcpy(sep->shared_addr + SEP_CALLER_ID_OFFSET_BYTES,
				(void *)(sep->caller_id_table[i].callerIdHash),
				SEP_CALLER_ID_HASH_SIZE_IN_BYTES);
			break;
		}
	}
	dev_dbg(&sep->pdev->dev, "sep_set_current_caller_id end\n");
	return 0;
}
/**
 *	sep_send_command_handler - kick off a command
 *	@sep: sep being signalled
 *
 *	This function raises an interrupt to the SEP that signals that it
 *	has a new command from the host.
 *
 *	Note that this function does fall under the ioctl lock
 */
static int sep_send_command_handler(struct sep_device *sep)
{
	unsigned long lck_flags;
	int error = 0;

	dev_dbg(&sep->pdev->dev, "sep_send_command_handler start\n");

	if (test_and_set_bit(SEP_SEND_MSG_LOCK_BIT, &sep->in_use_flags)) {
		error = -EPROTO;
		goto end_function;
	}
	sep_set_time(sep);

	/* only Medfield has caller id */
	if (sep->mrst == 0)
		sep_set_current_caller_id(sep);

	sep_dump_message(sep);

	/* update counter */
	spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
	sep->send_ct++;
	spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);

	dev_dbg(&sep->pdev->dev,
		"sep_send_command_handler send_ct %lx reply_ct %lx\n",
		sep->send_ct, sep->reply_ct);

	/* send interrupt to SEP */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);

end_function:
	dev_dbg(&sep->pdev->dev, "sep_send_command_handler end\n");
	return error;
}
/**
 *	sep_allocate_data_pool_memory_handler - allocate pool memory
 *	@sep: pointer to struct sep_device
 *	@arg: pointer to struct alloc_struct
 *
 *	This function handles the allocate data pool memory request.
 *	It calculates the bus address of the allocated memory and the
 *	offset of this area from the mapped address.
 *	Therefore, the FVOs in user space can calculate the exact virtual
 *	address of this allocated memory.
 */
static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
	unsigned long arg)
{
	int error = 0;
	struct alloc_struct command_args;

	/* holds the allocated buffer address in the system memory pool */
	u32 *token_addr;

	dev_dbg(&sep->pdev->dev,
		"sep_allocate_data_pool_memory_handler start\n");

	if (copy_from_user(&command_args, (void __user *)arg,
		sizeof(struct alloc_struct))) {
		error = -EFAULT;
		goto end_function;
	}

	/* allocate memory */
	if ((sep->data_pool_bytes_allocated + command_args.num_bytes) >
		SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
		error = -ENOMEM;
		goto end_function;
	}

	dev_dbg(&sep->pdev->dev,
		"bytes_allocated: %x\n", (int)sep->data_pool_bytes_allocated);
	dev_dbg(&sep->pdev->dev,
		"offset: %x\n", SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES);

	/* set the virtual and bus address */
	command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES +
		sep->data_pool_bytes_allocated;

	dev_dbg(&sep->pdev->dev,
		"command_args.offset: %x\n", command_args.offset);
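
	/*
	 * Each allocation is advertised to the SEP as a two word record
	 * in the shared area: word 0 is the SEP_DATA_POOL_POINTERS_VAL_TOKEN
	 * marker and word 1 the bus address of the chunk; records are
	 * appended at a stride of 2*sizeof(u32) per allocation.
	 */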
	/* place in the shared area that is known by the sep */
	token_addr = (u32 *)(sep->shared_addr +
		SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES +
		(sep->num_of_data_allocations)*2*sizeof(u32));

	dev_dbg(&sep->pdev->dev, "allocation offset: %x\n",
		SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES);
	dev_dbg(&sep->pdev->dev, "data pool token addr is %p\n", token_addr);

	token_addr[0] = SEP_DATA_POOL_POINTERS_VAL_TOKEN;
	token_addr[1] = (u32)sep->shared_bus +
		SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES +
		sep->data_pool_bytes_allocated;

	dev_dbg(&sep->pdev->dev, "data pool token [0] %x\n", token_addr[0]);
	dev_dbg(&sep->pdev->dev, "data pool token [1] %x\n", token_addr[1]);

	/* write the memory back to the user space */
	error = copy_to_user((void *)arg, (void *)&command_args,
		sizeof(struct alloc_struct));
	if (error) {
		error = -EFAULT;
		dev_warn(&sep->pdev->dev,
			"allocate data pool copy to user error\n");
		goto end_function;
	}

	/* update the allocation */
	sep->data_pool_bytes_allocated += command_args.num_bytes;
	sep->num_of_data_allocations += 1;

	dev_dbg(&sep->pdev->dev, "data_allocations %d\n",
		sep->num_of_data_allocations);
	dev_dbg(&sep->pdev->dev, "bytes allocated %d\n",
		(int)sep->data_pool_bytes_allocated);

end_function:
	dev_dbg(&sep->pdev->dev, "sep_allocate_data_pool_memory_handler end\n");
	return error;
}
/**
 *	sep_lock_kernel_pages - map kernel pages for DMA
 *	@sep: pointer to struct sep_device
 *	@kernel_virt_addr: address of data buffer in kernel
 *	@data_size: size of data
 *	@lli_array_ptr: lli array
 *	@in_out_flag: input into device or output from device
 *
 *	This function locks all the physical pages of the kernel virtual
 *	buffer and constructs a basic lli array, where each entry holds
 *	the physical page address and the size that application data holds
 *	in this page.
 *	This function is used only during kernel crypto module calls from
 *	within the kernel (when ioctl is not used).
 */
static int sep_lock_kernel_pages(struct sep_device *sep,
	u32 kernel_virt_addr,
	u32 data_size,
	struct sep_lli_entry **lli_array_ptr,
	int in_out_flag)
{
	int error = 0;
	/* array of lli */
	struct sep_lli_entry *lli_array;
	/* map array */
	struct sep_dma_map *map_array;

	dev_dbg(&sep->pdev->dev,
		"sep_lock_kernel_pages start\n");

	dev_dbg(&sep->pdev->dev,
		"kernel_virt_addr is %08x\n", kernel_virt_addr);
	dev_dbg(&sep->pdev->dev,
		"data_size is %x\n", data_size);

	lli_array = kmalloc(sizeof(struct sep_lli_entry), GFP_ATOMIC);
	if (!lli_array) {
		error = -ENOMEM;
		goto end_function;
	}
	map_array = kmalloc(sizeof(struct sep_dma_map), GFP_ATOMIC);
	if (!map_array) {
		error = -ENOMEM;
		goto end_function_with_error;
	}
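
	/*
	 * A kernel buffer is virtually contiguous, so a single
	 * dma_map_single() covers the whole of it and the resulting
	 * "array" is one lli entry plus one map entry.
	 */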
	map_array[0].dma_addr =
		dma_map_single(&sep->pdev->dev, (void *)kernel_virt_addr,
			data_size, DMA_BIDIRECTIONAL);
	map_array[0].size = data_size;

	/*
	 * set the start address of the first page - application data may
	 * not start at the beginning of the page
	 */
	lli_array[0].bus_address = (u32)map_array[0].dma_addr;
	lli_array[0].block_size = map_array[0].size;

	dev_dbg(&sep->pdev->dev,
		"lli_array[0].bus_address is %08lx, lli_array[0].block_size is %x\n",
		(unsigned long)lli_array[0].bus_address,
		lli_array[0].block_size);

	/* set the output parameters */
	if (in_out_flag == SEP_DRIVER_IN_FLAG) {
		*lli_array_ptr = lli_array;
		sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = 1;
		sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
		sep->dma_res_arr[sep->nr_dcb_creat].in_map_array = map_array;
		sep->dma_res_arr[sep->nr_dcb_creat].in_map_num_entries = 1;
	} else {
		*lli_array_ptr = lli_array;
		sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages = 1;
		sep->dma_res_arr[sep->nr_dcb_creat].out_page_array = NULL;
		sep->dma_res_arr[sep->nr_dcb_creat].out_map_array = map_array;
		sep->dma_res_arr[sep->nr_dcb_creat].out_map_num_entries = 1;
	}
	goto end_function;

end_function_with_error:
	kfree(lli_array);

end_function:
	dev_dbg(&sep->pdev->dev, "sep_lock_kernel_pages end\n");
	return error;
}
/**
 *	sep_lock_user_pages - lock and map user pages for DMA
 *	@sep: pointer to struct sep_device
 *	@app_virt_addr: user memory data buffer
 *	@data_size: size of data buffer
 *	@lli_array_ptr: lli array
 *	@in_out_flag: input or output to device
 *
 *	This function locks all the physical pages of the application
 *	virtual buffer and constructs a basic lli array, where each entry
 *	holds the physical page address and the size that application
 *	data holds in this physical page.
 */
static int sep_lock_user_pages(struct sep_device *sep,
	u32 app_virt_addr,
	u32 data_size,
	struct sep_lli_entry **lli_array_ptr,
	int in_out_flag)
{
	int error = 0;
	u32 count;
	int result;
	/* the page of the end address of the user space buffer */
	u32 end_page;
	/* the page of the start address of the user space buffer */
	u32 start_page;
	/* the range in pages */
	u32 num_pages;
	/* array of pointers to page */
	struct page **page_array;
	/* array of lli */
	struct sep_lli_entry *lli_array;
	/* map array */
	struct sep_dma_map *map_array;
	/* direction of the DMA mapping for locked pages */
	enum dma_data_direction dir;

	dev_dbg(&sep->pdev->dev,
		"sep_lock_user_pages start\n");

	/* set start and end pages and num pages */
	end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
	start_page = app_virt_addr >> PAGE_SHIFT;
	num_pages = end_page - start_page + 1;

	dev_dbg(&sep->pdev->dev, "app_virt_addr is %x\n", app_virt_addr);
	dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size);
	dev_dbg(&sep->pdev->dev, "start_page is %x\n", start_page);
	dev_dbg(&sep->pdev->dev, "end_page is %x\n", end_page);
	dev_dbg(&sep->pdev->dev, "num_pages is %x\n", num_pages);

	dev_dbg(&sep->pdev->dev, "starting page_array malloc\n");

	/* allocate array of pages structure pointers */
	page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
	if (!page_array) {
		error = -ENOMEM;
		goto end_function;
	}
	map_array = kmalloc(sizeof(struct sep_dma_map) * num_pages, GFP_ATOMIC);
	if (!map_array) {
		dev_warn(&sep->pdev->dev, "kmalloc for map_array failed\n");
		error = -ENOMEM;
		goto end_function_with_error1;
	}

	lli_array = kmalloc(sizeof(struct sep_lli_entry) * num_pages,
		GFP_ATOMIC);

	if (!lli_array) {
		dev_warn(&sep->pdev->dev, "kmalloc for lli_array failed\n");
		error = -ENOMEM;
		goto end_function_with_error2;
	}

	dev_dbg(&sep->pdev->dev, "starting get_user_pages\n");

	/* convert the application virtual address into a set of physical */
	down_read(&current->mm->mmap_sem);
	result = get_user_pages(current, current->mm, app_virt_addr,
		num_pages,
		((in_out_flag == SEP_DRIVER_IN_FLAG) ? 0 : 1),
		0, page_array, NULL);

	up_read(&current->mm->mmap_sem);

	/* check the number of pages locked - if not all then exit with error */
	if (result != num_pages) {
		dev_warn(&sep->pdev->dev,
			"not all pages locked by get_user_pages\n");
		error = -ENOMEM;
		goto end_function_with_error3;
	}

	dev_dbg(&sep->pdev->dev, "get_user_pages succeeded\n");

	/* set direction */
	if (in_out_flag == SEP_DRIVER_IN_FLAG)
		dir = DMA_TO_DEVICE;
	else
		dir = DMA_FROM_DEVICE;

	/*
	 * fill the array using page array data and
	 * map the pages - this action
	 * will also flush the cache as needed
	 */
	for (count = 0; count < num_pages; count++) {
		/* fill the map array */
		map_array[count].dma_addr =
			dma_map_page(&sep->pdev->dev, page_array[count],
			0, PAGE_SIZE, /*dir*/DMA_BIDIRECTIONAL);

		map_array[count].size = PAGE_SIZE;

		/* fill the lli array entry */
		lli_array[count].bus_address = (u32)map_array[count].dma_addr;
		lli_array[count].block_size = PAGE_SIZE;

		dev_warn(&sep->pdev->dev, "lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is %x\n",
			count, (unsigned long)lli_array[count].bus_address,
			count, lli_array[count].block_size);
	}
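
	/*
	 * The user buffer rarely starts or ends on a page boundary: bump
	 * the first entry's bus address by the offset into the first page
	 * and trim the first (and below, the last) block size so that the
	 * lli array describes exactly data_size bytes.
	 */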
	/* check the offset for the first page */
	lli_array[0].bus_address =
		lli_array[0].bus_address + (app_virt_addr & (~PAGE_MASK));

	/* check that not all the data is in the first page only */
	if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
		lli_array[0].block_size = data_size;
	else
		lli_array[0].block_size =
			PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));

	dev_dbg(&sep->pdev->dev,
		"lli_array[0].bus_address is %08lx, lli_array[0].block_size is %x\n",
		(unsigned long)lli_array[0].bus_address,
		lli_array[0].block_size);

	/* check the size of the last page */
	if (num_pages > 1) {
		lli_array[num_pages - 1].block_size =
			(app_virt_addr + data_size) & (~PAGE_MASK);

		dev_warn(&sep->pdev->dev,
			"lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is %x\n",
			num_pages - 1,
			(unsigned long)lli_array[num_pages - 1].bus_address,
			num_pages - 1,
			lli_array[num_pages - 1].block_size);
	}

	/* set output params according to the in_out flag */
	if (in_out_flag == SEP_DRIVER_IN_FLAG) {
		*lli_array_ptr = lli_array;
		sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = num_pages;
		sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = page_array;
		sep->dma_res_arr[sep->nr_dcb_creat].in_map_array = map_array;
		sep->dma_res_arr[sep->nr_dcb_creat].in_map_num_entries =
			num_pages;
	} else {
		*lli_array_ptr = lli_array;
		sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages = num_pages;
		sep->dma_res_arr[sep->nr_dcb_creat].out_page_array =
			page_array;
		sep->dma_res_arr[sep->nr_dcb_creat].out_map_array = map_array;
		sep->dma_res_arr[sep->nr_dcb_creat].out_map_num_entries =
			num_pages;
	}
	goto end_function;

end_function_with_error3:
	/* free lli array */
	kfree(lli_array);

end_function_with_error2:
	kfree(map_array);

end_function_with_error1:
	/* free page array */
	kfree(page_array);

end_function:
	dev_dbg(&sep->pdev->dev, "sep_lock_user_pages end\n");
	return error;
}
/**
 *	sep_calculate_lli_table_max_size - size the LLI table
 *	@sep: pointer to struct sep_device
 *	@lli_in_array_ptr: lli array to size from
 *	@num_array_entries: number of entries in the array
 *	@last_table_flag: set if this turns out to be the last table
 *
 *	This function calculates the size of data that can be inserted into
 *	the lli table from this array, such that either the table is full
 *	(all entries are entered), or there are no more entries in the
 *	lli array.
 */
static u32 sep_calculate_lli_table_max_size(struct sep_device *sep,
	struct sep_lli_entry *lli_in_array_ptr,
	u32 num_array_entries,
	u32 *last_table_flag)
{
	u32 counter;
	/* table data size */
	u32 table_data_size = 0;
	/* data size for the next table */
	u32 next_table_data_size;

	*last_table_flag = 0;

	/*
	 * calculate the data in the out lli table till we fill the whole
	 * table or till the data has ended
	 */
	for (counter = 0;
		(counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) &&
			(counter < num_array_entries); counter++)
		table_data_size += lli_in_array_ptr[counter].block_size;

	/*
	 * check if we reached the last entry,
	 * meaning this is the last table to build,
	 * and no need to check the block alignment
	 */
	if (counter == num_array_entries) {
		/* set the last table flag */
		*last_table_flag = 1;
		goto end_function;
	}

	/*
	 * calculate the data size of the next table.
	 * Stop if no entries left or
	 * if data size is more than the DMA restriction
	 */
	next_table_data_size = 0;
	for (; counter < num_array_entries; counter++) {
		next_table_data_size += lli_in_array_ptr[counter].block_size;
		if (next_table_data_size >= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)
			break;
	}

	/*
	 * check if the next table data size is less than the DMA restriction.
	 * if it is - recalculate the current table size, so that the next
	 * table data size will be adequate for DMA
	 */
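	/*
	 * Illustration (with a hypothetical 4 KB minimum): if this table
	 * filled to 7 KB and only 1 KB remains for the next table, hand
	 * 3 KB back so that the next table also meets the restriction.
	 */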
	if (next_table_data_size &&
		next_table_data_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE)

		table_data_size -= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE -
			next_table_data_size);

	dev_dbg(&sep->pdev->dev, "table data size is %x\n",
		table_data_size);
end_function:
	return table_data_size;
}
/**
 *	sep_build_lli_table - build an lli array for the given table
 *	@sep: pointer to struct sep_device
 *	@lli_array_ptr: pointer to lli array
 *	@lli_table_ptr: pointer to lli table
 *	@num_processed_entries_ptr: pointer to number of entries
 *	@num_table_entries_ptr: pointer to number of tables
 *	@table_data_size: total data size
 *
 *	Builds an lli table from the lli_array according to
 *	the given size of data
 */
static void sep_build_lli_table(struct sep_device *sep,
	struct sep_lli_entry *lli_array_ptr,
	struct sep_lli_entry *lli_table_ptr,
	u32 *num_processed_entries_ptr,
	u32 *num_table_entries_ptr,
	u32 table_data_size)
{
	/* current table data size */
	u32 curr_table_data_size;
	/* counter of lli array entry */
	u32 array_counter;

	dev_dbg(&sep->pdev->dev, "sep_build_lli_table start\n");

	/* init current table data size and lli array entry counter */
	curr_table_data_size = 0;
	array_counter = 0;
	*num_table_entries_ptr = 1;

	dev_dbg(&sep->pdev->dev, "table_data_size is %x\n", table_data_size);

	/* fill the table till table size reaches the needed amount */
	while (curr_table_data_size < table_data_size) {
		/* update the number of entries in table */
		(*num_table_entries_ptr)++;

		lli_table_ptr->bus_address =
			cpu_to_le32(lli_array_ptr[array_counter].bus_address);

		lli_table_ptr->block_size =
			cpu_to_le32(lli_array_ptr[array_counter].block_size);

		curr_table_data_size += lli_array_ptr[array_counter].block_size;

		dev_dbg(&sep->pdev->dev, "lli_table_ptr is %p\n",
			lli_table_ptr);
		dev_dbg(&sep->pdev->dev, "lli_table_ptr->bus_address is %08lx\n",
			(unsigned long)lli_table_ptr->bus_address);
		dev_dbg(&sep->pdev->dev, "lli_table_ptr->block_size is %x\n",
			lli_table_ptr->block_size);

		/* check for overflow of the table data */
		if (curr_table_data_size > table_data_size) {
			dev_dbg(&sep->pdev->dev,
				"curr_table_data_size too large\n");

			/* update the size of block in the table */
			lli_table_ptr->block_size -=
				cpu_to_le32((curr_table_data_size - table_data_size));

			/* update the physical address in the lli array */
			lli_array_ptr[array_counter].bus_address +=
				cpu_to_le32(lli_table_ptr->block_size);

			/* update the block size left in the lli array */
			lli_array_ptr[array_counter].block_size =
				(curr_table_data_size - table_data_size);
		} else
			/* advance to the next entry in the lli_array */
			array_counter++;

		dev_dbg(&sep->pdev->dev,
			"lli_table_ptr->bus_address is %08lx\n",
			(unsigned long)lli_table_ptr->bus_address);
		dev_dbg(&sep->pdev->dev,
			"lli_table_ptr->block_size is %x\n",
			lli_table_ptr->block_size);

		/* move to the next entry in table */
		lli_table_ptr++;
	}
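
	/*
	 * Terminate the table with an "info" entry: bus address 0xffffffff
	 * and block size 0 mark the end of the chain unless a caller later
	 * patches this entry to point at the next table (see
	 * sep_prepare_input_dma_table).
	 */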
	/* set the info entry to default */
	lli_table_ptr->bus_address = 0xffffffff;
	lli_table_ptr->block_size = 0;

	dev_dbg(&sep->pdev->dev, "lli_table_ptr is %p\n", lli_table_ptr);
	dev_dbg(&sep->pdev->dev, "lli_table_ptr->bus_address is %08lx\n",
		(unsigned long)lli_table_ptr->bus_address);
	dev_dbg(&sep->pdev->dev, "lli_table_ptr->block_size is %x\n",
		lli_table_ptr->block_size);

	/* set the output parameter */
	*num_processed_entries_ptr += array_counter;

	dev_dbg(&sep->pdev->dev, "num_processed_entries_ptr is %x\n",
		*num_processed_entries_ptr);

	dev_dbg(&sep->pdev->dev, "sep_build_lli_table end\n");
}
/**
 *	sep_shared_area_virt_to_bus - map shared area to bus address
 *	@sep: pointer to struct sep_device
 *	@virt_address: virtual address to convert
 *
 *	This function returns the physical address inside the shared area
 *	according to the virtual address. It can be either on the external
 *	RAM device (ioremapped), or on the system RAM.
 *	This implementation is for the external RAM.
 */
static dma_addr_t sep_shared_area_virt_to_bus(struct sep_device *sep,
	void *virt_address)
{
	dev_dbg(&sep->pdev->dev, "sh virt to phys v %p\n", virt_address);
	dev_dbg(&sep->pdev->dev, "sh virt to phys p %08lx\n",
		(unsigned long)
		sep->shared_bus + (virt_address - sep->shared_addr));

	return sep->shared_bus + (size_t)(virt_address - sep->shared_addr);
}
/**
 *	sep_shared_area_bus_to_virt - map shared area bus address to kernel
 *	@sep: pointer to struct sep_device
 *	@bus_address: bus address to convert
 *
 *	This function returns the virtual address inside the shared area
 *	according to the physical address. It can be either on the
 *	external RAM device (ioremapped), or on the system RAM.
 *	This implementation is for the external RAM.
 */
static void *sep_shared_area_bus_to_virt(struct sep_device *sep,
	dma_addr_t bus_address)
{
	dev_dbg(&sep->pdev->dev, "shared bus to virt b=%x v=%x\n",
		(u32)bus_address, (u32)(sep->shared_addr +
		(size_t)(bus_address - sep->shared_bus)));

	return sep->shared_addr + (size_t)(bus_address - sep->shared_bus);
}
/**
 *	sep_debug_print_lli_tables - dump LLI table
 *	@sep: pointer to struct sep_device
 *	@lli_table_ptr: pointer to sep_lli_entry
 *	@num_table_entries: number of entries
 *	@table_data_size: total data size
 *
 *	Walk the list of created tables and print all the data
 */
static void sep_debug_print_lli_tables(struct sep_device *sep,
	struct sep_lli_entry *lli_table_ptr,
	unsigned long num_table_entries,
	unsigned long table_data_size)
{
	unsigned long table_count = 1;
	unsigned long entries_count = 0;

	dev_dbg(&sep->pdev->dev, "sep_debug_print_lli_tables start\n");

	while ((unsigned long) lli_table_ptr != 0xffffffff) {
		dev_dbg(&sep->pdev->dev,
			"lli table %08lx, table_data_size is %lu\n",
			table_count, table_data_size);
		dev_dbg(&sep->pdev->dev, "num_table_entries is %lu\n",
			num_table_entries);

		/* print entries of the table (without info entry) */
		for (entries_count = 0; entries_count < num_table_entries;
			entries_count++, lli_table_ptr++) {

			dev_dbg(&sep->pdev->dev,
				"lli_table_ptr address is %08lx\n",
				(unsigned long) lli_table_ptr);

			dev_dbg(&sep->pdev->dev,
				"phys address is %08lx block size is %x\n",
				(unsigned long)lli_table_ptr->bus_address,
				lli_table_ptr->block_size);
		}
		/* point to the info entry */
		lli_table_ptr--;

		dev_dbg(&sep->pdev->dev,
			"phys lli_table_ptr->block_size is %x\n",
			lli_table_ptr->block_size);

		dev_dbg(&sep->pdev->dev,
			"phys lli_table_ptr->physical_address is %08lx\n",
			(unsigned long)lli_table_ptr->bus_address);
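
		/*
		 * The info entry packs the chain: the low 24 bits of
		 * block_size hold the next table's data size, the high
		 * 8 bits its entry count, and bus_address points at the
		 * next table (0xffffffff terminates the walk).
		 */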
		table_data_size = lli_table_ptr->block_size & 0xffffff;
		num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
		lli_table_ptr = (struct sep_lli_entry *)
			(lli_table_ptr->bus_address);

		dev_dbg(&sep->pdev->dev,
			"phys table_data_size is %lu num_table_entries is"
			" %lu lli_table_ptr is %lu\n", table_data_size,
			num_table_entries, (unsigned long)lli_table_ptr);

		if ((unsigned long)lli_table_ptr != 0xffffffff)
			lli_table_ptr = (struct sep_lli_entry *)
				sep_shared_bus_to_virt(sep,
				(unsigned long)lli_table_ptr);

		table_count++;
	}
	dev_dbg(&sep->pdev->dev, "sep_debug_print_lli_tables end\n");
}
/**
 *	sep_prepare_empty_lli_table - create a blank LLI table
 *	@sep: pointer to struct sep_device
 *	@lli_table_addr_ptr: pointer to lli table
 *	@num_entries_ptr: pointer to number of entries
 *	@table_data_size_ptr: point to table data size
 *
 *	This function creates empty lli tables when there is no data
 */
static void sep_prepare_empty_lli_table(struct sep_device *sep,
	dma_addr_t *lli_table_addr_ptr,
	u32 *num_entries_ptr,
	u32 *table_data_size_ptr)
{
	struct sep_lli_entry *lli_table_ptr;

	dev_dbg(&sep->pdev->dev, "sep_prepare_empty_lli_table start\n");

	/* find the area for the new table */
	lli_table_ptr =
		(struct sep_lli_entry *)(sep->shared_addr +
		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
		sep->num_lli_tables_created * sizeof(struct sep_lli_entry) *
		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);

	lli_table_ptr->bus_address = 0;
	lli_table_ptr->block_size = 0;

	lli_table_ptr++;
	lli_table_ptr->bus_address = 0xFFFFFFFF;
	lli_table_ptr->block_size = 0;

	/* set the output parameter value */
	*lli_table_addr_ptr = sep->shared_bus +
		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
		sep->num_lli_tables_created *
		sizeof(struct sep_lli_entry) *
		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

	/* set the num of entries and table data size for empty table */
	*num_entries_ptr = 2;
	*table_data_size_ptr = 0;

	/* update the number of created tables */
	sep->num_lli_tables_created++;

	dev_dbg(&sep->pdev->dev, "sep_prepare_empty_lli_table end\n");
}
/**
 *	sep_prepare_input_dma_table - prepare input DMA mappings
 *	@sep: pointer to struct sep_device
 *	@app_virt_addr: virtual address of the input buffer
 *	@data_size: size of data
 *	@block_size: block size to align each table to
 *	@lli_table_ptr: returned bus address of the first table
 *	@num_entries_ptr: returned number of entries in the first table
 *	@table_data_size_ptr: returned data size of the first table
 *	@is_kva: set for kernel data (kernel crypto call)
 *
 *	This function prepares only the input DMA table for synchronic
 *	symmetric operations (HASH).
 *	Note that all bus addresses that are passed to the SEP
 *	are in 32 bit format; the SEP is a 32 bit device.
 */
static int sep_prepare_input_dma_table(struct sep_device *sep,
	unsigned long app_virt_addr,
	u32 data_size,
	u32 block_size,
	dma_addr_t *lli_table_ptr,
	u32 *num_entries_ptr,
	u32 *table_data_size_ptr,
	bool is_kva)
{
	int error = 0;
	/* pointer to the info entry of the table - the last entry */
	struct sep_lli_entry *info_entry_ptr;
	/* array of pointers to page */
	struct sep_lli_entry *lli_array_ptr;
	/* points to the first entry to be processed in the lli_in_array */
	u32 current_entry = 0;
	/* num entries in the virtual buffer */
	u32 sep_lli_entries = 0;
	/* lli table pointer */
	struct sep_lli_entry *in_lli_table_ptr;
	/* the total data in one table */
	u32 table_data_size = 0;
	/* flag for last table */
	u32 last_table_flag = 0;
	/* number of entries in lli table */
	u32 num_entries_in_table = 0;
	/* next table address */
	u32 lli_table_alloc_addr = 0;

	dev_dbg(&sep->pdev->dev, "sep_prepare_input_dma_table start\n");
	dev_dbg(&sep->pdev->dev, "data_size is %x\n", data_size);
	dev_dbg(&sep->pdev->dev, "block_size is %x\n", block_size);

	/* initialize the pages pointers */
	sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = NULL;
	sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages = 0;

	/* set the kernel address for the first table to be allocated */
	lli_table_alloc_addr = (u32)(sep->shared_addr +
		SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
		sep->num_lli_tables_created * sizeof(struct sep_lli_entry) *
		SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);

	if (data_size == 0) {
		/* special case - create empty table - 2 entries, zero data */
		sep_prepare_empty_lli_table(sep, lli_table_ptr,
			num_entries_ptr, table_data_size_ptr);
		goto update_dcb_counter;
	}

	/* check if the pages are in Kernel Virtual Address layout */
	if (is_kva == true)
		/* lock the pages in the kernel */
		error = sep_lock_kernel_pages(sep, app_virt_addr,
			data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG);
	else
		/*
		 * lock the pages of the user buffer
		 * and translate them to pages
		 */
		error = sep_lock_user_pages(sep, app_virt_addr,
			data_size, &lli_array_ptr, SEP_DRIVER_IN_FLAG);

	if (error)
		goto end_function;

	dev_dbg(&sep->pdev->dev, "output sep_in_num_pages is %x\n",
		sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages);

	current_entry = 0;
	info_entry_ptr = NULL;
	sep_lli_entries = sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages;

	/* loop until all the entries in the in array are processed */
	while (current_entry < sep_lli_entries) {

		/* set the new input and output tables */
		in_lli_table_ptr =
			(struct sep_lli_entry *)lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
			SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		if (lli_table_alloc_addr >
			((u32)sep->shared_addr +
			SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
			SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {

			error = -ENOMEM;
			goto end_function_error;

		}

		/* update the number of created tables */
		sep->num_lli_tables_created++;

		/* calculate the maximum size of data for input table */
		table_data_size = sep_calculate_lli_table_max_size(sep,
			&lli_array_ptr[current_entry],
			(sep_lli_entries - current_entry),
			&last_table_flag);

		/*
		 * if this is not the last table -
		 * then align it to the block size
		 */
		if (!last_table_flag)
			table_data_size =
				(table_data_size / block_size) * block_size;

		dev_dbg(&sep->pdev->dev, "output table_data_size is %x\n",
			table_data_size);

		/* construct input lli table */
		sep_build_lli_table(sep, &lli_array_ptr[current_entry],
			in_lli_table_ptr,
			&current_entry, &num_entries_in_table, table_data_size);

		if (info_entry_ptr == NULL) {

			/* set the output parameters to physical addresses */
			*lli_table_ptr = sep_shared_area_virt_to_bus(sep,
				in_lli_table_ptr);
			*num_entries_ptr = num_entries_in_table;
			*table_data_size_ptr = table_data_size;

			dev_dbg(&sep->pdev->dev,
				"output lli_table_in_ptr is %08lx\n",
				(unsigned long)*lli_table_ptr);

		} else {
			/* update the info entry of the previous in table */
			info_entry_ptr->bus_address =
				sep_shared_area_virt_to_bus(sep,
					in_lli_table_ptr);
			info_entry_ptr->block_size =
				((num_entries_in_table) << 24) |
				(table_data_size);
		}
		/* save the pointer to the info entry of the current tables */
		info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
	}
	/* print input tables */
	sep_debug_print_lli_tables(sep, (struct sep_lli_entry *)
		sep_shared_area_bus_to_virt(sep, *lli_table_ptr),
		*num_entries_ptr, *table_data_size_ptr);

	/* the array of the pages */
	kfree(lli_array_ptr);

update_dcb_counter:
	/* update dcb counter */
	sep->nr_dcb_creat++;
	goto end_function;

end_function_error:
	/* free all the allocated resources */
	kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_map_array);
	kfree(lli_array_ptr);
	kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_page_array);

end_function:
	dev_dbg(&sep->pdev->dev, "sep_prepare_input_dma_table end\n");
	return error;
}
/**
 *	sep_construct_dma_tables_from_lli - prepare AES/DES mappings
 *	@sep: pointer to struct sep_device
 *	@lli_in_array: input lli array
 *	@sep_in_lli_entries: number of entries in the input lli array
 *	@lli_out_array: output lli array
 *	@sep_out_lli_entries: number of entries in the output lli array
 *	@block_size: block size to align each table to
 *	@lli_table_in_ptr: returned bus address of the first input table
 *	@lli_table_out_ptr: returned bus address of the first output table
 *	@in_num_entries_ptr: returned number of input table entries
 *	@out_num_entries_ptr: returned number of output table entries
 *	@table_data_size_ptr: returned table data size
 *
 *	This function creates the input and output DMA tables for
 *	symmetric operations (AES/DES) according to the block
 *	size from the LLI arrays.
 *	Note that all bus addresses that are passed to the SEP
 *	are in 32 bit format; the SEP is a 32 bit device.
 */
static int sep_construct_dma_tables_from_lli(
	struct sep_device *sep,
	struct sep_lli_entry *lli_in_array,
	u32 sep_in_lli_entries,
	struct sep_lli_entry *lli_out_array,
	u32 sep_out_lli_entries,
	u32 block_size,
	dma_addr_t *lli_table_in_ptr,
	dma_addr_t *lli_table_out_ptr,
	u32 *in_num_entries_ptr,
	u32 *out_num_entries_ptr,
	u32 *table_data_size_ptr)
{
1891 /* points to the area where next lli table can be allocated */
1892 u32 lli_table_alloc_addr = 0;
1893 /* input lli table */
1894 struct sep_lli_entry *in_lli_table_ptr = 0;
1895 /* output lli table */
1896 struct sep_lli_entry *out_lli_table_ptr = 0;
1897 /* pointer to the info entry of the input table - the last entry */
1898 struct sep_lli_entry *info_in_entry_ptr = 0;
1899 /* pointer to the info entry of the output table - the last entry */
1900 struct sep_lli_entry *info_out_entry_ptr = 0;
1901 /* points to the first entry to be processed in the lli_in_array */
1902 u32 current_in_entry = 0;
1903 /* points to the first entry to be processed in the lli_out_array */
1904 u32 current_out_entry = 0;
1905 /* max size of the input table */
1906 u32 in_table_data_size = 0;
1907 /* max size of the output table */
1908 u32 out_table_data_size = 0;
1909 /* flag that signifies if this is the last table to build */
1910 u32 last_table_flag = 0;
1911 /* the data size that should be in table */
1912 u32 table_data_size = 0;
1913 /* number of entries in the input table */
1914 u32 num_entries_in_table = 0;
1915 /* number of entries in the output table */
1916 u32 num_entries_out_table = 0;
1918 dev_dbg(&sep->pdev->dev, "sep_construct_dma_tables_from_lli start\n");
1920 /* initiate to point after the message area */
1921 lli_table_alloc_addr = (u32)(sep->shared_addr +
1922 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1923 (sep->num_lli_tables_created *
1924 (sizeof(struct sep_lli_entry) *
1925 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP)));
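/*
 * Table slots are consumed linearly from the shared area, one per
 * table already built. As an illustration only (8-byte entries and
 * 32 entries per table are assumed here, not values taken from the
 * headers): with num_lli_tables_created == 2, the next table would
 * begin 2 * 8 * 32 = 512 bytes past
 * SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES.
 */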
1927 /* loop until all the entries in the input array are processed */
1928 while (current_in_entry < sep_in_lli_entries) {
1929 /* set the new input and output tables */
1930 in_lli_table_ptr =
1931 (struct sep_lli_entry *)lli_table_alloc_addr;
1933 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
1934 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1936 /* set the first output tables */
1937 out_lli_table_ptr =
1938 (struct sep_lli_entry *)lli_table_alloc_addr;
1940 /* check if the DMA table area limit was overrun */
1941 if ((lli_table_alloc_addr + sizeof(struct sep_lli_entry) *
1942 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP) >
1943 ((u32)sep->shared_addr +
1944 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES +
1945 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES)) {
1947 dev_warn(&sep->pdev->dev, "dma table limit overrun\n");
1948 return -ENOMEM;
1951 /* update the number of the lli tables created */
1952 sep->num_lli_tables_created += 2;
1954 lli_table_alloc_addr += sizeof(struct sep_lli_entry) *
1955 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;
1957 /* calculate the maximum size of data for input table */
1958 in_table_data_size =
1959 sep_calculate_lli_table_max_size(sep,
1960 &lli_in_array[current_in_entry],
1961 (sep_in_lli_entries - current_in_entry),
1962 &last_table_flag);
1964 /* calculate the maximum size of data for output table */
1965 out_table_data_size =
1966 sep_calculate_lli_table_max_size(sep,
1967 &lli_out_array[current_out_entry],
1968 (sep_out_lli_entries - current_out_entry),
1969 &last_table_flag);
1971 dev_dbg(&sep->pdev->dev,
1972 "in_table_data_size is %x\n",
1973 in_table_data_size);
1975 dev_dbg(&sep->pdev->dev,
1976 "out_table_data_size is %x\n",
1977 out_table_data_size);
1979 table_data_size = in_table_data_size;
1981 if (!last_table_flag) {
1982 /*
1983 * if this is not the last table,
1984 * then we must check where the data is smallest
1985 * and then align it to the block size
1986 */
1987 if (table_data_size > out_table_data_size)
1988 table_data_size = out_table_data_size;
1990 /*
1991 * now calculate the table size so that
1992 * it will be a multiple of the block size
1993 */
1994 table_data_size = (table_data_size / block_size) *
1995 block_size;
1998 dev_dbg(&sep->pdev->dev, "table_data_size is %x\n",
1999 table_data_size);
2001 /* construct input lli table */
2002 sep_build_lli_table(sep, &lli_in_array[current_in_entry],
2003 in_lli_table_ptr,
2004 &current_in_entry,
2005 &num_entries_in_table,
2006 table_data_size);
2008 /* construct output lli table */
2009 sep_build_lli_table(sep, &lli_out_array[current_out_entry],
2010 out_lli_table_ptr,
2011 &current_out_entry,
2012 &num_entries_out_table,
2013 table_data_size);
2015 /* if info entry is null - this is the first table built */
2016 if (info_in_entry_ptr == 0) {
2017 /* set the output parameters to physical addresses */
2018 *lli_table_in_ptr =
2019 sep_shared_area_virt_to_bus(sep, in_lli_table_ptr);
2021 *in_num_entries_ptr = num_entries_in_table;
2023 *lli_table_out_ptr =
2024 sep_shared_area_virt_to_bus(sep,
2025 out_lli_table_ptr);
2027 *out_num_entries_ptr = num_entries_out_table;
2028 *table_data_size_ptr = table_data_size;
2030 dev_dbg(&sep->pdev->dev,
2031 "output lli_table_in_ptr is %08lx\n",
2032 (unsigned long)*lli_table_in_ptr);
2033 dev_dbg(&sep->pdev->dev,
2034 "output lli_table_out_ptr is %08lx\n",
2035 (unsigned long)*lli_table_out_ptr);
2036 } else {
2037 /* update the info entry of the previous in table */
2038 info_in_entry_ptr->bus_address =
2039 sep_shared_area_virt_to_bus(sep,
2040 in_lli_table_ptr);
2042 info_in_entry_ptr->block_size =
2043 ((num_entries_in_table) << 24) |
2044 (table_data_size);
2046 /* update the info entry of the previous out table */
2047 info_out_entry_ptr->bus_address =
2048 sep_shared_area_virt_to_bus(sep,
2049 out_lli_table_ptr);
2051 info_out_entry_ptr->block_size =
2052 ((num_entries_out_table) << 24) |
2053 (table_data_size);
2055 dev_dbg(&sep->pdev->dev,
2056 "output lli_table_in_ptr:%08lx %08x\n",
2057 (unsigned long)info_in_entry_ptr->bus_address,
2058 info_in_entry_ptr->block_size);
2060 dev_dbg(&sep->pdev->dev,
2061 "output lli_table_out_ptr:%08lx %08x\n",
2062 (unsigned long)info_out_entry_ptr->bus_address,
2063 info_out_entry_ptr->block_size);
2066 /* save the pointer to the info entry of the current tables */
2067 info_in_entry_ptr = in_lli_table_ptr +
2068 num_entries_in_table - 1;
2069 info_out_entry_ptr = out_lli_table_ptr +
2070 num_entries_out_table - 1;
2072 dev_dbg(&sep->pdev->dev,
2073 "output num_entries_out_table is %x\n",
2074 (u32)num_entries_out_table);
2075 dev_dbg(&sep->pdev->dev,
2076 "output info_in_entry_ptr is %lx\n",
2077 (unsigned long)info_in_entry_ptr);
2078 dev_dbg(&sep->pdev->dev,
2079 "output info_out_entry_ptr is %lx\n",
2080 (unsigned long)info_out_entry_ptr);
2083 /* print input tables */
2084 sep_debug_print_lli_tables(sep,
2085 (struct sep_lli_entry *)
2086 sep_shared_area_bus_to_virt(sep, *lli_table_in_ptr),
2087 *in_num_entries_ptr,
2088 *table_data_size_ptr);
2090 /* print output tables */
2091 sep_debug_print_lli_tables(sep,
2092 (struct sep_lli_entry *)
2093 sep_shared_area_bus_to_virt(sep, *lli_table_out_ptr),
2094 *out_num_entries_ptr,
2095 *table_data_size_ptr);
2097 dev_dbg(&sep->pdev->dev, "sep_construct_dma_tables_from_lli end\n");
2098 return 0;
2099 }
2101 /**
2102 * sep_prepare_input_output_dma_table - prepare DMA I/O table
2103 * @app_virt_in_addr: virtual address of the input buffer
2104 * @app_virt_out_addr: virtual address of the output buffer
2105 * @data_size: size of the data to process
2106 * @block_size: block size of the operation
2107 * @lli_table_in_ptr: returns the bus address of the first input table
2108 * @lli_table_out_ptr: returns the bus address of the first output table
2109 * @in_num_entries_ptr: returns the entry count of the first input table
2110 * @out_num_entries_ptr: returns the entry count of the first output table
2111 * @table_data_size_ptr: returns the data size handled by the first tables
2112 * @is_kva: set for kernel data; used only for kernel crypto module
2114 * This function builds input and output DMA tables for synchronic
2115 * symmetric operations (AES, DES, HASH). It also ensures that each
2116 * table holds a whole number of cipher blocks
2117 * Note that all bus addresses that are passed to the sep
2118 * are in 32 bit format; the SEP is a 32 bit device
2119 */
2120 static int sep_prepare_input_output_dma_table(struct sep_device *sep,
2121 unsigned long app_virt_in_addr,
2122 unsigned long app_virt_out_addr,
2123 u32 data_size,
2124 u32 block_size,
2125 dma_addr_t *lli_table_in_ptr,
2126 dma_addr_t *lli_table_out_ptr,
2127 u32 *in_num_entries_ptr,
2128 u32 *out_num_entries_ptr,
2129 u32 *table_data_size_ptr,
2130 bool is_kva)
2133 int error = 0;
2134 /* array of pointers of page */
2135 struct sep_lli_entry *lli_in_array;
2136 /* array of pointers of page */
2137 struct sep_lli_entry *lli_out_array;
2139 dev_dbg(&sep->pdev->dev, "sep_prepare_input_output_dma_table start\n");
2141 if (data_size == 0) {
2142 /* prepare empty table for input and output */
2143 sep_prepare_empty_lli_table(sep, lli_table_in_ptr,
2144 in_num_entries_ptr, table_data_size_ptr);
2146 sep_prepare_empty_lli_table(sep, lli_table_out_ptr,
2147 out_num_entries_ptr, table_data_size_ptr);
2149 goto update_dcb_counter;
2152 /* initialize the pages pointers */
2153 sep->dma_res_arr[sep->nr_dcb_creat].in_page_array = 0;
2154 sep->dma_res_arr[sep->nr_dcb_creat].out_page_array = 0;
2156 /* lock the pages of the buffer and translate them to pages */
2157 if (is_kva == true) {
2158 error = sep_lock_kernel_pages(sep, app_virt_in_addr,
2159 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG);
2161 if (error) {
2162 dev_warn(&sep->pdev->dev,
2163 "lock kernel for in failed\n");
2164 goto end_function;
2167 error = sep_lock_kernel_pages(sep, app_virt_out_addr,
2168 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG);
2170 if (error) {
2171 dev_warn(&sep->pdev->dev,
2172 "lock kernel for out failed\n");
2173 goto end_function;
2177 else {
2178 error = sep_lock_user_pages(sep, app_virt_in_addr,
2179 data_size, &lli_in_array, SEP_DRIVER_IN_FLAG);
2180 if (error) {
2181 dev_warn(&sep->pdev->dev,
2182 "sep_lock_user_pages for input virtual buffer failed\n");
2183 goto end_function;
2186 error = sep_lock_user_pages(sep, app_virt_out_addr,
2187 data_size, &lli_out_array, SEP_DRIVER_OUT_FLAG);
2189 if (error) {
2190 dev_warn(&sep->pdev->dev,
2191 "sep_lock_user_pages for output virtual buffer failed\n");
2192 goto end_function_free_lli_in;
2196 dev_dbg(&sep->pdev->dev, "sep_in_num_pages is %x\n",
2197 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages);
2198 dev_dbg(&sep->pdev->dev, "sep_out_num_pages is %x\n",
2199 sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages);
2200 dev_dbg(&sep->pdev->dev, "SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n",
2201 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);
2203 /* call the function that creates the tables from the lli arrays */
2204 error = sep_construct_dma_tables_from_lli(sep, lli_in_array,
2205 sep->dma_res_arr[sep->nr_dcb_creat].in_num_pages,
2206 lli_out_array,
2207 sep->dma_res_arr[sep->nr_dcb_creat].out_num_pages,
2208 block_size, lli_table_in_ptr, lli_table_out_ptr,
2209 in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
2211 if (error) {
2212 dev_warn(&sep->pdev->dev,
2213 "sep_construct_dma_tables_from_lli failed\n");
2214 goto end_function_with_error;
2217 kfree(lli_out_array);
2218 kfree(lli_in_array);
2220 update_dcb_counter:
2221 /* update dcb counter */
2222 sep->nr_dcb_creat++;
2223 /* fall through to the debug prints and the normal exit path */
2224 dev_dbg(&sep->pdev->dev, "in_num_entries_ptr is %08x\n",
2225 *in_num_entries_ptr);
2226 dev_dbg(&sep->pdev->dev, "out_num_entries_ptr is %08x\n",
2227 *out_num_entries_ptr);
2228 dev_dbg(&sep->pdev->dev, "table_data_size_ptr is %08x\n",
2229 *table_data_size_ptr);
2231 goto end_function;
2233 end_function_with_error:
2234 kfree(sep->dma_res_arr[sep->nr_dcb_creat].out_map_array);
2235 kfree(sep->dma_res_arr[sep->nr_dcb_creat].out_page_array);
2236 kfree(lli_out_array);
2239 end_function_free_lli_in:
2240 kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_map_array);
2241 kfree(sep->dma_res_arr[sep->nr_dcb_creat].in_page_array);
2242 kfree(lli_in_array);
2244 end_function:
2245 dev_dbg(&sep->pdev->dev,
2246 "sep_prepare_input_output_dma_table end result = %d\n", error);
2248 return error;
2253 * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
2254 * @app_in_address: unsigned long; for data buffer in (user space)
2255 * @app_out_address: unsigned long; for data buffer out (user space)
2256 * @data_in_size: u32; for size of data
2257 * @block_size: u32; for block size
2258 * @tail_block_size: u32; for size of tail block
2259 * @isapplet: bool; to indicate external app
2260 * @is_kva: bool; kernel buffer; only used for kernel crypto module
2262 * This function prepares the linked dma tables and puts the
2263 * address for the linked list of tables into a dcb (data control
2264 * block) the address of which is known by the sep hardware
2265 * Note that all bus addresses that are passed to the sep
2266 * are in 32 bit format; the SEP is a 32 bit device
2268 static int sep_prepare_input_output_dma_table_in_dcb(struct sep_device *sep,
2269 u32 app_in_address,
2270 u32 app_out_address,
2271 u32 data_in_size,
2272 u32 block_size,
2273 u32 tail_block_size,
2274 bool isapplet,
2275 bool is_kva)
2277 int error = 0;
2278 /* size of tail */
2279 u32 tail_size = 0;
2280 /* address of the created dcb table */
2281 struct sep_dcblock *dcb_table_ptr = 0;
2282 /* the physical address of the first input DMA table */
2283 dma_addr_t in_first_mlli_address = 0;
2284 /* number of entries in the first input DMA table */
2285 u32 in_first_num_entries = 0;
2286 /* the physical address of the first output DMA table */
2287 dma_addr_t out_first_mlli_address = 0;
2288 /* number of entries in the first output DMA table */
2289 u32 out_first_num_entries = 0;
2290 /* data in the first input/output table */
2291 u32 first_data_size = 0;
2293 dev_dbg(&sep->pdev->dev, "prepare_input_output_dma_table_in_dcb start\n");
2295 if (sep->nr_dcb_creat == SEP_MAX_NUM_SYNC_DMA_OPS) {
2296 /* No more DCBS to allocate */
2297 dev_warn(&sep->pdev->dev, "no more DCBs available\n");
2298 error = -ENOSPC;
2299 goto end_function;
2302 /* allocate new DCB */
2303 dcb_table_ptr = (struct sep_dcblock *)(sep->shared_addr +
2304 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES +
2305 (sep->nr_dcb_creat * sizeof(struct sep_dcblock)));
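/*
 * DCBs live back to back in the shared area, so the Nth DCB
 * (sep->nr_dcb_creat == N) sits at the DCB region offset plus
 * N * sizeof(struct sep_dcblock).
 */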
2307 /* set the default values in the dcb */
2308 dcb_table_ptr->input_mlli_address = 0;
2309 dcb_table_ptr->input_mlli_num_entries = 0;
2310 dcb_table_ptr->input_mlli_data_size = 0;
2311 dcb_table_ptr->output_mlli_address = 0;
2312 dcb_table_ptr->output_mlli_num_entries = 0;
2313 dcb_table_ptr->output_mlli_data_size = 0;
2314 dcb_table_ptr->tail_data_size = 0;
2315 dcb_table_ptr->out_vr_tail_pt = 0;
2317 if (isapplet == true) {
2318 tail_size = data_in_size % block_size;
2319 if (tail_size) {
2320 if (data_in_size < tail_block_size) {
2321 dev_warn(&sep->pdev->dev, "data in size smaller than tail block size\n");
2322 error = -ENOSPC;
2323 goto end_function;
2325 if (tail_block_size)
2326 /*
2327 * in this case the tail size should be
2328 * bigger than the real block size
2329 */
2330 tail_size = tail_block_size +
2331 ((data_in_size -
2332 tail_block_size) % block_size);
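/*
 * Worked example with hypothetical sizes: data_in_size = 100,
 * block_size = 16, tail_block_size = 32. The initial tail is
 * 100 % 16 = 4; it is then widened to 32 + ((100 - 32) % 16) = 36,
 * leaving 64 bytes (a whole number of blocks) for the DMA tables
 * built below.
 */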
2335 /* check if there is enough data for dma operation */
2336 if (data_in_size < SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE) {
2337 if (is_kva == true) {
2338 memcpy(dcb_table_ptr->tail_data,
2339 (void *)app_in_address, data_in_size);
2340 } else {
2341 if (copy_from_user(dcb_table_ptr->tail_data,
2342 (void __user *)app_in_address,
2343 data_in_size)) {
2344 error = -EFAULT;
2345 goto end_function;
2349 dcb_table_ptr->tail_data_size = data_in_size;
2351 /* set the output user-space address for mem2mem op */
2352 if (app_out_address)
2353 dcb_table_ptr->out_vr_tail_pt =
2354 (u32)app_out_address;
2356 /*
2357 * Update both data length parameters in order to avoid
2358 * second data copy and allow building of empty mlli
2359 * tables
2360 */
2361 tail_size = 0x0;
2362 data_in_size = 0x0;
2364 if (tail_size) {
2365 if (is_kva == true) {
2366 memcpy(dcb_table_ptr->tail_data,
2367 (void *)(app_in_address + data_in_size -
2368 tail_size), tail_size);
2369 } else {
2370 /* we have tail data - copy it to dcb */
2371 if (copy_from_user(dcb_table_ptr->tail_data,
2372 (void *)(app_in_address +
2373 data_in_size - tail_size), tail_size)) {
2374 error = -EFAULT;
2375 goto end_function;
2378 if (app_out_address)
2379 /*
2380 * Calculate the output address
2381 * according to tail data size
2382 */
2383 dcb_table_ptr->out_vr_tail_pt =
2384 app_out_address + data_in_size
2385 - tail_size;
2387 /* Save the real tail data size */
2388 dcb_table_ptr->tail_data_size = tail_size;
2389 /*
2390 * Update the data size without the tail
2391 * data size AKA data for the dma
2392 */
2393 data_in_size = (data_in_size - tail_size);
2396 /* check if we need to build only input table or input/output */
2397 if (app_out_address) {
2398 /* prepare input/output tables */
2399 error = sep_prepare_input_output_dma_table(sep,
2400 app_in_address,
2401 app_out_address,
2402 data_in_size,
2403 block_size,
2404 &in_first_mlli_address,
2405 &out_first_mlli_address,
2406 &in_first_num_entries,
2407 &out_first_num_entries,
2408 &first_data_size,
2409 is_kva);
2410 } else {
2411 /* prepare input tables */
2412 error = sep_prepare_input_dma_table(sep,
2413 app_in_address,
2414 data_in_size,
2415 block_size,
2416 &in_first_mlli_address,
2417 &in_first_num_entries,
2418 &first_data_size,
2419 is_kva);
2422 if (error) {
2423 dev_warn(&sep->pdev->dev, "prepare dma table call failed from prepare dcb call\n");
2424 goto end_function;
2427 /* set the dcb values */
2428 dcb_table_ptr->input_mlli_address = in_first_mlli_address;
2429 dcb_table_ptr->input_mlli_num_entries = in_first_num_entries;
2430 dcb_table_ptr->input_mlli_data_size = first_data_size;
2431 dcb_table_ptr->output_mlli_address = out_first_mlli_address;
2432 dcb_table_ptr->output_mlli_num_entries = out_first_num_entries;
2433 dcb_table_ptr->output_mlli_data_size = first_data_size;
2435 end_function:
2436 dev_dbg(&sep->pdev->dev,
2437 "sep_prepare_input_output_dma_table_in_dcb end\n");
2438 return error;
2444 * sep_create_sync_dma_tables_handler - create sync dma tables
2445 * @sep: pointer to struct sep_device
2446 * @arg: pointer to struct bld_syn_tab_struct
2448 * Handle the request for creation of the DMA tables for the synchronic
2449 * symmetric operations (AES,DES). Note that all bus addresses that are
2450 * passed to the SEP are in 32 bit format; the SEP is a 32 bit device
2452 static int sep_create_sync_dma_tables_handler(struct sep_device *sep,
2453 unsigned long arg)
2455 int error = 0;
2457 /* command arguments */
2458 struct bld_syn_tab_struct command_args;
2460 dev_dbg(&sep->pdev->dev,
2461 "sep_create_sync_dma_tables_handler start\n");
2463 if (copy_from_user(&command_args, (void __user *)arg,
2464 sizeof(struct bld_syn_tab_struct))) {
2465 error = -EFAULT;
2466 goto end_function;
2469 dev_dbg(&sep->pdev->dev, "app_in_address is %08llx\n",
2470 command_args.app_in_address);
2471 dev_dbg(&sep->pdev->dev, "app_out_address is %08llx\n",
2472 command_args.app_out_address);
2473 dev_dbg(&sep->pdev->dev, "data_size is %u\n",
2474 command_args.data_in_size);
2475 dev_dbg(&sep->pdev->dev, "block_size is %u\n",
2476 command_args.block_size);
2478 /* validate user parameters */
2479 if (!command_args.app_in_address) {
2480 error = -EINVAL;
2481 goto end_function;
2484 error = sep_prepare_input_output_dma_table_in_dcb(sep,
2485 command_args.app_in_address,
2486 command_args.app_out_address,
2487 command_args.data_in_size,
2488 command_args.block_size,
2489 0x0,
2490 false,
2491 false);
2493 end_function:
2494 dev_dbg(&sep->pdev->dev, "sep_create_sync_dma_tables_handler end\n");
2495 return error;
2499 * sep_free_dma_tables_and_dcb - free DMA tables and DCBs
2500 * @sep: pointer to struct sep_device
2501 * @isapplet: indicates external application (used for kernel access)
2502 * @is_kva: indicates kernel addresses (only used for kernel crypto)
2504 * This function frees the dma tables and dcb block
2506 static int sep_free_dma_tables_and_dcb(struct sep_device *sep, bool isapplet,
2507 bool is_kva)
2509 int i = 0;
2510 int error = 0;
2511 int error_temp = 0;
2512 struct sep_dcblock *dcb_table_ptr;
2514 dev_dbg(&sep->pdev->dev, "sep_free_dma_tables_and_dcb start\n");
2516 if (isapplet == true) {
2517 /* set pointer to first dcb table */
2518 dcb_table_ptr = (struct sep_dcblock *)
2519 (sep->shared_addr +
2520 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES);
2522 /* go over each dcb and see if tail pointer must be updated */
2523 for (i = 0; i < sep->nr_dcb_creat; i++, dcb_table_ptr++) {
2524 if (dcb_table_ptr->out_vr_tail_pt) {
2525 if (is_kva == true) {
2526 memcpy((void *)dcb_table_ptr->out_vr_tail_pt,
2527 dcb_table_ptr->tail_data,
2528 dcb_table_ptr->tail_data_size);
2529 } else {
2530 error_temp = copy_to_user(
2531 (void *)dcb_table_ptr->out_vr_tail_pt,
2532 dcb_table_ptr->tail_data,
2533 dcb_table_ptr->tail_data_size);
2535 if (error_temp) {
2536 /* release the dma resource */
2537 error = -EFAULT;
2538 break;
2543 /* free the output pages, if any */
2544 sep_free_dma_table_data_handler(sep);
2546 dev_dbg(&sep->pdev->dev, "sep_free_dma_tables_and_dcb end\n");
2547 return error;
2551 * sep_get_static_pool_addr_handler - get static pool address
2552 * @sep: pointer to struct sep_device
2553 * @arg: parameters from user space application
2555 * This function sets the bus and virtual addresses of the static pool
2556 * and returns the virtual address
2558 static int sep_get_static_pool_addr_handler(struct sep_device *sep,
2559 unsigned long arg)
2561 struct stat_pool_addr_struct command_args;
2562 u32 *static_pool_addr = 0;
2563 unsigned long addr_hold;
2565 dev_dbg(&sep->pdev->dev, "sep_get_static_pool_addr_handler start\n");
2567 static_pool_addr = (u32 *)(sep->shared_addr +
2568 SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES);
2570 static_pool_addr[0] = SEP_STATIC_POOL_VAL_TOKEN;
2571 static_pool_addr[1] = sep->shared_bus +
2572 SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
2574 addr_hold = (unsigned long)
2575 (sep->shared_addr + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES);
2576 command_args.static_virt_address = (aligned_u64)addr_hold;
2578 dev_dbg(&sep->pdev->dev, "static pool: physical %x virtual %x\n",
2579 (u32)static_pool_addr[1],
2580 (u32)command_args.static_virt_address);
2582 /* send the parameters to user application */
2583 if (copy_to_user((void __user *) arg, &command_args,
2584 sizeof(struct stat_pool_addr_struct)))
2585 return -EFAULT;
2587 dev_dbg(&sep->pdev->dev, "sep_get_static_pool_addr_handler end\n");
2589 return 0;
2593 * sep_start_handler - start device
2594 * @sep: pointer to struct sep_device
2596 static int sep_start_handler(struct sep_device *sep)
2598 unsigned long reg_val;
2599 unsigned long error = 0;
2601 dev_dbg(&sep->pdev->dev, "sep_start_handler start\n");
2603 /* wait in polling for message from SEP */
2604 do
2605 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2606 while (!reg_val);
2608 /* check the value */
2609 if (reg_val == 0x1)
2610 /* fatal error - read error status from GPR0 */
2611 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2612 dev_dbg(&sep->pdev->dev, "sep_start_handler end\n");
2613 return error;
2614 }
2616 /**
2617 * sep_check_sum_calc - checksum messages
2618 * @data: buffer to checksum
2619 * @length: buffer size
2621 * This function performs a checksum for messages that are sent
2622 * to the sep
2623 */
2624 static u32 sep_check_sum_calc(u8 *data, u32 length)
2626 u32 sum = 0;
2627 u16 *Tdata = (u16 *)data;
2629 while (length > 1) {
2630 /* This is the inner loop */
2631 sum += *Tdata++;
2632 length -= 2;
2635 /* Add left-over byte, if any */
2636 if (length > 0)
2637 sum += *(u8 *)Tdata;
2639 /* Fold 32-bit sum to 16 bits */
2640 while (sum>>16)
2641 sum = (sum & 0xffff) + (sum >> 16);
2643 return ~sum & 0xFFFF;
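/*
 * This is the usual 16-bit end-around-carry fold, the same scheme
 * the Internet checksum uses. Worked example: input words 0xFFFF and
 * 0x0001 sum to 0x10000; folding gives 0x0000 + 0x0001 = 0x0001, and
 * the function returns ~0x0001 & 0xFFFF = 0xFFFE.
 */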
2644 }
2646 /**
2647 * sep_init_handler - handle SEP initialization
2648 * @sep: pointer to struct sep_device
2649 * @arg: parameters from user space application
2651 * Handles the request for SEP initialization
2652 * Note that this will go away for Medfield once the SCU
2653 * SEP initialization is complete
2654 * Also note that the message to the sep has components
2655 * from user space as well as components written by the driver
2656 * This is because the portions of the message that pertain to
2657 * physical addresses must be set by the driver after the message
2658 * leaves custody of the user space application for security
2659 * reasons.
2660 */
2661 static int sep_init_handler(struct sep_device *sep, unsigned long arg)
2663 u32 message_buff[14];
2664 u32 counter;
2665 int error = 0;
2666 u32 reg_val;
2667 dma_addr_t new_base_addr;
2668 unsigned long addr_hold;
2669 struct init_struct command_args;
2671 dev_dbg(&sep->pdev->dev, "sep_init_handler start\n");
2673 /* make sure that we have not initialized already */
2674 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2676 if (reg_val != 0x2) {
2677 error = SEP_ALREADY_INITIALIZED_ERR;
2678 dev_warn(&sep->pdev->dev, "init; device already initialized\n");
2679 goto end_function;
2682 /* only root can initialize */
2683 if (!capable(CAP_SYS_ADMIN)) {
2684 error = -EACCES;
2685 goto end_function;
2688 /* copy in the parameters */
2689 error = copy_from_user(&command_args, (void __user *)arg,
2690 sizeof(struct init_struct));
2692 if (error) {
2693 error = -EFAULT;
2694 goto end_function;
2697 /* validate parameters */
2698 if (!command_args.message_addr || !command_args.sep_sram_addr ||
2699 command_args.message_size_in_words > 14) {
2700 error = -EINVAL;
2701 goto end_function;
2704 /* copy in the sep init message */
2705 addr_hold = (unsigned long)command_args.message_addr;
2706 error = copy_from_user(message_buff,
2707 (void __user *)addr_hold,
2708 command_args.message_size_in_words*sizeof(u32));
2710 if (error) {
2711 error = -EFAULT;
2712 goto end_function;
2715 /* load resident, cache, and extapp firmware */
2716 error = sep_load_firmware(sep);
2718 if (error) {
2719 dev_warn(&sep->pdev->dev,
2720 "init; load sep firmware failed %x\n", error);
2721 goto end_function;
2724 /* compute the base address */
2725 new_base_addr = sep->shared_bus;
2727 if (sep->resident_bus < new_base_addr)
2728 new_base_addr = sep->resident_bus;
2730 if (sep->cache_bus < new_base_addr)
2731 new_base_addr = sep->cache_bus;
2733 if (sep->dcache_bus < new_base_addr)
2734 new_base_addr = sep->dcache_bus;
2736 /* put physical addresses in sep message */
2737 message_buff[3] = (u32)new_base_addr;
2738 message_buff[4] = (u32)sep->shared_bus;
2739 message_buff[6] = (u32)sep->resident_bus;
2740 message_buff[7] = (u32)sep->cache_bus;
2741 message_buff[8] = (u32)sep->dcache_bus;
2743 message_buff[command_args.message_size_in_words - 1] = 0x0;
2744 message_buff[command_args.message_size_in_words - 1] =
2745 sep_check_sum_calc((u8 *)message_buff,
2746 command_args.message_size_in_words*sizeof(u32));
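/*
 * Note the ordering above: the last message word is zeroed first so
 * that the checksum is computed over the message with an empty
 * checksum slot, and only then is the result written into that slot.
 */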
2748 /* debug print of message */
2749 for (counter = 0; counter < command_args.message_size_in_words;
2750 counter++)
2751 dev_dbg(&sep->pdev->dev, "init; sep message word %d is %x\n",
2752 counter, message_buff[counter]);
2754 /* tell the sep the sram address */
2755 sep_write_reg(sep, HW_SRAM_ADDR_REG_ADDR, command_args.sep_sram_addr);
2757 /* push the message to the sep */
2758 for (counter = 0; counter < command_args.message_size_in_words;
2759 counter++) {
2760 sep_write_reg(sep, HW_SRAM_DATA_REG_ADDR,
2761 message_buff[counter]);
2762 sep_wait_sram_write(sep);
2765 /* signal sep that message is ready and to init */
2766 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1);
2768 /* wait for acknowledge */
2769 dev_dbg(&sep->pdev->dev, "init; waiting for msg response\n");
2771 do
2772 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2773 while (!(reg_val & 0xFFFFFFFD));
2775 if (reg_val == 0x1) {
2776 dev_warn(&sep->pdev->dev, "init; device init failed\n");
2777 error = sep_read_reg(sep, 0x8060);
2778 dev_warn(&sep->pdev->dev, "init; sw monitor is %x\n", error);
2779 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2780 dev_warn(&sep->pdev->dev, "init; error is %x\n", error);
2781 goto end_function;
2783 dev_dbg(&sep->pdev->dev, "init; end CC INIT, reg_val is %x\n", reg_val);
2785 /* signal sep to zero the GPR3 */
2786 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x10);
2788 /* wait for response */
2789 dev_dbg(&sep->pdev->dev, "init; waiting for zero set response\n");
2791 do
2792 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2793 while (reg_val != 0);
2795 end_function:
2796 dev_dbg(&sep->pdev->dev, "init is done\n");
2797 return error;
2801 * sep_end_transaction_handler - end transaction
2802 * @sep: pointer to struct sep_device
2804 * This API handles the end transaction request
2806 static int sep_end_transaction_handler(struct sep_device *sep)
2808 dev_dbg(&sep->pdev->dev, "sep_end_transaction_handler start\n");
2810 /* clear the data pool pointers Token */
2811 memset((void *)(sep->shared_addr +
2812 SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES),
2813 0, sep->num_of_data_allocations*2*sizeof(u32));
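/*
 * Each data pool allocation appears to occupy two u32 words here (a
 * token plus an address, by analogy with the static pool and RAR
 * slots elsewhere in this file), hence
 * num_of_data_allocations * 2 * sizeof(u32) bytes are cleared.
 */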
2815 /* check that all the dma resources were freed */
2816 sep_free_dma_table_data_handler(sep);
2818 clear_bit(SEP_MMAP_LOCK_BIT, &sep->in_use_flags);
2820 /*
2821 * we are now through with the transaction. Let's
2822 * allow other processes who have the device open
2823 * to perform transactions
2824 */
2825 mutex_lock(&sep->sep_mutex);
2826 sep->pid_doing_transaction = 0;
2827 mutex_unlock(&sep->sep_mutex);
2828 /* raise event for stuck contexts */
2829 wake_up(&sep->event);
2831 dev_dbg(&sep->pdev->dev, "waking up event\n");
2832 dev_dbg(&sep->pdev->dev, "sep_end_transaction_handler end\n");
2834 return 0;
2838 * sep_prepare_dcb_handler - prepare a control block
2839 * @sep: pointer to struct sep_device
2840 * @arg: pointer to user parameters
2842 * This function will retrieve the RAR buffer physical addresses, type
2843 * & size corresponding to the RAR handles provided in the buffers vector.
2845 static int sep_prepare_dcb_handler(struct sep_device *sep, unsigned long arg)
2847 /* error */
2848 int error;
2849 /* command arguments */
2850 struct build_dcb_struct command_args;
2852 dev_dbg(&sep->pdev->dev, "sep_prepare_dcb_handler start\n");
2854 /* Get the command arguments */
2855 if (copy_from_user(&command_args, (void __user *)arg,
2856 sizeof(struct build_dcb_struct))) {
2857 error = -EFAULT;
2858 goto end_function;
2861 dev_dbg(&sep->pdev->dev, "app_in_address is %08llx\n",
2862 command_args.app_in_address);
2863 dev_dbg(&sep->pdev->dev, "app_out_address is %08llx\n",
2864 command_args.app_out_address);
2865 dev_dbg(&sep->pdev->dev, "data_size is %x\n",
2866 command_args.data_in_size);
2867 dev_dbg(&sep->pdev->dev, "block_size is %x\n",
2868 command_args.block_size);
2869 dev_dbg(&sep->pdev->dev, "tail block_size is %x\n",
2870 command_args.tail_block_size);
2872 error = sep_prepare_input_output_dma_table_in_dcb(sep,
2873 command_args.app_in_address, command_args.app_out_address,
2874 command_args.data_in_size, command_args.block_size,
2875 command_args.tail_block_size, true, false);
2877 end_function:
2878 dev_dbg(&sep->pdev->dev, "sep_prepare_dcb_handler end\n");
2879 return error;
2884 * sep_free_dcb_handler - free control block resources
2885 * @sep: pointer to struct sep_device
2887 * This function frees the DCB resources and updates the needed
2888 * user-space buffers.
2890 static int sep_free_dcb_handler(struct sep_device *sep)
2892 int error ;
2894 dev_dbg(&sep->pdev->dev, "sep_free_dcb_handler start\n");
2895 dev_dbg(&sep->pdev->dev, "num of DCBs %x\n", sep->nr_dcb_creat);
2897 error = sep_free_dma_tables_and_dcb(sep, false, false);
2899 dev_dbg(&sep->pdev->dev, "sep_free_dcb_handler end\n");
2900 return error;
2904 * sep_rar_prepare_output_msg_handler - prepare an output message
2905 * @sep: pointer to struct sep_device
2906 * @arg: pointer to user parameters
2908 * This function will retrieve the RAR buffer physical addresses, type
2909 * & size corresponding to the RAR handles provided in the buffers vector.
2911 static int sep_rar_prepare_output_msg_handler(struct sep_device *sep,
2912 unsigned long arg)
2914 int error = 0;
2915 /* command args */
2916 struct rar_hndl_to_bus_struct command_args;
2917 struct RAR_buffer rar_buf;
2918 /* bus address */
2919 dma_addr_t rar_bus = 0;
2920 /* holds the RAR address in the system memory offset */
2921 u32 *rar_addr;
2923 dev_dbg(&sep->pdev->dev, "sep_rar_prepare_output_msg_handler start\n");
2925 /* copy the data */
2926 if (copy_from_user(&command_args, (void __user *)arg,
2927 sizeof(command_args))) {
2928 error = -EFAULT;
2929 goto end_function;
2932 /* call to translation function only if user handle is not NULL */
2933 if (command_args.rar_handle) {
2934 memset(&rar_buf, 0, sizeof(rar_buf));
2935 rar_buf.info.handle = (u32)command_args.rar_handle;
2937 if (rar_handle_to_bus(&rar_buf, 1) != 1) {
2938 dev_dbg(&sep->pdev->dev, "rar_handle_to_bus failure\n");
2939 error = -EFAULT;
2940 goto end_function;
2942 rar_bus = rar_buf.bus_address;
2944 dev_dbg(&sep->pdev->dev, "rar msg; rar_addr_bus = %x\n", (u32)rar_bus);
2946 /* set value in the SYSTEM MEMORY offset */
2947 rar_addr = (u32 *)(sep->shared_addr +
2948 SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES);
2950 /* copy the physical address to the System Area for the sep */
2951 rar_addr[0] = SEP_RAR_VAL_TOKEN;
2952 rar_addr[1] = rar_bus;
2954 end_function:
2955 dev_dbg(&sep->pdev->dev, "sep_rar_prepare_output_msg_handler end\n");
2956 return error;
2960 * sep_realloc_ext_cache_handler - report location of extcache
2961 * @sep: pointer to struct sep_device
2962 * @arg: pointer to user parameters
2964 * This function tells the sep where the extapp is located
2966 static int sep_realloc_ext_cache_handler(struct sep_device *sep,
2967 unsigned long arg)
2969 /* holds the new ext cache address in the system memory offset */
2970 u32 *system_addr;
2972 /* set value in the SYSTEM MEMORY offset */
2973 system_addr = (u32 *)(sep->shared_addr +
2974 SEP_DRIVER_SYSTEM_EXT_CACHE_ADDR_OFFSET_IN_BYTES);
2976 /* copy the physical address to the System Area for the sep */
2977 system_addr[0] = SEP_EXT_CACHE_ADDR_VAL_TOKEN;
2978 dev_dbg(&sep->pdev->dev, "ext cache init; system addr 0 is %x\n",
2979 system_addr[0]);
2980 system_addr[1] = sep->extapp_bus;
2981 dev_dbg(&sep->pdev->dev, "ext cache init; system addr 1 is %x\n",
2982 system_addr[1]);
2984 return 0;
2988 * sep_ioctl - ioctl api
2989 * @filp: pointer to struct file
2990 * @cmd: command
2991 * @arg: pointer to argument structure
2993 * Implement the ioctl methods available on the SEP device.
2995 static long sep_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
2997 int error = 0;
2998 struct sep_device *sep = filp->private_data;
3000 dev_dbg(&sep->pdev->dev, "ioctl start\n");
3002 dev_dbg(&sep->pdev->dev, "cmd is %x\n", cmd);
3003 dev_dbg(&sep->pdev->dev,
3004 "SEP_IOCSENDSEPCOMMAND is %x\n", SEP_IOCSENDSEPCOMMAND);
3005 dev_dbg(&sep->pdev->dev,
3006 "SEP_IOCALLOCDATAPOLL is %x\n", SEP_IOCALLOCDATAPOLL);
3007 dev_dbg(&sep->pdev->dev,
3008 "SEP_IOCCREATESYMDMATABLE is %x\n", SEP_IOCCREATESYMDMATABLE);
3009 dev_dbg(&sep->pdev->dev,
3010 "SEP_IOCFREEDMATABLEDATA is %x\n", SEP_IOCFREEDMATABLEDATA);
3011 dev_dbg(&sep->pdev->dev,
3012 "SEP_IOCSEPSTART is %x\n", SEP_IOCSEPSTART);
3013 dev_dbg(&sep->pdev->dev,
3014 "SEP_IOCSEPINIT is %x\n", SEP_IOCSEPINIT);
3015 dev_dbg(&sep->pdev->dev,
3016 "SEP_IOCGETSTATICPOOLADDR is %x\n", SEP_IOCGETSTATICPOOLADDR);
3017 dev_dbg(&sep->pdev->dev,
3018 "SEP_IOCENDTRANSACTION is %x\n", SEP_IOCENDTRANSACTION);
3019 dev_dbg(&sep->pdev->dev,
3020 "SEP_IOCREALLOCEXTCACHE is %x\n", SEP_IOCREALLOCEXTCACHE);
3021 dev_dbg(&sep->pdev->dev,
3022 "SEP_IOCRARPREPAREMESSAGE is %x\n", SEP_IOCRARPREPAREMESSAGE);
3023 dev_dbg(&sep->pdev->dev,
3024 "SEP_IOCPREPAREDCB is %x\n", SEP_IOCPREPAREDCB);
3025 dev_dbg(&sep->pdev->dev,
3026 "SEP_IOCFREEDCB is %x\n", SEP_IOCFREEDCB);
3028 /* make sure we own this device */
3029 mutex_lock(&sep->sep_mutex);
3030 if ((current->pid != sep->pid_doing_transaction) &&
3031 (sep->pid_doing_transaction != 0)) {
3032 dev_dbg(&sep->pdev->dev, "ioctl pid is not owner\n");
3033 mutex_unlock(&sep->sep_mutex);
3034 error = -EACCES;
3035 goto end_function;
3038 mutex_unlock(&sep->sep_mutex);
3040 /* check that the command is for sep device */
3041 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
3042 error = -ENOTTY;
3043 goto end_function;
3046 /* lock to prevent the daemon to interfere with operation */
3047 mutex_lock(&sep->ioctl_mutex);
3049 switch (cmd) {
3050 case SEP_IOCSENDSEPCOMMAND:
3051 /* send command to SEP */
3052 error = sep_send_command_handler(sep);
3053 break;
3054 case SEP_IOCALLOCDATAPOLL:
3055 /* allocate data pool */
3056 error = sep_allocate_data_pool_memory_handler(sep, arg);
3057 break;
3058 case SEP_IOCCREATESYMDMATABLE:
3059 /* create dma tables for synchronic operation */
3060 error = sep_create_sync_dma_tables_handler(sep, arg);
3061 break;
3062 case SEP_IOCFREEDMATABLEDATA:
3063 /* free the pages */
3064 error = sep_free_dma_table_data_handler(sep);
3065 break;
3066 case SEP_IOCSEPSTART:
3067 /* start command to sep */
3068 if (sep->pdev->revision == 0) /* only for old chip */
3069 error = sep_start_handler(sep);
3070 else
3071 error = -EPERM; /* not permitted on new chip */
3072 break;
3073 case SEP_IOCSEPINIT:
3074 /* init command to sep */
3075 if (sep->pdev->revision == 0) /* only for old chip */
3076 error = sep_init_handler(sep, arg);
3077 else
3078 error = -EPERM; /* not permitted on new chip */
3079 break;
3080 case SEP_IOCGETSTATICPOOLADDR:
3081 /* get the physical and virtual addresses of the static pool */
3082 error = sep_get_static_pool_addr_handler(sep, arg);
3083 break;
3084 case SEP_IOCENDTRANSACTION:
3085 error = sep_end_transaction_handler(sep);
3086 break;
3087 case SEP_IOCREALLOCEXTCACHE:
3088 if (sep->mrst)
3089 error = -ENODEV;
3090 else if (sep->pdev->revision == 0) /* only for old chip */
3091 error = sep_realloc_ext_cache_handler(sep, arg);
3092 else
3093 error = -EPERM; /* not permitted on new chip */
3094 break;
3095 case SEP_IOCRARPREPAREMESSAGE:
3096 error = sep_rar_prepare_output_msg_handler(sep, arg);
3097 break;
3098 case SEP_IOCPREPAREDCB:
3099 error = sep_prepare_dcb_handler(sep, arg);
3100 break;
3101 case SEP_IOCFREEDCB:
3102 error = sep_free_dcb_handler(sep);
3103 break;
3104 default:
3105 dev_dbg(&sep->pdev->dev, "invalid ioctl %x\n", cmd);
3106 error = -ENOTTY;
3107 break;
3109 mutex_unlock(&sep->ioctl_mutex);
3111 end_function:
3112 dev_dbg(&sep->pdev->dev, "ioctl end\n");
3113 return error;
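/*
 * For orientation, a user-space caller would drive the synchronous
 * flow roughly as sketched below. This is illustrative only: the
 * device node name is an assumption (the misc device is registered
 * under SEP_DEV_NAME), and the argument structures come from the
 * installed copy of sep_driver_api.h.
 *
 *	int fd = open("/dev/sep", O_RDWR);
 *	struct build_dcb_struct dcb = { ... };
 *
 *	ioctl(fd, SEP_IOCPREPAREDCB, &dcb);	// build DMA tables and DCB
 *	ioctl(fd, SEP_IOCSENDSEPCOMMAND, 0);	// kick the SEP
 *	// poll(2) on fd for the reply, then:
 *	ioctl(fd, SEP_IOCFREEDCB, 0);		// free the DMA resources
 *	ioctl(fd, SEP_IOCENDTRANSACTION, 0);	// give up device ownership
 */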
3117 * sep_singleton_ioctl - ioctl api for singleton interface
3118 * @filp: pointer to struct file
3119 * @cmd: command
3120 * @arg: pointer to argument structure
3122 * Implement the additional ioctls for the singleton device
3124 static long sep_singleton_ioctl(struct file *filp, u32 cmd, unsigned long arg)
3126 /* error */
3127 long error = 0;
3128 struct sep_device *sep = filp->private_data;
3130 dev_dbg(&sep->pdev->dev, "singleton_ioctl start\n");
3131 dev_dbg(&sep->pdev->dev, "cmd is %x\n", cmd);
3133 /* check that the command is for sep device */
3134 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
3135 error = -ENOTTY;
3136 goto end_function;
3139 /* make sure we own this device */
3140 mutex_lock(&sep->sep_mutex);
3141 if ((current->pid != sep->pid_doing_transaction) &&
3142 (sep->pid_doing_transaction != 0)) {
3143 dev_dbg(&sep->pdev->dev, "singleton ioctl pid is not owner\n");
3144 mutex_unlock(&sep->sep_mutex);
3145 error = -EACCES;
3146 goto end_function;
3149 mutex_unlock(&sep->sep_mutex);
3151 switch (cmd) {
3152 case SEP_IOCTLSETCALLERID:
3153 mutex_lock(&sep->ioctl_mutex);
3154 error = sep_set_caller_id_handler(sep, arg);
3155 mutex_unlock(&sep->ioctl_mutex);
3156 break;
3157 default:
3158 error = sep_ioctl(filp, cmd, arg);
3159 break;
3162 end_function:
3163 dev_dbg(&sep->pdev->dev, "singleton ioctl end\n");
3164 return error;
3168 * sep_request_daemon_ioctl - ioctl for daemon
3169 * @filp: pointer to struct file
3170 * @cmd: command
3171 * @arg: pointer to argument structure
3173 * Called by the request daemon to perform ioctls on the daemon device
3175 static long sep_request_daemon_ioctl(struct file *filp, u32 cmd,
3176 unsigned long arg)
3179 long error;
3180 struct sep_device *sep = filp->private_data;
3182 dev_dbg(&sep->pdev->dev, "daemon ioctl: start\n");
3183 dev_dbg(&sep->pdev->dev, "daemon ioctl: cmd is %x\n", cmd);
3185 /* check that the command is for sep device */
3186 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER) {
3187 error = -ENOTTY;
3188 goto end_function;
3191 /* only one process can access ioctl at any given time */
3192 mutex_lock(&sep->ioctl_mutex);
3194 switch (cmd) {
3195 case SEP_IOCSENDSEPRPLYCOMMAND:
3196 /* send reply command to SEP */
3197 error = sep_req_daemon_send_reply_command_handler(sep);
3198 break;
3199 case SEP_IOCENDTRANSACTION:
3200 /*
3201 * end req daemon transaction, do nothing
3202 * will be removed upon update in middleware
3203 * API library
3204 */
3205 error = 0;
3206 break;
3207 default:
3208 dev_dbg(&sep->pdev->dev, "daemon ioctl: no such IOCTL\n");
3209 error = -ENOTTY;
3211 mutex_unlock(&sep->ioctl_mutex);
3213 end_function:
3214 dev_dbg(&sep->pdev->dev, "daemon ioctl: end\n");
3215 return error;
3220 * sep_inthandler - Interrupt Handler
3221 * @irq: interrupt
3222 * @dev_id: device id
3224 static irqreturn_t sep_inthandler(int irq, void *dev_id)
3226 irqreturn_t int_error = IRQ_HANDLED;
3227 unsigned long lck_flags;
3228 u32 reg_val, reg_val2 = 0;
3229 struct sep_device *sep = dev_id;
3231 /* read the IRR register to check if this is SEP interrupt */
3232 reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
3233 dev_dbg(&sep->pdev->dev, "SEP Interrupt - reg is %08x\n", reg_val);
3235 if (reg_val & (0x1 << 13)) {
3236 /* lock and update the counter of reply messages */
3237 spin_lock_irqsave(&sep->snd_rply_lck, lck_flags);
3238 sep->reply_ct++;
3239 spin_unlock_irqrestore(&sep->snd_rply_lck, lck_flags);
3241 dev_dbg(&sep->pdev->dev, "sep int: send_ct %lx reply_ct %lx\n",
3242 sep->send_ct, sep->reply_ct);
3244 /* is this printf or daemon request? */
3245 reg_val2 = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
3246 dev_dbg(&sep->pdev->dev,
3247 "SEP Interrupt - reg2 is %08x\n", reg_val2);
3249 if ((reg_val2 >> 30) & 0x1) {
3250 dev_dbg(&sep->pdev->dev, "int: printf request\n");
3251 wake_up(&sep->event_request_daemon);
3252 } else if (reg_val2 >> 31) {
3253 dev_dbg(&sep->pdev->dev, "int: daemon request\n");
3254 wake_up(&sep->event_request_daemon);
3255 } else {
3256 dev_dbg(&sep->pdev->dev, "int: sep reply\n");
3257 wake_up(&sep->event);
3259 } else {
3260 dev_dbg(&sep->pdev->dev, "int: not sep interrupt\n");
3261 int_error = IRQ_NONE;
3263 if (int_error == IRQ_HANDLED)
3264 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
3266 return int_error;
3270 * sep_callback - RAR callback
3271 * @sep_context_pointer: pointer to struct sep_device
3273 * Function that is called by rar_register when it is ready with
3274 * a region (only for Moorestown)
3276 static int sep_callback(unsigned long sep_context_pointer)
3278 int error;
3279 struct sep_device *sep = (struct sep_device *)sep_context_pointer;
3280 dma_addr_t rar_end_address;
3282 dev_dbg(&sep->pdev->dev, "callback start\n");
3284 error = rar_get_address(RAR_TYPE_IMAGE, &sep->rar_bus,
3285 &rar_end_address);
3287 if (error) {
3288 dev_warn(&sep->pdev->dev, "mrst can't get rar region\n");
3289 goto end_function;
3292 sep->rar_size = (size_t)(rar_end_address - sep->rar_bus + 1);
3294 if (!request_mem_region(sep->rar_bus, sep->rar_size,
3295 "sep_sec_driver")) {
3296 dev_warn(&sep->pdev->dev,
3297 "request mem region for mrst failed\n");
3298 error = -EBUSY;
3299 goto end_function;
3302 sep->rar_addr = ioremap_nocache(sep->rar_bus, sep->rar_size);
3303 if (!sep->rar_addr) {
3304 dev_warn(&sep->pdev->dev,
3305 "ioremap nocache for mrst rar failed\n");
3306 error = -ENOMEM;
3307 goto end_function;
3309 dev_dbg(&sep->pdev->dev, "rar start is %p, phy is %llx, size is %x\n",
3310 sep->rar_addr, (unsigned long long)sep->rar_bus,
3311 sep->rar_size);
3313 end_function:
3314 dev_dbg(&sep->pdev->dev, "callback end\n");
3315 return error;
3319 * sep_probe - probe a matching PCI device
3320 * @pdev: pci_device
3321 * @ent: pci_device_id
3323 * Attempt to set up and configure a SEP device that has been
3324 * discovered by the PCI layer.
3326 static int __devinit sep_probe(struct pci_dev *pdev,
3327 const struct pci_device_id *ent)
3329 int error = 0;
3330 struct sep_device *sep;
3332 pr_debug("Sep pci probe starting\n");
3333 if (sep_dev != NULL) {
3334 dev_warn(&pdev->dev, "only one SEP supported.\n");
3335 return -EBUSY;
3338 /* enable the device */
3339 error = pci_enable_device(pdev);
3340 if (error) {
3341 dev_warn(&pdev->dev, "error enabling pci device\n");
3342 goto end_function;
3345 /* allocate the sep_device structure for this device */
3346 sep_dev = kzalloc(sizeof(struct sep_device), GFP_ATOMIC);
3347 if (sep_dev == NULL) {
3348 dev_warn(&pdev->dev,
3349 "can't kzalloc the sep_device structure\n");
3350 return -ENOMEM;
3353 /*
3354 * we're going to use another variable for actually
3355 * working with the device; this way, if we have
3356 * multiple devices in the future, it would be easier
3357 * to make appropriate changes
3358 */
3359 sep = sep_dev;
3361 sep->pdev = pdev;
3363 if (pdev->device == MRST_PCI_DEVICE_ID)
3364 sep->mrst = 1;
3366 dev_dbg(&sep->pdev->dev, "PCI obtained, device being prepared\n");
3367 dev_dbg(&sep->pdev->dev, "revision is %d\n", sep->pdev->revision);
3369 /* set up our register area */
3370 sep->reg_physical_addr = pci_resource_start(sep->pdev, 0);
3371 if (!sep->reg_physical_addr) {
3372 dev_warn(&sep->pdev->dev, "Error getting register start\n");
3373 pci_dev_put(sep->pdev);
3374 return -ENODEV;
3377 sep->reg_physical_end = pci_resource_end(sep->pdev, 0);
3378 if (!sep->reg_physical_end) {
3379 dev_warn(&sep->pdev->dev, "Error getting register end\n");
3380 pci_dev_put(sep->pdev);
3381 return -ENODEV;
3384 sep->reg_addr = ioremap_nocache(sep->reg_physical_addr,
3385 (size_t)(sep->reg_physical_end - sep->reg_physical_addr + 1));
3386 if (!sep->reg_addr) {
3387 dev_warn(&sep->pdev->dev, "Error getting register virtual\n");
3388 pci_dev_put(sep->pdev);
3389 return -ENODEV;
3392 dev_dbg(&sep->pdev->dev,
3393 "Register area start %llx end %llx virtual %p\n",
3394 (unsigned long long)sep->reg_physical_addr,
3395 (unsigned long long)sep->reg_physical_end,
3396 sep->reg_addr);
3398 /* allocate the shared area */
3399 sep->shared_size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
3400 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES +
3401 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES +
3402 SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES +
3403 SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
3405 if (sep_map_and_alloc_shared_area(sep)) {
3406 error = -ENOMEM;
3407 /* allocation failed */
3408 goto end_function_error;
3411 /* the next section depends on type of unit */
3412 if (sep->mrst) {
3413 error = register_rar(RAR_TYPE_IMAGE, &sep_callback,
3414 (unsigned long)sep);
3415 if (error) {
3416 dev_dbg(&sep->pdev->dev,
3417 "error register_rar\n");
3418 goto end_function_deallocate_sep_shared_area;
3420 } else {
3421 sep->rar_size = FAKE_RAR_SIZE;
3422 sep->rar_addr = dma_alloc_coherent(NULL,
3423 sep->rar_size, &sep->rar_bus, GFP_KERNEL);
3424 if (sep->rar_addr == NULL) {
3425 dev_warn(&sep->pdev->dev, "can't allocate mfld rar\n");
3426 error = -ENOMEM;
3427 goto end_function_deallocate_sep_shared_area;
3430 dev_dbg(&sep->pdev->dev, "rar start is %p, phy is %llx,"
3431 " size is %x\n", sep->rar_addr,
3432 (unsigned long long)sep->rar_bus,
3433 sep->rar_size);
3436 dev_dbg(&sep->pdev->dev, "about to write IMR and ICR REG_ADDR\n");
3438 /* clear ICR register */
3439 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
3441 /* set the IMR register - open only GPR 2 */
3442 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
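/*
 * This mask leaves only bit 13 open, matching the (0x1 << 13) test
 * against the IRR register in sep_inthandler(): the GPR2 reply
 * doorbell is the only SEP interrupt the host accepts.
 */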
3444 dev_dbg(&sep->pdev->dev, "about to call request_irq\n");
3445 /* get the interrupt line */
3446 error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED,
3447 "sep_driver", sep);
3449 if (!error)
3450 goto end_function;
3452 if (sep->rar_addr)
3453 dma_free_coherent(&sep->pdev->dev, sep->rar_size,
3454 sep->rar_addr, sep->rar_bus);
3455 goto end_function;
3457 end_function_deallocate_sep_shared_area:
3458 /* de-allocate shared area */
3459 sep_unmap_and_free_shared_area(sep);
3461 end_function_error:
3462 iounmap(sep->reg_addr);
3463 kfree(sep_dev);
3464 sep_dev = NULL;
3466 end_function:
3467 return error;
3470 static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl) = {
3471 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MRST_PCI_DEVICE_ID)},
3472 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, MFLD_PCI_DEVICE_ID)},
3476 MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
3478 /* field for registering driver to PCI device */
3479 static struct pci_driver sep_pci_driver = {
3480 .name = "sep_sec_driver",
3481 .id_table = sep_pci_id_tbl,
3482 .probe = sep_probe
3483 /* FIXME: remove handler */
3486 /* file operation for singleton sep operations */
3487 static const struct file_operations singleton_file_operations = {
3488 .owner = THIS_MODULE,
3489 .unlocked_ioctl = sep_singleton_ioctl,
3490 .poll = sep_poll,
3491 .open = sep_singleton_open,
3492 .release = sep_singleton_release,
3493 .mmap = sep_mmap,
3496 /* file operation for daemon operations */
3497 static const struct file_operations daemon_file_operations = {
3498 .owner = THIS_MODULE,
3499 .unlocked_ioctl = sep_request_daemon_ioctl,
3500 .poll = sep_request_daemon_poll,
3501 .open = sep_request_daemon_open,
3502 .release = sep_request_daemon_release,
3503 .mmap = sep_request_daemon_mmap,
3506 /* the files operations structure of the driver */
3507 static const struct file_operations sep_file_operations = {
3508 .owner = THIS_MODULE,
3509 .unlocked_ioctl = sep_ioctl,
3510 .poll = sep_poll,
3511 .open = sep_open,
3512 .release = sep_release,
3513 .mmap = sep_mmap,
3517 * sep_reconfig_shared_area - reconfigure shared area
3518 * @sep: pointer to struct sep_device
3520 * Reconfig the shared area between HOST and SEP - needed in case
3521 * the DX_CC_Init function was called before OS loading.
3523 static int sep_reconfig_shared_area(struct sep_device *sep)
3525 int ret_val;
3527 dev_dbg(&sep->pdev->dev, "reconfig shared area start\n");
3529 /* send the new SHARED MESSAGE AREA to the SEP */
3530 dev_dbg(&sep->pdev->dev, "sending %08llx to sep\n",
3531 (unsigned long long)sep->shared_bus);
3533 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
3535 /* poll for SEP response */
3536 ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
3538 while (ret_val != 0xffffffff && ret_val != sep->shared_bus)
3539 ret_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
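/*
 * The SEP acknowledges by echoing the shared area bus address back
 * through GPR1. A read of 0xffffffff also ends the poll, presumably
 * signalling an absent or failed device; the mismatch is then caught
 * below.
 */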
3541 /* check the return value (register) */
3542 if (ret_val != sep->shared_bus) {
3543 dev_warn(&sep->pdev->dev, "could not reconfig shared area\n");
3544 dev_warn(&sep->pdev->dev, "result was %x\n", ret_val);
3545 ret_val = -ENOMEM;
3546 } else
3547 ret_val = 0;
3549 dev_dbg(&sep->pdev->dev, "reconfig shared area end\n");
3550 return ret_val;
3554 * sep_register_driver_to_fs - register misc devices
3555 * @sep: pointer to struct sep_device
3557 * This function registers the driver to the file system
3559 static int sep_register_driver_to_fs(struct sep_device *sep)
3561 int ret_val;
3563 sep->miscdev_sep.minor = MISC_DYNAMIC_MINOR;
3564 sep->miscdev_sep.name = SEP_DEV_NAME;
3565 sep->miscdev_sep.fops = &sep_file_operations;
3567 sep->miscdev_singleton.minor = MISC_DYNAMIC_MINOR;
3568 sep->miscdev_singleton.name = SEP_DEV_SINGLETON;
3569 sep->miscdev_singleton.fops = &singleton_file_operations;
3571 sep->miscdev_daemon.minor = MISC_DYNAMIC_MINOR;
3572 sep->miscdev_daemon.name = SEP_DEV_DAEMON;
3573 sep->miscdev_daemon.fops = &daemon_file_operations;
3575 ret_val = misc_register(&sep->miscdev_sep);
3576 if (ret_val) {
3577 dev_warn(&sep->pdev->dev, "misc reg fails for sep %x\n",
3578 ret_val);
3579 return ret_val;
3582 ret_val = misc_register(&sep->miscdev_singleton);
3583 if (ret_val) {
3584 dev_warn(&sep->pdev->dev, "misc reg fails for sing %x\n",
3585 ret_val);
3586 misc_deregister(&sep->miscdev_sep);
3587 return ret_val;
3590 if (!sep->mrst) {
3591 ret_val = misc_register(&sep->miscdev_daemon);
3592 if (ret_val) {
3593 dev_warn(&sep->pdev->dev, "misc reg fails for dmn %x\n",
3594 ret_val);
3595 misc_deregister(&sep->miscdev_sep);
3596 misc_deregister(&sep->miscdev_singleton);
3598 return ret_val;
3601 return ret_val;
3605 * sep_init - init function
3607 * Module load time. Register the PCI device driver.
3609 static int __init sep_init(void)
3611 int ret_val = 0;
3612 struct sep_device *sep = NULL;
3614 pr_debug("Sep driver: Init start\n");
3616 ret_val = pci_register_driver(&sep_pci_driver);
3617 if (ret_val) {
3618 pr_debug("sep_driver: pci_register_driver failed, ret_val is %d\n",
3619 ret_val);
3620 goto end_function;
3623 sep = sep_dev;

/* sep_dev is set by sep_probe(); bail out if probe never found a device */
if (!sep) {
	ret_val = -ENODEV;
	goto end_function_unregister_pci;
}
3625 init_waitqueue_head(&sep->event);
3626 init_waitqueue_head(&sep->event_request_daemon);
3627 spin_lock_init(&sep->snd_rply_lck);
3628 mutex_init(&sep->sep_mutex);
3629 mutex_init(&sep->ioctl_mutex);
3631 /* new chip requires share area reconfigure */
3632 if (sep->pdev->revision == 4) { /* only for new chip */
3633 ret_val = sep_reconfig_shared_area(sep);
3634 if (ret_val)
3635 goto end_function_unregister_pci;
3638 /* register driver to fs */
3639 ret_val = sep_register_driver_to_fs(sep);
3640 if (ret_val) {
3641 dev_warn(&sep->pdev->dev, "error registering device to file\n");
3642 goto end_function_unregister_pci;
3644 goto end_function;
3646 end_function_unregister_pci:
3647 pci_unregister_driver(&sep_pci_driver);
3649 end_function:
3650 pr_debug("Init end\n");
3651 return ret_val;
3656 * sep_exit - called to unload driver
3658 * Drop the misc devices then remove and unmap the various resources
3659 * that are not released by the driver remove method.
3661 static void __exit sep_exit(void)
3663 struct sep_device *sep;
3665 sep = sep_dev;
3666 pr_debug("Exit start\n");
3668 /* unregister from fs */
3669 misc_deregister(&sep->miscdev_sep);
3670 misc_deregister(&sep->miscdev_singleton);
3671 misc_deregister(&sep->miscdev_daemon);
3673 /* free the irq */
3674 free_irq(sep->pdev->irq, sep);
3676 /* unregister the driver */
3677 pci_unregister_driver(&sep_pci_driver);
3679 /* free shared area */
3680 if (sep_dev) {
3681 sep_unmap_and_free_shared_area(sep_dev);
3682 dev_dbg(&sep->pdev->dev,
3683 "free pages SEP SHARED AREA\n");
3684 iounmap((void *) sep_dev->reg_addr);
3685 dev_dbg(&sep->pdev->dev,
3686 "iounmap\n");
3688 pr_debug("release_mem_region\n");
3689 pr_debug("Exit end\n");
3693 module_init(sep_init);
3694 module_exit(sep_exit);
3696 MODULE_LICENSE("GPL");