CacheFiles: Better showing of debugging information in active object problems
[linux-2.6/x86.git] / drivers / staging / sep / sep_driver.c
blobf890a16096c0c628f04b8cdfa098626b0d8473c3
1 /*
3 * sep_driver.c - Security Processor Driver main group of functions
5 * Copyright(c) 2009 Intel Corporation. All rights reserved.
6 * Copyright(c) 2009 Discretix. All rights reserved.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 * CONTACTS:
24 * Mark Allyn mark.a.allyn@intel.com
26 * CHANGES:
28 * 2009.06.26 Initial publish
32 #include <linux/init.h>
33 #include <linux/module.h>
34 #include <linux/fs.h>
35 #include <linux/cdev.h>
36 #include <linux/kdev_t.h>
37 #include <linux/mutex.h>
38 #include <linux/mm.h>
39 #include <linux/poll.h>
40 #include <linux/wait.h>
41 #include <linux/sched.h>
42 #include <linux/pci.h>
43 #include <linux/firmware.h>
44 #include <asm/ioctl.h>
45 #include <linux/ioport.h>
46 #include <asm/io.h>
47 #include <linux/interrupt.h>
48 #include <linux/pagemap.h>
49 #include <asm/cacheflush.h>
50 #include "sep_driver_hw_defs.h"
51 #include "sep_driver_config.h"
52 #include "sep_driver_api.h"
53 #include "sep_dev.h"
#if SEP_DRIVER_ARM_DEBUG_MODE

/* ROM image geometry and bank-switch register addresses used when
   loading the SEP ROM image in ARM debug mode */
#define CRYS_SEP_ROM_length 0x4000
#define CRYS_SEP_ROM_start_address 0x8000C000UL
#define CRYS_SEP_ROM_start_address_offset 0xC000UL
#define SEP_ROM_BANK_register 0x80008420UL
#define SEP_ROM_BANK_register_offset 0x8420UL
#define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0x82000000

/*
 * THESE 2 definitions are specific to the board - must be
 * defined during integration
 */
/* NOTE(review): SEP_RAR_IO_MEM_REGION_START_ADDRESS is defined twice in
   this copy; presumably the second definition belonged to an #else
   branch lost in extraction - confirm against the original source */
#define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0xFF0D0000

/* 2M size */
72 static void sep_load_rom_code(struct sep_device *sep)
74 /* Index variables */
75 unsigned long i, k, j;
76 u32 reg;
77 u32 error;
78 u32 warning;
80 /* Loading ROM from SEP_ROM_image.h file */
81 k = sizeof(CRYS_SEP_ROM);
83 edbg("SEP Driver: DX_CC_TST_SepRomLoader start\n");
85 edbg("SEP Driver: k is %lu\n", k);
86 edbg("SEP Driver: sep->reg_addr is %p\n", sep->reg_addr);
87 edbg("SEP Driver: CRYS_SEP_ROM_start_address_offset is %p\n", CRYS_SEP_ROM_start_address_offset);
89 for (i = 0; i < 4; i++) {
90 /* write bank */
91 sep_write_reg(sep, SEP_ROM_BANK_register_offset, i);
93 for (j = 0; j < CRYS_SEP_ROM_length / 4; j++) {
94 sep_write_reg(sep, CRYS_SEP_ROM_start_address_offset + 4 * j, CRYS_SEP_ROM[i * 0x1000 + j]);
96 k = k - 4;
98 if (k == 0) {
99 j = CRYS_SEP_ROM_length;
100 i = 4;
105 /* reset the SEP */
106 sep_write_reg(sep, HW_HOST_SEP_SW_RST_REG_ADDR, 0x1);
108 /* poll for SEP ROM boot finish */
110 reg = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
111 while (!reg);
113 edbg("SEP Driver: ROM polling ended\n");
115 switch (reg) {
116 case 0x1:
117 /* fatal error - read erro status from GPRO */
118 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
119 edbg("SEP Driver: ROM polling case 1\n");
120 break;
121 case 0x4:
122 /* Cold boot ended successfully */
123 case 0x8:
124 /* Warmboot ended successfully */
125 case 0x10:
126 /* ColdWarm boot ended successfully */
127 error = 0;
128 case 0x2:
129 /* Boot First Phase ended */
130 warning = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
131 case 0x20:
132 edbg("SEP Driver: ROM polling case %d\n", reg);
133 break;
#else
/* ROM loading is only required in ARM debug mode; otherwise a no-op */
static void sep_load_rom_code(struct sep_device *sep) { }
#endif /* SEP_DRIVER_ARM_DEBUG_MODE */
/*----------------------------------------
	DEFINES
-----------------------------------------*/

/* base address and size of the shared RAR region used for DMA */
#define BASE_ADDRESS_FOR_SYSTEM 0xfffc0000
#define SEP_RAR_IO_MEM_REGION_SIZE 0x40000

/*--------------------------------------------
	GLOBAL variables
--------------------------------------------*/

/* debug messages level */
static int debug;
module_param(debug, int , 0);
MODULE_PARM_DESC(debug, "Flag to enable SEP debug messages");

/* Keep this a single static object for now to keep the conversion easy */
static struct sep_device sep_instance;
static struct sep_device *sep_dev = &sep_instance;

/* mutex for the access to the internals of the sep driver */
static DEFINE_MUTEX(sep_mutex);

/* wait queue head (event) of the driver */
static DECLARE_WAIT_QUEUE_HEAD(sep_event);
175 * sep_load_firmware - copy firmware cache/resident
176 * @sep: device we are loading
178 * This functions copies the cache and resident from their source
179 * location into destination shared memory.
182 static int sep_load_firmware(struct sep_device *sep)
184 const struct firmware *fw;
185 char *cache_name = "cache.image.bin";
186 char *res_name = "resident.image.bin";
187 int error;
189 edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
190 edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
192 /* load cache */
193 error = request_firmware(&fw, cache_name, &sep->pdev->dev);
194 if (error) {
195 edbg("SEP Driver:cant request cache fw\n");
196 return error;
198 edbg("SEP Driver:cache %08Zx@%p\n", fw->size, (void *) fw->data);
200 memcpy(sep->rar_addr, (void *)fw->data, fw->size);
201 sep->cache_size = fw->size;
202 release_firmware(fw);
204 sep->resident_bus = sep->rar_bus + sep->cache_size;
205 sep->resident_addr = sep->rar_addr + sep->cache_size;
207 /* load resident */
208 error = request_firmware(&fw, res_name, &sep->pdev->dev);
209 if (error) {
210 edbg("SEP Driver:cant request res fw\n");
211 return error;
213 edbg("sep: res %08Zx@%p\n", fw->size, (void *)fw->data);
215 memcpy(sep->resident_addr, (void *) fw->data, fw->size);
216 sep->resident_size = fw->size;
217 release_firmware(fw);
219 edbg("sep: resident v %p b %08llx cache v %p b %08llx\n",
220 sep->resident_addr, (unsigned long long)sep->resident_bus,
221 sep->rar_addr, (unsigned long long)sep->rar_bus);
222 return 0;
226 * sep_map_and_alloc_shared_area - allocate shared block
227 * @sep: security processor
228 * @size: size of shared area
230 * Allocate a shared buffer in host memory that can be used by both the
231 * kernel and also the hardware interface via DMA.
234 static int sep_map_and_alloc_shared_area(struct sep_device *sep,
235 unsigned long size)
237 /* shared_addr = ioremap_nocache(0xda00000,shared_area_size); */
238 sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev, size,
239 &sep->shared_bus, GFP_KERNEL);
241 if (!sep->shared_addr) {
242 edbg("sep_driver :shared memory dma_alloc_coherent failed\n");
243 return -ENOMEM;
245 /* set the bus address of the shared area */
246 edbg("sep: shared_addr %ld bytes @%p (bus %08llx)\n",
247 size, sep->shared_addr, (unsigned long long)sep->shared_bus);
248 return 0;
252 * sep_unmap_and_free_shared_area - free shared block
253 * @sep: security processor
255 * Free the shared area allocated to the security processor. The
256 * processor must have finished with this and any final posted
257 * writes cleared before we do so.
259 static void sep_unmap_and_free_shared_area(struct sep_device *sep, int size)
261 dma_free_coherent(&sep->pdev->dev, size,
262 sep->shared_addr, sep->shared_bus);
266 * sep_shared_virt_to_bus - convert bus/virt addresses
268 * Returns the bus address inside the shared area according
269 * to the virtual address.
272 static dma_addr_t sep_shared_virt_to_bus(struct sep_device *sep,
273 void *virt_address)
275 dma_addr_t pa = sep->shared_bus + (virt_address - sep->shared_addr);
276 edbg("sep: virt to bus b %08llx v %p\n", pa, virt_address);
277 return pa;
281 * sep_shared_bus_to_virt - convert bus/virt addresses
283 * Returns virtual address inside the shared area according
284 * to the bus address.
287 static void *sep_shared_bus_to_virt(struct sep_device *sep,
288 dma_addr_t bus_address)
290 return sep->shared_addr + (bus_address - sep->shared_bus);
295 * sep_try_open - attempt to open a SEP device
296 * @sep: device to attempt to open
298 * Atomically attempt to get ownership of a SEP device.
299 * Returns 1 if the device was opened, 0 on failure.
302 static int sep_try_open(struct sep_device *sep)
304 if (!test_and_set_bit(0, &sep->in_use))
305 return 1;
306 return 0;
310 * sep_open - device open method
311 * @inode: inode of sep device
312 * @filp: file handle to sep device
314 * Open method for the SEP device. Called when userspace opens
315 * the SEP device node. Must also release the memory data pool
316 * allocations.
318 * Returns zero on success otherwise an error code.
321 static int sep_open(struct inode *inode, struct file *filp)
323 if (sep_dev == NULL)
324 return -ENODEV;
326 /* check the blocking mode */
327 if (filp->f_flags & O_NDELAY) {
328 if (sep_try_open(sep_dev) == 0)
329 return -EAGAIN;
330 } else
331 if (wait_event_interruptible(sep_event, sep_try_open(sep_dev)) < 0)
332 return -EINTR;
334 /* Bind to the device, we only have one which makes it easy */
335 filp->private_data = sep_dev;
336 /* release data pool allocations */
337 sep_dev->data_pool_bytes_allocated = 0;
338 return 0;
343 * sep_release - close a SEP device
344 * @inode: inode of SEP device
345 * @filp: file handle being closed
347 * Called on the final close of a SEP device. As the open protects against
348 * multiple simultaenous opens that means this method is called when the
349 * final reference to the open handle is dropped.
352 static int sep_release(struct inode *inode, struct file *filp)
354 struct sep_device *sep = filp->private_data;
355 #if 0 /*!SEP_DRIVER_POLLING_MODE */
356 /* close IMR */
357 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
358 /* release IRQ line */
359 free_irq(SEP_DIRVER_IRQ_NUM, sep);
361 #endif
362 /* Ensure any blocked open progresses */
363 clear_bit(0, &sep->in_use);
364 wake_up(&sep_event);
365 return 0;
368 /*---------------------------------------------------------------
369 map function - this functions maps the message shared area
370 -----------------------------------------------------------------*/
371 static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
373 dma_addr_t bus_addr;
374 struct sep_device *sep = filp->private_data;
376 dbg("-------->SEP Driver: mmap start\n");
378 /* check that the size of the mapped range is as the size of the message
379 shared area */
380 if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
381 edbg("SEP Driver mmap requested size is more than allowed\n");
382 printk(KERN_WARNING "SEP Driver mmap requested size is more \
383 than allowed\n");
384 printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_end);
385 printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_start);
386 return -EAGAIN;
389 edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr);
391 /* get bus address */
392 bus_addr = sep->shared_bus;
394 edbg("SEP Driver: phys_addr is %08llx\n", (unsigned long long)bus_addr);
396 if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
397 edbg("SEP Driver remap_page_range failed\n");
398 printk(KERN_WARNING "SEP Driver remap_page_range failed\n");
399 return -EAGAIN;
402 dbg("SEP Driver:<-------- mmap end\n");
404 return 0;
408 /*-----------------------------------------------
409 poll function
410 *----------------------------------------------*/
411 static unsigned int sep_poll(struct file *filp, poll_table * wait)
413 unsigned long count;
414 unsigned int mask = 0;
415 unsigned long retval = 0; /* flow id */
416 struct sep_device *sep = filp->private_data;
418 dbg("---------->SEP Driver poll: start\n");
421 #if SEP_DRIVER_POLLING_MODE
423 while (sep->send_ct != (retval & 0x7FFFFFFF)) {
424 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
426 for (count = 0; count < 10 * 4; count += 4)
427 edbg("Poll Debug Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + count)));
430 sep->reply_ct++;
431 #else
432 /* add the event to the polling wait table */
433 poll_wait(filp, &sep_event, wait);
435 #endif
437 edbg("sep->send_ct is %lu\n", sep->send_ct);
438 edbg("sep->reply_ct is %lu\n", sep->reply_ct);
440 /* check if the data is ready */
441 if (sep->send_ct == sep->reply_ct) {
442 for (count = 0; count < 12 * 4; count += 4)
443 edbg("Sep Mesg Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + count)));
445 for (count = 0; count < 10 * 4; count += 4)
446 edbg("Debug Data Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + 0x1800 + count)));
448 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
449 edbg("retval is %lu\n", retval);
450 /* check if the this is sep reply or request */
451 if (retval >> 31) {
452 edbg("SEP Driver: sep request in\n");
453 /* request */
454 mask |= POLLOUT | POLLWRNORM;
455 } else {
456 edbg("SEP Driver: sep reply in\n");
457 mask |= POLLIN | POLLRDNORM;
460 dbg("SEP Driver:<-------- poll exit\n");
461 return mask;
465 * sep_time_address - address in SEP memory of time
466 * @sep: SEP device we want the address from
468 * Return the address of the two dwords in memory used for time
469 * setting.
472 static u32 *sep_time_address(struct sep_device *sep)
474 return sep->shared_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
478 * sep_set_time - set the SEP time
479 * @sep: the SEP we are setting the time for
481 * Calculates time and sets it at the predefined address.
482 * Called with the sep mutex held.
484 static unsigned long sep_set_time(struct sep_device *sep)
486 struct timeval time;
487 u32 *time_addr; /* address of time as seen by the kernel */
490 dbg("sep:sep_set_time start\n");
492 do_gettimeofday(&time);
494 /* set value in the SYSTEM MEMORY offset */
495 time_addr = sep_time_address(sep);
497 time_addr[0] = SEP_TIME_VAL_TOKEN;
498 time_addr[1] = time.tv_sec;
500 edbg("SEP Driver:time.tv_sec is %lu\n", time.tv_sec);
501 edbg("SEP Driver:time_addr is %p\n", time_addr);
502 edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr);
504 return time.tv_sec;
508 * sep_dump_message - dump the message that is pending
509 * @sep: sep device
511 * Dump out the message pending in the shared message area
514 static void sep_dump_message(struct sep_device *sep)
516 int count;
517 for (count = 0; count < 12 * 4; count += 4)
518 edbg("Word %d of the message is %u\n", count, *((u32 *) (sep->shared_addr + count)));
522 * sep_send_command_handler - kick off a command
523 * @sep: sep being signalled
525 * This function raises interrupt to SEP that signals that is has a new
526 * command from the host
529 static void sep_send_command_handler(struct sep_device *sep)
531 dbg("sep:sep_send_command_handler start\n");
533 mutex_lock(&sep_mutex);
534 sep_set_time(sep);
536 /* FIXME: flush cache */
537 flush_cache_all();
539 sep_dump_message(sep);
540 /* update counter */
541 sep->send_ct++;
542 /* send interrupt to SEP */
543 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
544 dbg("SEP Driver:<-------- sep_send_command_handler end\n");
545 mutex_unlock(&sep_mutex);
546 return;
550 * sep_send_reply_command_handler - kick off a command reply
551 * @sep: sep being signalled
553 * This function raises interrupt to SEP that signals that is has a new
554 * command from the host
557 static void sep_send_reply_command_handler(struct sep_device *sep)
559 dbg("sep:sep_send_reply_command_handler start\n");
561 /* flash cache */
562 flush_cache_all();
564 sep_dump_message(sep);
566 mutex_lock(&sep_mutex);
567 sep->send_ct++; /* update counter */
568 /* send the interrupt to SEP */
569 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct);
570 /* update both counters */
571 sep->send_ct++;
572 sep->reply_ct++;
573 mutex_unlock(&sep_mutex);
574 dbg("sep: sep_send_reply_command_handler end\n");
578 This function handles the allocate data pool memory request
579 This function returns calculates the bus address of the
580 allocated memory, and the offset of this area from the mapped address.
581 Therefore, the FVOs in user space can calculate the exact virtual
582 address of this allocated memory
584 static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
585 unsigned long arg)
587 int error;
588 struct sep_driver_alloc_t command_args;
590 dbg("SEP Driver:--------> sep_allocate_data_pool_memory_handler start\n");
592 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_alloc_t));
593 if (error)
594 goto end_function;
596 /* allocate memory */
597 if ((sep->data_pool_bytes_allocated + command_args.num_bytes) > SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
598 error = -ENOMEM;
599 goto end_function;
602 /* set the virtual and bus address */
603 command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
604 command_args.phys_address = sep->shared_bus + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
606 /* write the memory back to the user space */
607 error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_alloc_t));
608 if (error)
609 goto end_function;
611 /* set the allocation */
612 sep->data_pool_bytes_allocated += command_args.num_bytes;
614 end_function:
615 dbg("SEP Driver:<-------- sep_allocate_data_pool_memory_handler end\n");
616 return error;
620 This function handles write into allocated data pool command
622 static int sep_write_into_data_pool_handler(struct sep_device *sep, unsigned long arg)
624 int error;
625 void *virt_address;
626 unsigned long va;
627 unsigned long app_in_address;
628 unsigned long num_bytes;
629 void *data_pool_area_addr;
631 dbg("SEP Driver:--------> sep_write_into_data_pool_handler start\n");
633 /* get the application address */
634 error = get_user(app_in_address, &(((struct sep_driver_write_t *) arg)->app_address));
635 if (error)
636 goto end_function;
638 /* get the virtual kernel address address */
639 error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
640 if (error)
641 goto end_function;
642 virt_address = (void *)va;
644 /* get the number of bytes */
645 error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
646 if (error)
647 goto end_function;
649 /* calculate the start of the data pool */
650 data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
653 /* check that the range of the virtual kernel address is correct */
654 if (virt_address < data_pool_area_addr || virt_address > (data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)) {
655 error = -EINVAL;
656 goto end_function;
658 /* copy the application data */
659 error = copy_from_user(virt_address, (void *) app_in_address, num_bytes);
660 end_function:
661 dbg("SEP Driver:<-------- sep_write_into_data_pool_handler end\n");
662 return error;
666 this function handles the read from data pool command
668 static int sep_read_from_data_pool_handler(struct sep_device *sep, unsigned long arg)
670 int error;
671 /* virtual address of dest application buffer */
672 unsigned long app_out_address;
673 /* virtual address of the data pool */
674 unsigned long va;
675 void *virt_address;
676 unsigned long num_bytes;
677 void *data_pool_area_addr;
679 dbg("SEP Driver:--------> sep_read_from_data_pool_handler start\n");
681 /* get the application address */
682 error = get_user(app_out_address, &(((struct sep_driver_write_t *) arg)->app_address));
683 if (error)
684 goto end_function;
686 /* get the virtual kernel address address */
687 error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
688 if (error)
689 goto end_function;
690 virt_address = (void *)va;
692 /* get the number of bytes */
693 error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
694 if (error)
695 goto end_function;
697 /* calculate the start of the data pool */
698 data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
700 /* FIXME: These are incomplete all over the driver: what about + len
701 and when doing that also overflows */
702 /* check that the range of the virtual kernel address is correct */
703 if (virt_address < data_pool_area_addr || virt_address > data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
704 error = -EINVAL;
705 goto end_function;
708 /* copy the application data */
709 error = copy_to_user((void *) app_out_address, virt_address, num_bytes);
710 end_function:
711 dbg("SEP Driver:<-------- sep_read_from_data_pool_handler end\n");
712 return error;
/*
   This function releases all the application virtual buffer physical pages,
   that were previously locked
*/
static int sep_free_dma_pages(struct page **page_array_ptr, unsigned long num_pages, unsigned long dirtyFlag)
{
	unsigned long count;

	/* single loop replaces the original's two near-identical loops */
	for (count = 0; count < num_pages; count++) {
		/* if the out array was written, the data changed, so the
		   page must be marked dirty before release; reserved pages
		   are never marked */
		if (dirtyFlag && !PageReserved(page_array_ptr[count]))
			SetPageDirty(page_array_ptr[count]);
		page_cache_release(page_array_ptr[count]);
	}
	/* kfree(NULL) is a no-op, so the original's NULL guard was
	   redundant */
	kfree(page_array_ptr);
	return 0;
}
745 This function locks all the physical pages of the kernel virtual buffer
746 and construct a basic lli array, where each entry holds the physical
747 page address and the size that application data holds in this physical pages
749 static int sep_lock_kernel_pages(struct sep_device *sep,
750 unsigned long kernel_virt_addr,
751 unsigned long data_size,
752 unsigned long *num_pages_ptr,
753 struct sep_lli_entry_t **lli_array_ptr,
754 struct page ***page_array_ptr)
756 int error = 0;
757 /* the the page of the end address of the user space buffer */
758 unsigned long end_page;
759 /* the page of the start address of the user space buffer */
760 unsigned long start_page;
761 /* the range in pages */
762 unsigned long num_pages;
763 struct sep_lli_entry_t *lli_array;
764 /* next kernel address to map */
765 unsigned long next_kernel_address;
766 unsigned long count;
768 dbg("SEP Driver:--------> sep_lock_kernel_pages start\n");
770 /* set start and end pages and num pages */
771 end_page = (kernel_virt_addr + data_size - 1) >> PAGE_SHIFT;
772 start_page = kernel_virt_addr >> PAGE_SHIFT;
773 num_pages = end_page - start_page + 1;
775 edbg("SEP Driver: kernel_virt_addr is %08lx\n", kernel_virt_addr);
776 edbg("SEP Driver: data_size is %lu\n", data_size);
777 edbg("SEP Driver: start_page is %lx\n", start_page);
778 edbg("SEP Driver: end_page is %lx\n", end_page);
779 edbg("SEP Driver: num_pages is %lu\n", num_pages);
781 lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
782 if (!lli_array) {
783 edbg("SEP Driver: kmalloc for lli_array failed\n");
784 error = -ENOMEM;
785 goto end_function;
788 /* set the start address of the first page - app data may start not at
789 the beginning of the page */
790 lli_array[0].physical_address = (unsigned long) virt_to_phys((unsigned long *) kernel_virt_addr);
792 /* check that not all the data is in the first page only */
793 if ((PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK))) >= data_size)
794 lli_array[0].block_size = data_size;
795 else
796 lli_array[0].block_size = PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK));
798 /* debug print */
799 dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
801 /* advance the address to the start of the next page */
802 next_kernel_address = (kernel_virt_addr & PAGE_MASK) + PAGE_SIZE;
804 /* go from the second page to the prev before last */
805 for (count = 1; count < (num_pages - 1); count++) {
806 lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
807 lli_array[count].block_size = PAGE_SIZE;
809 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
810 next_kernel_address += PAGE_SIZE;
813 /* if more then 1 pages locked - then update for the last page size needed */
814 if (num_pages > 1) {
815 /* update the address of the last page */
816 lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
818 /* set the size of the last page */
819 lli_array[count].block_size = (kernel_virt_addr + data_size) & (~PAGE_MASK);
821 if (lli_array[count].block_size == 0) {
822 dbg("app_virt_addr is %08lx\n", kernel_virt_addr);
823 dbg("data_size is %lu\n", data_size);
824 while (1);
827 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
829 /* set output params */
830 *lli_array_ptr = lli_array;
831 *num_pages_ptr = num_pages;
832 *page_array_ptr = 0;
833 end_function:
834 dbg("SEP Driver:<-------- sep_lock_kernel_pages end\n");
835 return 0;
839 This function locks all the physical pages of the application virtual buffer
840 and construct a basic lli array, where each entry holds the physical page
841 address and the size that application data holds in this physical pages
843 static int sep_lock_user_pages(struct sep_device *sep,
844 unsigned long app_virt_addr,
845 unsigned long data_size,
846 unsigned long *num_pages_ptr,
847 struct sep_lli_entry_t **lli_array_ptr,
848 struct page ***page_array_ptr)
850 int error = 0;
851 /* the the page of the end address of the user space buffer */
852 unsigned long end_page;
853 /* the page of the start address of the user space buffer */
854 unsigned long start_page;
855 /* the range in pages */
856 unsigned long num_pages;
857 struct page **page_array;
858 struct sep_lli_entry_t *lli_array;
859 unsigned long count;
860 int result;
862 dbg("SEP Driver:--------> sep_lock_user_pages start\n");
864 /* set start and end pages and num pages */
865 end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
866 start_page = app_virt_addr >> PAGE_SHIFT;
867 num_pages = end_page - start_page + 1;
869 edbg("SEP Driver: app_virt_addr is %08lx\n", app_virt_addr);
870 edbg("SEP Driver: data_size is %lu\n", data_size);
871 edbg("SEP Driver: start_page is %lu\n", start_page);
872 edbg("SEP Driver: end_page is %lu\n", end_page);
873 edbg("SEP Driver: num_pages is %lu\n", num_pages);
875 /* allocate array of pages structure pointers */
876 page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
877 if (!page_array) {
878 edbg("SEP Driver: kmalloc for page_array failed\n");
880 error = -ENOMEM;
881 goto end_function;
884 lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
885 if (!lli_array) {
886 edbg("SEP Driver: kmalloc for lli_array failed\n");
888 error = -ENOMEM;
889 goto end_function_with_error1;
892 /* convert the application virtual address into a set of physical */
893 down_read(&current->mm->mmap_sem);
894 result = get_user_pages(current, current->mm, app_virt_addr, num_pages, 1, 0, page_array, 0);
895 up_read(&current->mm->mmap_sem);
897 /* check the number of pages locked - if not all then exit with error */
898 if (result != num_pages) {
899 dbg("SEP Driver: not all pages locked by get_user_pages\n");
901 error = -ENOMEM;
902 goto end_function_with_error2;
905 /* flush the cache */
906 for (count = 0; count < num_pages; count++)
907 flush_dcache_page(page_array[count]);
909 /* set the start address of the first page - app data may start not at
910 the beginning of the page */
911 lli_array[0].physical_address = ((unsigned long) page_to_phys(page_array[0])) + (app_virt_addr & (~PAGE_MASK));
913 /* check that not all the data is in the first page only */
914 if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
915 lli_array[0].block_size = data_size;
916 else
917 lli_array[0].block_size = PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
919 /* debug print */
920 dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
922 /* go from the second page to the prev before last */
923 for (count = 1; count < (num_pages - 1); count++) {
924 lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
925 lli_array[count].block_size = PAGE_SIZE;
927 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
930 /* if more then 1 pages locked - then update for the last page size needed */
931 if (num_pages > 1) {
932 /* update the address of the last page */
933 lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
935 /* set the size of the last page */
936 lli_array[count].block_size = (app_virt_addr + data_size) & (~PAGE_MASK);
938 if (lli_array[count].block_size == 0) {
939 dbg("app_virt_addr is %08lx\n", app_virt_addr);
940 dbg("data_size is %lu\n", data_size);
941 while (1);
943 edbg("lli_array[%lu].physical_address is %08lx, \
944 lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
947 /* set output params */
948 *lli_array_ptr = lli_array;
949 *num_pages_ptr = num_pages;
950 *page_array_ptr = page_array;
951 goto end_function;
953 end_function_with_error2:
954 /* release the cache */
955 for (count = 0; count < num_pages; count++)
956 page_cache_release(page_array[count]);
957 kfree(lli_array);
958 end_function_with_error1:
959 kfree(page_array);
960 end_function:
961 dbg("SEP Driver:<-------- sep_lock_user_pages end\n");
962 return 0;
967 this function calculates the size of data that can be inserted into the lli
968 table from this array the condition is that either the table is full
969 (all etnries are entered), or there are no more entries in the lli array
971 static unsigned long sep_calculate_lli_table_max_size(struct sep_lli_entry_t *lli_in_array_ptr, unsigned long num_array_entries)
973 unsigned long table_data_size = 0;
974 unsigned long counter;
976 /* calculate the data in the out lli table if till we fill the whole
977 table or till the data has ended */
978 for (counter = 0; (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) && (counter < num_array_entries); counter++)
979 table_data_size += lli_in_array_ptr[counter].block_size;
980 return table_data_size;
/*
 * Build one LLI table in place at lli_table_ptr from lli_array_ptr,
 * packing exactly table_data_size bytes.  A source entry that would
 * overshoot the requested size is truncated in the table and its
 * remainder is left in the source array for the next table.  The slot
 * after the last data entry is written as a default "info" entry; the
 * caller later overwrites it to chain tables together.
 *
 * On return *num_table_entries_ptr holds the number of entries written
 * (including the info entry) and *num_processed_entries_ptr has been
 * incremented by the number of source entries fully consumed.
 */
static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr, struct sep_lli_entry_t *lli_table_ptr, unsigned long *num_processed_entries_ptr, unsigned long *num_table_entries_ptr, unsigned long table_data_size)
{
	/* bytes accounted for in the table so far */
	unsigned long curr_table_data_size;
	/* counter of lli array entry */
	unsigned long array_counter;

	dbg("SEP Driver:--------> sep_build_lli_table start\n");

	/* init current table data size and lli array entry counter;
	   the entry count starts at 1 to reserve the trailing info entry */
	curr_table_data_size = 0;
	array_counter = 0;
	*num_table_entries_ptr = 1;

	edbg("SEP Driver:table_data_size is %lu\n", table_data_size);

	/* fill the table till table size reaches the needed amount */
	while (curr_table_data_size < table_data_size) {
		/* update the number of entries in table */
		(*num_table_entries_ptr)++;

		lli_table_ptr->physical_address = lli_array_ptr[array_counter].physical_address;
		lli_table_ptr->block_size = lli_array_ptr[array_counter].block_size;
		curr_table_data_size += lli_table_ptr->block_size;

		edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
		edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
		edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);

		/* check for overflow of the table data */
		if (curr_table_data_size > table_data_size) {
			edbg("SEP Driver:curr_table_data_size > table_data_size\n");

			/* shrink this table entry so the table holds exactly
			   table_data_size bytes */
			lli_table_ptr->block_size -= (curr_table_data_size - table_data_size);

			/* advance the source entry past the bytes consumed here */
			lli_array_ptr[array_counter].physical_address += lli_table_ptr->block_size;

			/* leave the unconsumed remainder in the lli array; note
			   array_counter is NOT advanced - the entry is only
			   partially processed */
			lli_array_ptr[array_counter].block_size = (curr_table_data_size - table_data_size);
		} else
			/* advance to the next entry in the lli_array */
			array_counter++;

		edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
		edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);

		/* move to the next entry in table */
		lli_table_ptr++;
	}

	/* set the info entry to default; the caller rewrites it when the
	   next table in the chain is built */
	lli_table_ptr->physical_address = 0xffffffff;
	lli_table_ptr->block_size = 0;

	edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
	edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
	edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);

	/* set the output parameter */
	*num_processed_entries_ptr += array_counter;

	edbg("SEP Driver:*num_processed_entries_ptr is %lu\n", *num_processed_entries_ptr);
	dbg("SEP Driver:<-------- sep_build_lli_table end\n");
	return;
}
/*
 * Walk the chain of LLI tables starting at lli_table_ptr and dump
 * every entry of every table to the debug log.  The chain is followed
 * through each table's info entry (last entry), whose block_size packs
 * the next table's entry count (bits 24..31) and data size (bits
 * 0..23) and whose physical_address is the next table's bus address;
 * a physical_address of 0xffffffff terminates the chain.
 */
static void sep_debug_print_lli_tables(struct sep_device *sep, struct sep_lli_entry_t *lli_table_ptr, unsigned long num_table_entries, unsigned long table_data_size)
{
	unsigned long table_count;
	unsigned long entries_count;

	dbg("SEP Driver:--------> sep_debug_print_lli_tables start\n");

	table_count = 1;
	while ((unsigned long) lli_table_ptr != 0xffffffff) {
		edbg("SEP Driver: lli table %08lx, table_data_size is %lu\n", table_count, table_data_size);
		edbg("SEP Driver: num_table_entries is %lu\n", num_table_entries);

		/* print entries of the table (without info entry) */
		for (entries_count = 0; entries_count < num_table_entries; entries_count++, lli_table_ptr++) {
			edbg("SEP Driver:lli_table_ptr address is %08lx\n", (unsigned long) lli_table_ptr);
			edbg("SEP Driver:phys address is %08lx block size is %lu\n", lli_table_ptr->physical_address, lli_table_ptr->block_size);
		}

		/* point to the info entry (the loop above walked one past it) */
		lli_table_ptr--;

		edbg("SEP Driver:phys lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
		edbg("SEP Driver:phys lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);

		/* unpack size/count of the next table from the info entry */
		table_data_size = lli_table_ptr->block_size & 0xffffff;
		num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
		lli_table_ptr = (struct sep_lli_entry_t *)
		    (lli_table_ptr->physical_address);

		edbg("SEP Driver:phys table_data_size is %lu num_table_entries is %lu lli_table_ptr is%lu\n", table_data_size, num_table_entries, (unsigned long) lli_table_ptr);

		/* translate the next table's bus address back to a kernel
		   virtual address unless it is the chain terminator */
		if ((unsigned long) lli_table_ptr != 0xffffffff)
			lli_table_ptr = (struct sep_lli_entry_t *) sep_shared_bus_to_virt(sep, (unsigned long) lli_table_ptr);

		table_count++;
	}
	dbg("SEP Driver:<-------- sep_debug_print_lli_tables end\n");
}
/*
 * This function prepares only an input DMA table for the synchronous
 * symmetric operations (HASH).
 *
 * Outputs (all bus-address / SEP-visible values): *lli_table_ptr is
 * the bus address of the first table, *num_entries_ptr the entry count
 * of that table (including its info entry) and *table_data_size_ptr
 * its payload size.  Returns 0 on success or the error from the page
 * locking helper.
 */
static int sep_prepare_input_dma_table(struct sep_device *sep,
				unsigned long app_virt_addr,
				unsigned long data_size,
				unsigned long block_size,
				unsigned long *lli_table_ptr,
				unsigned long *num_entries_ptr,
				unsigned long *table_data_size_ptr,
				bool isKernelVirtualAddress)
{
	/* pointer to the info entry of the table - the last entry */
	struct sep_lli_entry_t *info_entry_ptr;
	/* array of pointers to page */
	struct sep_lli_entry_t *lli_array_ptr;
	/* points to the first entry to be processed in the lli_in_array */
	unsigned long current_entry;
	/* num entries in the virtual buffer */
	unsigned long sep_lli_entries;
	/* lli table pointer */
	struct sep_lli_entry_t *in_lli_table_ptr;
	/* the total data in one table */
	unsigned long table_data_size;
	/* number of entries in lli table */
	unsigned long num_entries_in_table;
	/* next table address */
	void *lli_table_alloc_addr;
	unsigned long result;

	dbg("SEP Driver:--------> sep_prepare_input_dma_table start\n");

	edbg("SEP Driver:data_size is %lu\n", data_size);
	edbg("SEP Driver:block_size is %lu\n", block_size);

	/* initialize the pages pointers */
	sep->in_page_array = 0;
	sep->in_num_pages = 0;

	if (data_size == 0) {
		/* special case - build a 2 entry table carrying zero data */
		in_lli_table_ptr = (struct sep_lli_entry_t *) (sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES);
		/* FIXME: Should the entry below not be for _bus */
		in_lli_table_ptr->physical_address = (unsigned long)sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
		in_lli_table_ptr->block_size = 0;

		/* second entry is the terminating info entry */
		in_lli_table_ptr++;
		in_lli_table_ptr->physical_address = 0xFFFFFFFF;
		in_lli_table_ptr->block_size = 0;

		*lli_table_ptr = sep->shared_bus + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
		*num_entries_ptr = 2;
		*table_data_size_ptr = 0;

		goto end_function;
	}

	/* check if the pages are in Kernel Virtual Address layout */
	if (isKernelVirtualAddress == true)
		/* lock the pages of the kernel buffer and translate them to pages */
		result = sep_lock_kernel_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
	else
		/* lock the pages of the user buffer and translate them to pages */
		result = sep_lock_user_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);

	if (result)
		return result;

	edbg("SEP Driver:output sep->in_num_pages is %lu\n", sep->in_num_pages);

	current_entry = 0;
	info_entry_ptr = 0;
	sep_lli_entries = sep->in_num_pages;

	/* initiate to point after the message area */
	lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;

	/* loop till all the entries in the in array are processed */
	while (current_entry < sep_lli_entries) {
		/* carve the next table out of the shared area */
		in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		/* calculate the maximum size of data for input table */
		table_data_size = sep_calculate_lli_table_max_size(&lli_array_ptr[current_entry], (sep_lli_entries - current_entry));

		/* round the table size down to a multiple of the block size */
		table_data_size = (table_data_size / block_size) * block_size;

		edbg("SEP Driver:output table_data_size is %lu\n", table_data_size);

		/* construct input lli table */
		sep_build_lli_table(&lli_array_ptr[current_entry], in_lli_table_ptr, &current_entry, &num_entries_in_table, table_data_size);

		if (info_entry_ptr == 0) {
			/* first table: report it to the caller as bus addresses */
			*lli_table_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
			*num_entries_ptr = num_entries_in_table;
			*table_data_size_ptr = table_data_size;

			edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_ptr);
		} else {
			/* chain: update the info entry of the previous in table */
			info_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
			info_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
		}

		/* save the pointer to the info entry of the current tables */
		info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
	}

	/* print input tables */
	sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
				   sep_shared_bus_to_virt(sep, *lli_table_ptr), *num_entries_ptr, *table_data_size_ptr);

	/* free the array of the pages */
	kfree(lli_array_ptr);
end_function:
	dbg("SEP Driver:<-------- sep_prepare_input_dma_table end\n");
	return 0;
}
/*
 * This function creates the input and output DMA table chains for
 * symmetric operations (AES/DES) from the given LLI arrays.  Input
 * and output tables are built in lock-step so each pair carries the
 * same payload size, rounded down to a multiple of block_size.
 *
 * Outputs: bus address, entry count and data size of the FIRST table
 * of each chain, via the six output-pointer parameters.
 */
static int sep_construct_dma_tables_from_lli(struct sep_device *sep,
				      struct sep_lli_entry_t *lli_in_array,
				      unsigned long sep_in_lli_entries,
				      struct sep_lli_entry_t *lli_out_array,
				      unsigned long sep_out_lli_entries,
				      unsigned long block_size, unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr)
{
	/* points to the area where next lli table can be allocated: keep void *
	   as there is pointer scaling to fix otherwise */
	void *lli_table_alloc_addr;
	/* input lli table */
	struct sep_lli_entry_t *in_lli_table_ptr;
	/* output lli table */
	struct sep_lli_entry_t *out_lli_table_ptr;
	/* pointer to the info entry of the table - the last entry */
	struct sep_lli_entry_t *info_in_entry_ptr;
	/* pointer to the info entry of the table - the last entry */
	struct sep_lli_entry_t *info_out_entry_ptr;
	/* points to the first entry to be processed in the lli_in_array */
	unsigned long current_in_entry;
	/* points to the first entry to be processed in the lli_out_array */
	unsigned long current_out_entry;
	/* max size of the input table */
	unsigned long in_table_data_size;
	/* max size of the output table */
	unsigned long out_table_data_size;
	/* flag that signifies if this is the first table built from the arrays */
	unsigned long first_table_flag;
	/* the data size that should be in table */
	unsigned long table_data_size;
	/* number of entries in the input table */
	unsigned long num_entries_in_table;
	/* number of entries in the output table */
	unsigned long num_entries_out_table;

	dbg("SEP Driver:--------> sep_construct_dma_tables_from_lli start\n");

	/* initiate to point after the message area */
	lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;

	current_in_entry = 0;
	current_out_entry = 0;
	first_table_flag = 1;
	info_in_entry_ptr = 0;
	info_out_entry_ptr = 0;

	/* loop till all the entries in the in array are processed */
	while (current_in_entry < sep_in_lli_entries) {
		/* carve the next input table out of the shared area */
		in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		/* carve the matching output table */
		out_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		/* calculate the maximum size of data for input table */
		in_table_data_size = sep_calculate_lli_table_max_size(&lli_in_array[current_in_entry], (sep_in_lli_entries - current_in_entry));

		/* calculate the maximum size of data for output table */
		out_table_data_size = sep_calculate_lli_table_max_size(&lli_out_array[current_out_entry], (sep_out_lli_entries - current_out_entry));

		edbg("SEP Driver:in_table_data_size is %lu\n", in_table_data_size);
		edbg("SEP Driver:out_table_data_size is %lu\n", out_table_data_size);

		/* both tables must carry the same amount - take the smaller */
		table_data_size = in_table_data_size;
		if (table_data_size > out_table_data_size)
			table_data_size = out_table_data_size;

		/* round the table size down to a multiple of the block size */
		table_data_size = (table_data_size / block_size) * block_size;

		dbg("SEP Driver:table_data_size is %lu\n", table_data_size);

		/* construct input lli table */
		sep_build_lli_table(&lli_in_array[current_in_entry], in_lli_table_ptr, &current_in_entry, &num_entries_in_table, table_data_size);

		/* construct output lli table */
		sep_build_lli_table(&lli_out_array[current_out_entry], out_lli_table_ptr, &current_out_entry, &num_entries_out_table, table_data_size);

		/* if info entry is null - this is the first table built */
		if (info_in_entry_ptr == 0) {
			/* first pair: report them to the caller as bus addresses */
			*lli_table_in_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
			*in_num_entries_ptr = num_entries_in_table;
			*lli_table_out_ptr = sep_shared_virt_to_bus(sep, out_lli_table_ptr);
			*out_num_entries_ptr = num_entries_out_table;
			*table_data_size_ptr = table_data_size;

			edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_in_ptr);
			edbg("SEP Driver:output lli_table_out_ptr is %08lx\n", *lli_table_out_ptr);
		} else {
			/* chain: update the info entry of the previous in table */
			info_in_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
			info_in_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);

			/* chain: update the info entry of the previous out table */
			info_out_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, out_lli_table_ptr);
			info_out_entry_ptr->block_size = ((num_entries_out_table) << 24) | (table_data_size);
		}

		/* save the pointer to the info entry of the current tables */
		info_in_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
		info_out_entry_ptr = out_lli_table_ptr + num_entries_out_table - 1;

		edbg("SEP Driver:output num_entries_out_table is %lu\n", (unsigned long) num_entries_out_table);
		edbg("SEP Driver:output info_in_entry_ptr is %lu\n", (unsigned long) info_in_entry_ptr);
		edbg("SEP Driver:output info_out_entry_ptr is %lu\n", (unsigned long) info_out_entry_ptr);
	}

	/* print input tables */
	sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
				   sep_shared_bus_to_virt(sep, *lli_table_in_ptr), *in_num_entries_ptr, *table_data_size_ptr);
	/* print output tables */
	sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
				   sep_shared_bus_to_virt(sep, *lli_table_out_ptr), *out_num_entries_ptr, *table_data_size_ptr);
	dbg("SEP Driver:<-------- sep_construct_dma_tables_from_lli end\n");
	return 0;
}
/*
 * This function builds input and output DMA tables for synchronous
 * symmetric operations (AES, DES).  It locks both the input and the
 * output buffers (kernel- or user-space, per isKernelVirtualAddress),
 * hands the resulting LLI arrays to
 * sep_construct_dma_tables_from_lli(), then frees the arrays via the
 * goto cleanup ladder (the success path falls through it as well).
 */
static int sep_prepare_input_output_dma_table(struct sep_device *sep,
				       unsigned long app_virt_in_addr,
				       unsigned long app_virt_out_addr,
				       unsigned long data_size,
				       unsigned long block_size,
				       unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr, bool isKernelVirtualAddress)
{
	/* array of pointers of page */
	struct sep_lli_entry_t *lli_in_array;
	/* array of pointers of page */
	struct sep_lli_entry_t *lli_out_array;
	int result = 0;

	dbg("SEP Driver:--------> sep_prepare_input_output_dma_table start\n");

	/* initialize the pages pointers */
	sep->in_page_array = 0;
	sep->out_page_array = 0;

	/* lock the input buffer */
	if (isKernelVirtualAddress == true) {
		/* lock the pages of the kernel buffer and translate them to pages */
		result = sep_lock_kernel_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
		if (result) {
			edbg("SEP Driver: sep_lock_kernel_pages for input virtual buffer failed\n");
			goto end_function;
		}
	} else {
		/* lock the pages of the user buffer and translate them to pages */
		result = sep_lock_user_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
		if (result) {
			edbg("SEP Driver: sep_lock_user_pages for input virtual buffer failed\n");
			goto end_function;
		}
	}

	/* lock the output buffer the same way */
	if (isKernelVirtualAddress == true) {
		result = sep_lock_kernel_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
		if (result) {
			edbg("SEP Driver: sep_lock_kernel_pages for output virtual buffer failed\n");
			goto end_function_with_error1;
		}
	} else {
		result = sep_lock_user_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
		if (result) {
			edbg("SEP Driver: sep_lock_user_pages for output virtual buffer failed\n");
			goto end_function_with_error1;
		}
	}

	edbg("sep->in_num_pages is %lu\n", sep->in_num_pages);
	edbg("sep->out_num_pages is %lu\n", sep->out_num_pages);
	edbg("SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n", SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);

	/* call the function that creates tables from the lli arrays */
	result = sep_construct_dma_tables_from_lli(sep, lli_in_array, sep->in_num_pages, lli_out_array, sep->out_num_pages, block_size, lli_table_in_ptr, lli_table_out_ptr, in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
	if (result) {
		edbg("SEP Driver: sep_construct_dma_tables_from_lli failed\n");
		goto end_function_with_error2;
	}

	/* fall through - free the lli entry arrays */
	dbg("in_num_entries_ptr is %08lx\n", *in_num_entries_ptr);
	dbg("out_num_entries_ptr is %08lx\n", *out_num_entries_ptr);
	dbg("table_data_size_ptr is %08lx\n", *table_data_size_ptr);
end_function_with_error2:
	kfree(lli_out_array);
end_function_with_error1:
	kfree(lli_in_array);
end_function:
	dbg("SEP Driver:<-------- sep_prepare_input_output_dma_table end result = %d\n", (int) result);
	return result;
}
1433 this function handles tha request for creation of the DMA table
1434 for the synchronic symmetric operations (AES,DES)
1436 static int sep_create_sync_dma_tables_handler(struct sep_device *sep,
1437 unsigned long arg)
1439 int error;
1440 /* command arguments */
1441 struct sep_driver_build_sync_table_t command_args;
1443 dbg("SEP Driver:--------> sep_create_sync_dma_tables_handler start\n");
1445 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_sync_table_t));
1446 if (error)
1447 goto end_function;
1449 edbg("app_in_address is %08lx\n", command_args.app_in_address);
1450 edbg("app_out_address is %08lx\n", command_args.app_out_address);
1451 edbg("data_size is %lu\n", command_args.data_in_size);
1452 edbg("block_size is %lu\n", command_args.block_size);
1454 /* check if we need to build only input table or input/output */
1455 if (command_args.app_out_address)
1456 /* prepare input and output tables */
1457 error = sep_prepare_input_output_dma_table(sep,
1458 command_args.app_in_address,
1459 command_args.app_out_address,
1460 command_args.data_in_size,
1461 command_args.block_size,
1462 &command_args.in_table_address,
1463 &command_args.out_table_address, &command_args.in_table_num_entries, &command_args.out_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
1464 else
1465 /* prepare input tables */
1466 error = sep_prepare_input_dma_table(sep,
1467 command_args.app_in_address,
1468 command_args.data_in_size, command_args.block_size, &command_args.in_table_address, &command_args.in_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
1470 if (error)
1471 goto end_function;
1472 /* copy to user */
1473 if (copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_build_sync_table_t)))
1474 error = -EFAULT;
1475 end_function:
1476 dbg("SEP Driver:<-------- sep_create_sync_dma_tables_handler end\n");
1477 return error;
1481 this function handles the request for freeing dma table for synhronic actions
1483 static int sep_free_dma_table_data_handler(struct sep_device *sep)
1485 dbg("SEP Driver:--------> sep_free_dma_table_data_handler start\n");
1487 /* free input pages array */
1488 sep_free_dma_pages(sep->in_page_array, sep->in_num_pages, 0);
1490 /* free output pages array if needed */
1491 if (sep->out_page_array)
1492 sep_free_dma_pages(sep->out_page_array, sep->out_num_pages, 1);
1494 /* reset all the values */
1495 sep->in_page_array = 0;
1496 sep->out_page_array = 0;
1497 sep->in_num_pages = 0;
1498 sep->out_num_pages = 0;
1499 dbg("SEP Driver:<-------- sep_free_dma_table_data_handler end\n");
1500 return 0;
/*
 * Find a free slot for a new flow DMA table inside the shared flow
 * table area.  A slot is considered free when its first word (the
 * page-count word written by sep_prepare_one_flow_dma_table, top bit
 * masked off) is zero.  On success *table_address_ptr points at the
 * slot; returns -1 if the whole area is in use.
 */
static int sep_find_free_flow_dma_table_space(struct sep_device *sep,
					unsigned long **table_address_ptr)
{
	int error = 0;
	/* pointer to the id field of the flow dma table */
	unsigned long *start_table_ptr;
	/* Do not make start_addr unsigned long * unless fixing the offset
	   computations ! */
	void *flow_dma_area_start_addr;
	unsigned long *flow_dma_area_end_addr;
	/* maximum table size in words */
	unsigned long table_size_in_words;

	/* find the start address of the flow DMA table area */
	flow_dma_area_start_addr = sep->shared_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES;

	/* set end address of the flow table area */
	flow_dma_area_end_addr = flow_dma_area_start_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES;

	/* slot stride in words: the LLI entries plus 2 header words (the
	   page count and the page-array pointer) */
	table_size_in_words = SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE * (sizeof(struct sep_lli_entry_t) / sizeof(long)) + 2;

	/* set the pointer to the start address of DMA area */
	start_table_ptr = flow_dma_area_start_addr;

	/* scan slot by slot for a free table */
	while (((*start_table_ptr & 0x7FFFFFFF) != 0) && start_table_ptr < flow_dma_area_end_addr)
		start_table_ptr += table_size_in_words;

	/* check if we reached the end of the flow tables area */
	if (start_table_ptr >= flow_dma_area_end_addr)
		error = -1;
	else
		*table_address_ptr = start_table_ptr;

	return error;
}
/*
 * This function creates one DMA table for a flow inside the shared
 * flow-table area and returns its descriptor (bus address + packed
 * entry count / data size) in *table_data, plus a pointer to the
 * table's trailing info entry in *info_entry_ptr.
 *
 * Slot layout written here (matches sep_deallocated_flow_tables):
 *   word 0: number of locked pages (marks the slot as taken)
 *   word 1: kernel pointer to the struct page * array
 *   then the LLI entries, terminated by an info entry.
 */
static int sep_prepare_one_flow_dma_table(struct sep_device *sep,
					unsigned long virt_buff_addr,
					unsigned long virt_buff_size,
					struct sep_lli_entry_t *table_data,
					struct sep_lli_entry_t **info_entry_ptr,
					struct sep_flow_context_t *flow_data_ptr,
					bool isKernelVirtualAddress)
{
	int error;
	/* the range in pages */
	unsigned long lli_array_size;
	struct sep_lli_entry_t *lli_array;
	struct sep_lli_entry_t *flow_dma_table_entry_ptr;
	unsigned long *start_dma_table_ptr;
	/* total table data counter */
	unsigned long dma_table_data_count;
	/* pointer that will keep the pointer to the pages of the virtual buffer */
	struct page **page_array_ptr;
	unsigned long entry_count;

	/* find the space for the new table */
	error = sep_find_free_flow_dma_table_space(sep, &start_dma_table_ptr);
	if (error)
		goto end_function;

	/* check if the pages are in Kernel Virtual Address layout */
	if (isKernelVirtualAddress == true)
		/* lock kernel buffer in the memory */
		error = sep_lock_kernel_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
	else
		/* lock user buffer in the memory */
		error = sep_lock_user_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);

	if (error)
		goto end_function;

	/* set the page count at the beginning of table - this table is
	   now considered taken */
	*start_dma_table_ptr = lli_array_size;

	/* point to the place of the pages pointers of the table */
	start_dma_table_ptr++;

	/* set the pages pointer */
	*start_dma_table_ptr = (unsigned long) page_array_ptr;

	/* set the pointer to the first entry */
	flow_dma_table_entry_ptr = (struct sep_lli_entry_t *) (++start_dma_table_ptr);

	/* now create the entries for table */
	for (dma_table_data_count = entry_count = 0; entry_count < lli_array_size; entry_count++) {
		flow_dma_table_entry_ptr->physical_address = lli_array[entry_count].physical_address;

		flow_dma_table_entry_ptr->block_size = lli_array[entry_count].block_size;

		/* accumulate the total data of the table */
		dma_table_data_count += lli_array[entry_count].block_size;

		flow_dma_table_entry_ptr++;
	}

	/* set the physical address of the first entry.
	   NOTE(review): virt_to_phys() is used on the shared-area pointer;
	   other paths in this file translate shared-area addresses with
	   sep_shared_virt_to_bus() instead - confirm which the SEP expects */
	table_data->physical_address = virt_to_phys(start_dma_table_ptr);

	/* pack the num_entries (including info entry) and total data size */
	table_data->block_size = ((lli_array_size + 1) << SEP_NUM_ENTRIES_OFFSET_IN_BITS) | (dma_table_data_count);

	/* set the info entry */
	flow_dma_table_entry_ptr->physical_address = 0xffffffff;
	flow_dma_table_entry_ptr->block_size = 0;

	/* set the pointer to info entry */
	*info_entry_ptr = flow_dma_table_entry_ptr;

	/* free the array of the lli entries */
	kfree(lli_array);
end_function:
	return error;
}
1631 This function creates a list of tables for flow and returns the data for
1632 the first and last tables of the list
1634 static int sep_prepare_flow_dma_tables(struct sep_device *sep,
1635 unsigned long num_virtual_buffers,
1636 unsigned long first_buff_addr, struct sep_flow_context_t *flow_data_ptr, struct sep_lli_entry_t *first_table_data_ptr, struct sep_lli_entry_t *last_table_data_ptr, bool isKernelVirtualAddress)
1638 int error;
1639 unsigned long virt_buff_addr;
1640 unsigned long virt_buff_size;
1641 struct sep_lli_entry_t table_data;
1642 struct sep_lli_entry_t *info_entry_ptr;
1643 struct sep_lli_entry_t *prev_info_entry_ptr;
1644 unsigned long i;
1646 /* init vars */
1647 error = 0;
1648 prev_info_entry_ptr = 0;
1650 /* init the first table to default */
1651 table_data.physical_address = 0xffffffff;
1652 first_table_data_ptr->physical_address = 0xffffffff;
1653 table_data.block_size = 0;
1655 for (i = 0; i < num_virtual_buffers; i++) {
1656 /* get the virtual buffer address */
1657 error = get_user(virt_buff_addr, &first_buff_addr);
1658 if (error)
1659 goto end_function;
1661 /* get the virtual buffer size */
1662 first_buff_addr++;
1663 error = get_user(virt_buff_size, &first_buff_addr);
1664 if (error)
1665 goto end_function;
1667 /* advance the address to point to the next pair of address|size */
1668 first_buff_addr++;
1670 /* now prepare the one flow LLI table from the data */
1671 error = sep_prepare_one_flow_dma_table(sep, virt_buff_addr, virt_buff_size, &table_data, &info_entry_ptr, flow_data_ptr, isKernelVirtualAddress);
1672 if (error)
1673 goto end_function;
1675 if (i == 0) {
1676 /* if this is the first table - save it to return to the user
1677 application */
1678 *first_table_data_ptr = table_data;
1680 /* set the pointer to info entry */
1681 prev_info_entry_ptr = info_entry_ptr;
1682 } else {
1683 /* not first table - the previous table info entry should
1684 be updated */
1685 prev_info_entry_ptr->block_size = (0x1 << SEP_INT_FLAG_OFFSET_IN_BITS) | (table_data.block_size);
1687 /* set the pointer to info entry */
1688 prev_info_entry_ptr = info_entry_ptr;
1692 /* set the last table data */
1693 *last_table_data_ptr = table_data;
1694 end_function:
1695 return error;
/*
 * Walk the chain of flow DMA tables starting at first_table_ptr and
 * release the locked pages backing each one.  Relies on the slot
 * layout written by sep_prepare_one_flow_dma_table: two header words
 * (page count, page-array pointer) precede the LLI entries, and each
 * table's info entry points to the next table (0xffffffff terminates).
 */
static void sep_deallocated_flow_tables(struct sep_lli_entry_t *first_table_ptr)
{
	/* id pointer */
	unsigned long *table_ptr;
	/* end address of the flow dma area */
	unsigned long num_entries;
	unsigned long num_pages;
	struct page **pages_ptr;
	/* maximum table size in words */
	struct sep_lli_entry_t *info_entry_ptr;

	/* set the pointer to the first table.
	   NOTE(review): physical_address is dereferenced directly as a
	   kernel pointer here, with no bus->virt translation - confirm the
	   flow-table chain stores kernel-usable addresses */
	table_ptr = (unsigned long *) first_table_ptr->physical_address;

	/* set the num of entries (including the info entry) */
	num_entries = (first_table_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS)
	    & SEP_NUM_ENTRIES_MASK;

	/* go over all the connected tables */
	while (*table_ptr != 0xffffffff) {
		/* get number of pages from the header word before the entries */
		num_pages = *(table_ptr - 2);

		/* get the pointer to the pages */
		pages_ptr = (struct page **) (*(table_ptr - 1));

		/* free the pages */
		sep_free_dma_pages(pages_ptr, num_pages, 1);

		/* go to the info entry of this table */
		info_entry_ptr = ((struct sep_lli_entry_t *) table_ptr) + (num_entries - 1);

		/* follow the chain to the next table */
		table_ptr = (unsigned long *) info_entry_ptr->physical_address;
		num_entries = (info_entry_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
	}

	return;
}
1742 * sep_find_flow_context - find a flow
1743 * @sep: the SEP we are working with
1744 * @flow_id: flow identifier
1746 * Returns a pointer the matching flow, or NULL if the flow does not
1747 * exist.
1750 static struct sep_flow_context_t *sep_find_flow_context(struct sep_device *sep,
1751 unsigned long flow_id)
1753 int count;
1755 * always search for flow with id default first - in case we
1756 * already started working on the flow there can be no situation
1757 * when 2 flows are with default flag
1759 for (count = 0; count < SEP_DRIVER_NUM_FLOWS; count++) {
1760 if (sep->flows[count].flow_id == flow_id)
1761 return &sep->flows[count];
1763 return NULL;
1768 this function handles the request to create the DMA tables for flow
1770 static int sep_create_flow_dma_tables_handler(struct sep_device *sep,
1771 unsigned long arg)
1773 int error;
1774 struct sep_driver_build_flow_table_t command_args;
1775 /* first table - output */
1776 struct sep_lli_entry_t first_table_data;
1777 /* dma table data */
1778 struct sep_lli_entry_t last_table_data;
1779 /* pointer to the info entry of the previuos DMA table */
1780 struct sep_lli_entry_t *prev_info_entry_ptr;
1781 /* pointer to the flow data strucutre */
1782 struct sep_flow_context_t *flow_context_ptr;
1784 dbg("SEP Driver:--------> sep_create_flow_dma_tables_handler start\n");
1786 /* init variables */
1787 prev_info_entry_ptr = 0;
1788 first_table_data.physical_address = 0xffffffff;
1790 /* find the free structure for flow data */
1791 flow_context_ptr = sep_find_flow_context(sep, SEP_FREE_FLOW_ID);
1792 if (flow_context_ptr == NULL)
1793 goto end_function;
1795 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_flow_table_t));
1796 if (error)
1797 goto end_function;
1799 /* create flow tables */
1800 error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
1801 if (error)
1802 goto end_function_with_error;
1804 /* check if flow is static */
1805 if (!command_args.flow_type)
1806 /* point the info entry of the last to the info entry of the first */
1807 last_table_data = first_table_data;
1809 /* set output params */
1810 command_args.first_table_addr = first_table_data.physical_address;
1811 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
1812 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
1814 /* send the parameters to user application */
1815 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_build_flow_table_t));
1816 if (error)
1817 goto end_function_with_error;
1819 /* all the flow created - update the flow entry with temp id */
1820 flow_context_ptr->flow_id = SEP_TEMP_FLOW_ID;
1822 /* set the processing tables data in the context */
1823 if (command_args.input_output_flag == SEP_DRIVER_IN_FLAG)
1824 flow_context_ptr->input_tables_in_process = first_table_data;
1825 else
1826 flow_context_ptr->output_tables_in_process = first_table_data;
1828 goto end_function;
1830 end_function_with_error:
1831 /* free the allocated tables */
1832 sep_deallocated_flow_tables(&first_table_data);
1833 end_function:
1834 dbg("SEP Driver:<-------- sep_create_flow_dma_tables_handler end\n");
1835 return error;
1839 this function handles add tables to flow
1841 static int sep_add_flow_tables_handler(struct sep_device *sep, unsigned long arg)
1843 int error;
1844 unsigned long num_entries;
1845 struct sep_driver_add_flow_table_t command_args;
1846 struct sep_flow_context_t *flow_context_ptr;
1847 /* first dma table data */
1848 struct sep_lli_entry_t first_table_data;
1849 /* last dma table data */
1850 struct sep_lli_entry_t last_table_data;
1851 /* pointer to the info entry of the current DMA table */
1852 struct sep_lli_entry_t *info_entry_ptr;
1854 dbg("SEP Driver:--------> sep_add_flow_tables_handler start\n");
1856 /* get input parameters */
1857 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_flow_table_t));
1858 if (error)
1859 goto end_function;
1861 /* find the flow structure for the flow id */
1862 flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
1863 if (flow_context_ptr == NULL)
1864 goto end_function;
1866 /* prepare the flow dma tables */
1867 error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
1868 if (error)
1869 goto end_function_with_error;
1871 /* now check if there is already an existing add table for this flow */
1872 if (command_args.inputOutputFlag == SEP_DRIVER_IN_FLAG) {
1873 /* this buffer was for input buffers */
1874 if (flow_context_ptr->input_tables_flag) {
1875 /* add table already exists - add the new tables to the end
1876 of the previous */
1877 num_entries = (flow_context_ptr->last_input_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1879 info_entry_ptr = (struct sep_lli_entry_t *)
1880 (flow_context_ptr->last_input_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
1882 /* connect to list of tables */
1883 *info_entry_ptr = first_table_data;
1885 /* set the first table data */
1886 first_table_data = flow_context_ptr->first_input_table;
1887 } else {
1888 /* set the input flag */
1889 flow_context_ptr->input_tables_flag = 1;
1891 /* set the first table data */
1892 flow_context_ptr->first_input_table = first_table_data;
1894 /* set the last table data */
1895 flow_context_ptr->last_input_table = last_table_data;
1896 } else { /* this is output tables */
1898 /* this buffer was for input buffers */
1899 if (flow_context_ptr->output_tables_flag) {
1900 /* add table already exists - add the new tables to
1901 the end of the previous */
1902 num_entries = (flow_context_ptr->last_output_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1904 info_entry_ptr = (struct sep_lli_entry_t *)
1905 (flow_context_ptr->last_output_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
1907 /* connect to list of tables */
1908 *info_entry_ptr = first_table_data;
1910 /* set the first table data */
1911 first_table_data = flow_context_ptr->first_output_table;
1912 } else {
1913 /* set the input flag */
1914 flow_context_ptr->output_tables_flag = 1;
1916 /* set the first table data */
1917 flow_context_ptr->first_output_table = first_table_data;
1919 /* set the last table data */
1920 flow_context_ptr->last_output_table = last_table_data;
1923 /* set output params */
1924 command_args.first_table_addr = first_table_data.physical_address;
1925 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
1926 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
1928 /* send the parameters to user application */
1929 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_add_flow_table_t));
1930 end_function_with_error:
1931 /* free the allocated tables */
1932 sep_deallocated_flow_tables(&first_table_data);
1933 end_function:
1934 dbg("SEP Driver:<-------- sep_add_flow_tables_handler end\n");
1935 return error;
1939 this function add the flow add message to the specific flow
1941 static int sep_add_flow_tables_message_handler(struct sep_device *sep, unsigned long arg)
1943 int error;
1944 struct sep_driver_add_message_t command_args;
1945 struct sep_flow_context_t *flow_context_ptr;
1947 dbg("SEP Driver:--------> sep_add_flow_tables_message_handler start\n");
1949 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_message_t));
1950 if (error)
1951 goto end_function;
1953 /* check input */
1954 if (command_args.message_size_in_bytes > SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES) {
1955 error = -ENOMEM;
1956 goto end_function;
1959 /* find the flow context */
1960 flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
1961 if (flow_context_ptr == NULL)
1962 goto end_function;
1964 /* copy the message into context */
1965 flow_context_ptr->message_size_in_bytes = command_args.message_size_in_bytes;
1966 error = copy_from_user(flow_context_ptr->message, (void *) command_args.message_address, command_args.message_size_in_bytes);
1967 end_function:
1968 dbg("SEP Driver:<-------- sep_add_flow_tables_message_handler end\n");
1969 return error;
1974 this function returns the bus and virtual addresses of the static pool
1976 static int sep_get_static_pool_addr_handler(struct sep_device *sep, unsigned long arg)
1978 int error;
1979 struct sep_driver_static_pool_addr_t command_args;
1981 dbg("SEP Driver:--------> sep_get_static_pool_addr_handler start\n");
1983 /*prepare the output parameters in the struct */
1984 command_args.physical_static_address = sep->shared_bus + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
1985 command_args.virtual_static_address = (unsigned long)sep->shared_addr + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
1987 edbg("SEP Driver:bus_static_address is %08lx, virtual_static_address %08lx\n", command_args.physical_static_address, command_args.virtual_static_address);
1989 /* send the parameters to user application */
1990 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_static_pool_addr_t));
1991 dbg("SEP Driver:<-------- sep_get_static_pool_addr_handler end\n");
1992 return error;
1996 this address gets the offset of the physical address from the start
1997 of the mapped area
1999 static int sep_get_physical_mapped_offset_handler(struct sep_device *sep, unsigned long arg)
2001 int error;
2002 struct sep_driver_get_mapped_offset_t command_args;
2004 dbg("SEP Driver:--------> sep_get_physical_mapped_offset_handler start\n");
2006 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_get_mapped_offset_t));
2007 if (error)
2008 goto end_function;
2010 if (command_args.physical_address < sep->shared_bus) {
2011 error = -EINVAL;
2012 goto end_function;
2015 /*prepare the output parameters in the struct */
2016 command_args.offset = command_args.physical_address - sep->shared_bus;
2018 edbg("SEP Driver:bus_address is %08lx, offset is %lu\n", command_args.physical_address, command_args.offset);
2020 /* send the parameters to user application */
2021 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_get_mapped_offset_t));
2022 end_function:
2023 dbg("SEP Driver:<-------- sep_get_physical_mapped_offset_handler end\n");
2024 return error;
2031 static int sep_start_handler(struct sep_device *sep)
2033 unsigned long reg_val;
2034 unsigned long error = 0;
2036 dbg("SEP Driver:--------> sep_start_handler start\n");
2038 /* wait in polling for message from SEP */
2040 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2041 while (!reg_val);
2043 /* check the value */
2044 if (reg_val == 0x1)
2045 /* fatal error - read error status from GPRO */
2046 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2047 dbg("SEP Driver:<-------- sep_start_handler end\n");
2048 return error;
2052 this function handles the request for SEP initialization
2054 static int sep_init_handler(struct sep_device *sep, unsigned long arg)
2056 unsigned long message_word;
2057 unsigned long *message_ptr;
2058 struct sep_driver_init_t command_args;
2059 unsigned long counter;
2060 unsigned long error;
2061 unsigned long reg_val;
2063 dbg("SEP Driver:--------> sep_init_handler start\n");
2064 error = 0;
2066 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_init_t));
2068 dbg("SEP Driver:--------> sep_init_handler - finished copy_from_user \n");
2070 if (error)
2071 goto end_function;
2073 /* PATCH - configure the DMA to single -burst instead of multi-burst */
2074 /*sep_configure_dma_burst(); */
2076 dbg("SEP Driver:--------> sep_init_handler - finished sep_configure_dma_burst \n");
2078 message_ptr = (unsigned long *) command_args.message_addr;
2080 /* set the base address of the SRAM */
2081 sep_write_reg(sep, HW_SRAM_ADDR_REG_ADDR, HW_CC_SRAM_BASE_ADDRESS);
2083 for (counter = 0; counter < command_args.message_size_in_words; counter++, message_ptr++) {
2084 get_user(message_word, message_ptr);
2085 /* write data to SRAM */
2086 sep_write_reg(sep, HW_SRAM_DATA_REG_ADDR, message_word);
2087 edbg("SEP Driver:message_word is %lu\n", message_word);
2088 /* wait for write complete */
2089 sep_wait_sram_write(sep);
2091 dbg("SEP Driver:--------> sep_init_handler - finished getting messages from user space\n");
2092 /* signal SEP */
2093 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1);
2096 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2097 while (!(reg_val & 0xFFFFFFFD));
2099 dbg("SEP Driver:--------> sep_init_handler - finished waiting for reg_val & 0xFFFFFFFD \n");
2101 /* check the value */
2102 if (reg_val == 0x1) {
2103 edbg("SEP Driver:init failed\n");
2105 error = sep_read_reg(sep, 0x8060);
2106 edbg("SEP Driver:sw monitor is %lu\n", error);
2108 /* fatal error - read erro status from GPRO */
2109 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2110 edbg("SEP Driver:error is %lu\n", error);
2112 end_function:
2113 dbg("SEP Driver:<-------- sep_init_handler end\n");
2114 return error;
2119 this function handles the request cache and resident reallocation
2121 static int sep_realloc_cache_resident_handler(struct sep_device *sep,
2122 unsigned long arg)
2124 struct sep_driver_realloc_cache_resident_t command_args;
2125 int error;
2127 /* copy cache and resident to the their intended locations */
2128 error = sep_load_firmware(sep);
2129 if (error)
2130 return error;
2132 command_args.new_base_addr = sep->shared_bus;
2134 /* find the new base address according to the lowest address between
2135 cache, resident and shared area */
2136 if (sep->resident_bus < command_args.new_base_addr)
2137 command_args.new_base_addr = sep->resident_bus;
2138 if (sep->rar_bus < command_args.new_base_addr)
2139 command_args.new_base_addr = sep->rar_bus;
2141 /* set the return parameters */
2142 command_args.new_cache_addr = sep->rar_bus;
2143 command_args.new_resident_addr = sep->resident_bus;
2145 /* set the new shared area */
2146 command_args.new_shared_area_addr = sep->shared_bus;
2148 edbg("SEP Driver:command_args.new_shared_addr is %08llx\n", command_args.new_shared_area_addr);
2149 edbg("SEP Driver:command_args.new_base_addr is %08llx\n", command_args.new_base_addr);
2150 edbg("SEP Driver:command_args.new_resident_addr is %08llx\n", command_args.new_resident_addr);
2151 edbg("SEP Driver:command_args.new_rar_addr is %08llx\n", command_args.new_cache_addr);
2153 /* return to user */
2154 if (copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_realloc_cache_resident_t)))
2155 return -EFAULT;
2156 return 0;
2160 * sep_get_time_handler - time request from user space
2161 * @sep: sep we are to set the time for
2162 * @arg: pointer to user space arg buffer
2164 * This function reports back the time and the address in the SEP
2165 * shared buffer at which it has been placed. (Do we really need this!!!)
2168 static int sep_get_time_handler(struct sep_device *sep, unsigned long arg)
2170 struct sep_driver_get_time_t command_args;
2172 mutex_lock(&sep_mutex);
2173 command_args.time_value = sep_set_time(sep);
2174 command_args.time_physical_address = (unsigned long)sep_time_address(sep);
2175 mutex_unlock(&sep_mutex);
2176 if (copy_to_user((void __user *)arg,
2177 &command_args, sizeof(struct sep_driver_get_time_t)))
2178 return -EFAULT;
2179 return 0;
2184 This API handles the end transaction request
/*
 * sep_end_transaction_handler - "end transaction" ioctl.
 * @sep: the SEP device (unused in this build)
 * @arg: user argument (unused in this build)
 *
 * The actual teardown (masking the IMR, freeing the IRQ and releasing
 * the sep mutex) is compiled out via "#if 0" below, so as built this
 * handler only emits debug traces and always returns 0.
 */
2186 static int sep_end_transaction_handler(struct sep_device *sep, unsigned long arg)
2188 dbg("SEP Driver:--------> sep_end_transaction_handler start\n");
2190 #if 0 /*!SEP_DRIVER_POLLING_MODE */
2191 /* close IMR */
2192 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
2194 /* release IRQ line */
2195 free_irq(SEP_DIRVER_IRQ_NUM, sep);
/* NOTE(review): the comment below says "lock" but the call unlocks -
   confirm intent before ever re-enabling this block */
2197 /* lock the sep mutex */
2198 mutex_unlock(&sep_mutex);
2199 #endif
2201 dbg("SEP Driver:<-------- sep_end_transaction_handler end\n");
2203 return 0;
2208 * sep_set_flow_id_handler - handle flow setting
2209 * @sep: the SEP we are configuring
2210 * @flow_id: the flow we are setting
2212 * This function handler the set flow id command
2214 static int sep_set_flow_id_handler(struct sep_device *sep,
2215 unsigned long flow_id)
2217 int error = 0;
2218 struct sep_flow_context_t *flow_data_ptr;
2220 /* find the flow data structure that was just used for creating new flow
2221 - its id should be default */
2223 mutex_lock(&sep_mutex);
2224 flow_data_ptr = sep_find_flow_context(sep, SEP_TEMP_FLOW_ID);
2225 if (flow_data_ptr)
2226 flow_data_ptr->flow_id = flow_id; /* set flow id */
2227 else
2228 error = -EINVAL;
2229 mutex_unlock(&sep_mutex);
2230 return error;
2233 static int sep_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
2235 int error = 0;
2236 struct sep_device *sep = filp->private_data;
2238 dbg("------------>SEP Driver: ioctl start\n");
2240 edbg("SEP Driver: cmd is %x\n", cmd);
2242 switch (cmd) {
2243 case SEP_IOCSENDSEPCOMMAND:
2244 /* send command to SEP */
2245 sep_send_command_handler(sep);
2246 edbg("SEP Driver: after sep_send_command_handler\n");
2247 break;
2248 case SEP_IOCSENDSEPRPLYCOMMAND:
2249 /* send reply command to SEP */
2250 sep_send_reply_command_handler(sep);
2251 break;
2252 case SEP_IOCALLOCDATAPOLL:
2253 /* allocate data pool */
2254 error = sep_allocate_data_pool_memory_handler(sep, arg);
2255 break;
2256 case SEP_IOCWRITEDATAPOLL:
2257 /* write data into memory pool */
2258 error = sep_write_into_data_pool_handler(sep, arg);
2259 break;
2260 case SEP_IOCREADDATAPOLL:
2261 /* read data from data pool into application memory */
2262 error = sep_read_from_data_pool_handler(sep, arg);
2263 break;
2264 case SEP_IOCCREATESYMDMATABLE:
2265 /* create dma table for synhronic operation */
2266 error = sep_create_sync_dma_tables_handler(sep, arg);
2267 break;
2268 case SEP_IOCCREATEFLOWDMATABLE:
2269 /* create flow dma tables */
2270 error = sep_create_flow_dma_tables_handler(sep, arg);
2271 break;
2272 case SEP_IOCFREEDMATABLEDATA:
2273 /* free the pages */
2274 error = sep_free_dma_table_data_handler(sep);
2275 break;
2276 case SEP_IOCSETFLOWID:
2277 /* set flow id */
2278 error = sep_set_flow_id_handler(sep, (unsigned long)arg);
2279 break;
2280 case SEP_IOCADDFLOWTABLE:
2281 /* add tables to the dynamic flow */
2282 error = sep_add_flow_tables_handler(sep, arg);
2283 break;
2284 case SEP_IOCADDFLOWMESSAGE:
2285 /* add message of add tables to flow */
2286 error = sep_add_flow_tables_message_handler(sep, arg);
2287 break;
2288 case SEP_IOCSEPSTART:
2289 /* start command to sep */
2290 error = sep_start_handler(sep);
2291 break;
2292 case SEP_IOCSEPINIT:
2293 /* init command to sep */
2294 error = sep_init_handler(sep, arg);
2295 break;
2296 case SEP_IOCGETSTATICPOOLADDR:
2297 /* get the physical and virtual addresses of the static pool */
2298 error = sep_get_static_pool_addr_handler(sep, arg);
2299 break;
2300 case SEP_IOCENDTRANSACTION:
2301 error = sep_end_transaction_handler(sep, arg);
2302 break;
2303 case SEP_IOCREALLOCCACHERES:
2304 error = sep_realloc_cache_resident_handler(sep, arg);
2305 break;
2306 case SEP_IOCGETMAPPEDADDROFFSET:
2307 error = sep_get_physical_mapped_offset_handler(sep, arg);
2308 break;
2309 case SEP_IOCGETIME:
2310 error = sep_get_time_handler(sep, arg);
2311 break;
2312 default:
2313 error = -ENOTTY;
2314 break;
2316 dbg("SEP Driver:<-------- ioctl end\n");
2317 return error;
2322 #if !SEP_DRIVER_POLLING_MODE
2324 /* handler for flow done interrupt */
/*
 * sep_flow_done_handler - workqueue callback run when a flow completes.
 * @work: the work_struct embedded in a struct sep_flow_context_t
 *
 * Frees the input (and, when marked valid, output) DMA tables of the
 * finished flow; if more input tables are queued, copies the stored
 * flow message to/from the shared area and signals the SEP via GPR2.
 *
 * NOTE(review): the cast below assumes the work_struct is the FIRST
 * member of sep_flow_context_t (container_of would be safer) - confirm.
 * NOTE(review): 'sep' is not declared in this function; it must resolve
 * to a file-scope symbol not visible in this chunk - verify it exists.
 */
2326 static void sep_flow_done_handler(struct work_struct *work)
2328 struct sep_flow_context_t *flow_data_ptr;
2330 /* obtain the mutex */
2331 mutex_lock(&sep_mutex);
2333 /* get the pointer to context */
2334 flow_data_ptr = (struct sep_flow_context_t *) work;
2336 /* free all the current input tables in sep */
2337 sep_deallocated_flow_tables(&flow_data_ptr->input_tables_in_process);
2339 /* free all the current tables output tables in SEP (if needed) */
/* 0xffffffff is the "no table allocated" sentinel used elsewhere */
2340 if (flow_data_ptr->output_tables_in_process.physical_address != 0xffffffff)
2341 sep_deallocated_flow_tables(&flow_data_ptr->output_tables_in_process);
2343 /* check if we have additional tables to be sent to SEP only input
2344 flag may be checked */
2345 if (flow_data_ptr->input_tables_flag) {
2346 /* copy the message to the shared RAM and signal SEP */
2347 memcpy((void *) flow_data_ptr->message, (void *) sep->shared_addr, flow_data_ptr->message_size_in_bytes);
2349 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, 0x2);
2351 mutex_unlock(&sep_mutex);
2354 interrupt handler function
/*
 * sep_inthandler - SEP shared interrupt handler.
 * @irq: the interrupt number
 * @dev_id: the struct sep_device passed to request_irq()
 *
 * Reads the IRR to identify the interrupt source.  The flow-done path
 * (bit 11) is currently disabled by the hard-coded "if (0 ...)" below;
 * only the SEP reply interrupt (bit 13) is live: it increments
 * reply_ct and wakes sleepers on sep_event.  Unrecognized interrupts
 * return IRQ_NONE without clearing the ICR; handled ones write the
 * read IRR value back to the ICR to acknowledge.
 */
2356 static irqreturn_t sep_inthandler(int irq, void *dev_id)
2358 irqreturn_t int_error;
2359 unsigned long reg_val;
2360 unsigned long flow_id;
2361 struct sep_flow_context_t *flow_context_ptr;
2362 struct sep_device *sep = dev_id;
2364 int_error = IRQ_HANDLED;
2366 /* read the IRR register to check if this is SEP interrupt */
2367 reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
2368 edbg("SEP Interrupt - reg is %08lx\n", reg_val);
2370 /* check if this is the flow interrupt */
/* NOTE(review): flow-done handling is dead code while this stays 0 */
2371 if (0 /*reg_val & (0x1 << 11) */ ) {
2372 /* read GPRO to find out the which flow is done */
/* NOTE(review): comment says GPR0 but this re-reads IRR - confirm */
2373 flow_id = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
2375 /* find the contex of the flow */
/* the flow id lives in the top 4 bits of the read value */
2376 flow_context_ptr = sep_find_flow_context(sep, flow_id >> 28);
2377 if (flow_context_ptr == NULL)
2378 goto end_function_with_error;
2380 /* queue the work */
2381 INIT_WORK(&flow_context_ptr->flow_wq, sep_flow_done_handler);
2382 queue_work(sep->flow_wq, &flow_context_ptr->flow_wq);
2384 } else {
2385 /* check if this is reply interrupt from SEP */
2386 if (reg_val & (0x1 << 13)) {
2387 /* update the counter of reply messages */
2388 sep->reply_ct++;
2389 /* wake up the waiting process */
2390 wake_up(&sep_event);
2391 } else {
2392 int_error = IRQ_NONE;
2393 goto end_function;
2396 end_function_with_error:
2397 /* clear the interrupt */
2398 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
2399 end_function:
2400 return int_error;
2403 #endif
2407 #if 0
/*
 * sep_wait_busy - spin until the SEP busy register reads zero.
 * @sep: the SEP device
 *
 * Compiled out together with sep_configure_dma_burst() below.
 */
2409 static void sep_wait_busy(struct sep_device *sep)
2411 u32 reg;
2413 do {
2414 reg = sep_read_reg(sep, HW_HOST_SEP_BUSY_REG_ADDR);
2415 } while (reg);
2419 PATCH for configuring the DMA to single burst instead of multi-burst
/*
 * sep_configure_dma_burst - workaround: force the SEP DMA engine into
 * single-burst mode (currently compiled out by the surrounding #if 0).
 * @sep: the SEP device
 *
 * Sequence: request register access from the SEP via GPR0 and wait
 * non-busy, write 0 to the AHB read/write bursts register, then
 * release the SEP via GPR0 and wait non-busy again.
 */
2421 static void sep_configure_dma_burst(struct sep_device *sep)
2423 #define HW_AHB_RD_WR_BURSTS_REG_ADDR 0x0E10UL
2425 dbg("SEP Driver:<-------- sep_configure_dma_burst start \n");
2427 /* request access to registers from SEP */
2428 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
2430 dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (write reg) \n");
2432 sep_wait_busy(sep);
2434 dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (while(revVal) wait loop) \n");
2436 /* set the DMA burst register to single burst */
2437 sep_write_reg(sep, HW_AHB_RD_WR_BURSTS_REG_ADDR, 0x0UL);
2439 /* release the sep busy */
2440 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x0UL);
2441 sep_wait_busy(sep);
2443 dbg("SEP Driver:<-------- sep_configure_dma_burst done \n");
2447 #endif
2450 Function that is activaed on the succesful probe of the SEP device
2452 static int __devinit sep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2454 int error = 0;
2455 struct sep_device *sep;
2456 int counter;
2457 int size; /* size of memory for allocation */
2459 edbg("Sep pci probe starting\n");
2460 if (sep_dev != NULL) {
2461 dev_warn(&pdev->dev, "only one SEP supported.\n");
2462 return -EBUSY;
2465 /* enable the device */
2466 error = pci_enable_device(pdev);
2467 if (error) {
2468 edbg("error enabling pci device\n");
2469 goto end_function;
2472 /* set the pci dev pointer */
2473 sep_dev = &sep_instance;
2474 sep = &sep_instance;
2476 edbg("sep->shared_addr = %p\n", sep->shared_addr);
2477 /* transaction counter that coordinates the transactions between SEP
2478 and HOST */
2479 sep->send_ct = 0;
2480 /* counter for the messages from sep */
2481 sep->reply_ct = 0;
2482 /* counter for the number of bytes allocated in the pool
2483 for the current transaction */
2484 sep->data_pool_bytes_allocated = 0;
2486 /* calculate the total size for allocation */
2487 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2488 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2490 /* allocate the shared area */
2491 if (sep_map_and_alloc_shared_area(sep, size)) {
2492 error = -ENOMEM;
2493 /* allocation failed */
2494 goto end_function_error;
2496 /* now set the memory regions */
2497 #if (SEP_DRIVER_RECONFIG_MESSAGE_AREA == 1)
2498 /* Note: this test section will need moving before it could ever
2499 work as the registers are not yet mapped ! */
2500 /* send the new SHARED MESSAGE AREA to the SEP */
2501 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
2503 /* poll for SEP response */
2504 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2505 while (retval != 0xffffffff && retval != sep->shared_bus)
2506 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2508 /* check the return value (register) */
2509 if (retval != sep->shared_bus) {
2510 error = -ENOMEM;
2511 goto end_function_deallocate_sep_shared_area;
2513 #endif
2514 /* init the flow contextes */
2515 for (counter = 0; counter < SEP_DRIVER_NUM_FLOWS; counter++)
2516 sep->flows[counter].flow_id = SEP_FREE_FLOW_ID;
2518 sep->flow_wq = create_singlethread_workqueue("sepflowwq");
2519 if (sep->flow_wq == NULL) {
2520 error = -ENOMEM;
2521 edbg("sep_driver:flow queue creation failed\n");
2522 goto end_function_deallocate_sep_shared_area;
2524 edbg("SEP Driver: create flow workqueue \n");
2525 sep->pdev = pci_dev_get(pdev);
2527 sep->reg_addr = pci_ioremap_bar(pdev, 0);
2528 if (!sep->reg_addr) {
2529 edbg("sep: ioremap of registers failed.\n");
2530 goto end_function_deallocate_sep_shared_area;
2532 edbg("SEP Driver:reg_addr is %p\n", sep->reg_addr);
2534 /* load the rom code */
2535 sep_load_rom_code(sep);
2537 /* set up system base address and shared memory location */
2538 sep->rar_addr = dma_alloc_coherent(&sep->pdev->dev,
2539 2 * SEP_RAR_IO_MEM_REGION_SIZE,
2540 &sep->rar_bus, GFP_KERNEL);
2542 if (!sep->rar_addr) {
2543 edbg("SEP Driver:can't allocate rar\n");
2544 goto end_function_uniomap;
2548 edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
2549 edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
2551 #if !SEP_DRIVER_POLLING_MODE
2553 edbg("SEP Driver: about to write IMR and ICR REG_ADDR\n");
2555 /* clear ICR register */
2556 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
2558 /* set the IMR register - open only GPR 2 */
2559 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
2561 edbg("SEP Driver: about to call request_irq\n");
2562 /* get the interrupt line */
2563 error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED, "sep_driver", sep);
2564 if (error)
2565 goto end_function_free_res;
2566 return 0;
2567 edbg("SEP Driver: about to write IMR REG_ADDR");
2569 /* set the IMR register - open only GPR 2 */
2570 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
2572 end_function_free_res:
2573 dma_free_coherent(&sep->pdev->dev, 2 * SEP_RAR_IO_MEM_REGION_SIZE,
2574 sep->rar_addr, sep->rar_bus);
2575 #endif /* SEP_DRIVER_POLLING_MODE */
2576 end_function_uniomap:
2577 iounmap(sep->reg_addr);
2578 end_function_deallocate_sep_shared_area:
2579 /* de-allocate shared area */
2580 sep_unmap_and_free_shared_area(sep, size);
2581 end_function_error:
2582 sep_dev = NULL;
2583 end_function:
2584 return error;
/* PCI ids this driver binds to (Intel SEP security processor) */
2587 static struct pci_device_id sep_pci_id_tbl[] = {
2588 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080c)},
/* NOTE(review): the table's sentinel entry and closing brace are not
   visible in this chunk (extraction artifact) - verify in the full file */
2592 MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
2594 /* field for registering driver to PCI device */
2595 static struct pci_driver sep_pci_driver = {
2596 .name = "sep_sec_driver",
2597 .id_table = sep_pci_id_tbl,
2598 .probe = sep_probe
2599 /* FIXME: remove handler */
2602 /* major and minor device numbers */
2603 static dev_t sep_devno;
2605 /* the files operations structure of the driver */
2606 static struct file_operations sep_file_operations = {
2607 .owner = THIS_MODULE,
2608 .ioctl = sep_ioctl,
2609 .poll = sep_poll,
2610 .open = sep_open,
2611 .release = sep_release,
2612 .mmap = sep_mmap,
2616 /* cdev struct of the driver */
2617 static struct cdev sep_cdev;
2620 this function registers the driver to the file system
2622 static int sep_register_driver_to_fs(void)
2624 int ret_val = alloc_chrdev_region(&sep_devno, 0, 1, "sep_sec_driver");
2625 if (ret_val) {
2626 edbg("sep: major number allocation failed, retval is %d\n",
2627 ret_val);
2628 return ret_val;
2630 /* init cdev */
2631 cdev_init(&sep_cdev, &sep_file_operations);
2632 sep_cdev.owner = THIS_MODULE;
2634 /* register the driver with the kernel */
2635 ret_val = cdev_add(&sep_cdev, sep_devno, 1);
2636 if (ret_val) {
2637 edbg("sep_driver:cdev_add failed, retval is %d\n", ret_val);
2638 /* unregister dev numbers */
2639 unregister_chrdev_region(sep_devno, 1);
2641 return ret_val;
2645 /*--------------------------------------------------------------
2646 init function
2647 ----------------------------------------------------------------*/
2648 static int __init sep_init(void)
2650 int ret_val = 0;
2651 dbg("SEP Driver:-------->Init start\n");
2652 /* FIXME: Probe can occur before we are ready to survive a probe */
2653 ret_val = pci_register_driver(&sep_pci_driver);
2654 if (ret_val) {
2655 edbg("sep_driver:sep_driver_to_device failed, ret_val is %d\n", ret_val);
2656 goto end_function_unregister_from_fs;
2658 /* register driver to fs */
2659 ret_val = sep_register_driver_to_fs();
2660 if (ret_val)
2661 goto end_function_unregister_pci;
2662 goto end_function;
2663 end_function_unregister_pci:
2664 pci_unregister_driver(&sep_pci_driver);
2665 end_function_unregister_from_fs:
2666 /* unregister from fs */
2667 cdev_del(&sep_cdev);
2668 /* unregister dev numbers */
2669 unregister_chrdev_region(sep_devno, 1);
2670 end_function:
2671 dbg("SEP Driver:<-------- Init end\n");
2672 return ret_val;
2676 /*-------------------------------------------------------------
2677 exit function
2678 --------------------------------------------------------------*/
2679 static void __exit sep_exit(void)
2681 int size;
2683 dbg("SEP Driver:--------> Exit start\n");
2685 /* unregister from fs */
2686 cdev_del(&sep_cdev);
2687 /* unregister dev numbers */
2688 unregister_chrdev_region(sep_devno, 1);
2689 /* calculate the total size for de-allocation */
2690 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2691 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2692 /* FIXME: We need to do this in the unload for the device */
2693 /* free shared area */
2694 if (sep_dev) {
2695 sep_unmap_and_free_shared_area(sep_dev, size);
2696 edbg("SEP Driver: free pages SEP SHARED AREA \n");
2697 iounmap((void *) sep_dev->reg_addr);
2698 edbg("SEP Driver: iounmap \n");
2700 edbg("SEP Driver: release_mem_region \n");
2701 dbg("SEP Driver:<-------- Exit end\n");
2705 module_init(sep_init);
2706 module_exit(sep_exit);
2708 MODULE_LICENSE("GPL");