Staging: sep: rename some fields
[linux-2.6/btrfs-unstable.git] / drivers / staging / sep / sep_driver.c
blobaf2d6c929ce76bebbf71422d57170d488cbbfb63
1 /*
3 * sep_main_mod.c - Security Processor Driver main group of functions
5 * Copyright(c) 2009 Intel Corporation. All rights reserved.
6 * Copyright(c) 2009 Discretix. All rights reserved.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 * CONTACTS:
24 * Mark Allyn mark.a.allyn@intel.com
26 * CHANGES:
28 * 2009.06.26 Initial publish
32 #include <linux/init.h>
33 #include <linux/module.h>
34 #include <linux/fs.h>
35 #include <linux/cdev.h>
36 #include <linux/kdev_t.h>
37 #include <linux/mutex.h>
38 #include <linux/mm.h>
39 #include <linux/poll.h>
40 #include <linux/wait.h>
41 #include <linux/pci.h>
42 #include <linux/firmware.h>
43 #include <asm/ioctl.h>
44 #include <linux/ioport.h>
45 #include <asm/io.h>
46 #include <linux/interrupt.h>
47 #include <linux/pagemap.h>
48 #include <asm/cacheflush.h>
49 #include "sep_driver_hw_defs.h"
50 #include "sep_driver_config.h"
51 #include "sep_driver_api.h"
52 #include "sep_dev.h"
54 #if SEP_DRIVER_ARM_DEBUG_MODE
56 #define CRYS_SEP_ROM_length 0x4000
57 #define CRYS_SEP_ROM_start_address 0x8000C000UL
58 #define CRYS_SEP_ROM_start_address_offset 0xC000UL
59 #define SEP_ROM_BANK_register 0x80008420UL
60 #define SEP_ROM_BANK_register_offset 0x8420UL
61 #define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0x82000000
/*
 * THESE 2 definitions are specific to the board - must be
 * defined during integration
 */
67 #define SEP_RAR_IO_MEM_REGION_START_ADDRESS 0xFF0D0000
69 /* 2M size */
71 static void sep_load_rom_code(void)
73 /* Index variables */
74 unsigned long i, k, j;
75 u32 reg;
76 u32 error;
77 u32 warning;
79 /* Loading ROM from SEP_ROM_image.h file */
80 k = sizeof(CRYS_SEP_ROM);
82 edbg("SEP Driver: DX_CC_TST_SepRomLoader start\n");
84 edbg("SEP Driver: k is %lu\n", k);
85 edbg("SEP Driver: sep_dev->reg_addr is %p\n", sep_dev->reg_addr);
86 edbg("SEP Driver: CRYS_SEP_ROM_start_address_offset is %p\n", CRYS_SEP_ROM_start_address_offset);
88 for (i = 0; i < 4; i++) {
89 /* write bank */
90 sep_write_reg(sep_dev, SEP_ROM_BANK_register_offset, i);
92 for (j = 0; j < CRYS_SEP_ROM_length / 4; j++) {
93 sep_write_reg(sep_dev, CRYS_SEP_ROM_start_address_offset + 4 * j, CRYS_SEP_ROM[i * 0x1000 + j]);
95 k = k - 4;
97 if (k == 0) {
98 j = CRYS_SEP_ROM_length;
99 i = 4;
104 /* reset the SEP */
105 sep_write_reg(sep_dev, HW_HOST_SEP_SW_RST_REG_ADDR, 0x1);
107 /* poll for SEP ROM boot finish */
108 do {
109 reg = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
110 } while (!reg);
112 edbg("SEP Driver: ROM polling ended\n");
114 switch (reg) {
115 case 0x1:
116 /* fatal error - read erro status from GPRO */
117 error = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
118 edbg("SEP Driver: ROM polling case 1\n");
119 break;
120 case 0x2:
121 /* Boot First Phase ended */
122 warning = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
123 edbg("SEP Driver: ROM polling case 2\n");
124 break;
125 case 0x4:
126 /* Cold boot ended successfully */
127 warning = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
128 edbg("SEP Driver: ROM polling case 4\n");
129 error = 0;
130 break;
131 case 0x8:
132 /* Warmboot ended successfully */
133 warning = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
134 edbg("SEP Driver: ROM polling case 8\n");
135 error = 0;
136 break;
137 case 0x10:
138 /* ColdWarm boot ended successfully */
139 warning = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
140 edbg("SEP Driver: ROM polling case 16\n");
141 error = 0;
142 break;
143 case 0x20:
144 edbg("SEP Driver: ROM polling case 32\n");
145 break;
150 #else
151 static void sep_load_rom_code(void) { }
152 #endif /* SEP_DRIVER_ARM_DEBUG_MODE */
156 /*----------------------------------------
157 DEFINES
158 -----------------------------------------*/
160 #define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
161 #define BASE_ADDRESS_FOR_SYSTEM 0xfffc0000
162 #define SEP_RAR_IO_MEM_REGION_SIZE 0x40000
164 /*--------------------------------------------
165 GLOBAL variables
166 --------------------------------------------*/
168 /* debug messages level */
169 INT_MODULE_PARM(sepDebug, 0x0);
170 MODULE_PARM_DESC(sepDebug, "Flag to enable SEP debug messages");
172 /* Keep this a single static object for now to keep the conversion easy */
174 static struct sep_device sep_instance;
175 static struct sep_device *sep_dev = &sep_instance;
/* mutex for the access to the internals of the sep driver */
180 static DEFINE_MUTEX(sep_mutex);
183 /* wait queue head (event) of the driver */
184 static DECLARE_WAIT_QUEUE_HEAD(sep_event);
/*
  This function copies the cache and resident from their source location into
  destination memory, which is external to Linux VM and is given as
  physical address
*/
191 static int sep_copy_cache_resident_to_area(unsigned long src_cache_addr, unsigned long cache_size_in_bytes, unsigned long src_resident_addr, unsigned long resident_size_in_bytes, unsigned long *dst_new_cache_addr_ptr, unsigned long *dst_new_resident_addr_ptr)
193 unsigned long resident_addr;
194 unsigned long cache_addr;
195 const struct firmware *fw;
197 char *cache_name = "cache.image.bin";
198 char *res_name = "resident.image.bin";
200 /* error */
201 int error;
203 /*--------------------------------
204 CODE
205 -------------------------------------*/
206 error = 0;
208 edbg("SEP Driver:rar_virtual is %p\n", sep_dev->rar_addr);
209 edbg("SEP Driver:rar_physical is %08lx\n", sep_dev->rar_bus);
211 sep_dev->rar_region_addr = (unsigned long) sep_dev->rar_addr;
213 sep_dev->cache_bus = sep_dev->rar_bus;
214 sep_dev->cache_addr = sep_dev->rar_addr;
216 /* load cache */
217 error = request_firmware(&fw, cache_name, &sep_dev->pdev->dev);
218 if (error) {
219 edbg("SEP Driver:cant request cache fw\n");
220 goto end_function;
223 edbg("SEP Driver:cache data loc is %p\n", (void *) fw->data);
224 edbg("SEP Driver:cache data size is %08Zx\n", fw->size);
226 memcpy((void *) sep_dev->cache_addr, (void *) fw->data, fw->size);
228 sep_dev->cache_size = fw->size;
230 cache_addr = (unsigned long) sep_dev->cache_addr;
232 release_firmware(fw);
234 sep_dev->resident_bus = sep_dev->cache_bus + sep_dev->cache_size;
235 sep_dev->resident_addr = sep_dev->cache_addr + sep_dev->cache_size;
237 /* load resident */
238 error = request_firmware(&fw, res_name, &sep_dev->pdev->dev);
239 if (error) {
240 edbg("SEP Driver:cant request res fw\n");
241 goto end_function;
244 edbg("SEP Driver:res data loc is %p\n", (void *) fw->data);
245 edbg("SEP Driver:res data size is %08Zx\n", fw->size);
247 memcpy((void *) sep_dev->resident_addr, (void *) fw->data, fw->size);
249 sep_dev->resident_size = fw->size;
251 release_firmware(fw);
253 resident_addr = (unsigned long) sep_dev->resident_addr;
255 edbg("SEP Driver:resident_addr (physical )is %08lx\n", sep_dev->resident_bus);
256 edbg("SEP Driver:cache_addr (physical) is %08lx\n", sep_dev->cache_bus);
258 edbg("SEP Driver:resident_addr (logical )is %08lx\n", resident_addr);
259 edbg("SEP Driver:cache_addr (logical) is %08lx\n", cache_addr);
261 edbg("SEP Driver:resident_size is %08lx\n", sep_dev->resident_size);
262 edbg("SEP Driver:cache_size is %08lx\n", sep_dev->cache_size);
266 /* physical addresses */
267 *dst_new_cache_addr_ptr = sep_dev->cache_bus;
268 *dst_new_resident_addr_ptr = sep_dev->resident_bus;
269 end_function:
270 return error;
/*
  This function maps and allocates the shared area on the external RAM
  (device).  The input is shared_area_size - the size of the memory to
  allocate.  The outputs are kernel_shared_area_addr_ptr - the kernel
  address of the mapped and allocated shared area, and
  phys_shared_area_addr_ptr - the physical address of the shared area
*/
283 static int sep_map_and_alloc_shared_area(unsigned long shared_area_size, unsigned long *kernel_shared_area_addr_ptr, unsigned long *phys_shared_area_addr_ptr)
285 // shared_addr = ioremap_nocache(0xda00000,shared_area_size);
286 sep_dev->shared_addr = kmalloc(shared_area_size, GFP_KERNEL);
287 if (!sep_dev->shared_addr) {
288 edbg("sep_driver:shared memory kmalloc failed\n");
289 return -1;
291 /* FIXME */
292 sep_dev->shared_bus = __pa(sep_dev->shared_addr);
293 /* shared_bus = 0xda00000; */
294 *kernel_shared_area_addr_ptr = (unsigned long) sep_dev->shared_addr;
295 /* set the physical address of the shared area */
296 *phys_shared_area_addr_ptr = sep_dev->shared_bus;
297 edbg("SEP Driver:shared_addr is %p\n", sep_dev->shared_addr);
298 edbg("SEP Driver:shared_region_size is %08lx\n", shared_area_size);
299 edbg("SEP Driver:shared_physical_addr is %08lx\n", *phys_shared_area_addr_ptr);
301 return 0;
/*
  This function unmaps and deallocates the shared area on the external
  RAM (device).  The input is shared_area_size - the size of the memory
  to deallocate, kernel_shared_area_addr - the kernel address of the
  mapped and allocated shared area, phys_shared_area_addr - the physical
  address of the shared area
*/
/*
  Release the shared area previously obtained from
  sep_map_and_alloc_shared_area().  Only the kernel virtual address is
  needed to free the kmalloc'ed buffer; the size and physical address
  parameters are kept for interface symmetry and are unused.
*/
static void sep_unmap_and_free_shared_area(unsigned long shared_area_size, unsigned long kernel_shared_area_addr, unsigned long phys_shared_area_addr)
{
	kfree((void *) kernel_shared_area_addr);
}
/*
  This function returns the physical address inside the shared area
  according to the virtual address.  It can be either on the external
  RAM device (ioremapped), or on the system RAM.
  This implementation is for the external RAM
*/
323 static unsigned long sep_shared_area_virt_to_phys(unsigned long virt_address)
325 edbg("SEP Driver:sh virt to phys v %08lx\n", virt_address);
326 edbg("SEP Driver:sh virt to phys p %08lx\n", sep_dev->shared_bus + (virt_address - (unsigned long) sep_dev->shared_addr));
328 return (unsigned long) sep_dev->shared_bus + (virt_address - (unsigned long) sep_dev->shared_addr);
/*
  This function returns the virtual address inside the shared area
  according to the physical address.  It can be either on the external
  RAM device (ioremapped), or on the system RAM.
  This implementation is for the external RAM
*/
337 static unsigned long sep_shared_area_phys_to_virt(unsigned long phys_address)
339 return (unsigned long) sep_dev->shared_addr + (phys_address - sep_dev->shared_bus);
343 /*----------------------------------------------------------------------
344 open function of the character driver - must only lock the mutex
345 must also release the memory data pool allocations
346 ------------------------------------------------------------------------*/
347 static int sep_open(struct inode *inode, struct file *filp)
349 int error = 0;
351 dbg("SEP Driver:--------> open start\n");
353 /* check the blocking mode */
354 if (filp->f_flags & O_NDELAY)
355 error = mutex_trylock(&sep_mutex);
356 else
357 /* lock mutex */
358 mutex_lock(&sep_mutex);
360 /* check the error */
361 if (error) {
362 edbg("SEP Driver: down_interruptible failed\n");
363 goto end_function;
366 /* release data pool allocations */
367 sep_dev->data_pool_bytes_allocated = 0;
369 end_function:
370 dbg("SEP Driver:<-------- open end\n");
371 return error;
377 /*------------------------------------------------------------
378 release function
379 -------------------------------------------------------------*/
380 static int sep_release(struct inode *inode_ptr, struct file *file_ptr)
382 dbg("----------->SEP Driver: sep_release start\n");
384 #if 0 /*!SEP_DRIVER_POLLING_MODE */
385 /* close IMR */
386 sep_write_reg(sep_dev, HW_HOST_IMR_REG_ADDR, 0x7FFF);
387 /* release IRQ line */
388 free_irq(SEP_DIRVER_IRQ_NUM, &sep_dev->reg_addr);
390 #endif
391 /* unlock the sep mutex */
392 mutex_unlock(&sep_mutex);
393 dbg("SEP Driver:<-------- sep_release end\n");
394 return 0;
400 /*---------------------------------------------------------------
401 map function - this functions maps the message shared area
402 -----------------------------------------------------------------*/
403 static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
405 unsigned long phys_addr;
407 dbg("-------->SEP Driver: mmap start\n");
409 /* check that the size of the mapped range is as the size of the message
410 shared area */
411 if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
412 edbg("SEP Driver mmap requested size is more than allowed\n");
413 printk(KERN_WARNING "SEP Driver mmap requested size is more \
414 than allowed\n");
415 printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_end);
416 printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_start);
417 return -EAGAIN;
420 edbg("SEP Driver:sep_dev->message_shared_area_addr is %08lx\n", sep_dev->message_shared_area_addr);
422 /* get physical address */
423 phys_addr = sep_dev->shared_area_bus;
425 edbg("SEP Driver: phys_addr is %08lx\n", phys_addr);
427 if (remap_pfn_range(vma, vma->vm_start, phys_addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
428 edbg("SEP Driver remap_page_range failed\n");
429 printk(KERN_WARNING "SEP Driver remap_page_range failed\n");
430 return -EAGAIN;
433 dbg("SEP Driver:<-------- mmap end\n");
435 return 0;
439 /*-----------------------------------------------
440 poll function
441 *----------------------------------------------*/
442 static unsigned int sep_poll(struct file *filp, poll_table * wait)
444 unsigned long count;
445 unsigned int mask = 0;
446 unsigned long retVal = 0; /* flow id */
448 dbg("---------->SEP Driver poll: start\n");
451 #if SEP_DRIVER_POLLING_MODE
453 while (sep_dev->send_ct != (retVal & 0x7FFFFFFF)) {
454 retVal = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
456 for (count = 0; count < 10 * 4; count += 4)
457 edbg("Poll Debug Word %lu of the message is %lu\n", count, *((unsigned long *) (sep_dev->shared_area + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + count)));
460 sep_dev->reply_ct++;
461 #else
462 /* add the event to the polling wait table */
463 poll_wait(filp, &sep_event, wait);
465 #endif
467 edbg("sep_dev->send_ct is %lu\n", sep_dev->send_ct);
468 edbg("sep_dev->reply_ct is %lu\n", sep_dev->reply_ct);
470 /* check if the data is ready */
471 if (sep_dev->send_ct == sep_dev->reply_ct) {
472 for (count = 0; count < 12 * 4; count += 4)
473 edbg("Sep Mesg Word %lu of the message is %lu\n", count, *((unsigned long *) (sep_dev->shared_area + count)));
475 for (count = 0; count < 10 * 4; count += 4)
476 edbg("Debug Data Word %lu of the message is %lu\n", count, *((unsigned long *) (sep_dev->shared_area + 0x1800 + count)));
478 retVal = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
479 edbg("retVal is %lu\n", retVal);
480 /* check if the this is sep reply or request */
481 if (retVal >> 31) {
482 edbg("SEP Driver: sep request in\n");
483 /* request */
484 mask |= POLLOUT | POLLWRNORM;
485 } else {
486 edbg("SEP Driver: sep reply in\n");
487 mask |= POLLIN | POLLRDNORM;
490 dbg("SEP Driver:<-------- poll exit\n");
491 return mask;
/* calculates the current time and stores it at the predefined address in the shared area */
497 static int sep_set_time(unsigned long *address_ptr, unsigned long *time_in_sec_ptr)
499 struct timeval time;
500 /* address of time in the kernel */
501 unsigned long time_addr;
504 dbg("SEP Driver:--------> sep_set_time start\n");
506 do_gettimeofday(&time);
508 /* set value in the SYSTEM MEMORY offset */
509 time_addr = sep_dev->message_shared_area_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
511 *(unsigned long *) time_addr = SEP_TIME_VAL_TOKEN;
512 *(unsigned long *) (time_addr + 4) = time.tv_sec;
514 edbg("SEP Driver:time.tv_sec is %lu\n", time.tv_sec);
515 edbg("SEP Driver:time_addr is %lu\n", time_addr);
516 edbg("SEP Driver:sep_dev->message_shared_area_addr is %lu\n", sep_dev->message_shared_area_addr);
518 /* set the output parameters if needed */
519 if (address_ptr)
520 *address_ptr = sep_shared_area_virt_to_phys(time_addr);
522 if (time_in_sec_ptr)
523 *time_in_sec_ptr = time.tv_sec;
525 dbg("SEP Driver:<-------- sep_set_time end\n");
527 return 0;
/*
  This function raises an interrupt to SEP that signals that it has a new
  command from HOST
*/
534 static void sep_send_command_handler(void)
536 unsigned long count;
538 dbg("SEP Driver:--------> sep_send_command_handler start\n");
539 sep_set_time(0, 0);
541 /* flash cache */
542 flush_cache_all();
544 for (count = 0; count < 12 * 4; count += 4)
545 edbg("Word %lu of the message is %lu\n", count, *((unsigned long *) (sep_dev->shared_area + count)));
547 /* update counter */
548 sep_dev->send_ct++;
549 /* send interrupt to SEP */
550 sep_write_reg(sep_dev, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
551 dbg("SEP Driver:<-------- sep_send_command_handler end\n");
552 return;
/*
  This function raises an interrupt to SEP that signals that it has a
  new reply command from HOST
*/
559 static void sep_send_reply_command_handler(void)
561 unsigned long count;
563 dbg("SEP Driver:--------> sep_send_reply_command_handler start\n");
565 /* flash cache */
566 flush_cache_all();
567 for (count = 0; count < 12 * 4; count += 4)
568 edbg("Word %lu of the message is %lu\n", count, *((unsigned long *) (sep_dev->shared_area + count)));
569 /* update counter */
570 sep_dev->send_ct++;
571 /* send the interrupt to SEP */
572 sep_write_reg(sep_dev, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep_dev->send_ct);
573 /* update both counters */
574 sep_dev->send_ct++;
575 sep_dev->reply_ct++;
576 dbg("SEP Driver:<-------- sep_send_reply_command_handler end\n");
/*
  This function handles the allocate data pool memory request.
  It calculates the physical address of the allocated memory, and the
  offset of this area from the mapped address.  Therefore, the FVOs in
  user space can calculate the exact virtual address of this allocated
  memory
*/
586 static int sep_allocate_data_pool_memory_handler(unsigned long arg)
588 int error;
589 struct sep_driver_alloc_t command_args;
591 dbg("SEP Driver:--------> sep_allocate_data_pool_memory_handler start\n");
593 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_alloc_t));
594 if (error)
595 goto end_function;
597 /* allocate memory */
598 if ((sep_dev->data_pool_bytes_allocated + command_args.num_bytes) > SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
599 error = -ENOTTY;
600 goto end_function;
603 /* set the virtual and physical address */
604 command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep_dev->data_pool_bytes_allocated;
605 command_args.phys_address = sep_dev->shared_area_bus + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep_dev->data_pool_bytes_allocated;
607 /* write the memory back to the user space */
608 error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_alloc_t));
609 if (error)
610 goto end_function;
612 /* set the allocation */
613 sep_dev->data_pool_bytes_allocated += command_args.num_bytes;
615 end_function:
616 dbg("SEP Driver:<-------- sep_allocate_data_pool_memory_handler end\n");
617 return error;
/* This function handles the write into allocated data pool command */
623 static int sep_write_into_data_pool_handler(unsigned long arg)
625 int error;
626 unsigned long virt_address;
627 unsigned long app_in_address;
628 unsigned long num_bytes;
629 unsigned long data_pool_area_addr;
631 dbg("SEP Driver:--------> sep_write_into_data_pool_handler start\n");
633 /* get the application address */
634 error = get_user(app_in_address, &(((struct sep_driver_write_t *) arg)->app_address));
635 if (error)
636 goto end_function;
638 /* get the virtual kernel address address */
639 error = get_user(virt_address, &(((struct sep_driver_write_t *) arg)->datapool_address));
640 if (error)
641 goto end_function;
643 /* get the number of bytes */
644 error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
645 if (error)
646 goto end_function;
648 /* calculate the start of the data pool */
649 data_pool_area_addr = sep_dev->shared_area + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
652 /* check that the range of the virtual kernel address is correct */
653 if ((virt_address < data_pool_area_addr) || (virt_address > (data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES))) {
654 error = -ENOTTY;
655 goto end_function;
657 /* copy the application data */
658 error = copy_from_user((void *) virt_address, (void *) app_in_address, num_bytes);
659 end_function:
660 dbg("SEP Driver:<-------- sep_write_into_data_pool_handler end\n");
661 return error;
/* this function handles the read from data pool command */
667 static int sep_read_from_data_pool_handler(unsigned long arg)
669 int error;
670 /* virtual address of dest application buffer */
671 unsigned long app_out_address;
672 /* virtual address of the data pool */
673 unsigned long virt_address;
674 unsigned long num_bytes;
675 unsigned long data_pool_area_addr;
677 dbg("SEP Driver:--------> sep_read_from_data_pool_handler start\n");
679 /* get the application address */
680 error = get_user(app_out_address, &(((struct sep_driver_write_t *) arg)->app_address));
681 if (error)
682 goto end_function;
684 /* get the virtual kernel address address */
685 error = get_user(virt_address, &(((struct sep_driver_write_t *) arg)->datapool_address));
686 if (error)
687 goto end_function;
689 /* get the number of bytes */
690 error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
691 if (error)
692 goto end_function;
694 /* calculate the start of the data pool */
695 data_pool_area_addr = sep_dev->shared_area + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
697 /* check that the range of the virtual kernel address is correct */
698 if ((virt_address < data_pool_area_addr) || (virt_address > (data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES))) {
699 error = -ENOTTY;
700 goto end_function;
703 /* copy the application data */
704 error = copy_to_user((void *) app_out_address, (void *) virt_address, num_bytes);
705 end_function:
706 dbg("SEP Driver:<-------- sep_read_from_data_pool_handler end\n");
707 return error;
/*
  This function releases all the application virtual buffer physical pages,
  that were previously locked
*/
/*
  Release all the pinned pages of a previously locked application
  buffer.  When dirtyFlag is set the pages were written to (the out
  array), so each non-reserved page is marked dirty before being
  released.  The page array itself is freed.  Always returns 0.
*/
static int sep_free_dma_pages(struct page **page_array_ptr, unsigned long num_pages, unsigned long dirtyFlag)
{
	unsigned long count;

	/* the two nearly identical loops of the original are merged; the
	   dirty marking is simply conditional on dirtyFlag */
	for (count = 0; count < num_pages; count++) {
		/* the out array was written, therefore the data was changed */
		if (dirtyFlag && !PageReserved(page_array_ptr[count]))
			SetPageDirty(page_array_ptr[count]);
		page_cache_release(page_array_ptr[count]);
	}

	/* free the array - kfree(NULL) is a no-op, so the previous NULL
	   check was redundant */
	kfree(page_array_ptr);

	return 0;
}
/*
  This function locks all the physical pages of the kernel virtual buffer
  and constructs a basic lli array, where each entry holds the physical
  page address and the size that application data holds in this physical page
*/
744 static int sep_lock_kernel_pages(unsigned long kernel_virt_addr, unsigned long data_size, unsigned long *num_pages_ptr, struct sep_lli_entry_t **lli_array_ptr, struct page ***page_array_ptr)
746 int error = 0;
747 /* the the page of the end address of the user space buffer */
748 unsigned long end_page;
749 /* the page of the start address of the user space buffer */
750 unsigned long start_page;
751 /* the range in pages */
752 unsigned long num_pages;
753 struct sep_lli_entry_t *lli_array;
754 /* next kernel address to map */
755 unsigned long next_kernel_address;
756 unsigned long count;
758 dbg("SEP Driver:--------> sep_lock_kernel_pages start\n");
760 /* set start and end pages and num pages */
761 end_page = (kernel_virt_addr + data_size - 1) >> PAGE_SHIFT;
762 start_page = kernel_virt_addr >> PAGE_SHIFT;
763 num_pages = end_page - start_page + 1;
765 edbg("SEP Driver: kernel_virt_addr is %08lx\n", kernel_virt_addr);
766 edbg("SEP Driver: data_size is %lu\n", data_size);
767 edbg("SEP Driver: start_page is %lx\n", start_page);
768 edbg("SEP Driver: end_page is %lx\n", end_page);
769 edbg("SEP Driver: num_pages is %lu\n", num_pages);
771 lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
772 if (!lli_array) {
773 edbg("SEP Driver: kmalloc for lli_array failed\n");
774 error = -ENOMEM;
775 goto end_function;
778 /* set the start address of the first page - app data may start not at
779 the beginning of the page */
780 lli_array[0].physical_address = (unsigned long) virt_to_phys((unsigned long *) kernel_virt_addr);
782 /* check that not all the data is in the first page only */
783 if ((PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK))) >= data_size)
784 lli_array[0].block_size = data_size;
785 else
786 lli_array[0].block_size = PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK));
788 /* debug print */
789 dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
791 /* advance the address to the start of the next page */
792 next_kernel_address = (kernel_virt_addr & PAGE_MASK) + PAGE_SIZE;
794 /* go from the second page to the prev before last */
795 for (count = 1; count < (num_pages - 1); count++) {
796 lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
797 lli_array[count].block_size = PAGE_SIZE;
799 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
800 next_kernel_address += PAGE_SIZE;
803 /* if more then 1 pages locked - then update for the last page size needed */
804 if (num_pages > 1) {
805 /* update the address of the last page */
806 lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
808 /* set the size of the last page */
809 lli_array[count].block_size = (kernel_virt_addr + data_size) & (~PAGE_MASK);
811 if (lli_array[count].block_size == 0) {
812 dbg("app_virt_addr is %08lx\n", kernel_virt_addr);
813 dbg("data_size is %lu\n", data_size);
814 while (1);
817 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
819 /* set output params */
820 *lli_array_ptr = lli_array;
821 *num_pages_ptr = num_pages;
822 *page_array_ptr = 0;
823 end_function:
824 dbg("SEP Driver:<-------- sep_lock_kernel_pages end\n");
825 return 0;
/*
  This function locks all the physical pages of the application virtual
  buffer and constructs a basic lli array, where each entry holds the
  physical page address and the size that application data holds in this
  physical page
*/
833 static int sep_lock_user_pages(unsigned long app_virt_addr, unsigned long data_size, unsigned long *num_pages_ptr, struct sep_lli_entry_t **lli_array_ptr, struct page ***page_array_ptr)
835 int error = 0;
836 /* the the page of the end address of the user space buffer */
837 unsigned long end_page;
838 /* the page of the start address of the user space buffer */
839 unsigned long start_page;
840 /* the range in pages */
841 unsigned long num_pages;
842 struct page **page_array;
843 struct sep_lli_entry_t *lli_array;
844 unsigned long count;
845 int result;
847 dbg("SEP Driver:--------> sep_lock_user_pages start\n");
849 /* set start and end pages and num pages */
850 end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
851 start_page = app_virt_addr >> PAGE_SHIFT;
852 num_pages = end_page - start_page + 1;
854 edbg("SEP Driver: app_virt_addr is %08lx\n", app_virt_addr);
855 edbg("SEP Driver: data_size is %lu\n", data_size);
856 edbg("SEP Driver: start_page is %lu\n", start_page);
857 edbg("SEP Driver: end_page is %lu\n", end_page);
858 edbg("SEP Driver: num_pages is %lu\n", num_pages);
860 /* allocate array of pages structure pointers */
861 page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
862 if (!page_array) {
863 edbg("SEP Driver: kmalloc for page_array failed\n");
865 error = -ENOMEM;
866 goto end_function;
869 lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
870 if (!lli_array) {
871 edbg("SEP Driver: kmalloc for lli_array failed\n");
873 error = -ENOMEM;
874 goto end_function_with_error1;
877 /* convert the application virtual address into a set of physical */
878 down_read(&current->mm->mmap_sem);
879 result = get_user_pages(current, current->mm, app_virt_addr, num_pages, 1, 0, page_array, 0);
880 up_read(&current->mm->mmap_sem);
882 /* check the number of pages locked - if not all then exit with error */
883 if (result != num_pages) {
884 dbg("SEP Driver: not all pages locked by get_user_pages\n");
886 error = -ENOMEM;
887 goto end_function_with_error2;
890 /* flush the cache */
891 for (count = 0; count < num_pages; count++)
892 flush_dcache_page(page_array[count]);
894 /* set the start address of the first page - app data may start not at
895 the beginning of the page */
896 lli_array[0].physical_address = ((unsigned long) page_to_phys(page_array[0])) + (app_virt_addr & (~PAGE_MASK));
898 /* check that not all the data is in the first page only */
899 if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
900 lli_array[0].block_size = data_size;
901 else
902 lli_array[0].block_size = PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
904 /* debug print */
905 dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
907 /* go from the second page to the prev before last */
908 for (count = 1; count < (num_pages - 1); count++) {
909 lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
910 lli_array[count].block_size = PAGE_SIZE;
912 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
915 /* if more then 1 pages locked - then update for the last page size needed */
916 if (num_pages > 1) {
917 /* update the address of the last page */
918 lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
920 /* set the size of the last page */
921 lli_array[count].block_size = (app_virt_addr + data_size) & (~PAGE_MASK);
923 if (lli_array[count].block_size == 0) {
924 dbg("app_virt_addr is %08lx\n", app_virt_addr);
925 dbg("data_size is %lu\n", data_size);
926 while (1);
928 edbg("lli_array[%lu].physical_address is %08lx, \
929 lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
932 /* set output params */
933 *lli_array_ptr = lli_array;
934 *num_pages_ptr = num_pages;
935 *page_array_ptr = page_array;
936 goto end_function;
938 end_function_with_error2:
939 /* release the cache */
940 for (count = 0; count < num_pages; count++)
941 page_cache_release(page_array[count]);
942 kfree(lli_array);
943 end_function_with_error1:
944 kfree(page_array);
945 end_function:
946 dbg("SEP Driver:<-------- sep_lock_user_pages end\n");
947 return 0;
/*
  this function calculates the size of data that can be inserted into the lli
  table from this array; the condition is that either the table is full
  (all entries are entered), or there are no more entries in the lli array
*/
956 static unsigned long sep_calculate_lli_table_max_size(struct sep_lli_entry_t *lli_in_array_ptr, unsigned long num_array_entries)
958 unsigned long table_data_size = 0;
959 unsigned long counter;
961 /* calculate the data in the out lli table if till we fill the whole
962 table or till the data has ended */
963 for (counter = 0; (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) && (counter < num_array_entries); counter++)
964 table_data_size += lli_in_array_ptr[counter].block_size;
965 return table_data_size;
/*
  this function builds one lli table from the lli_array according to
  the given size of data

  On return:
   - *num_table_entries_ptr counts the data entries written plus the info
     entry (it starts at 1 for the info entry);
   - *num_processed_entries_ptr is advanced by the number of lli_array
     entries fully consumed;
   - if the last entry was only partially consumed, lli_array_ptr[array_counter]
     is updated in place (address advanced, size reduced) so the caller can
     continue from the remainder in the next table.
*/
static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr, struct sep_lli_entry_t *lli_table_ptr, unsigned long *num_processed_entries_ptr, unsigned long *num_table_entries_ptr, unsigned long table_data_size)
{
	/* running total of data already placed into the table */
	unsigned long curr_table_data_size;
	/* counter of lli array entry */
	unsigned long array_counter;

	dbg("SEP Driver:--------> sep_build_lli_table start\n");

	/* init currrent table data size and lli array entry counter */
	curr_table_data_size = 0;
	array_counter = 0;
	/* starts at 1 so the final count includes the info entry */
	*num_table_entries_ptr = 1;

	edbg("SEP Driver:table_data_size is %lu\n", table_data_size);

	/* fill the table till table size reaches the needed amount */
	while (curr_table_data_size < table_data_size) {
		/* update the number of entries in table */
		(*num_table_entries_ptr)++;

		lli_table_ptr->physical_address = lli_array_ptr[array_counter].physical_address;
		lli_table_ptr->block_size = lli_array_ptr[array_counter].block_size;
		curr_table_data_size += lli_table_ptr->block_size;

		edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
		edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
		edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);

		/* check for overflow of the table data */
		if (curr_table_data_size > table_data_size) {
			edbg("SEP Driver:curr_table_data_size > table_data_size\n");

			/* update the size of block in the table */
			lli_table_ptr->block_size -= (curr_table_data_size - table_data_size);

			/* update the physical address in the lli array */
			lli_array_ptr[array_counter].physical_address += lli_table_ptr->block_size;

			/* update the block size left in the lli array */
			lli_array_ptr[array_counter].block_size = (curr_table_data_size - table_data_size);
		} else
			/* advance to the next entry in the lli_array */
			array_counter++;

		edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
		edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);

		/* move to the next entry in table */
		lli_table_ptr++;
	}

	/* set the info entry to default (0xffffffff marks end of chain) */
	lli_table_ptr->physical_address = 0xffffffff;
	lli_table_ptr->block_size = 0;

	edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
	edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
	edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);

	/* set the output parameter */
	*num_processed_entries_ptr += array_counter;

	edbg("SEP Driver:*num_processed_entries_ptr is %lu\n", *num_processed_entries_ptr);
	dbg("SEP Driver:<-------- sep_build_lli_table end\n");
	return;
}
/*
  this function goes over the list of the created tables and
  prints all the data (debug aid only)
*/
static void sep_debug_print_lli_tables(struct sep_lli_entry_t *lli_table_ptr, unsigned long num_table_entries, unsigned long table_data_size)
{
	unsigned long table_count;
	unsigned long entries_count;

	dbg("SEP Driver:--------> sep_debug_print_lli_tables start\n");

	table_count = 1;
	/* 0xffffffff in the info entry marks the end of the table chain */
	while ((unsigned long) lli_table_ptr != 0xffffffff) {
		edbg("SEP Driver: lli table %08lx, table_data_size is %lu\n", table_count, table_data_size);
		edbg("SEP Driver: num_table_entries is %lu\n", num_table_entries);

		/* print entries of the table (without info entry) */
		for (entries_count = 0; entries_count < num_table_entries; entries_count++, lli_table_ptr++) {
			edbg("SEP Driver:lli_table_ptr address is %08lx\n", (unsigned long) lli_table_ptr);
			edbg("SEP Driver:phys address is %08lx block size is %lu\n", lli_table_ptr->physical_address, lli_table_ptr->block_size);
		}

		/* point to the info entry (last entry of the table just printed) */
		lli_table_ptr--;

		edbg("SEP Driver:phys lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
		edbg("SEP Driver:phys lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);

		/* info entry packing: data size in low 24 bits, entry count in bits 24-31 */
		table_data_size = lli_table_ptr->block_size & 0xffffff;
		num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
		lli_table_ptr = (struct sep_lli_entry_t *)
		    (lli_table_ptr->physical_address);

		edbg("SEP Driver:phys table_data_size is %lu num_table_entries is %lu lli_table_ptr is%lu\n", table_data_size, num_table_entries, (unsigned long) lli_table_ptr);

		/* map the next table's physical address back to a shared-area virtual one */
		if ((unsigned long) lli_table_ptr != 0xffffffff)
			lli_table_ptr = (struct sep_lli_entry_t *) sep_shared_area_phys_to_virt((unsigned long) lli_table_ptr);

		table_count++;
	}
	dbg("SEP Driver:<-------- sep_debug_print_lli_tables end\n");
}
/*
  This function prepares only input DMA table for synhronic symmetric
  operations (HASH)

  Builds a chain of LLI tables in the shared area describing the (locked)
  pages of the input buffer, and returns the physical address, entry count
  and data size of the first table through the output pointers.
*/
static int sep_prepare_input_dma_table(unsigned long app_virt_addr, unsigned long data_size, unsigned long block_size, unsigned long *lli_table_ptr, unsigned long *num_entries_ptr, unsigned long *table_data_size_ptr, bool isKernelVirtualAddress)
{
	/* pointer to the info entry of the table - the last entry */
	struct sep_lli_entry_t *info_entry_ptr;
	/* array of pointers ot page */
	struct sep_lli_entry_t *lli_array_ptr;
	/* points to the first entry to be processed in the lli_in_array */
	unsigned long current_entry;
	/* num entries in the virtual buffer */
	unsigned long sep_lli_entries;
	/* lli table pointer */
	struct sep_lli_entry_t *in_lli_table_ptr;
	/* the total data in one table */
	unsigned long table_data_size;
	/* number of entries in lli table */
	unsigned long num_entries_in_table;
	/* next table address */
	unsigned long lli_table_alloc_addr;
	unsigned long result;

	dbg("SEP Driver:--------> sep_prepare_input_dma_table start\n");

	edbg("SEP Driver:data_size is %lu\n", data_size);
	edbg("SEP Driver:block_size is %lu\n", block_size);

	/* initialize the pages pointers */
	sep_dev->in_page_array = 0;
	sep_dev->in_num_pages = 0;

	if (data_size == 0) {
		/* special case - created 2 entries table with zero data */
		in_lli_table_ptr = (struct sep_lli_entry_t *) (sep_dev->shared_area + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES);
		in_lli_table_ptr->physical_address = sep_dev->shared_area + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
		in_lli_table_ptr->block_size = 0;

		/* second entry is the terminating info entry */
		in_lli_table_ptr++;
		in_lli_table_ptr->physical_address = 0xFFFFFFFF;
		in_lli_table_ptr->block_size = 0;

		*lli_table_ptr = sep_dev->shared_area_bus + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
		*num_entries_ptr = 2;
		*table_data_size_ptr = 0;

		goto end_function;
	}

	/* check if the pages are in Kernel Virtual Address layout */
	if (isKernelVirtualAddress == true)
		/* lock the pages of the kernel buffer and translate them to pages */
		result = sep_lock_kernel_pages(app_virt_addr, data_size, &sep_dev->in_num_pages, &lli_array_ptr, &sep_dev->in_page_array);
	else
		/* lock the pages of the user buffer and translate them to pages */
		result = sep_lock_user_pages(app_virt_addr, data_size, &sep_dev->in_num_pages, &lli_array_ptr, &sep_dev->in_page_array);

	/* NOTE(review): early return here skips the end-of-function trace;
	   presumably the lock helper cleaned up after itself - verify */
	if (result)
		return result;

	edbg("SEP Driver:output sep_dev->in_num_pages is %lu\n", sep_dev->in_num_pages);

	current_entry = 0;
	info_entry_ptr = 0;
	sep_lli_entries = sep_dev->in_num_pages;

	/* initiate to point after the message area */
	lli_table_alloc_addr = sep_dev->shared_area + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;

	/* loop till all the entries in in array are not processed */
	while (current_entry < sep_lli_entries) {
		/* set the new input and output tables */
		in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		/* calculate the maximum size of data for input table */
		table_data_size = sep_calculate_lli_table_max_size(&lli_array_ptr[current_entry], (sep_lli_entries - current_entry));

		/* now calculate the table size so that it will be module block size */
		table_data_size = (table_data_size / block_size) * block_size;

		edbg("SEP Driver:output table_data_size is %lu\n", table_data_size);

		/* construct input lli table */
		sep_build_lli_table(&lli_array_ptr[current_entry], in_lli_table_ptr, &current_entry, &num_entries_in_table, table_data_size);

		if (info_entry_ptr == 0) {
			/* first table: set the output parameters to physical addresses */
			*lli_table_ptr = sep_shared_area_virt_to_phys((unsigned long) in_lli_table_ptr);
			*num_entries_ptr = num_entries_in_table;
			*table_data_size_ptr = table_data_size;

			edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_ptr);
		} else {
			/* chain: update the info entry of the previous in table */
			info_entry_ptr->physical_address = sep_shared_area_virt_to_phys((unsigned long) in_lli_table_ptr);
			info_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
		}

		/* save the pointer to the info entry of the current tables */
		info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
	}

	/* print input tables */
	sep_debug_print_lli_tables((struct sep_lli_entry_t *)
				   sep_shared_area_phys_to_virt(*lli_table_ptr), *num_entries_ptr, *table_data_size_ptr);

	/* the array of the pages */
	kfree(lli_array_ptr);
end_function:
	dbg("SEP Driver:<-------- sep_prepare_input_dma_table end\n");
	return 0;
}
/*
  This function creates the input and output dma tables for
  symmetric operations (AES/DES) according to the block size from LLI arays

  Input and output table chains are built in lock-step: each iteration takes
  the smaller of the two available data sizes, rounds it down to a multiple
  of block_size, and builds one input and one output table of exactly that
  data size, chaining them to the previous tables via their info entries.
*/
static int sep_construct_dma_tables_from_lli(struct sep_lli_entry_t *lli_in_array,
					     unsigned long sep_in_lli_entries,
					     struct sep_lli_entry_t *lli_out_array,
					     unsigned long sep_out_lli_entries,
					     unsigned long block_size, unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr)
{
	/* points to the area where next lli table can be allocated */
	unsigned long lli_table_alloc_addr;
	/* input lli table */
	struct sep_lli_entry_t *in_lli_table_ptr;
	/* output lli table */
	struct sep_lli_entry_t *out_lli_table_ptr;
	/* pointer to the info entry of the table - the last entry */
	struct sep_lli_entry_t *info_in_entry_ptr;
	/* pointer to the info entry of the table - the last entry */
	struct sep_lli_entry_t *info_out_entry_ptr;
	/* points to the first entry to be processed in the lli_in_array */
	unsigned long current_in_entry;
	/* points to the first entry to be processed in the lli_out_array */
	unsigned long current_out_entry;
	/* max size of the input table */
	unsigned long in_table_data_size;
	/* max size of the output table */
	unsigned long out_table_data_size;
	/* flag te signifies if this is the first tables build from the arrays
	   NOTE(review): set below but never read in this function - verify */
	unsigned long first_table_flag;
	/* the data size that should be in table */
	unsigned long table_data_size;
	/* number of etnries in the input table */
	unsigned long num_entries_in_table;
	/* number of etnries in the output table */
	unsigned long num_entries_out_table;

	dbg("SEP Driver:--------> sep_construct_dma_tables_from_lli start\n");

	/* initiate to pint after the message area */
	lli_table_alloc_addr = sep_dev->shared_area + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;

	current_in_entry = 0;
	current_out_entry = 0;
	first_table_flag = 1;
	info_in_entry_ptr = 0;
	info_out_entry_ptr = 0;

	/* loop till all the entries in in array are not processed */
	while (current_in_entry < sep_in_lli_entries) {
		/* set the new input and output tables */
		in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		/* set the first output tables */
		out_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		/* calculate the maximum size of data for input table */
		in_table_data_size = sep_calculate_lli_table_max_size(&lli_in_array[current_in_entry], (sep_in_lli_entries - current_in_entry));

		/* calculate the maximum size of data for output table */
		out_table_data_size = sep_calculate_lli_table_max_size(&lli_out_array[current_out_entry], (sep_out_lli_entries - current_out_entry));

		edbg("SEP Driver:in_table_data_size is %lu\n", in_table_data_size);
		edbg("SEP Driver:out_table_data_size is %lu\n", out_table_data_size);

		/* check where the data is smallest */
		table_data_size = in_table_data_size;
		if (table_data_size > out_table_data_size)
			table_data_size = out_table_data_size;

		/* now calculate the table size so that it will be module block size */
		table_data_size = (table_data_size / block_size) * block_size;

		dbg("SEP Driver:table_data_size is %lu\n", table_data_size);

		/* construct input lli table */
		sep_build_lli_table(&lli_in_array[current_in_entry], in_lli_table_ptr, &current_in_entry, &num_entries_in_table, table_data_size);

		/* construct output lli table */
		sep_build_lli_table(&lli_out_array[current_out_entry], out_lli_table_ptr, &current_out_entry, &num_entries_out_table, table_data_size);

		/* if info entry is null - this is the first table built */
		if (info_in_entry_ptr == 0) {
			/* set the output parameters to physical addresses */
			*lli_table_in_ptr = sep_shared_area_virt_to_phys((unsigned long) in_lli_table_ptr);
			*in_num_entries_ptr = num_entries_in_table;
			*lli_table_out_ptr = sep_shared_area_virt_to_phys((unsigned long) out_lli_table_ptr);
			*out_num_entries_ptr = num_entries_out_table;
			*table_data_size_ptr = table_data_size;

			edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_in_ptr);
			edbg("SEP Driver:output lli_table_out_ptr is %08lx\n", *lli_table_out_ptr);
		} else {
			/* update the info entry of the previous in table */
			info_in_entry_ptr->physical_address = sep_shared_area_virt_to_phys((unsigned long) in_lli_table_ptr);
			info_in_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);

			/* update the info entry of the previous in table */
			info_out_entry_ptr->physical_address = sep_shared_area_virt_to_phys((unsigned long) out_lli_table_ptr);
			info_out_entry_ptr->block_size = ((num_entries_out_table) << 24) | (table_data_size);
		}

		/* save the pointer to the info entry of the current tables */
		info_in_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
		info_out_entry_ptr = out_lli_table_ptr + num_entries_out_table - 1;

		edbg("SEP Driver:output num_entries_out_table is %lu\n", (unsigned long) num_entries_out_table);
		edbg("SEP Driver:output info_in_entry_ptr is %lu\n", (unsigned long) info_in_entry_ptr);
		edbg("SEP Driver:output info_out_entry_ptr is %lu\n", (unsigned long) info_out_entry_ptr);
	}

	/* print input tables */
	sep_debug_print_lli_tables((struct sep_lli_entry_t *)
				   sep_shared_area_phys_to_virt(*lli_table_in_ptr), *in_num_entries_ptr, *table_data_size_ptr);
	/* print output tables */
	sep_debug_print_lli_tables((struct sep_lli_entry_t *)
				   sep_shared_area_phys_to_virt(*lli_table_out_ptr), *out_num_entries_ptr, *table_data_size_ptr);
	dbg("SEP Driver:<-------- sep_construct_dma_tables_from_lli end\n");
	return 0;
}
/*
  This function builds input and output DMA tables for synhronic
  symmetric operations (AES, DES). It also checks that each table
  is of the modular block size

  Locks the input and output buffers (kernel or user addressed), then
  delegates the table construction to sep_construct_dma_tables_from_lli.
  On any path the lli arrays allocated by the lock helpers are freed via
  the goto-cleanup ladder at the bottom.
*/
static int sep_prepare_input_output_dma_table(unsigned long app_virt_in_addr,
					      unsigned long app_virt_out_addr,
					      unsigned long data_size,
					      unsigned long block_size,
					      unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr, bool isKernelVirtualAddress)
{
	/* array of pointers of page */
	struct sep_lli_entry_t *lli_in_array;
	/* array of pointers of page */
	struct sep_lli_entry_t *lli_out_array;
	int result = 0;

	dbg("SEP Driver:--------> sep_prepare_input_output_dma_table start\n");

	/* initialize the pages pointers */
	sep_dev->in_page_array = 0;
	sep_dev->out_page_array = 0;

	/* check if the pages are in Kernel Virtual Address layout */
	if (isKernelVirtualAddress == true) {
		/* lock the pages of the kernel buffer and translate them to pages */
		result = sep_lock_kernel_pages(app_virt_in_addr, data_size, &sep_dev->in_num_pages, &lli_in_array, &sep_dev->in_page_array);
		if (result) {
			edbg("SEP Driver: sep_lock_kernel_pages for input virtual buffer failed\n");
			goto end_function;
		}
	} else {
		/* lock the pages of the user buffer and translate them to pages */
		result = sep_lock_user_pages(app_virt_in_addr, data_size, &sep_dev->in_num_pages, &lli_in_array, &sep_dev->in_page_array);
		if (result) {
			edbg("SEP Driver: sep_lock_user_pages for input virtual buffer failed\n");
			goto end_function;
		}
	}

	/* same for the output buffer */
	if (isKernelVirtualAddress == true) {
		result = sep_lock_kernel_pages(app_virt_out_addr, data_size, &sep_dev->out_num_pages, &lli_out_array, &sep_dev->out_page_array);
		if (result) {
			edbg("SEP Driver: sep_lock_kernel_pages for output virtual buffer failed\n");
			goto end_function_with_error1;
		}
	} else {
		result = sep_lock_user_pages(app_virt_out_addr, data_size, &sep_dev->out_num_pages, &lli_out_array, &sep_dev->out_page_array);
		if (result) {
			edbg("SEP Driver: sep_lock_user_pages for output virtual buffer failed\n");
			goto end_function_with_error1;
		}
	}

	edbg("sep_dev->in_num_pages is %lu\n", sep_dev->in_num_pages);
	edbg("sep_dev->out_num_pages is %lu\n", sep_dev->out_num_pages);
	edbg("SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n", SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);

	/* call the fucntion that creates table from the lli arrays */
	result = sep_construct_dma_tables_from_lli(lli_in_array, sep_dev->in_num_pages, lli_out_array, sep_dev->out_num_pages, block_size, lli_table_in_ptr, lli_table_out_ptr, in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
	if (result) {
		edbg("SEP Driver: sep_construct_dma_tables_from_lli failed\n");
		goto end_function_with_error2;
	}

	/* fall through - free the lli entry arrays */
	dbg("in_num_entries_ptr is %08lx\n", *in_num_entries_ptr);
	dbg("out_num_entries_ptr is %08lx\n", *out_num_entries_ptr);
	dbg("table_data_size_ptr is %08lx\n", *table_data_size_ptr);
end_function_with_error2:
	kfree(lli_out_array);
end_function_with_error1:
	kfree(lli_in_array);
end_function:
	dbg("SEP Driver:<-------- sep_prepare_input_output_dma_table end result = %d\n", (int) result);
	return result;
}
1407 this function handles tha request for creation of the DMA table
1408 for the synchronic symmetric operations (AES,DES)
1410 static int sep_create_sync_dma_tables_handler(unsigned long arg)
1412 int error;
1413 /* command arguments */
1414 struct sep_driver_build_sync_table_t command_args;
1416 dbg("SEP Driver:--------> sep_create_sync_dma_tables_handler start\n");
1418 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_sync_table_t));
1419 if (error)
1420 goto end_function;
1422 edbg("app_in_address is %08lx\n", command_args.app_in_address);
1423 edbg("app_out_address is %08lx\n", command_args.app_out_address);
1424 edbg("data_size is %lu\n", command_args.data_in_size);
1425 edbg("block_size is %lu\n", command_args.block_size);
1427 /* check if we need to build only input table or input/output */
1428 if (command_args.app_out_address)
1429 /* prepare input and output tables */
1430 error = sep_prepare_input_output_dma_table(command_args.app_in_address,
1431 command_args.app_out_address,
1432 command_args.data_in_size,
1433 command_args.block_size,
1434 &command_args.in_table_address,
1435 &command_args.out_table_address, &command_args.in_table_num_entries, &command_args.out_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
1436 else
1437 /* prepare input tables */
1438 error = sep_prepare_input_dma_table(command_args.app_in_address,
1439 command_args.data_in_size, command_args.block_size, &command_args.in_table_address, &command_args.in_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
1441 if (error)
1442 goto end_function;
1443 /* copy to user */
1444 error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_build_sync_table_t));
1445 end_function:
1446 dbg("SEP Driver:<-------- sep_create_sync_dma_tables_handler end\n");
1447 return error;
1451 this function handles the request for freeing dma table for synhronic actions
1453 static int sep_free_dma_table_data_handler(void)
1455 dbg("SEP Driver:--------> sep_free_dma_table_data_handler start\n");
1457 /* free input pages array */
1458 sep_free_dma_pages(sep_dev->in_page_array, sep_dev->in_num_pages, 0);
1460 /* free output pages array if needed */
1461 if (sep_dev->out_page_array)
1462 sep_free_dma_pages(sep_dev->out_page_array, sep_dev->out_num_pages, 1);
1464 /* reset all the values */
1465 sep_dev->in_page_array = 0;
1466 sep_dev->out_page_array = 0;
1467 sep_dev->in_num_pages = 0;
1468 sep_dev->out_num_pages = 0;
1469 dbg("SEP Driver:<-------- sep_free_dma_table_data_handler end\n");
1470 return 0;
1474 this function find a space for the new flow dma table
1476 static int sep_find_free_flow_dma_table_space(unsigned long **table_address_ptr)
1478 int error = 0;
1479 /* pointer to the id field of the flow dma table */
1480 unsigned long *start_table_ptr;
1481 unsigned long flow_dma_area_start_addr;
1482 unsigned long flow_dma_area_end_addr;
1483 /* maximum table size in words */
1484 unsigned long table_size_in_words;
1486 /* find the start address of the flow DMA table area */
1487 flow_dma_area_start_addr = sep_dev->shared_area + SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1489 /* set end address of the flow table area */
1490 flow_dma_area_end_addr = flow_dma_area_start_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES;
1492 /* set table size in words */
1493 table_size_in_words = SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE * (sizeof(struct sep_lli_entry_t) / sizeof(long)) + 2;
1495 /* set the pointer to the start address of DMA area */
1496 start_table_ptr = (unsigned long *) flow_dma_area_start_addr;
1498 /* find the space for the next table */
1499 while (((*start_table_ptr & 0x7FFFFFFF) != 0) && ((unsigned long) start_table_ptr < flow_dma_area_end_addr))
1500 start_table_ptr += table_size_in_words;
1502 /* check if we reached the end of floa tables area */
1503 if ((unsigned long) start_table_ptr >= flow_dma_area_end_addr)
1504 error = -1;
1505 else
1506 *table_address_ptr = start_table_ptr;
1508 return error;
/*
  This function creates one DMA table for flow and returns its data,
  and pointer to its info entry

  Layout of a flow table slot (words): [0] = number of entries (marks the
  slot taken), [1] = pointer to the locked pages array, [2..] = the LLI
  entries followed by the info entry.
  NOTE(review): flow_data_ptr is not referenced in this function body -
  verify whether it is needed at all.
*/
static int sep_prepare_one_flow_dma_table(unsigned long virt_buff_addr, unsigned long virt_buff_size, struct sep_lli_entry_t *table_data, struct sep_lli_entry_t **info_entry_ptr, struct sep_flow_context_t *flow_data_ptr, bool isKernelVirtualAddress)
{
	int error;
	/* the range in pages */
	unsigned long lli_array_size;
	struct sep_lli_entry_t *lli_array;
	struct sep_lli_entry_t *flow_dma_table_entry_ptr;
	unsigned long *start_dma_table_ptr;
	/* total table data counter */
	unsigned long dma_table_data_count;
	/* pointer that will keep the pointer to the pages of the virtual buffer */
	struct page **page_array_ptr;
	unsigned long entry_count;

	/* find the space for the new table */
	error = sep_find_free_flow_dma_table_space(&start_dma_table_ptr);
	if (error)
		goto end_function;

	/* check if the pages are in Kernel Virtual Address layout */
	if (isKernelVirtualAddress == true)
		/* lock kernel buffer in the memory */
		error = sep_lock_kernel_pages(virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
	else
		/* lock user buffer in the memory */
		error = sep_lock_user_pages(virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);

	if (error)
		goto end_function;

	/* set the pointer to page array at the beginning of table - this table is
	   now considered taken */
	*start_dma_table_ptr = lli_array_size;

	/* point to the place of the pages pointers of the table */
	start_dma_table_ptr++;

	/* set the pages pointer */
	*start_dma_table_ptr = (unsigned long) page_array_ptr;

	/* set the pointer to the first entry */
	flow_dma_table_entry_ptr = (struct sep_lli_entry_t *) (++start_dma_table_ptr);

	/* now create the entries for table: copy each locked page's
	   address/size and accumulate the total data size */
	for (dma_table_data_count = entry_count = 0; entry_count < lli_array_size; entry_count++) {
		flow_dma_table_entry_ptr->physical_address = lli_array[entry_count].physical_address;

		flow_dma_table_entry_ptr->block_size = lli_array[entry_count].block_size;

		/* set the total data of a table */
		dma_table_data_count += lli_array[entry_count].block_size;

		flow_dma_table_entry_ptr++;
	}

	/* set the physical address (of the first LLI entry of this table) */
	table_data->physical_address = virt_to_phys(start_dma_table_ptr);

	/* set the num_entries and total data size */
	table_data->block_size = ((lli_array_size + 1) << SEP_NUM_ENTRIES_OFFSET_IN_BITS) | (dma_table_data_count);

	/* set the info entry (0xffffffff terminates the chain) */
	flow_dma_table_entry_ptr->physical_address = 0xffffffff;
	flow_dma_table_entry_ptr->block_size = 0;

	/* set the pointer to info entry */
	*info_entry_ptr = flow_dma_table_entry_ptr;

	/* the array of the lli entries */
	kfree(lli_array);
end_function:
	return error;
}
/*
  This function creates a list of tables for flow and returns the data for
  the first and last tables of the list

  first_buff_addr is expected to reference a user array of
  (address, size) pairs; each pair becomes one flow DMA table, and
  consecutive tables are chained through their info entries.
  NOTE(review): get_user() is invoked on &first_buff_addr - the address of
  the kernel local variable - rather than on first_buff_addr treated as a
  user pointer, and first_buff_addr++ advances by 1 rather than by
  sizeof(unsigned long); this looks wrong and should be verified against
  the caller's expectations.
*/
static int sep_prepare_flow_dma_tables(unsigned long num_virtual_buffers,
				       unsigned long first_buff_addr, struct sep_flow_context_t *flow_data_ptr, struct sep_lli_entry_t *first_table_data_ptr, struct sep_lli_entry_t *last_table_data_ptr, bool isKernelVirtualAddress)
{
	int error;
	unsigned long virt_buff_addr;
	unsigned long virt_buff_size;
	struct sep_lli_entry_t table_data;
	struct sep_lli_entry_t *info_entry_ptr;
	struct sep_lli_entry_t *prev_info_entry_ptr;
	unsigned long i;

	/* init vars */
	error = 0;
	prev_info_entry_ptr = 0;

	/* init the first table to default */
	table_data.physical_address = 0xffffffff;
	first_table_data_ptr->physical_address = 0xffffffff;
	table_data.block_size = 0;

	for (i = 0; i < num_virtual_buffers; i++) {
		/* get the virtual buffer address */
		error = get_user(virt_buff_addr, &first_buff_addr);
		if (error)
			goto end_function;

		/* get the virtual buffer size */
		first_buff_addr++;
		error = get_user(virt_buff_size, &first_buff_addr);
		if (error)
			goto end_function;

		/* advance the address to point to the next pair of address|size */
		first_buff_addr++;

		/* now prepare the one flow LLI table from the data */
		error = sep_prepare_one_flow_dma_table(virt_buff_addr, virt_buff_size, &table_data, &info_entry_ptr, flow_data_ptr, isKernelVirtualAddress);
		if (error)
			goto end_function;

		if (i == 0) {
			/* if this is the first table - save it to return to the user
			   application */
			*first_table_data_ptr = table_data;

			/* set the pointer to info entry */
			prev_info_entry_ptr = info_entry_ptr;
		} else {
			/* not first table - the previous table info entry should
			   be updated to chain to the table just built */
			prev_info_entry_ptr->block_size = (0x1 << SEP_INT_FLAG_OFFSET_IN_BITS) | (table_data.block_size);

			/* set the pointer to info entry */
			prev_info_entry_ptr = info_entry_ptr;
		}
	}

	/* set the last table data */
	*last_table_data_ptr = table_data;
end_function:
	return error;
}
/*
  this function goes over all the flow tables connected to the given
  table and deallocate them

  For each table in the chain it releases the locked pages recorded in the
  two header words preceding the first LLI entry, then follows the info
  entry to the next table until the 0xffffffff terminator is reached.
*/
static void sep_deallocated_flow_tables(struct sep_lli_entry_t *first_table_ptr)
{
	/* id pointer */
	unsigned long *table_ptr;
	/* end address of the flow dma area */
	unsigned long num_entries;
	unsigned long num_pages;
	struct page **pages_ptr;
	/* maximum table size in words */
	struct sep_lli_entry_t *info_entry_ptr;

	/* set the pointer to the first table */
	table_ptr = (unsigned long *) first_table_ptr->physical_address;

	/* set the num of entries */
	num_entries = (first_table_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS)
	    & SEP_NUM_ENTRIES_MASK;

	/* go over all the connected tables */
	while (*table_ptr != 0xffffffff) {
		/* get number of pages (stored one word before the pages pointer) */
		num_pages = *(table_ptr - 2);

		/* get the pointer to the pages */
		pages_ptr = (struct page **) (*(table_ptr - 1));

		/* free the pages */
		sep_free_dma_pages(pages_ptr, num_pages, 1);

		/* goto to the info entry - last entry of the current table */
		info_entry_ptr = ((struct sep_lli_entry_t *) table_ptr) + (num_entries - 1);

		/* follow the chain to the next table */
		table_ptr = (unsigned long *) info_entry_ptr->physical_address;
		num_entries = (info_entry_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
	}

	return;
}
1704 This function returns pointer to the flow data structure
1705 that contains the given id
1707 static int sep_find_flow_context(unsigned long flow_id, struct sep_flow_context_t **flow_data_ptr)
1709 unsigned long count;
1710 int error = 0;
1713 always search for flow with id default first - in case we
1714 already started working on the flow there can be no situation
1715 when 2 flows are with default flag
1717 for (count = 0; count < SEP_DRIVER_NUM_FLOWS; count++) {
1718 if (sep_dev->flows[count].flow_id == flow_id) {
1719 *flow_data_ptr = &sep_dev->flows[count];
1720 break;
1724 if (count == SEP_DRIVER_NUM_FLOWS)
1725 /* no flow found */
1726 error = -ENOMEM;
1728 return error;
1733 this function handles the request to create the DMA tables for flow
1735 static int sep_create_flow_dma_tables_handler(unsigned long arg)
1737 int error;
1738 struct sep_driver_build_flow_table_t command_args;
1739 /* first table - output */
1740 struct sep_lli_entry_t first_table_data;
1741 /* dma table data */
1742 struct sep_lli_entry_t last_table_data;
1743 /* pointer to the info entry of the previuos DMA table */
1744 struct sep_lli_entry_t *prev_info_entry_ptr;
1745 /* pointer to the flow data strucutre */
1746 struct sep_flow_context_t *flow_context_ptr;
1748 dbg("SEP Driver:--------> sep_create_flow_dma_tables_handler start\n");
1750 /* init variables */
1751 prev_info_entry_ptr = 0;
1752 first_table_data.physical_address = 0xffffffff;
1754 /* find the free structure for flow data */
1755 error = sep_find_flow_context(SEP_FREE_FLOW_ID, &flow_context_ptr);
1756 if (error)
1757 goto end_function;
1759 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_flow_table_t));
1760 if (error)
1761 goto end_function;
1763 /* create flow tables */
1764 error = sep_prepare_flow_dma_tables(command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
1765 if (error)
1766 goto end_function_with_error;
1768 /* check if flow is static */
1769 if (!command_args.flow_type)
1770 /* point the info entry of the last to the info entry of the first */
1771 last_table_data = first_table_data;
1773 /* set output params */
1774 command_args.first_table_addr = first_table_data.physical_address;
1775 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
1776 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
1778 /* send the parameters to user application */
1779 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_build_flow_table_t));
1780 if (error)
1781 goto end_function_with_error;
1783 /* all the flow created - update the flow entry with temp id */
1784 flow_context_ptr->flow_id = SEP_TEMP_FLOW_ID;
1786 /* set the processing tables data in the context */
1787 if (command_args.input_output_flag == SEP_DRIVER_IN_FLAG)
1788 flow_context_ptr->input_tables_in_process = first_table_data;
1789 else
1790 flow_context_ptr->output_tables_in_process = first_table_data;
1792 goto end_function;
1794 end_function_with_error:
1795 /* free the allocated tables */
1796 sep_deallocated_flow_tables(&first_table_data);
1797 end_function:
1798 dbg("SEP Driver:<-------- sep_create_flow_dma_tables_handler end\n");
1799 return error;
1803 this functio nhandles add tables to flow
1805 static int sep_add_flow_tables_handler(unsigned long arg)
1807 int error;
1808 unsigned long num_entries;
1809 struct sep_driver_add_flow_table_t command_args;
1810 struct sep_flow_context_t *flow_context_ptr;
1811 /* first dma table data */
1812 struct sep_lli_entry_t first_table_data;
1813 /* last dma table data */
1814 struct sep_lli_entry_t last_table_data;
1815 /* pointer to the info entry of the current DMA table */
1816 struct sep_lli_entry_t *info_entry_ptr;
1818 dbg("SEP Driver:--------> sep_add_flow_tables_handler start\n");
1820 /* get input parameters */
1821 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_flow_table_t));
1822 if (error)
1823 goto end_function;
1825 /* find the flow structure for the flow id */
1826 error = sep_find_flow_context(command_args.flow_id, &flow_context_ptr);
1827 if (error)
1828 goto end_function;
1830 /* prepare the flow dma tables */
1831 error = sep_prepare_flow_dma_tables(command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
1832 if (error)
1833 goto end_function_with_error;
1835 /* now check if there is already an existing add table for this flow */
1836 if (command_args.inputOutputFlag == SEP_DRIVER_IN_FLAG) {
1837 /* this buffer was for input buffers */
1838 if (flow_context_ptr->input_tables_flag) {
1839 /* add table already exists - add the new tables to the end
1840 of the previous */
1841 num_entries = (flow_context_ptr->last_input_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1843 info_entry_ptr = (struct sep_lli_entry_t *)
1844 (flow_context_ptr->last_input_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
1846 /* connect to list of tables */
1847 *info_entry_ptr = first_table_data;
1849 /* set the first table data */
1850 first_table_data = flow_context_ptr->first_input_table;
1851 } else {
1852 /* set the input flag */
1853 flow_context_ptr->input_tables_flag = 1;
1855 /* set the first table data */
1856 flow_context_ptr->first_input_table = first_table_data;
1858 /* set the last table data */
1859 flow_context_ptr->last_input_table = last_table_data;
1860 } else { /* this is output tables */
1862 /* this buffer was for input buffers */
1863 if (flow_context_ptr->output_tables_flag) {
1864 /* add table already exists - add the new tables to
1865 the end of the previous */
1866 num_entries = (flow_context_ptr->last_output_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1868 info_entry_ptr = (struct sep_lli_entry_t *)
1869 (flow_context_ptr->last_output_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
1871 /* connect to list of tables */
1872 *info_entry_ptr = first_table_data;
1874 /* set the first table data */
1875 first_table_data = flow_context_ptr->first_output_table;
1876 } else {
1877 /* set the input flag */
1878 flow_context_ptr->output_tables_flag = 1;
1880 /* set the first table data */
1881 flow_context_ptr->first_output_table = first_table_data;
1883 /* set the last table data */
1884 flow_context_ptr->last_output_table = last_table_data;
1887 /* set output params */
1888 command_args.first_table_addr = first_table_data.physical_address;
1889 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
1890 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
1892 /* send the parameters to user application */
1893 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_add_flow_table_t));
1894 end_function_with_error:
1895 /* free the allocated tables */
1896 sep_deallocated_flow_tables(&first_table_data);
1897 end_function:
1898 dbg("SEP Driver:<-------- sep_add_flow_tables_handler end\n");
1899 return error;
1903 this function add the flow add message to the specific flow
1905 static int sep_add_flow_tables_message_handler(unsigned long arg)
1907 int error;
1908 struct sep_driver_add_message_t command_args;
1909 struct sep_flow_context_t *flow_context_ptr;
1911 dbg("SEP Driver:--------> sep_add_flow_tables_message_handler start\n");
1913 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_message_t));
1914 if (error)
1915 goto end_function;
1917 /* check input */
1918 if (command_args.message_size_in_bytes > SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES) {
1919 error = -ENOMEM;
1920 goto end_function;
1923 /* find the flow context */
1924 error = sep_find_flow_context(command_args.flow_id, &flow_context_ptr);
1925 if (error)
1926 goto end_function;
1928 /* copy the message into context */
1929 flow_context_ptr->message_size_in_bytes = command_args.message_size_in_bytes;
1930 error = copy_from_user(flow_context_ptr->message, (void *) command_args.message_address, command_args.message_size_in_bytes);
1931 end_function:
1932 dbg("SEP Driver:<-------- sep_add_flow_tables_message_handler end\n");
1933 return error;
1938 this function returns the physical and virtual addresses of the static pool
1940 static int sep_get_static_pool_addr_handler(unsigned long arg)
1942 int error;
1943 struct sep_driver_static_pool_addr_t command_args;
1945 dbg("SEP Driver:--------> sep_get_static_pool_addr_handler start\n");
1947 /*prepare the output parameters in the struct */
1948 command_args.physical_static_address = sep_dev->shared_area_bus + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
1949 command_args.virtual_static_address = sep_dev->shared_area + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
1951 edbg("SEP Driver:physical_static_address is %08lx, virtual_static_address %08lx\n", command_args.physical_static_address, command_args.virtual_static_address);
1953 /* send the parameters to user application */
1954 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_static_pool_addr_t));
1955 dbg("SEP Driver:<-------- sep_get_static_pool_addr_handler end\n");
1956 return error;
1960 this address gets the offset of the physical address from the start
1961 of the mapped area
1963 static int sep_get_physical_mapped_offset_handler(unsigned long arg)
1965 int error;
1966 struct sep_driver_get_mapped_offset_t command_args;
1968 dbg("SEP Driver:--------> sep_get_physical_mapped_offset_handler start\n");
1970 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_get_mapped_offset_t));
1971 if (error)
1972 goto end_function;
1974 if (command_args.physical_address < sep_dev->shared_area_bus) {
1975 error = -ENOTTY;
1976 goto end_function;
1979 /*prepare the output parameters in the struct */
1980 command_args.offset = command_args.physical_address - sep_dev->shared_area_bus;
1982 edbg("SEP Driver:physical_address is %08lx, offset is %lu\n", command_args.physical_address, command_args.offset);
1984 /* send the parameters to user application */
1985 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_get_mapped_offset_t));
1986 end_function:
1987 dbg("SEP Driver:<-------- sep_get_physical_mapped_offset_handler end\n");
1988 return error;
1995 static int sep_start_handler(void)
1997 unsigned long reg_val;
1998 unsigned long error = 0;
2000 dbg("SEP Driver:--------> sep_start_handler start\n");
2002 /* wait in polling for message from SEP */
2004 reg_val = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2005 while (!reg_val);
2007 /* check the value */
2008 if (reg_val == 0x1)
2009 /* fatal error - read erro status from GPRO */
2010 error = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2011 dbg("SEP Driver:<-------- sep_start_handler end\n");
2012 return error;
2016 this function handles the request for SEP initialization
2018 static int sep_init_handler(unsigned long arg)
2020 unsigned long message_word;
2021 unsigned long *message_ptr;
2022 struct sep_driver_init_t command_args;
2023 unsigned long counter;
2024 unsigned long error;
2025 unsigned long reg_val;
2027 dbg("SEP Driver:--------> sep_init_handler start\n");
2028 error = 0;
2030 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_init_t));
2032 dbg("SEP Driver:--------> sep_init_handler - finished copy_from_user \n");
2034 if (error)
2035 goto end_function;
2037 /* PATCH - configure the DMA to single -burst instead of multi-burst */
2038 /*sep_configure_dma_burst(); */
2040 dbg("SEP Driver:--------> sep_init_handler - finished sep_configure_dma_burst \n");
2042 message_ptr = (unsigned long *) command_args.message_addr;
2044 /* set the base address of the SRAM */
2045 sep_write_reg(sep_dev, HW_SRAM_ADDR_REG_ADDR, HW_CC_SRAM_BASE_ADDRESS);
2047 for (counter = 0; counter < command_args.message_size_in_words; counter++, message_ptr++) {
2048 get_user(message_word, message_ptr);
2049 /* write data to SRAM */
2050 sep_write_reg(sep_dev, HW_SRAM_DATA_REG_ADDR, message_word);
2051 edbg("SEP Driver:message_word is %lu\n", message_word);
2052 /* wait for write complete */
2053 sep_wait_sram_write(sep_dev);
2055 dbg("SEP Driver:--------> sep_init_handler - finished getting messages from user space\n");
2056 /* signal SEP */
2057 sep_write_reg(sep_dev, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1);
2060 reg_val = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2061 while (!(reg_val & 0xFFFFFFFD));
2063 dbg("SEP Driver:--------> sep_init_handler - finished waiting for reg_val & 0xFFFFFFFD \n");
2065 /* check the value */
2066 if (reg_val == 0x1) {
2067 edbg("SEP Driver:init failed\n");
2069 error = sep_read_reg(sep_dev, 0x8060);
2070 edbg("SEP Driver:sw monitor is %lu\n", error);
2072 /* fatal error - read erro status from GPRO */
2073 error = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2074 edbg("SEP Driver:error is %lu\n", error);
2076 end_function:
2077 dbg("SEP Driver:<-------- sep_init_handler end\n");
2078 return error;
2083 this function handles the request cache and resident reallocation
2085 static int sep_realloc_cache_resident_handler(unsigned long arg)
2087 int error;
2088 unsigned long phys_cache_address;
2089 unsigned long phys_resident_address;
2090 struct sep_driver_realloc_cache_resident_t command_args;
2092 /* copy the data */
2093 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_realloc_cache_resident_t));
2094 if (error)
2095 goto end_function;
2097 /* copy cache and resident to the their intended locations */
2098 error = sep_copy_cache_resident_to_area(command_args.cache_addr, command_args.cache_size_in_bytes, command_args.resident_addr, command_args.resident_size_in_bytes, &phys_cache_address, &phys_resident_address);
2099 if (error)
2100 goto end_function;
2102 command_args.new_base_addr = sep_dev->shared_area_bus;
2104 /* find the new base address according to the lowest address between
2105 cache, resident and shared area */
2106 if (phys_resident_address < command_args.new_base_addr)
2107 command_args.new_base_addr = phys_resident_address;
2108 if (phys_cache_address < command_args.new_base_addr)
2109 command_args.new_base_addr = phys_cache_address;
2111 /* set the return parameters */
2112 command_args.new_cache_addr = phys_cache_address;
2113 command_args.new_resident_addr = phys_resident_address;
2115 /* set the new shared area */
2116 command_args.new_shared_area_addr = sep_dev->shared_area_bus;
2118 edbg("SEP Driver:command_args.new_shared_area is %08lx\n", command_args.new_shared_area_addr);
2119 edbg("SEP Driver:command_args.new_base_addr is %08lx\n", command_args.new_base_addr);
2120 edbg("SEP Driver:command_args.new_resident_addr is %08lx\n", command_args.new_resident_addr);
2121 edbg("SEP Driver:command_args.new_cache_addr is %08lx\n", command_args.new_cache_addr);
2123 /* return to user */
2124 error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_realloc_cache_resident_t));
2125 end_function:
2126 return error;
2130 this function handles the request for get time
2132 static int sep_get_time_handler(unsigned long arg)
2134 int error;
2135 struct sep_driver_get_time_t command_args;
2137 error = sep_set_time(&command_args.time_physical_address, &command_args.time_value);
2138 if (error == 0)
2139 error = copy_to_user((void __user *)arg,
2140 &command_args, sizeof(struct sep_driver_get_time_t));
2141 return error;
/*
  Handle the end-transaction request. The interrupt-mode teardown
  (mask the IMR, release the IRQ line, drop the mutex) is currently
  compiled out via "#if 0"; in the present configuration this is a
  no-op that always succeeds.
*/
static int sep_end_transaction_handler(unsigned long arg)
{
	dbg("SEP Driver:--------> sep_end_transaction_handler start\n");

#if 0				/*!SEP_DRIVER_POLLING_MODE */
	/* close IMR */
	sep_write_reg(sep_dev, HW_HOST_IMR_REG_ADDR, 0x7FFF);

	/* release IRQ line */
	free_irq(SEP_DIRVER_IRQ_NUM, &sep_dev->reg_addr);

	/* lock the sep mutex */
	mutex_unlock(&sep_mutex);
#endif

	dbg("SEP Driver:<-------- sep_end_transaction_handler end\n");

	return 0;
}
2170 This function handler the set flow id command
2172 static int sep_set_flow_id_handler(unsigned long arg)
2174 int error;
2175 unsigned long flow_id;
2176 struct sep_flow_context_t *flow_data_ptr;
2178 dbg("------------>SEP Driver: sep_set_flow_id_handler start\n");
2180 error = get_user(flow_id, &(((struct sep_driver_set_flow_id_t *) arg)->flow_id));
2181 if (error)
2182 goto end_function;
2184 /* find the flow data structure that was just used for creating new flow
2185 - its id should be default */
2186 error = sep_find_flow_context(SEP_TEMP_FLOW_ID, &flow_data_ptr);
2187 if (error)
2188 goto end_function;
2190 /* set flow id */
2191 flow_data_ptr->flow_id = flow_id;
2193 end_function:
2194 dbg("SEP Driver:<-------- sep_set_flow_id_handler end\n");
2195 return error;
2202 static int sep_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
2204 int error = 0;
2206 dbg("------------>SEP Driver: ioctl start\n");
2208 edbg("SEP Driver: cmd is %x\n", cmd);
2210 /* check that the command is for sep device */
2211 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER)
2212 error = -ENOTTY;
2214 switch (cmd) {
2215 case SEP_IOCSENDSEPCOMMAND:
2216 /* send command to SEP */
2217 sep_send_command_handler();
2218 edbg("SEP Driver: after sep_send_command_handler\n");
2219 break;
2220 case SEP_IOCSENDSEPRPLYCOMMAND:
2221 /* send reply command to SEP */
2222 sep_send_reply_command_handler();
2223 break;
2224 case SEP_IOCALLOCDATAPOLL:
2225 /* allocate data pool */
2226 error = sep_allocate_data_pool_memory_handler(arg);
2227 break;
2228 case SEP_IOCWRITEDATAPOLL:
2229 /* write data into memory pool */
2230 error = sep_write_into_data_pool_handler(arg);
2231 break;
2232 case SEP_IOCREADDATAPOLL:
2233 /* read data from data pool into application memory */
2234 error = sep_read_from_data_pool_handler(arg);
2235 break;
2236 case SEP_IOCCREATESYMDMATABLE:
2237 /* create dma table for synhronic operation */
2238 error = sep_create_sync_dma_tables_handler(arg);
2239 break;
2240 case SEP_IOCCREATEFLOWDMATABLE:
2241 /* create flow dma tables */
2242 error = sep_create_flow_dma_tables_handler(arg);
2243 break;
2244 case SEP_IOCFREEDMATABLEDATA:
2245 /* free the pages */
2246 error = sep_free_dma_table_data_handler();
2247 break;
2248 case SEP_IOCSETFLOWID:
2249 /* set flow id */
2250 error = sep_set_flow_id_handler(arg);
2251 break;
2252 case SEP_IOCADDFLOWTABLE:
2253 /* add tables to the dynamic flow */
2254 error = sep_add_flow_tables_handler(arg);
2255 break;
2256 case SEP_IOCADDFLOWMESSAGE:
2257 /* add message of add tables to flow */
2258 error = sep_add_flow_tables_message_handler(arg);
2259 break;
2260 case SEP_IOCSEPSTART:
2261 /* start command to sep */
2262 error = sep_start_handler();
2263 break;
2264 case SEP_IOCSEPINIT:
2265 /* init command to sep */
2266 error = sep_init_handler(arg);
2267 break;
2268 case SEP_IOCGETSTATICPOOLADDR:
2269 /* get the physical and virtual addresses of the static pool */
2270 error = sep_get_static_pool_addr_handler(arg);
2271 break;
2272 case SEP_IOCENDTRANSACTION:
2273 error = sep_end_transaction_handler(arg);
2274 break;
2275 case SEP_IOCREALLOCCACHERES:
2276 error = sep_realloc_cache_resident_handler(arg);
2277 break;
2278 case SEP_IOCGETMAPPEDADDROFFSET:
2279 error = sep_get_physical_mapped_offset_handler(arg);
2280 break;
2281 case SEP_IOCGETIME:
2282 error = sep_get_time_handler(arg);
2283 break;
2284 default:
2285 error = -ENOTTY;
2286 break;
2288 dbg("SEP Driver:<-------- ioctl end\n");
2289 return error;
2294 #if !SEP_DRIVER_POLLING_MODE
2296 /* handler for flow done interrupt */
2298 static void sep_flow_done_handler(struct work_struct *work)
2300 struct sep_flow_context_t *flow_data_ptr;
2302 /* obtain the mutex */
2303 mutex_lock(&sep_mutex);
2305 /* get the pointer to context */
2306 flow_data_ptr = (struct sep_flow_context_t *) work;
2308 /* free all the current input tables in sep */
2309 sep_deallocated_flow_tables(&flow_data_ptr->input_tables_in_process);
2311 /* free all the current tables output tables in SEP (if needed) */
2312 if (flow_data_ptr->output_tables_in_process.physical_address != 0xffffffff)
2313 sep_deallocated_flow_tables(&flow_data_ptr->output_tables_in_process);
2315 /* check if we have additional tables to be sent to SEP only input
2316 flag may be checked */
2317 if (flow_data_ptr->input_tables_flag) {
2318 /* copy the message to the shared RAM and signal SEP */
2319 memcpy((void *) flow_data_ptr->message, (void *) sep_dev->shared_area, flow_data_ptr->message_size_in_bytes);
2321 sep_write_reg(sep_dev, HW_HOST_HOST_SEP_GPR2_REG_ADDR, 0x2);
2323 mutex_unlock(&sep_mutex);
2326 interrupt handler function
2328 static irqreturn_t sep_inthandler(int irq, void *dev_id)
2330 irqreturn_t int_error;
2331 unsigned long error;
2332 unsigned long reg_val;
2333 unsigned long flow_id;
2334 struct sep_flow_context_t *flow_context_ptr;
2336 int_error = IRQ_HANDLED;
2338 /* read the IRR register to check if this is SEP interrupt */
2339 reg_val = sep_read_reg(sep_dev, HW_HOST_IRR_REG_ADDR);
2340 edbg("SEP Interrupt - reg is %08lx\n", reg_val);
2342 /* check if this is the flow interrupt */
2343 if (0 /*reg_val & (0x1 << 11) */ ) {
2344 /* read GPRO to find out the which flow is done */
2345 flow_id = sep_read_reg(sep_dev, HW_HOST_IRR_REG_ADDR);
2347 /* find the contex of the flow */
2348 error = sep_find_flow_context(flow_id >> 28, &flow_context_ptr);
2349 if (error)
2350 goto end_function_with_error;
2352 INIT_WORK(&flow_context_ptr->flow_wq, sep_flow_done_handler);
2354 /* queue the work */
2355 queue_work(sep_dev->flow_wq, &flow_context_ptr->flow_wq);
2357 } else {
2358 /* check if this is reply interrupt from SEP */
2359 if (reg_val & (0x1 << 13)) {
2360 /* update the counter of reply messages */
2361 sep_dev->reply_ct++;
2363 /* wake up the waiting process */
2364 wake_up(&sep_event);
2365 } else {
2366 int_error = IRQ_NONE;
2367 goto end_function;
2370 end_function_with_error:
2371 /* clear the interrupt */
2372 sep_write_reg(sep_dev, HW_HOST_ICR_REG_ADDR, reg_val);
2373 end_function:
2374 return int_error;
2377 #endif
2383 static void sep_wait_busy(struct sep_device *dev)
2385 u32 reg;
2387 do {
2388 reg = sep_read_reg(sep_dev, HW_HOST_SEP_BUSY_REG_ADDR);
2389 } while (reg);
2393 PATCH for configuring the DMA to single burst instead of multi-burst
2395 static void sep_configure_dma_burst(void)
2397 #define HW_AHB_RD_WR_BURSTS_REG_ADDR 0x0E10UL
2399 dbg("SEP Driver:<-------- sep_configure_dma_burst start \n");
2401 /* request access to registers from SEP */
2402 sep_write_reg(sep_dev, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
2404 dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (write reg) \n");
2406 sep_wait_busy(sep_dev);
2408 dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (while(revVal) wait loop) \n");
2410 /* set the DMA burst register to single burst */
2411 sep_write_reg(sep_dev, HW_AHB_RD_WR_BURSTS_REG_ADDR, 0x0UL);
2413 /* release the sep busy */
2414 sep_write_reg(sep_dev, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x0UL);
2415 sep_wait_busy(sep_dev);
2417 dbg("SEP Driver:<-------- sep_configure_dma_burst done \n");
2422 function that is activaed on the succesfull probe of the SEP device
2424 static int __devinit sep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2426 int error = 0;
2428 edbg("Sep pci probe starting\n");
2430 /* enable the device */
2431 error = pci_enable_device(pdev);
2432 if (error) {
2433 edbg("error enabling pci device\n");
2434 goto end_function;
2437 /* set the pci dev pointer */
2438 sep_dev->pdev = pdev;
2440 /* get the io memory start address */
2441 sep_dev->io_bus = pci_resource_start(pdev, 0);
2442 if (!sep_dev->io_bus) {
2443 edbg("SEP Driver error pci resource start\n");
2444 goto end_function;
2447 /* get the io memory end address */
2448 sep_dev->io_end_bus = pci_resource_end(pdev, 0);
2449 if (!sep_dev->io_end_bus) {
2450 edbg("SEP Driver error pci resource end\n");
2451 goto end_function;
2454 sep_dev->io_memory_size = sep_dev->io_end_bus - sep_dev->io_bus + 1;
2456 edbg("SEP Driver:io_bus is %08lx\n", sep_dev->io_bus);
2458 edbg("SEP Driver:io_memory_end_phyaical_address is %08lx\n", sep_dev->io_end_bus);
2460 edbg("SEP Driver:io_memory_size is %08lx\n", sep_dev->io_memory_size);
2462 sep_dev->io_addr = ioremap_nocache(sep_dev->io_bus, sep_dev->io_memory_size);
2463 if (!sep_dev->io_addr) {
2464 edbg("SEP Driver error ioremap of io memory\n");
2465 goto end_function;
2468 edbg("SEP Driver:io_addr is %p\n", sep_dev->io_addr);
2470 sep_dev->reg_addr = (void __iomem *) sep_dev->io_addr;
2473 /* set up system base address and shared memory location */
2475 sep_dev->rar_addr = kmalloc(2 * SEP_RAR_IO_MEM_REGION_SIZE, GFP_KERNEL);
2477 if (!sep_dev->rar_addr) {
2478 edbg("SEP Driver:cant kmalloc rar\n");
2479 goto end_function;
2481 /* FIXME */
2482 sep_dev->rar_bus = __pa(sep_dev->rar_addr);
2484 edbg("SEP Driver:rar_physical is %08lx\n", sep_dev->rar_bus);
2485 edbg("SEP Driver:rar_virtual is %p\n", sep_dev->rar_addr);
2487 #if !SEP_DRIVER_POLLING_MODE
2489 edbg("SEP Driver: about to write IMR and ICR REG_ADDR\n");
2491 /* clear ICR register */
2492 sep_write_reg(sep_dev, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
2494 /* set the IMR register - open only GPR 2 */
2495 sep_write_reg(sep_dev, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
2497 /* figure out our irq */
2498 /* FIXME: */
2499 error = pci_read_config_byte(pdev, PCI_INTERRUPT_LINE, (u8 *) & sep_dev->irq);
2501 edbg("SEP Driver: my irq is %d\n", sep_dev->irq);
2503 edbg("SEP Driver: about to call request_irq\n");
2504 /* get the interrupt line */
2505 error = request_irq(sep_dev->irq, sep_inthandler, IRQF_SHARED, "sep_driver", &sep_dev->reg_addr);
2506 if (error)
2507 goto end_function;
2509 goto end_function;
2510 edbg("SEP Driver: about to write IMR REG_ADDR");
2512 /* set the IMR register - open only GPR 2 */
2513 sep_write_reg(sep_dev, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
2515 #endif /* SEP_DRIVER_POLLING_MODE */
2516 end_function:
2517 return error;
2520 static struct pci_device_id sep_pci_id_tbl[] = {
2521 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080c)},
2525 MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
2527 /* field for registering driver to PCI device */
2528 static struct pci_driver sep_pci_driver = {
2529 .name = "sep_sec_driver",
2530 .id_table = sep_pci_id_tbl,
2531 .probe = sep_probe
2534 /* major and minor device numbers */
2535 static dev_t sep_devno;
2537 /* the files operations structure of the driver */
2538 static struct file_operations sep_file_operations = {
2539 .owner = THIS_MODULE,
2540 .ioctl = sep_ioctl,
2541 .poll = sep_poll,
2542 .open = sep_open,
2543 .release = sep_release,
2544 .mmap = sep_mmap,
2548 /* cdev struct of the driver */
2549 static struct cdev sep_cdev;
2552 this function registers the driver to the file system
2554 static int sep_register_driver_to_fs(void)
2556 int ret_val = alloc_chrdev_region(&sep_devno, 0, 1, "sep_sec_driver");
2557 if (ret_val) {
2558 edbg("sep_driver:major number allocation failed, retval is %d\n", ret_val);
2559 goto end_function;
2562 /* init cdev */
2563 cdev_init(&sep_cdev, &sep_file_operations);
2564 sep_cdev.owner = THIS_MODULE;
2566 /* register the driver with the kernel */
2567 ret_val = cdev_add(&sep_cdev, sep_devno, 1);
2569 if (ret_val) {
2570 edbg("sep_driver:cdev_add failed, retval is %d\n", ret_val);
2571 goto end_function_unregister_devnum;
2574 goto end_function;
2576 end_function_unregister_devnum:
2578 /* unregister dev numbers */
2579 unregister_chrdev_region(sep_devno, 1);
2581 end_function:
2582 return ret_val;
2586 /*--------------------------------------------------------------
2587 init function
2588 ----------------------------------------------------------------*/
2589 static int __init sep_init(void)
2591 int ret_val = 0;
2592 int counter;
2593 int size; /* size of memory for allocation */
2595 dbg("SEP Driver:-------->Init start\n");
2596 edbg("sep->shared_area = %lx\n", (unsigned long) &sep_dev->shared_area);
2598 /* transaction counter that coordinates the transactions between SEP
2599 and HOST */
2600 sep_dev->send_ct = 0;
2602 /* counter for the messages from sep */
2603 sep_dev->reply_ct = 0;
2605 /* counter for the number of bytes allocated in the pool
2606 for the current transaction */
2607 sep_dev->data_pool_bytes_allocated = 0;
2609 /* FIXME: Probe can occur before we are ready to survive a probe */
2610 ret_val = pci_register_driver(&sep_pci_driver);
2611 if (ret_val) {
2612 edbg("sep_driver:sep_driver_to_device failed, ret_val is %d\n", ret_val);
2613 goto end_function_unregister_from_fs;
2615 /* calculate the total size for allocation */
2616 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2617 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2619 /* allocate the shared area */
2620 if (sep_map_and_alloc_shared_area(size, &sep_dev->shared_area, &sep_dev->shared_area_bus)) {
2621 ret_val = -ENOMEM;
2622 /* allocation failed */
2623 goto end_function_unmap_io_memory;
2625 /* now set the memory regions */
2626 sep_dev->message_shared_area_addr = sep_dev->shared_area;
2628 edbg("SEP Driver: sep_dev->message_shared_area_addr is %08lx\n", sep_dev->message_shared_area_addr);
2630 #if (SEP_DRIVER_RECONFIG_MESSAGE_AREA == 1)
2631 /* send the new SHARED MESSAGE AREA to the SEP */
2632 sep_write_reg(sep_dev, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep_dev->shared_area_bus);
2634 /* poll for SEP response */
2635 retVal = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2636 while (retVal != 0xffffffff && retVal != sep_dev->shared_area_bus)
2637 retVal = sep_read_reg(sep_dev, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2639 /* check the return value (register) */
2640 if (retVal != sep_dev->shared_area_bus) {
2641 ret_val = -ENOMEM;
2642 goto end_function_deallocate_message_area;
2644 #endif
2645 /* init the flow contextes */
2646 for (counter = 0; counter < SEP_DRIVER_NUM_FLOWS; counter++)
2647 sep_dev->flows[counter].flow_id = SEP_FREE_FLOW_ID;
2649 sep_dev->flow_wq = create_singlethread_workqueue("sepflowwq");
2650 if (sep_dev->flow_wq == NULL) {
2651 ret_val = -ENOMEM;
2652 edbg("sep_driver:flow queue creation failed\n");
2653 goto end_function_deallocate_sep_shared_area;
2655 edbg("SEP Driver: create flow workqueue \n");
2657 /* register driver to fs */
2658 ret_val = sep_register_driver_to_fs();
2659 if (ret_val)
2660 goto end_function_deallocate_sep_shared_area;
2661 /* load the rom code */
2662 sep_load_rom_code();
2663 goto end_function;
2664 end_function_unregister_from_fs:
2665 /* unregister from fs */
2666 cdev_del(&sep_cdev);
2667 /* unregister dev numbers */
2668 unregister_chrdev_region(sep_devno, 1);
2669 end_function_deallocate_sep_shared_area:
2670 /* de-allocate shared area */
2671 sep_unmap_and_free_shared_area(size, sep_dev->shared_area, sep_dev->shared_area_bus);
2672 end_function_unmap_io_memory:
2673 iounmap((void *) sep_dev->reg_addr);
2674 /* release io memory region */
2675 release_mem_region(SEP_IO_MEM_REGION_START_ADDRESS, SEP_IO_MEM_REGION_SIZE);
2676 end_function:
2677 dbg("SEP Driver:<-------- Init end\n");
2678 return ret_val;
2682 /*-------------------------------------------------------------
2683 exit function
2684 --------------------------------------------------------------*/
2685 static void __exit sep_exit(void)
2687 int size;
2689 dbg("SEP Driver:--------> Exit start\n");
2691 /* unregister from fs */
2692 cdev_del(&sep_cdev);
2693 /* unregister dev numbers */
2694 unregister_chrdev_region(sep_devno, 1);
2695 /* calculate the total size for de-allocation */
2696 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2697 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2698 /* free shared area */
2699 sep_unmap_and_free_shared_area(size, sep_dev->shared_area, sep_dev->shared_area_bus);
2700 edbg("SEP Driver: free pages SEP SHARED AREA \n");
2701 iounmap((void *) sep_dev->reg_addr);
2702 edbg("SEP Driver: iounmap \n");
2703 /* release io memory region */
2704 release_mem_region(SEP_IO_MEM_REGION_START_ADDRESS, SEP_IO_MEM_REGION_SIZE);
2705 edbg("SEP Driver: release_mem_region \n");
2706 dbg("SEP Driver:<-------- Exit end\n");
2710 module_init(sep_init);
2711 module_exit(sep_exit);
2713 MODULE_LICENSE("GPL");