Staging: sep: Try and get kernel address and user address types right
[linux-2.6/linux-2.6-openrd.git] / drivers / staging / sep / sep_driver.c
blob 73a43c03802b13938c5aee93ec075e8847ee1036
/*
 *
 *  sep_main_mod.c - Security Processor Driver main group of functions
 *
 *  Copyright(c) 2009 Intel Corporation. All rights reserved.
 *  Copyright(c) 2009 Discretix. All rights reserved.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the Free
 *  Software Foundation; either version 2 of the License, or (at your option)
 *  any later version.
 *
 *  This program is distributed in the hope that it will be useful, but WITHOUT
 *  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 *  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 *  more details.
 *
 *  You should have received a copy of the GNU General Public License along with
 *  this program; if not, write to the Free Software Foundation, Inc., 59
 *  Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 *  CONTACTS:
 *
 *  Mark Allyn	mark.a.allyn@intel.com
 *
 *  CHANGES:
 *
 *  2009.06.26	Initial publish
 *
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/cdev.h>
#include <linux/kdev_t.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <asm/ioctl.h>
#include <linux/ioport.h>
#include <asm/io.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <asm/cacheflush.h>
#include "sep_driver_hw_defs.h"
#include "sep_driver_config.h"
#include "sep_driver_api.h"
#include "sep_dev.h"
#if SEP_DRIVER_ARM_DEBUG_MODE

#define  CRYS_SEP_ROM_length                  0x4000
#define  CRYS_SEP_ROM_start_address           0x8000C000UL
#define  CRYS_SEP_ROM_start_address_offset    0xC000UL
#define  SEP_ROM_BANK_register                0x80008420UL
#define  SEP_ROM_BANK_register_offset         0x8420UL
#define SEP_RAR_IO_MEM_REGION_START_ADDRESS   0x82000000

/*
 * THESE 2 definitions are specific to the board - must be
 * defined during integration
 */
#define SEP_RAR_IO_MEM_REGION_START_ADDRESS   0xFF0D0000

/* 2M size */
static void sep_load_rom_code(struct sep_device *sep)
{
	/* Index variables */
	unsigned long i, k, j;
	u32 reg;
	u32 error;
	u32 warning;

	/* Loading ROM from SEP_ROM_image.h file */
	k = sizeof(CRYS_SEP_ROM);

	edbg("SEP Driver: DX_CC_TST_SepRomLoader start\n");

	edbg("SEP Driver: k is %lu\n", k);
	edbg("SEP Driver: sep->reg_addr is %p\n", sep->reg_addr);
	edbg("SEP Driver: CRYS_SEP_ROM_start_address_offset is %p\n", CRYS_SEP_ROM_start_address_offset);
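	/*
	 * The ROM image is pushed through a banked window: each of the
	 * four banks exposes CRYS_SEP_ROM_length (0x4000) bytes, i.e.
	 * 0x1000 32-bit words, at CRYS_SEP_ROM_start_address_offset.
	 * k counts the bytes still to copy; once it reaches zero the
	 * loop indices are forced past their limits so both loops
	 * terminate early.
	 */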
	for (i = 0; i < 4; i++) {
		/* write bank */
		sep_write_reg(sep, SEP_ROM_BANK_register_offset, i);

		for (j = 0; j < CRYS_SEP_ROM_length / 4; j++) {
			sep_write_reg(sep, CRYS_SEP_ROM_start_address_offset + 4 * j, CRYS_SEP_ROM[i * 0x1000 + j]);

			k = k - 4;

			if (k == 0) {
				j = CRYS_SEP_ROM_length;
				i = 4;
			}
		}
	}

	/* reset the SEP */
	sep_write_reg(sep, HW_HOST_SEP_SW_RST_REG_ADDR, 0x1);

	/* poll for SEP ROM boot finish */
	do
		reg = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
	while (!reg);

	edbg("SEP Driver: ROM polling ended\n");

	switch (reg) {
	case 0x1:
		/* fatal error - read error status from GPR0 */
		error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
		edbg("SEP Driver: ROM polling case 1\n");
		break;
	case 0x4:
		/* Cold boot ended successfully */
	case 0x8:
		/* Warmboot ended successfully */
	case 0x10:
		/* ColdWarm boot ended successfully */
		error = 0;
	case 0x2:
		/* Boot First Phase ended */
		warning = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
	case 0x20:
		edbg("SEP Driver: ROM polling case %d\n", reg);
		break;
	}
}
#else
static void sep_load_rom_code(struct sep_device *sep) { }
#endif				/* SEP_DRIVER_ARM_DEBUG_MODE */
/*----------------------------------------
	DEFINES
-----------------------------------------*/

#define INT_MODULE_PARM(n, v) static int n = v; module_param(n, int, 0)
#define BASE_ADDRESS_FOR_SYSTEM 0xfffc0000
#define SEP_RAR_IO_MEM_REGION_SIZE 0x40000
/*--------------------------------------------
	GLOBAL variables
--------------------------------------------*/

/* debug messages level */
static int sepDebug;
module_param(sepDebug, int, 0);
MODULE_PARM_DESC(sepDebug, "Flag to enable SEP debug messages");

/* Keep this a single static object for now to keep the conversion easy */

static struct sep_device sep_instance;
static struct sep_device *sep_dev = &sep_instance;

/*
  mutex for the access to the internals of the sep driver
*/
static DEFINE_MUTEX(sep_mutex);


/* wait queue head (event) of the driver */
static DECLARE_WAIT_QUEUE_HEAD(sep_event);
/*
  This function copies the cache and resident images from their source
  location into destination memory, which is external to Linux VM and is
  given as a physical address
*/
static int sep_copy_cache_resident_to_area(struct sep_device *sep,
				unsigned long src_cache_addr,
				unsigned long cache_size_in_bytes,
				unsigned long src_resident_addr,
				unsigned long resident_size_in_bytes,
				unsigned long *dst_new_cache_addr_ptr,
				unsigned long *dst_new_resident_addr_ptr)
{
	void *resident_addr;
	void *cache_addr;
	const struct firmware *fw;

	char *cache_name = "cache.image.bin";
	char *res_name = "resident.image.bin";

	/* error */
	int error;

	/*--------------------------------
	    CODE
	-------------------------------------*/
	error = 0;

	edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
	edbg("SEP Driver:rar_physical is %08llx\n", (unsigned long long)sep->rar_bus);

	sep->rar_region_addr = (unsigned long) sep->rar_addr;

	sep->cache_bus = sep->rar_bus;
	sep->cache_addr = sep->rar_addr;

	/* load cache */
	error = request_firmware(&fw, cache_name, &sep->pdev->dev);
	if (error) {
		edbg("SEP Driver:cant request cache fw\n");
		goto end_function;
	}

	edbg("SEP Driver:cache data loc is %p\n", (void *) fw->data);
	edbg("SEP Driver:cache data size is %08zx\n", fw->size);

	memcpy(sep->cache_addr, (void *) fw->data, fw->size);

	sep->cache_size = fw->size;

	cache_addr = sep->cache_addr;

	release_firmware(fw);

	sep->resident_bus = sep->cache_bus + sep->cache_size;
	sep->resident_addr = sep->cache_addr + sep->cache_size;

	/* load resident */
	error = request_firmware(&fw, res_name, &sep->pdev->dev);
	if (error) {
		edbg("SEP Driver:cant request res fw\n");
		goto end_function;
	}

	edbg("SEP Driver:res data loc is %p\n", (void *) fw->data);
	edbg("SEP Driver:res data size is %08zx\n", fw->size);

	memcpy(sep->resident_addr, (void *) fw->data, fw->size);

	sep->resident_size = fw->size;

	release_firmware(fw);

	resident_addr = sep->resident_addr;

	edbg("SEP Driver:resident_addr (physical) is %08llx\n", (unsigned long long)sep->resident_bus);
	edbg("SEP Driver:cache_addr (physical) is %08llx\n", (unsigned long long)sep->cache_bus);

	edbg("SEP Driver:resident_addr (logical) is %p\n", resident_addr);
	edbg("SEP Driver:cache_addr (logical) is %p\n", cache_addr);

	edbg("SEP Driver:resident_size is %08lx\n", sep->resident_size);
	edbg("SEP Driver:cache_size is %08llx\n", (unsigned long long)sep->cache_size);

	/* physical addresses */
	*dst_new_cache_addr_ptr = sep->cache_bus;
	*dst_new_resident_addr_ptr = sep->resident_bus;
end_function:
	return error;
}
/*
  This function maps and allocates the
  shared area on the external RAM (device)
  The input is shared_area_size - the size of the memory to
  allocate. The outputs
  are kernel_shared_area_addr_ptr - the kernel
  address of the mapped and allocated
  shared area, and phys_shared_area_addr_ptr
  - the physical address of the shared area
*/
static int sep_map_and_alloc_shared_area(struct sep_device *sep,
					 unsigned long shared_area_size)
{
	/* shared_addr = ioremap_nocache(0xda00000, shared_area_size); */
	sep->shared_addr = kmalloc(shared_area_size, GFP_KERNEL);
	if (!sep->shared_addr) {
		edbg("sep_driver:shared memory kmalloc failed\n");
		return -1;
	}
	/* FIXME */
	sep->shared_bus = __pa(sep->shared_addr);
	/* shared_bus = 0xda00000; */
	sep->shared_area = sep->shared_addr;
	/* set the physical address of the shared area */
	sep->shared_area_bus = sep->shared_bus;
	edbg("SEP Driver:shared_addr is %p\n", sep->shared_addr);
	edbg("SEP Driver:shared_region_size is %08lx\n", shared_area_size);
	edbg("SEP Driver:shared_physical_addr is %08llx\n", (unsigned long long)sep->shared_bus);

	return 0;
}
/*
  This function unmaps and deallocates the shared area
  on the external RAM (device)
  The inputs are shared_area_size - the size of the memory to deallocate,
  kernel_shared_area_addr_ptr - the kernel address of the mapped and
  allocated shared area, and phys_shared_area_addr_ptr - the physical
  address of the shared area
*/
static void sep_unmap_and_free_shared_area(struct sep_device *sep, int size)
{
	kfree(sep->shared_area);
}
/*
  This function returns the physical address inside the shared area according
  to the virtual address. It can be either on the external RAM device
  (ioremapped), or on the system RAM
  This implementation is for the external RAM
*/
static dma_addr_t sep_shared_area_virt_to_phys(struct sep_device *sep,
						void *virt_address)
{
	edbg("SEP Driver:sh virt to phys v %p\n", virt_address);
	edbg("SEP Driver:sh virt to phys p %08llx\n", (unsigned long long)(sep->shared_bus + (virt_address - sep->shared_addr)));

	return sep->shared_bus + (virt_address - sep->shared_addr);
}
/*
  This function returns the virtual address inside the shared area
  according to the physical address. It can be either on the
  external RAM device (ioremapped), or on the system RAM. This implementation
  is for the external RAM
*/
static void *sep_shared_area_phys_to_virt(struct sep_device *sep,
						dma_addr_t phys_address)
{
	return sep->shared_addr + (phys_address - sep->shared_bus);
}
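/*
 * Note: the two helpers above are exact inverses over the shared area;
 * both rely on the area being physically contiguous. For example, with
 * shared_addr = 0xffffc000 and shared_bus = 0x1000000 (illustrative
 * values only), virtual address 0xffffc010 maps to bus address
 * 0x1000010 and back.
 */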
/*----------------------------------------------------------------------
  open function of the character driver - must only lock the mutex
	must also release the memory data pool allocations
------------------------------------------------------------------------*/
static int sep_open(struct inode *inode, struct file *filp)
{
	int error = 0;

	dbg("SEP Driver:--------> open start\n");

	/* check the blocking mode */
	if (filp->f_flags & O_NDELAY) {
		/* mutex_trylock() returns 1 on success and 0 if contended,
		   so failing to take the lock is the error case here */
		if (!mutex_trylock(&sep_mutex))
			error = -EAGAIN;
	} else
		/* lock mutex */
		mutex_lock(&sep_mutex);

	/* check the error */
	if (error) {
		edbg("SEP Driver: mutex_trylock failed\n");
		goto end_function;
	}
	/* Bind to the device; we only have one, which makes it easy */
	if (sep_dev == NULL) {
		mutex_unlock(&sep_mutex);
		return -ENODEV;
	}
	filp->private_data = sep_dev;

	/* release data pool allocations */
	sep_dev->data_pool_bytes_allocated = 0;

end_function:
	dbg("SEP Driver:<-------- open end\n");
	return error;
}
/*------------------------------------------------------------
  release function
-------------------------------------------------------------*/
static int sep_release(struct inode *inode_ptr, struct file *filp)
{
	struct sep_device *sep = filp->private_data;

	dbg("----------->SEP Driver: sep_release start\n");

#if 0				/*!SEP_DRIVER_POLLING_MODE */
	/* close IMR */
	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
	/* release IRQ line */
	free_irq(SEP_DIRVER_IRQ_NUM, sep);
#endif
	/* unlock the sep mutex */
	mutex_unlock(&sep_mutex);
	dbg("SEP Driver:<-------- sep_release end\n");
	return 0;
}
/*---------------------------------------------------------------
  map function - this function maps the message shared area
-----------------------------------------------------------------*/
static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
{
	dma_addr_t phys_addr;
	struct sep_device *sep = filp->private_data;

	dbg("-------->SEP Driver: mmap start\n");

	/* check that the size of the mapped range is as the size of the message
	   shared area */
	if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
		edbg("SEP Driver mmap requested size is more than allowed\n");
		printk(KERN_WARNING "SEP Driver mmap requested size is more than allowed\n");
		printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_end);
		printk(KERN_WARNING "SEP Driver vma->vm_start is %08lx\n", vma->vm_start);
		return -EAGAIN;
	}

	edbg("SEP Driver:sep->message_shared_area_addr is %p\n", sep->message_shared_area_addr);

	/* get the physical address */
	phys_addr = sep->shared_area_bus;

	edbg("SEP Driver: phys_addr is %08llx\n", (unsigned long long)phys_addr);

	if (remap_pfn_range(vma, vma->vm_start, phys_addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
		edbg("SEP Driver remap_pfn_range failed\n");
		printk(KERN_WARNING "SEP Driver remap_pfn_range failed\n");
		return -EAGAIN;
	}

	dbg("SEP Driver:<-------- mmap end\n");
	return 0;
}
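/*
 * A minimal user-space sketch of how this mapping is consumed, assuming
 * the driver is exposed as a /dev/sep character node (the node name and
 * mapped length below are illustrative assumptions, not defined in this
 * file):
 *
 *	#include <fcntl.h>
 *	#include <sys/mman.h>
 *
 *	int fd = open("/dev/sep", O_RDWR);
 *	void *msg = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *			 MAP_SHARED, fd, 0);
 *	// msg now aliases the start of the driver's shared area
 */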
/*-----------------------------------------------
  poll function
*----------------------------------------------*/
static unsigned int sep_poll(struct file *filp, poll_table * wait)
{
	unsigned long count;
	unsigned int mask = 0;
	unsigned long retVal = 0;	/* flow id */
	struct sep_device *sep = filp->private_data;

	dbg("---------->SEP Driver poll: start\n");


#if SEP_DRIVER_POLLING_MODE
	while (sep->send_ct != (retVal & 0x7FFFFFFF)) {
		retVal = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);

		for (count = 0; count < 10 * 4; count += 4)
			edbg("Poll Debug Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_area + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + count)));
	}

	sep->reply_ct++;
#else
	/* add the event to the polling wait table */
	poll_wait(filp, &sep_event, wait);

#endif

	edbg("sep->send_ct is %lu\n", sep->send_ct);
	edbg("sep->reply_ct is %lu\n", sep->reply_ct);

	/* check if the data is ready */
	if (sep->send_ct == sep->reply_ct) {
		for (count = 0; count < 12 * 4; count += 4)
			edbg("Sep Mesg Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_area + count)));

		for (count = 0; count < 10 * 4; count += 4)
			edbg("Debug Data Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_area + 0x1800 + count)));

		retVal = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
		edbg("retVal is %lu\n", retVal);
		/* check if this is a sep reply or a request */
		if (retVal >> 31) {
			edbg("SEP Driver: sep request in\n");
			/* request */
			mask |= POLLOUT | POLLWRNORM;
		} else {
			edbg("SEP Driver: sep reply in\n");
			mask |= POLLIN | POLLRDNORM;
		}
	}
	dbg("SEP Driver:<-------- poll exit\n");
	return mask;
}
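/*
 * The GPR2 protocol implied above: the SEP posts a 32-bit value in
 * which bit 31 distinguishes a device-originated request (set) from a
 * reply (clear), and the low 31 bits carry a sequence count that is
 * matched against sep->send_ct. The shared area is only ready for the
 * caller once send_ct equals reply_ct.
 */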
/*
  calculates time and sets it at the predefined address
*/
static int sep_set_time(struct sep_device *sep, unsigned long *address_ptr, unsigned long *time_in_sec_ptr)
{
	struct timeval time;
	/* address of time in the kernel */
	u32 *time_addr;


	dbg("SEP Driver:--------> sep_set_time start\n");

	do_gettimeofday(&time);

	/* set value in the SYSTEM MEMORY offset */
	time_addr = sep->message_shared_area_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;

	time_addr[0] = SEP_TIME_VAL_TOKEN;
	time_addr[1] = time.tv_sec;

	edbg("SEP Driver:time.tv_sec is %lu\n", time.tv_sec);
	edbg("SEP Driver:time_addr is %p\n", time_addr);
	edbg("SEP Driver:sep->message_shared_area_addr is %p\n", sep->message_shared_area_addr);

	/* set the output parameters if needed */
	if (address_ptr)
		*address_ptr = sep_shared_area_virt_to_phys(sep, time_addr);

	if (time_in_sec_ptr)
		*time_in_sec_ptr = time.tv_sec;

	dbg("SEP Driver:<-------- sep_set_time end\n");

	return 0;
}
/*
  This function raises an interrupt to the SEP that signals that it has a
  new command from the HOST
*/
static void sep_send_command_handler(struct sep_device *sep)
{
	unsigned long count;

	dbg("SEP Driver:--------> sep_send_command_handler start\n");
	sep_set_time(sep, 0, 0);

	/* flush the cache */
	flush_cache_all();

	for (count = 0; count < 12 * 4; count += 4)
		edbg("Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_area + count)));

	/* update counter */
	sep->send_ct++;
	/* send interrupt to SEP */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
	dbg("SEP Driver:<-------- sep_send_command_handler end\n");
	return;
}
/*
  This function raises an interrupt to the SEP that signals that it has a
  new command from the HOST
*/
static void sep_send_reply_command_handler(struct sep_device *sep)
{
	unsigned long count;

	dbg("SEP Driver:--------> sep_send_reply_command_handler start\n");

	/* flush the cache */
	flush_cache_all();
	for (count = 0; count < 12 * 4; count += 4)
		edbg("Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_area + count)));
	/* update counter */
	sep->send_ct++;
	/* send the interrupt to SEP */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct);
	/* update both counters */
	sep->send_ct++;
	sep->reply_ct++;
	dbg("SEP Driver:<-------- sep_send_reply_command_handler end\n");
}
/*
  This function handles the allocate data pool memory request
  This function calculates the physical address of the
  allocated memory, and the offset of this area from the mapped address.
  Therefore, the FVOs in user space can calculate the exact virtual
  address of this allocated memory
*/
static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
							unsigned long arg)
{
	int error;
	struct sep_driver_alloc_t command_args;

	dbg("SEP Driver:--------> sep_allocate_data_pool_memory_handler start\n");

	error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_alloc_t));
	if (error)
		goto end_function;

	/* allocate memory */
	if ((sep->data_pool_bytes_allocated + command_args.num_bytes) > SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
		/* FIXME: ENOMEM ? */
		error = -ENOTTY;
		goto end_function;
	}

	/* set the virtual and physical address */
	command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
	command_args.phys_address = sep->shared_area_bus + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;

	/* write the memory back to the user space */
	error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_alloc_t));
	if (error)
		goto end_function;

	/* set the allocation */
	sep->data_pool_bytes_allocated += command_args.num_bytes;

end_function:
	dbg("SEP Driver:<-------- sep_allocate_data_pool_memory_handler end\n");
	return error;
}
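/*
 * The data pool is a simple bump allocator carved out of the shared
 * area: data_pool_bytes_allocated only ever grows (it is reset to zero
 * in sep_open()). User space adds the returned command_args.offset to
 * the base of its mmap()ed shared area to get the virtual address of
 * the new block; e.g. with 0x100 bytes already allocated, the next
 * allocation starts at SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + 0x100.
 */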
/*
  This function handles the write into allocated data pool command
*/
static int sep_write_into_data_pool_handler(struct sep_device *sep, unsigned long arg)
{
	int error;
	void *virt_address;
	unsigned long va;
	unsigned long app_in_address;
	unsigned long num_bytes;
	void *data_pool_area_addr;

	dbg("SEP Driver:--------> sep_write_into_data_pool_handler start\n");

	/* get the application address */
	error = get_user(app_in_address, &(((struct sep_driver_write_t *) arg)->app_address));
	if (error)
		goto end_function;

	/* get the virtual kernel address */
	error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
	if (error)
		goto end_function;
	virt_address = (void *)va;

	/* get the number of bytes */
	error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
	if (error)
		goto end_function;

	/* calculate the start of the data pool */
	data_pool_area_addr = sep->shared_area + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;


	/* check that the range of the virtual kernel address is correct */
	if (virt_address < data_pool_area_addr || virt_address > (data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)) {
		/* FIXME: EINVAL ? */
		error = -ENOTTY;
		goto end_function;
	}
	/* copy the application data */
	error = copy_from_user(virt_address, (void *) app_in_address, num_bytes);
end_function:
	dbg("SEP Driver:<-------- sep_write_into_data_pool_handler end\n");
	return error;
}
/*
  this function handles the read from data pool command
*/
static int sep_read_from_data_pool_handler(struct sep_device *sep, unsigned long arg)
{
	int error;
	/* virtual address of the dest application buffer */
	unsigned long app_out_address;
	/* virtual address of the data pool */
	unsigned long va;
	void *virt_address;
	unsigned long num_bytes;
	void *data_pool_area_addr;

	dbg("SEP Driver:--------> sep_read_from_data_pool_handler start\n");

	/* get the application address */
	error = get_user(app_out_address, &(((struct sep_driver_write_t *) arg)->app_address));
	if (error)
		goto end_function;

	/* get the virtual kernel address */
	error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
	if (error)
		goto end_function;
	virt_address = (void *)va;

	/* get the number of bytes */
	error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
	if (error)
		goto end_function;

	/* calculate the start of the data pool */
	data_pool_area_addr = sep->shared_area + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;

	/* FIXME: These checks are incomplete all over the driver: what about
	   + len, and when doing that also overflows */
	/* check that the range of the virtual kernel address is correct */
	if (virt_address < data_pool_area_addr || virt_address > data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
		error = -ENOTTY;
		goto end_function;
	}

	/* copy the application data */
	error = copy_to_user((void *) app_out_address, virt_address, num_bytes);
end_function:
	dbg("SEP Driver:<-------- sep_read_from_data_pool_handler end\n");
	return error;
}
/*
  This function releases all the application virtual buffer physical pages
  that were previously locked
*/
static int sep_free_dma_pages(struct page **page_array_ptr, unsigned long num_pages, unsigned long dirtyFlag)
{
	unsigned long count;

	if (dirtyFlag) {
		for (count = 0; count < num_pages; count++) {
			/* the out array was written, therefore the data was changed */
			if (!PageReserved(page_array_ptr[count]))
				SetPageDirty(page_array_ptr[count]);
			page_cache_release(page_array_ptr[count]);
		}
	} else {
		/* free in pages - the data was only read, therefore no update was done
		   on those pages */
		for (count = 0; count < num_pages; count++)
			page_cache_release(page_array_ptr[count]);
	}

	if (page_array_ptr)
		/* free the array */
		kfree(page_array_ptr);

	return 0;
}
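/*
 * Only buffers the device may have written to (dirtyFlag set) need
 * SetPageDirty() before release, so the VM knows to write the data
 * back; pages the device merely read from can be dropped unchanged.
 * The PageReserved() test skips pages the VM does not manage.
 */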
/*
  This function locks all the physical pages of the kernel virtual buffer
  and constructs a basic lli array, where each entry holds the physical
  page address and the size of the application data held in that physical page
*/
static int sep_lock_kernel_pages(struct sep_device *sep,
				 unsigned long kernel_virt_addr,
				 unsigned long data_size,
				 unsigned long *num_pages_ptr,
				 struct sep_lli_entry_t **lli_array_ptr,
				 struct page ***page_array_ptr)
{
	int error = 0;
	/* the page of the end address of the user space buffer */
	unsigned long end_page;
	/* the page of the start address of the user space buffer */
	unsigned long start_page;
	/* the range in pages */
	unsigned long num_pages;
	struct sep_lli_entry_t *lli_array;
	/* next kernel address to map */
	unsigned long next_kernel_address;
	unsigned long count;

	dbg("SEP Driver:--------> sep_lock_kernel_pages start\n");

	/* set start and end pages and num pages */
	end_page = (kernel_virt_addr + data_size - 1) >> PAGE_SHIFT;
	start_page = kernel_virt_addr >> PAGE_SHIFT;
	num_pages = end_page - start_page + 1;

	edbg("SEP Driver: kernel_virt_addr is %08lx\n", kernel_virt_addr);
	edbg("SEP Driver: data_size is %lu\n", data_size);
	edbg("SEP Driver: start_page is %lx\n", start_page);
	edbg("SEP Driver: end_page is %lx\n", end_page);
	edbg("SEP Driver: num_pages is %lu\n", num_pages);

	lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
	if (!lli_array) {
		edbg("SEP Driver: kmalloc for lli_array failed\n");
		error = -ENOMEM;
		goto end_function;
	}

	/* set the start address of the first page - app data may start not at
	   the beginning of the page */
	lli_array[0].physical_address = (unsigned long) virt_to_phys((unsigned long *) kernel_virt_addr);

	/* check that not all the data is in the first page only */
	if ((PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK))) >= data_size)
		lli_array[0].block_size = data_size;
	else
		lli_array[0].block_size = PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK));

	/* debug print */
	dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);

	/* advance the address to the start of the next page */
	next_kernel_address = (kernel_virt_addr & PAGE_MASK) + PAGE_SIZE;

	/* go from the second page to the one before the last */
	for (count = 1; count < (num_pages - 1); count++) {
		lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
		lli_array[count].block_size = PAGE_SIZE;

		edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
		next_kernel_address += PAGE_SIZE;
	}

	/* if more than 1 page is locked - then update the size of the last page */
	if (num_pages > 1) {
		/* update the address of the last page */
		lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);

		/* set the size of the last page */
		lli_array[count].block_size = (kernel_virt_addr + data_size) & (~PAGE_MASK);

		if (lli_array[count].block_size == 0) {
			dbg("app_virt_addr is %08lx\n", kernel_virt_addr);
			dbg("data_size is %lu\n", data_size);
			while (1);
		}

		edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
	}

	/* set output params */
	*lli_array_ptr = lli_array;
	*num_pages_ptr = num_pages;
	*page_array_ptr = NULL;
end_function:
	dbg("SEP Driver:<-------- sep_lock_kernel_pages end\n");
	return error;
}
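/*
 * Page-span arithmetic used by sep_lock_kernel_pages() above and
 * sep_lock_user_pages() below, as a worked example (illustrative
 * numbers): with PAGE_SIZE = 4096, a buffer at 0x1800 of 0x2000 bytes
 * spans pages 1..3, so num_pages = 3; the first entry covers
 * 4096 - 0x800 = 0x800 bytes, the middle entry a full page, and the
 * last entry (0x1800 + 0x2000) & ~PAGE_MASK = 0x800 bytes.
 */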
/*
  This function locks all the physical pages of the application virtual buffer
  and constructs a basic lli array, where each entry holds the physical page
  address and the size of the application data held in that physical page
*/
static int sep_lock_user_pages(struct sep_device *sep,
			       unsigned long app_virt_addr,
			       unsigned long data_size,
			       unsigned long *num_pages_ptr,
			       struct sep_lli_entry_t **lli_array_ptr,
			       struct page ***page_array_ptr)
{
	int error = 0;
	/* the page of the end address of the user space buffer */
	unsigned long end_page;
	/* the page of the start address of the user space buffer */
	unsigned long start_page;
	/* the range in pages */
	unsigned long num_pages;
	struct page **page_array;
	struct sep_lli_entry_t *lli_array;
	unsigned long count;
	int result;

	dbg("SEP Driver:--------> sep_lock_user_pages start\n");

	/* set start and end pages and num pages */
	end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
	start_page = app_virt_addr >> PAGE_SHIFT;
	num_pages = end_page - start_page + 1;

	edbg("SEP Driver: app_virt_addr is %08lx\n", app_virt_addr);
	edbg("SEP Driver: data_size is %lu\n", data_size);
	edbg("SEP Driver: start_page is %lu\n", start_page);
	edbg("SEP Driver: end_page is %lu\n", end_page);
	edbg("SEP Driver: num_pages is %lu\n", num_pages);

	/* allocate array of pages structure pointers */
	page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
	if (!page_array) {
		edbg("SEP Driver: kmalloc for page_array failed\n");

		error = -ENOMEM;
		goto end_function;
	}

	lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
	if (!lli_array) {
		edbg("SEP Driver: kmalloc for lli_array failed\n");

		error = -ENOMEM;
		goto end_function_with_error1;
	}

	/* convert the application virtual address into a set of physical pages */
	down_read(&current->mm->mmap_sem);
	result = get_user_pages(current, current->mm, app_virt_addr, num_pages, 1, 0, page_array, NULL);
	up_read(&current->mm->mmap_sem);

	/* check the number of pages locked - if not all then exit with error */
	if (result != num_pages) {
		dbg("SEP Driver: not all pages locked by get_user_pages\n");

		error = -ENOMEM;
		goto end_function_with_error2;
	}

	/* flush the cache */
	for (count = 0; count < num_pages; count++)
		flush_dcache_page(page_array[count]);

	/* set the start address of the first page - app data may start not at
	   the beginning of the page */
	lli_array[0].physical_address = ((unsigned long) page_to_phys(page_array[0])) + (app_virt_addr & (~PAGE_MASK));

	/* check that not all the data is in the first page only */
	if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
		lli_array[0].block_size = data_size;
	else
		lli_array[0].block_size = PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));

	/* debug print */
	dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);

	/* go from the second page to the one before the last */
	for (count = 1; count < (num_pages - 1); count++) {
		lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
		lli_array[count].block_size = PAGE_SIZE;

		edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
	}

	/* if more than 1 page is locked - then update the size of the last page */
	if (num_pages > 1) {
		/* update the address of the last page */
		lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);

		/* set the size of the last page */
		lli_array[count].block_size = (app_virt_addr + data_size) & (~PAGE_MASK);

		if (lli_array[count].block_size == 0) {
			dbg("app_virt_addr is %08lx\n", app_virt_addr);
			dbg("data_size is %lu\n", data_size);
			while (1);
		}
		edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
	}

	/* set output params */
	*lli_array_ptr = lli_array;
	*num_pages_ptr = num_pages;
	*page_array_ptr = page_array;
	goto end_function;

end_function_with_error2:
	/* release the locked pages */
	for (count = 0; count < num_pages; count++)
		page_cache_release(page_array[count]);
	kfree(lli_array);
end_function_with_error1:
	kfree(page_array);
end_function:
	dbg("SEP Driver:<-------- sep_lock_user_pages end\n");
	return error;
}
/*
  this function calculates the size of data that can be inserted into the lli
  table from this array; the condition is that either the table is full
  (all entries are entered), or there are no more entries in the lli array
*/
static unsigned long sep_calculate_lli_table_max_size(struct sep_lli_entry_t *lli_in_array_ptr, unsigned long num_array_entries)
{
	unsigned long table_data_size = 0;
	unsigned long counter;

	/* calculate the data in the out lli table until we fill the whole
	   table or until the data has ended */
	for (counter = 0; (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) && (counter < num_array_entries); counter++)
		table_data_size += lli_in_array_ptr[counter].block_size;
	return table_data_size;
}
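/*
 * The loop above stops one short of SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
 * because the final slot of every table is reserved for the "info"
 * entry that either terminates the chain or links to the next table
 * (see sep_build_lli_table() below).
 */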
/*
  this function builds one lli table from the lli_array according to
  the given size of data
*/
static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr, struct sep_lli_entry_t *lli_table_ptr, unsigned long *num_processed_entries_ptr, unsigned long *num_table_entries_ptr, unsigned long table_data_size)
{
	unsigned long curr_table_data_size;
	/* counter of lli array entry */
	unsigned long array_counter;

	dbg("SEP Driver:--------> sep_build_lli_table start\n");

	/* init current table data size and lli array entry counter */
	curr_table_data_size = 0;
	array_counter = 0;
	*num_table_entries_ptr = 1;

	edbg("SEP Driver:table_data_size is %lu\n", table_data_size);

	/* fill the table until the table size reaches the needed amount */
	while (curr_table_data_size < table_data_size) {
		/* update the number of entries in table */
		(*num_table_entries_ptr)++;

		lli_table_ptr->physical_address = lli_array_ptr[array_counter].physical_address;
		lli_table_ptr->block_size = lli_array_ptr[array_counter].block_size;
		curr_table_data_size += lli_table_ptr->block_size;

		edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
		edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
		edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);

		/* check for overflow of the table data */
		if (curr_table_data_size > table_data_size) {
			edbg("SEP Driver:curr_table_data_size > table_data_size\n");

			/* update the size of the block in the table */
			lli_table_ptr->block_size -= (curr_table_data_size - table_data_size);

			/* update the physical address in the lli array */
			lli_array_ptr[array_counter].physical_address += lli_table_ptr->block_size;

			/* update the block size left in the lli array */
			lli_array_ptr[array_counter].block_size = (curr_table_data_size - table_data_size);
		} else
			/* advance to the next entry in the lli_array */
			array_counter++;

		edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
		edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);

		/* move to the next entry in table */
		lli_table_ptr++;
	}

	/* set the info entry to default */
	lli_table_ptr->physical_address = 0xffffffff;
	lli_table_ptr->block_size = 0;

	edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
	edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
	edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);

	/* set the output parameter */
	*num_processed_entries_ptr += array_counter;

	edbg("SEP Driver:*num_processed_entries_ptr is %lu\n", *num_processed_entries_ptr);
	dbg("SEP Driver:<-------- sep_build_lli_table end\n");
	return;
}
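/*
 * Table chaining format, as decoded by sep_debug_print_lli_tables()
 * below: the last ("info") entry of each table packs the entry count
 * into bits 31:24 of block_size and the table's data size into bits
 * 23:0, while physical_address points at the next table, or holds
 * 0xffffffff to terminate the chain. For example, a table of 3 entries
 * covering 0x1000 bytes carries an info block_size of 0x03001000.
 */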
/*
  this function goes over the list of the created tables and
  prints all the data
*/
static void sep_debug_print_lli_tables(struct sep_device *sep, struct sep_lli_entry_t *lli_table_ptr, unsigned long num_table_entries, unsigned long table_data_size)
{
	unsigned long table_count;
	unsigned long entries_count;

	dbg("SEP Driver:--------> sep_debug_print_lli_tables start\n");

	table_count = 1;
	while ((unsigned long) lli_table_ptr != 0xffffffff) {
		edbg("SEP Driver: lli table %08lx, table_data_size is %lu\n", table_count, table_data_size);
		edbg("SEP Driver: num_table_entries is %lu\n", num_table_entries);

		/* print entries of the table (without info entry) */
		for (entries_count = 0; entries_count < num_table_entries; entries_count++, lli_table_ptr++) {
			edbg("SEP Driver:lli_table_ptr address is %08lx\n", (unsigned long) lli_table_ptr);
			edbg("SEP Driver:phys address is %08lx block size is %lu\n", lli_table_ptr->physical_address, lli_table_ptr->block_size);
		}

		/* point to the info entry */
		lli_table_ptr--;

		edbg("SEP Driver:phys lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
		edbg("SEP Driver:phys lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);


		table_data_size = lli_table_ptr->block_size & 0xffffff;
		num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
		lli_table_ptr = (struct sep_lli_entry_t *)
		    (lli_table_ptr->physical_address);

		edbg("SEP Driver:phys table_data_size is %lu num_table_entries is %lu lli_table_ptr is %08lx\n", table_data_size, num_table_entries, (unsigned long) lli_table_ptr);

		if ((unsigned long) lli_table_ptr != 0xffffffff)
			lli_table_ptr = (struct sep_lli_entry_t *) sep_shared_area_phys_to_virt(sep, (unsigned long) lli_table_ptr);

		table_count++;
	}
	dbg("SEP Driver:<-------- sep_debug_print_lli_tables end\n");
}
/*
  This function prepares only the input DMA table for synchronous
  symmetric operations (HASH)
*/
static int sep_prepare_input_dma_table(struct sep_device *sep,
				unsigned long app_virt_addr,
				unsigned long data_size,
				unsigned long block_size,
				unsigned long *lli_table_ptr,
				unsigned long *num_entries_ptr,
				unsigned long *table_data_size_ptr,
				bool isKernelVirtualAddress)
{
	/* pointer to the info entry of the table - the last entry */
	struct sep_lli_entry_t *info_entry_ptr;
	/* array of pointers to page */
	struct sep_lli_entry_t *lli_array_ptr;
	/* points to the first entry to be processed in the lli_in_array */
	unsigned long current_entry;
	/* num entries in the virtual buffer */
	unsigned long sep_lli_entries;
	/* lli table pointer */
	struct sep_lli_entry_t *in_lli_table_ptr;
	/* the total data in one table */
	unsigned long table_data_size;
	/* number of entries in lli table */
	unsigned long num_entries_in_table;
	/* next table address */
	void *lli_table_alloc_addr;
	unsigned long result;

	dbg("SEP Driver:--------> sep_prepare_input_dma_table start\n");

	edbg("SEP Driver:data_size is %lu\n", data_size);
	edbg("SEP Driver:block_size is %lu\n", block_size);

	/* initialize the pages pointers */
	sep->in_page_array = NULL;
	sep->in_num_pages = 0;

	if (data_size == 0) {
		/* special case - create a 2 entry table with zero data */
		in_lli_table_ptr = (struct sep_lli_entry_t *) (sep->shared_area + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES);
		/* FIXME: Should the entry below not be for _bus */
		in_lli_table_ptr->physical_address = (unsigned long)sep->shared_area + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
		in_lli_table_ptr->block_size = 0;

		in_lli_table_ptr++;
		in_lli_table_ptr->physical_address = 0xFFFFFFFF;
		in_lli_table_ptr->block_size = 0;

		*lli_table_ptr = sep->shared_area_bus + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
		*num_entries_ptr = 2;
		*table_data_size_ptr = 0;

		goto end_function;
	}

	/* check if the pages are in Kernel Virtual Address layout */
	if (isKernelVirtualAddress == true)
		/* lock the pages of the kernel buffer and translate them to pages */
		result = sep_lock_kernel_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
	else
		/* lock the pages of the user buffer and translate them to pages */
		result = sep_lock_user_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);

	if (result)
		return result;

	edbg("SEP Driver:output sep->in_num_pages is %lu\n", sep->in_num_pages);

	current_entry = 0;
	info_entry_ptr = NULL;
	sep_lli_entries = sep->in_num_pages;

	/* initiate to point after the message area */
	lli_table_alloc_addr = sep->shared_area + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;

	/* loop until all the entries in the in array are processed */
	while (current_entry < sep_lli_entries) {
		/* set the new input and output tables */
		in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		/* calculate the maximum size of data for input table */
		table_data_size = sep_calculate_lli_table_max_size(&lli_array_ptr[current_entry], (sep_lli_entries - current_entry));

		/* now calculate the table size so that it will be a multiple of the block size */
		table_data_size = (table_data_size / block_size) * block_size;

		edbg("SEP Driver:output table_data_size is %lu\n", table_data_size);

		/* construct input lli table */
		sep_build_lli_table(&lli_array_ptr[current_entry], in_lli_table_ptr, &current_entry, &num_entries_in_table, table_data_size);

		if (info_entry_ptr == NULL) {
			/* set the output parameters to physical addresses */
			*lli_table_ptr = sep_shared_area_virt_to_phys(sep, in_lli_table_ptr);
			*num_entries_ptr = num_entries_in_table;
			*table_data_size_ptr = table_data_size;

			edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_ptr);
		} else {
			/* update the info entry of the previous in table */
			info_entry_ptr->physical_address = sep_shared_area_virt_to_phys(sep, in_lli_table_ptr);
			info_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
		}

		/* save the pointer to the info entry of the current tables */
		info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
	}

	/* print input tables */
	sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
				   sep_shared_area_phys_to_virt(sep, *lli_table_ptr), *num_entries_ptr, *table_data_size_ptr);

	/* the array of the pages */
	kfree(lli_array_ptr);
end_function:
	dbg("SEP Driver:<-------- sep_prepare_input_dma_table end\n");
	return 0;
}
/*
  This function creates the input and output dma tables for
  symmetric operations (AES/DES) according to the block size from LLI arrays
*/
static int sep_construct_dma_tables_from_lli(struct sep_device *sep,
				struct sep_lli_entry_t *lli_in_array,
				unsigned long sep_in_lli_entries,
				struct sep_lli_entry_t *lli_out_array,
				unsigned long sep_out_lli_entries,
				unsigned long block_size, unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr)
{
	/* points to the area where the next lli table can be allocated: keep void *
	   as there is pointer scaling to fix otherwise */
	void *lli_table_alloc_addr;
	/* input lli table */
	struct sep_lli_entry_t *in_lli_table_ptr;
	/* output lli table */
	struct sep_lli_entry_t *out_lli_table_ptr;
	/* pointer to the info entry of the table - the last entry */
	struct sep_lli_entry_t *info_in_entry_ptr;
	/* pointer to the info entry of the table - the last entry */
	struct sep_lli_entry_t *info_out_entry_ptr;
	/* points to the first entry to be processed in the lli_in_array */
	unsigned long current_in_entry;
	/* points to the first entry to be processed in the lli_out_array */
	unsigned long current_out_entry;
	/* max size of the input table */
	unsigned long in_table_data_size;
	/* max size of the output table */
	unsigned long out_table_data_size;
	/* flag that signifies if this is the first table built from the arrays */
	unsigned long first_table_flag;
	/* the data size that should be in table */
	unsigned long table_data_size;
	/* number of entries in the input table */
	unsigned long num_entries_in_table;
	/* number of entries in the output table */
	unsigned long num_entries_out_table;

	dbg("SEP Driver:--------> sep_construct_dma_tables_from_lli start\n");

	/* initiate to point after the message area */
	lli_table_alloc_addr = sep->shared_area + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;

	current_in_entry = 0;
	current_out_entry = 0;
	first_table_flag = 1;
	info_in_entry_ptr = NULL;
	info_out_entry_ptr = NULL;

	/* loop until all the entries in the in array are processed */
	while (current_in_entry < sep_in_lli_entries) {
		/* set the new input and output tables */
		in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		/* set the first output tables */
		out_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		/* calculate the maximum size of data for input table */
		in_table_data_size = sep_calculate_lli_table_max_size(&lli_in_array[current_in_entry], (sep_in_lli_entries - current_in_entry));

		/* calculate the maximum size of data for output table */
		out_table_data_size = sep_calculate_lli_table_max_size(&lli_out_array[current_out_entry], (sep_out_lli_entries - current_out_entry));

		edbg("SEP Driver:in_table_data_size is %lu\n", in_table_data_size);
		edbg("SEP Driver:out_table_data_size is %lu\n", out_table_data_size);

		/* check where the data is smallest */
		table_data_size = in_table_data_size;
		if (table_data_size > out_table_data_size)
			table_data_size = out_table_data_size;

		/* now calculate the table size so that it will be a multiple of the block size */
		table_data_size = (table_data_size / block_size) * block_size;

		dbg("SEP Driver:table_data_size is %lu\n", table_data_size);

		/* construct input lli table */
		sep_build_lli_table(&lli_in_array[current_in_entry], in_lli_table_ptr, &current_in_entry, &num_entries_in_table, table_data_size);

		/* construct output lli table */
		sep_build_lli_table(&lli_out_array[current_out_entry], out_lli_table_ptr, &current_out_entry, &num_entries_out_table, table_data_size);

		/* if info entry is null - this is the first table built */
		if (info_in_entry_ptr == NULL) {
			/* set the output parameters to physical addresses */
			*lli_table_in_ptr = sep_shared_area_virt_to_phys(sep, in_lli_table_ptr);
			*in_num_entries_ptr = num_entries_in_table;
			*lli_table_out_ptr = sep_shared_area_virt_to_phys(sep, out_lli_table_ptr);
			*out_num_entries_ptr = num_entries_out_table;
			*table_data_size_ptr = table_data_size;

			edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_in_ptr);
			edbg("SEP Driver:output lli_table_out_ptr is %08lx\n", *lli_table_out_ptr);
		} else {
			/* update the info entry of the previous in table */
			info_in_entry_ptr->physical_address = sep_shared_area_virt_to_phys(sep, in_lli_table_ptr);
			info_in_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);

			/* update the info entry of the previous out table */
			info_out_entry_ptr->physical_address = sep_shared_area_virt_to_phys(sep, out_lli_table_ptr);
			info_out_entry_ptr->block_size = ((num_entries_out_table) << 24) | (table_data_size);
		}

		/* save the pointer to the info entry of the current tables */
		info_in_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
		info_out_entry_ptr = out_lli_table_ptr + num_entries_out_table - 1;

		edbg("SEP Driver:output num_entries_out_table is %lu\n", (unsigned long) num_entries_out_table);
		edbg("SEP Driver:output info_in_entry_ptr is %lu\n", (unsigned long) info_in_entry_ptr);
		edbg("SEP Driver:output info_out_entry_ptr is %lu\n", (unsigned long) info_out_entry_ptr);
	}

	/* print input tables */
	sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
				   sep_shared_area_phys_to_virt(sep, *lli_table_in_ptr), *in_num_entries_ptr, *table_data_size_ptr);
	/* print output tables */
	sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
				   sep_shared_area_phys_to_virt(sep, *lli_table_out_ptr), *out_num_entries_ptr, *table_data_size_ptr);
	dbg("SEP Driver:<-------- sep_construct_dma_tables_from_lli end\n");
	return 0;
}
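/*
 * Rounding table_data_size down to a whole number of block_size units
 * above keeps every chained table usable as a self-contained AES/DES
 * input: e.g. if min(in, out) = 5000 bytes and block_size = 16, the
 * table carries 4992 bytes and the 8-byte remainder is picked up by
 * the next iteration.
 */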
/*
  This function builds input and output DMA tables for synchronous
  symmetric operations (AES, DES). It also checks that each table
  is of the modular block size
*/
static int sep_prepare_input_output_dma_table(struct sep_device *sep,
				unsigned long app_virt_in_addr,
				unsigned long app_virt_out_addr,
				unsigned long data_size,
				unsigned long block_size,
				unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr, bool isKernelVirtualAddress)
{
	/* array of pointers of page */
	struct sep_lli_entry_t *lli_in_array;
	/* array of pointers of page */
	struct sep_lli_entry_t *lli_out_array;
	int result = 0;

	dbg("SEP Driver:--------> sep_prepare_input_output_dma_table start\n");

	/* initialize the pages pointers */
	sep->in_page_array = NULL;
	sep->out_page_array = NULL;

	/* check if the pages are in Kernel Virtual Address layout */
	if (isKernelVirtualAddress == true) {
		/* lock the pages of the kernel buffer and translate them to pages */
		result = sep_lock_kernel_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
		if (result) {
			edbg("SEP Driver: sep_lock_kernel_pages for input virtual buffer failed\n");
			goto end_function;
		}
	} else {
		/* lock the pages of the user buffer and translate them to pages */
		result = sep_lock_user_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
		if (result) {
			edbg("SEP Driver: sep_lock_user_pages for input virtual buffer failed\n");
			goto end_function;
		}
	}

	if (isKernelVirtualAddress == true) {
		result = sep_lock_kernel_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
		if (result) {
			edbg("SEP Driver: sep_lock_kernel_pages for output virtual buffer failed\n");
			goto end_function_with_error1;
		}
	} else {
		result = sep_lock_user_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
		if (result) {
			edbg("SEP Driver: sep_lock_user_pages for output virtual buffer failed\n");
			goto end_function_with_error1;
		}
	}
	edbg("sep->in_num_pages is %lu\n", sep->in_num_pages);
	edbg("sep->out_num_pages is %lu\n", sep->out_num_pages);
	edbg("SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n", SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);


	/* call the function that creates the tables from the lli arrays */
	result = sep_construct_dma_tables_from_lli(sep, lli_in_array, sep->in_num_pages, lli_out_array, sep->out_num_pages, block_size, lli_table_in_ptr, lli_table_out_ptr, in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
	if (result) {
		edbg("SEP Driver: sep_construct_dma_tables_from_lli failed\n");
		goto end_function_with_error2;
	}

	/* fall through - free the lli entry arrays */
	dbg("in_num_entries_ptr is %08lx\n", *in_num_entries_ptr);
	dbg("out_num_entries_ptr is %08lx\n", *out_num_entries_ptr);
	dbg("table_data_size_ptr is %08lx\n", *table_data_size_ptr);
end_function_with_error2:
	kfree(lli_out_array);
end_function_with_error1:
	kfree(lli_in_array);
end_function:
	dbg("SEP Driver:<-------- sep_prepare_input_output_dma_table end result = %d\n", (int) result);
	return result;
}
/*
  this function handles the request for creation of the DMA tables
  for the synchronous symmetric operations (AES, DES)
*/
static int sep_create_sync_dma_tables_handler(struct sep_device *sep,
						unsigned long arg)
{
	int error;
	/* command arguments */
	struct sep_driver_build_sync_table_t command_args;

	dbg("SEP Driver:--------> sep_create_sync_dma_tables_handler start\n");

	error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_sync_table_t));
	if (error)
		goto end_function;

	edbg("app_in_address is %08lx\n", command_args.app_in_address);
	edbg("app_out_address is %08lx\n", command_args.app_out_address);
	edbg("data_size is %lu\n", command_args.data_in_size);
	edbg("block_size is %lu\n", command_args.block_size);

	/* check if we need to build only input table or input/output */
	if (command_args.app_out_address)
		/* prepare input and output tables */
		error = sep_prepare_input_output_dma_table(sep,
							   command_args.app_in_address,
							   command_args.app_out_address,
							   command_args.data_in_size,
							   command_args.block_size,
							   &command_args.in_table_address,
							   &command_args.out_table_address, &command_args.in_table_num_entries, &command_args.out_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
	else
		/* prepare input tables */
		error = sep_prepare_input_dma_table(sep,
						    command_args.app_in_address,
						    command_args.data_in_size, command_args.block_size, &command_args.in_table_address, &command_args.in_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);

	if (error)
		goto end_function;
	/* copy to user */
	error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_build_sync_table_t));
	/* FIXME: wrong error returned ! */
end_function:
	dbg("SEP Driver:<-------- sep_create_sync_dma_tables_handler end\n");
	return error;
}
/*
  this function handles the request for freeing the dma tables of synchronous actions
*/
static int sep_free_dma_table_data_handler(struct sep_device *sep)
{
	dbg("SEP Driver:--------> sep_free_dma_table_data_handler start\n");

	/* free input pages array */
	sep_free_dma_pages(sep->in_page_array, sep->in_num_pages, 0);

	/* free output pages array if needed */
	if (sep->out_page_array)
		sep_free_dma_pages(sep->out_page_array, sep->out_num_pages, 1);

	/* reset all the values */
	sep->in_page_array = NULL;
	sep->out_page_array = NULL;
	sep->in_num_pages = 0;
	sep->out_num_pages = 0;
	dbg("SEP Driver:<-------- sep_free_dma_table_data_handler end\n");
	return 0;
}
/*
  this function finds a space for the new flow dma table
*/
static int sep_find_free_flow_dma_table_space(struct sep_device *sep,
					unsigned long **table_address_ptr)
{
	int error = 0;
	/* pointer to the id field of the flow dma table */
	unsigned long *start_table_ptr;
	/* Do not make start_addr unsigned long * unless fixing the offset
	   computations ! */
	void *flow_dma_area_start_addr;
	unsigned long *flow_dma_area_end_addr;
	/* maximum table size in words */
	unsigned long table_size_in_words;

	/* find the start address of the flow DMA table area */
	flow_dma_area_start_addr = sep->shared_area + SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES;

	/* set end address of the flow table area */
	flow_dma_area_end_addr = flow_dma_area_start_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES;

	/* set table size in words */
	table_size_in_words = SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE * (sizeof(struct sep_lli_entry_t) / sizeof(long)) + 2;

	/* set the pointer to the start address of DMA area */
	start_table_ptr = flow_dma_area_start_addr;

	/* find the space for the next table */
	while (((*start_table_ptr & 0x7FFFFFFF) != 0) && start_table_ptr < flow_dma_area_end_addr)
		start_table_ptr += table_size_in_words;

	/* check if we reached the end of the flow tables area */
	if (start_table_ptr >= flow_dma_area_end_addr)
		error = -1;
	else
		*table_address_ptr = start_table_ptr;

	return error;
}
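/*
 * Each flow-table slot is table_size_in_words long: room for
 * SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE lli entries plus two extra
 * words, which sep_prepare_one_flow_dma_table() below uses for the
 * entry-count/"taken" marker and the pointer to the locked page array.
 * A slot whose first word is zero (ignoring the top bit) is free.
 */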
1554 This function creates one DMA table for flow and returns its data,
1555 and pointer to its info entry
1557 static int sep_prepare_one_flow_dma_table(struct sep_device *sep,
1558 unsigned long virt_buff_addr,
1559 unsigned long virt_buff_size,
1560 struct sep_lli_entry_t *table_data,
1561 struct sep_lli_entry_t **info_entry_ptr,
1562 struct sep_flow_context_t *flow_data_ptr,
1563 bool isKernelVirtualAddress)
1565 int error;
1566 /* the range in pages */
1567 unsigned long lli_array_size;
1568 struct sep_lli_entry_t *lli_array;
1569 struct sep_lli_entry_t *flow_dma_table_entry_ptr;
1570 unsigned long *start_dma_table_ptr;
1571 /* total table data counter */
1572 unsigned long dma_table_data_count;
1573 /* pointer that will keep the pointer to the pages of the virtual buffer */
1574 struct page **page_array_ptr;
1575 unsigned long entry_count;
1577 /* find the space for the new table */
1578 error = sep_find_free_flow_dma_table_space(sep, &start_dma_table_ptr);
1579 if (error)
1580 goto end_function;
1582 /* check if the pages are in Kernel Virtual Address layout */
1583 if (isKernelVirtualAddress == true)
1584 /* lock kernel buffer in the memory */
1585 error = sep_lock_kernel_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
1586 else
1587 /* lock user buffer in the memory */
1588 error = sep_lock_user_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
1590 if (error)
1591 goto end_function;
1593 /* write the entry count at the beginning of the table - this table is
1594 now considered taken */
1595 *start_dma_table_ptr = lli_array_size;
1597 /* point to the place of the pages pointers of the table */
1598 start_dma_table_ptr++;
1600 /* set the pages pointer */
1601 *start_dma_table_ptr = (unsigned long) page_array_ptr;
1603 /* set the pointer to the first entry */
1604 flow_dma_table_entry_ptr = (struct sep_lli_entry_t *) (++start_dma_table_ptr);
1606 /* now create the entries for table */
1607 for (dma_table_data_count = entry_count = 0; entry_count < lli_array_size; entry_count++) {
1608 flow_dma_table_entry_ptr->physical_address = lli_array[entry_count].physical_address;
1610 flow_dma_table_entry_ptr->block_size = lli_array[entry_count].block_size;
1612 /* set the total data of a table */
1613 dma_table_data_count += lli_array[entry_count].block_size;
1615 flow_dma_table_entry_ptr++;
1618 /* set the physical address */
1619 table_data->physical_address = virt_to_phys(start_dma_table_ptr);
1621 /* set the num_entries and total data size */
1622 table_data->block_size = ((lli_array_size + 1) << SEP_NUM_ENTRIES_OFFSET_IN_BITS) | (dma_table_data_count);
1624 /* set the info entry */
1625 flow_dma_table_entry_ptr->physical_address = 0xffffffff;
1626 flow_dma_table_entry_ptr->block_size = 0;
1628 /* set the pointer to info entry */
1629 *info_entry_ptr = flow_dma_table_entry_ptr;
1631 /* free the intermediate array of the lli entries */
1632 kfree(lli_array);
1633 end_function:
1634 return error;
/*
1640 This function creates a list of tables for flow and returns the data for
1641 the first and last tables of the list
*/
1643 static int sep_prepare_flow_dma_tables(struct sep_device *sep,
1644 unsigned long num_virtual_buffers,
1645 unsigned long first_buff_addr, struct sep_flow_context_t *flow_data_ptr, struct sep_lli_entry_t *first_table_data_ptr, struct sep_lli_entry_t *last_table_data_ptr, bool isKernelVirtualAddress)
1647 int error;
1648 unsigned long virt_buff_addr;
1649 unsigned long virt_buff_size;
1650 struct sep_lli_entry_t table_data;
1651 struct sep_lli_entry_t *info_entry_ptr;
1652 struct sep_lli_entry_t *prev_info_entry_ptr;
1653 unsigned long i;
1655 /* init vars */
1656 error = 0;
1657 prev_info_entry_ptr = NULL;
1659 /* init the first table to default */
1660 table_data.physical_address = 0xffffffff;
1661 first_table_data_ptr->physical_address = 0xffffffff;
1662 table_data.block_size = 0;
1664 for (i = 0; i < num_virtual_buffers; i++) {
1665 /* get the virtual buffer address */
1666 error = get_user(virt_buff_addr, (unsigned long __user *) first_buff_addr);
1667 if (error)
1668 goto end_function;
1670 /* get the virtual buffer size */
1671 first_buff_addr += sizeof(unsigned long);
1672 error = get_user(virt_buff_size, (unsigned long __user *) first_buff_addr);
1673 if (error)
1674 goto end_function;
1676 /* advance the address to point to the next pair of address|size */
1677 first_buff_addr += sizeof(unsigned long);
1679 /* now prepare the one flow LLI table from the data */
1680 error = sep_prepare_one_flow_dma_table(sep, virt_buff_addr, virt_buff_size, &table_data, &info_entry_ptr, flow_data_ptr, isKernelVirtualAddress);
1681 if (error)
1682 goto end_function;
1684 if (i == 0) {
1685 /* if this is the first table - save it to return to the user
1686 application */
1687 *first_table_data_ptr = table_data;
1689 /* set the pointer to info entry */
1690 prev_info_entry_ptr = info_entry_ptr;
1691 } else {
1692 /* not first table - the previous table info entry should
1693 be updated */
1694 prev_info_entry_ptr->block_size = (0x1 << SEP_INT_FLAG_OFFSET_IN_BITS) | (table_data.block_size);
1696 /* set the pointer to info entry */
1697 prev_info_entry_ptr = info_entry_ptr;
1701 /* set the last table data */
1702 *last_table_data_ptr = table_data;
1703 end_function:
1704 return error;
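#if 0
/*
 * Illustrative user-space sketch, not part of the driver: the buffer
 * list walked above is expected to be an array of unsigned long
 * address|size pairs starting at the user address passed in
 * first_buff_addr.  The helper name and the two-buffer layout are made
 * up for the example.
 */
static unsigned long example_pairs[2 * 2];	/* two address|size pairs */

static void example_fill_pairs(void *buf0, unsigned long len0,
			       void *buf1, unsigned long len1)
{
	example_pairs[0] = (unsigned long) buf0;	/* address of buffer 0 */
	example_pairs[1] = len0;			/* size of buffer 0 */
	example_pairs[2] = (unsigned long) buf1;	/* address of buffer 1 */
	example_pairs[3] = len1;			/* size of buffer 1 */
	/* example_pairs is then passed as virt_buff_data_addr with
	   num_virtual_buffers == 2 in struct sep_driver_build_flow_table_t */
}
#endif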
/*
1708 this function goes over all the flow tables connected to the given
1709 table and deallocates them
*/
1711 static void sep_deallocated_flow_tables(struct sep_lli_entry_t *first_table_ptr)
1713 /* id pointer */
1714 unsigned long *table_ptr;
1715 /* end address of the flow dma area */
1716 unsigned long num_entries;
1717 unsigned long num_pages;
1718 struct page **pages_ptr;
1719 /* maximum table size in words */
1720 struct sep_lli_entry_t *info_entry_ptr;
1722 /* set the pointer to the first table */
1723 table_ptr = (unsigned long *) first_table_ptr->physical_address;
1725 /* set the num of entries */
1726 num_entries = (first_table_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS)
1727 & SEP_NUM_ENTRIES_MASK;
1729 /* go over all the connected tables */
1730 while (*table_ptr != 0xffffffff) {
1731 /* get number of pages */
1732 num_pages = *(table_ptr - 2);
1734 /* get the pointer to the pages */
1735 pages_ptr = (struct page **) (*(table_ptr - 1));
1737 /* free the pages */
1738 sep_free_dma_pages(pages_ptr, num_pages, 1);
1740 /* go to the info entry */
1741 info_entry_ptr = ((struct sep_lli_entry_t *) table_ptr) + (num_entries - 1);
1743 table_ptr = (unsigned long *) info_entry_ptr->physical_address;
1744 num_entries = (info_entry_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1747 return;
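/*
 * Layout note, derived from the walk above: table_ptr points at the
 * first LLI entry of a table, table_ptr[-2] holds the number of locked
 * pages and table_ptr[-1] the struct page ** array backing them.  The
 * last entry of each table is the "info" entry, whose physical_address
 * field this walk reads as the address of the next table in the chain;
 * 0xffffffff terminates the list.
 */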
/**
1751 * sep_find_flow_context - find a flow
1752 * @sep: the SEP we are working with
1753 * @flow_id: flow identifier
 *
1755 * Returns a pointer to the matching flow, or NULL if the flow does not
1756 * exist.
 */
1759 static struct sep_flow_context_t *sep_find_flow_context(struct sep_device *sep,
1760 unsigned long flow_id)
1762 int count;
/*
1764 * always search for the flow with the default id first - once we
1765 * have started working on a flow there can never be two flows
1766 * carrying the default flag
 */
1768 for (count = 0; count < SEP_DRIVER_NUM_FLOWS; count++) {
1769 if (sep->flows[count].flow_id == flow_id)
1770 return &sep->flows[count];
1772 return NULL;
/*
1777 this function handles the request to create the DMA tables for flow
*/
1779 static int sep_create_flow_dma_tables_handler(struct sep_device *sep,
1780 unsigned long arg)
1782 int error;
1783 struct sep_driver_build_flow_table_t command_args;
1784 /* first table - output */
1785 struct sep_lli_entry_t first_table_data;
1786 /* dma table data */
1787 struct sep_lli_entry_t last_table_data;
1788 /* pointer to the info entry of the previous DMA table */
1789 struct sep_lli_entry_t *prev_info_entry_ptr;
1790 /* pointer to the flow data structure */
1791 struct sep_flow_context_t *flow_context_ptr;
1793 dbg("SEP Driver:--------> sep_create_flow_dma_tables_handler start\n");
1795 /* init variables */
1796 prev_info_entry_ptr = NULL;
1797 first_table_data.physical_address = 0xffffffff;
1799 /* find the free structure for flow data */
1800 flow_context_ptr = sep_find_flow_context(sep, SEP_FREE_FLOW_ID);
1801 if (flow_context_ptr == NULL) {
/* error would otherwise be returned uninitialized on this path */
error = -EINVAL;
1802 goto end_function;
}
1804 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_flow_table_t));
1805 if (error)
1806 goto end_function;
1808 /* create flow tables */
1809 error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
1810 if (error)
1811 goto end_function_with_error;
1813 /* check if flow is static */
1814 if (!command_args.flow_type)
1815 /* point the info entry of the last to the info entry of the first */
1816 last_table_data = first_table_data;
1818 /* set output params */
1819 command_args.first_table_addr = first_table_data.physical_address;
1820 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
1821 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
1823 /* send the parameters to user application */
1824 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_build_flow_table_t));
1825 if (error)
1826 goto end_function_with_error;
1828 /* the whole flow is created - update the flow entry with the temporary id */
1829 flow_context_ptr->flow_id = SEP_TEMP_FLOW_ID;
1831 /* set the processing tables data in the context */
1832 if (command_args.input_output_flag == SEP_DRIVER_IN_FLAG)
1833 flow_context_ptr->input_tables_in_process = first_table_data;
1834 else
1835 flow_context_ptr->output_tables_in_process = first_table_data;
1837 goto end_function;
1839 end_function_with_error:
1840 /* free the allocated tables */
1841 sep_deallocated_flow_tables(&first_table_data);
1842 end_function:
1843 dbg("SEP Driver:<-------- sep_create_flow_dma_tables_handler end\n");
1844 return error;
/*
1848 this function handles adding tables to a flow
*/
1850 static int sep_add_flow_tables_handler(struct sep_device *sep, unsigned long arg)
1852 int error;
1853 unsigned long num_entries;
1854 struct sep_driver_add_flow_table_t command_args;
1855 struct sep_flow_context_t *flow_context_ptr;
1856 /* first dma table data */
1857 struct sep_lli_entry_t first_table_data;
1858 /* last dma table data */
1859 struct sep_lli_entry_t last_table_data;
1860 /* pointer to the info entry of the current DMA table */
1861 struct sep_lli_entry_t *info_entry_ptr;
1863 dbg("SEP Driver:--------> sep_add_flow_tables_handler start\n");
1865 /* get input parameters */
1866 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_flow_table_t));
1867 if (error)
1868 goto end_function;
1870 /* find the flow structure for the flow id */
1871 flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
1872 if (flow_context_ptr == NULL) {
/* unknown flow id - error would otherwise be 0 here */
error = -EINVAL;
1873 goto end_function;
}
1875 /* prepare the flow dma tables */
1876 error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
1877 if (error)
1878 goto end_function_with_error;
1880 /* now check if there is already an existing add table for this flow */
1881 if (command_args.inputOutputFlag == SEP_DRIVER_IN_FLAG) {
1882 /* this buffer was for input buffers */
1883 if (flow_context_ptr->input_tables_flag) {
1884 /* add table already exists - add the new tables to the end
1885 of the previous */
1886 num_entries = (flow_context_ptr->last_input_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1888 info_entry_ptr = (struct sep_lli_entry_t *)
1889 (flow_context_ptr->last_input_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
1891 /* connect to list of tables */
1892 *info_entry_ptr = first_table_data;
1894 /* set the first table data */
1895 first_table_data = flow_context_ptr->first_input_table;
1896 } else {
1897 /* set the input flag */
1898 flow_context_ptr->input_tables_flag = 1;
1900 /* set the first table data */
1901 flow_context_ptr->first_input_table = first_table_data;
1903 /* set the last table data */
1904 flow_context_ptr->last_input_table = last_table_data;
1905 } else { /* this is output tables */
1907 /* this buffer was for output buffers */
1908 if (flow_context_ptr->output_tables_flag) {
1909 /* add table already exists - add the new tables to
1910 the end of the previous */
1911 num_entries = (flow_context_ptr->last_output_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1913 info_entry_ptr = (struct sep_lli_entry_t *)
1914 (flow_context_ptr->last_output_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
1916 /* connect to list of tables */
1917 *info_entry_ptr = first_table_data;
1919 /* set the first table data */
1920 first_table_data = flow_context_ptr->first_output_table;
1921 } else {
1922 /* set the output flag */
1923 flow_context_ptr->output_tables_flag = 1;
1925 /* set the first table data */
1926 flow_context_ptr->first_output_table = first_table_data;
1928 /* set the last table data */
1929 flow_context_ptr->last_output_table = last_table_data;
1932 /* set output params */
1933 command_args.first_table_addr = first_table_data.physical_address;
1934 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
1935 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
1937 /* send the parameters to user application */
1938 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_add_flow_table_t));
if (!error)
/* on success the tables now belong to the flow - keep them */
goto end_function;
1939 end_function_with_error:
1940 /* free the allocated tables */
1941 sep_deallocated_flow_tables(&first_table_data);
1942 end_function:
1943 dbg("SEP Driver:<-------- sep_add_flow_tables_handler end\n");
1944 return error;
/*
1948 this function adds the flow-add message to the specified flow
*/
1950 static int sep_add_flow_tables_message_handler(struct sep_device *sep, unsigned long arg)
1952 int error;
1953 struct sep_driver_add_message_t command_args;
1954 struct sep_flow_context_t *flow_context_ptr;
1956 dbg("SEP Driver:--------> sep_add_flow_tables_message_handler start\n");
1958 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_message_t));
1959 if (error)
1960 goto end_function;
1962 /* check input */
1963 if (command_args.message_size_in_bytes > SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES) {
1964 error = -ENOMEM;
1965 goto end_function;
1968 /* find the flow context */
1969 flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
1970 if (flow_context_ptr == NULL) {
/* unknown flow id - error would otherwise be 0 here */
error = -EINVAL;
1971 goto end_function;
}
1973 /* copy the message into context */
1974 flow_context_ptr->message_size_in_bytes = command_args.message_size_in_bytes;
1975 error = copy_from_user(flow_context_ptr->message, (void *) command_args.message_address, command_args.message_size_in_bytes);
1976 end_function:
1977 dbg("SEP Driver:<-------- sep_add_flow_tables_message_handler end\n");
1978 return error;
/*
1983 this function returns the physical and virtual addresses of the static pool
*/
1985 static int sep_get_static_pool_addr_handler(struct sep_device *sep, unsigned long arg)
1987 int error;
1988 struct sep_driver_static_pool_addr_t command_args;
1990 dbg("SEP Driver:--------> sep_get_static_pool_addr_handler start\n");
1992 /* prepare the output parameters in the struct */
1993 command_args.physical_static_address = sep->shared_area_bus + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
1994 command_args.virtual_static_address = (unsigned long)sep->shared_area + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
1996 edbg("SEP Driver:physical_static_address is %08lx, virtual_static_address %08lx\n", command_args.physical_static_address, command_args.virtual_static_address);
1998 /* send the parameters to user application */
1999 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_static_pool_addr_t));
2000 dbg("SEP Driver:<-------- sep_get_static_pool_addr_handler end\n");
2001 return error;
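#if 0
/*
 * Illustrative user-space sketch, not part of the driver: how an
 * application might query the static pool through the ioctl above.
 * The device node name "/dev/sep" and the availability of
 * sep_driver_api.h to user space are assumptions for this example.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include "sep_driver_api.h"

static int example_query_static_pool(void)
{
	struct sep_driver_static_pool_addr_t args;
	int fd = open("/dev/sep", O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, SEP_IOCGETSTATICPOOLADDR, &args) == 0)
		printf("static pool: phys %08lx virt %08lx\n",
		       args.physical_static_address,
		       args.virtual_static_address);
	close(fd);
	return 0;
}
#endif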
/*
2005 this function gets the offset of the physical address from the start
2006 of the mapped area
*/
2008 static int sep_get_physical_mapped_offset_handler(struct sep_device *sep, unsigned long arg)
2010 int error;
2011 struct sep_driver_get_mapped_offset_t command_args;
2013 dbg("SEP Driver:--------> sep_get_physical_mapped_offset_handler start\n");
2015 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_get_mapped_offset_t));
2016 if (error)
2017 goto end_function;
2019 if (command_args.physical_address < sep->shared_area_bus) {
2020 /* FIXME */
2021 error = -ENOTTY;
2022 goto end_function;
2025 /* prepare the output parameters in the struct */
2026 command_args.offset = command_args.physical_address - sep->shared_area_bus;
2028 edbg("SEP Driver:physical_address is %08lx, offset is %lu\n", command_args.physical_address, command_args.offset);
2030 /* send the parameters to user application */
2031 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_get_mapped_offset_t));
2032 end_function:
2033 dbg("SEP Driver:<-------- sep_get_physical_mapped_offset_handler end\n");
2034 return error;
/*
  this function handles the request for SEP start
*/
2041 static int sep_start_handler(struct sep_device *sep)
2043 unsigned long reg_val;
2044 unsigned long error = 0;
2046 dbg("SEP Driver:--------> sep_start_handler start\n");
2048 /* wait in polling for message from SEP */
do
2050 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2051 while (!reg_val);
2053 /* check the value */
2054 if (reg_val == 0x1)
2055 /* fatal error - read error status from GPR0 */
2056 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2057 dbg("SEP Driver:<-------- sep_start_handler end\n");
2058 return error;
/*
2062 this function handles the request for SEP initialization
*/
2064 static int sep_init_handler(struct sep_device *sep, unsigned long arg)
2066 unsigned long message_word;
2067 unsigned long __user *message_ptr;
2068 struct sep_driver_init_t command_args;
2069 unsigned long counter;
2070 unsigned long error;
2071 unsigned long reg_val;
2073 dbg("SEP Driver:--------> sep_init_handler start\n");
2074 error = 0;
2076 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_init_t));
2078 dbg("SEP Driver:--------> sep_init_handler - finished copy_from_user \n");
2080 if (error)
2081 goto end_function;
2083 /* PATCH - configure the DMA to single-burst instead of multi-burst */
2084 /*sep_configure_dma_burst(); */
2086 dbg("SEP Driver:--------> sep_init_handler - finished sep_configure_dma_burst \n");
2088 message_ptr = (unsigned long __user *) command_args.message_addr;
2090 /* set the base address of the SRAM */
2091 sep_write_reg(sep, HW_SRAM_ADDR_REG_ADDR, HW_CC_SRAM_BASE_ADDRESS);
2093 for (counter = 0; counter < command_args.message_size_in_words; counter++, message_ptr++) {
2094 get_user(message_word, message_ptr);
2095 /* write data to SRAM */
2096 sep_write_reg(sep, HW_SRAM_DATA_REG_ADDR, message_word);
2097 edbg("SEP Driver:message_word is %lu\n", message_word);
2098 /* wait for write complete */
2099 sep_wait_sram_write(sep);
2101 dbg("SEP Driver:--------> sep_init_handler - finished getting messages from user space\n");
2102 /* signal SEP */
2103 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1);
do
2106 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2107 while (!(reg_val & 0xFFFFFFFD));
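/* note: 0xFFFFFFFD is ~0x2, so the loop above spins until SEP raises
   any GPR3 bit other than bit 1 */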
2109 dbg("SEP Driver:--------> sep_init_handler - finished waiting for reg_val & 0xFFFFFFFD \n");
2111 /* check the value */
2112 if (reg_val == 0x1) {
2113 edbg("SEP Driver:init failed\n");
2115 error = sep_read_reg(sep, 0x8060);
2116 edbg("SEP Driver:sw monitor is %lu\n", error);
2118 /* fatal error - read error status from GPR0 */
2119 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2120 edbg("SEP Driver:error is %lu\n", error);
2122 end_function:
2123 dbg("SEP Driver:<-------- sep_init_handler end\n");
2124 return error;
/*
2129 this function handles the request for cache and resident reallocation
*/
2131 static int sep_realloc_cache_resident_handler(struct sep_device *sep,
2132 unsigned long arg)
2134 int error;
2135 unsigned long phys_cache_address;
2136 unsigned long phys_resident_address;
2137 struct sep_driver_realloc_cache_resident_t command_args;
2139 /* copy the data */
2140 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_realloc_cache_resident_t));
2141 if (error)
2142 goto end_function;
2144 /* copy cache and resident to their intended locations */
2145 error = sep_copy_cache_resident_to_area(sep, command_args.cache_addr, command_args.cache_size_in_bytes, command_args.resident_addr, command_args.resident_size_in_bytes, &phys_cache_address, &phys_resident_address);
2146 if (error)
2147 goto end_function;
2149 command_args.new_base_addr = sep->shared_area_bus;
2151 /* find the new base address according to the lowest address between
2152 cache, resident and shared area */
2153 if (phys_resident_address < command_args.new_base_addr)
2154 command_args.new_base_addr = phys_resident_address;
2155 if (phys_cache_address < command_args.new_base_addr)
2156 command_args.new_base_addr = phys_cache_address;
2158 /* set the return parameters */
2159 command_args.new_cache_addr = phys_cache_address;
2160 command_args.new_resident_addr = phys_resident_address;
2162 /* set the new shared area */
2163 command_args.new_shared_area_addr = sep->shared_area_bus;
2165 edbg("SEP Driver:command_args.new_shared_area is %08lx\n", command_args.new_shared_area_addr);
2166 edbg("SEP Driver:command_args.new_base_addr is %08lx\n", command_args.new_base_addr);
2167 edbg("SEP Driver:command_args.new_resident_addr is %08lx\n", command_args.new_resident_addr);
2168 edbg("SEP Driver:command_args.new_cache_addr is %08lx\n", command_args.new_cache_addr);
2170 /* return to user */
2171 error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_realloc_cache_resident_t));
2172 end_function:
2173 return error;
/*
2177 this function handles the get-time request
*/
2179 static int sep_get_time_handler(struct sep_device *sep, unsigned long arg)
2181 int error;
2182 struct sep_driver_get_time_t command_args;
2184 error = sep_set_time(sep, &command_args.time_physical_address, &command_args.time_value);
2185 if (error == 0)
2186 error = copy_to_user((void __user *)arg,
2187 &command_args, sizeof(struct sep_driver_get_time_t));
2188 return error;
/*
2193 This API handles the end transaction request
*/
2195 static int sep_end_transaction_handler(struct sep_device *sep, unsigned long arg)
2197 dbg("SEP Driver:--------> sep_end_transaction_handler start\n");
2199 #if 0 /*!SEP_DRIVER_POLLING_MODE */
2200 /* close IMR */
2201 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
2203 /* release IRQ line */
2204 free_irq(SEP_DIRVER_IRQ_NUM, sep);
2206 /* release the sep mutex */
2207 mutex_unlock(&sep_mutex);
2208 #endif
2210 dbg("SEP Driver:<-------- sep_end_transaction_handler end\n");
2212 return 0;
/*
2217 This function handles the set flow id command
*/
2219 static int sep_set_flow_id_handler(struct sep_device *sep, unsigned long arg)
2221 int error;
2222 unsigned long flow_id;
2223 struct sep_flow_context_t *flow_data_ptr;
2225 dbg("------------>SEP Driver: sep_set_flow_id_handler start\n");
2227 error = get_user(flow_id, &(((struct sep_driver_set_flow_id_t *) arg)->flow_id));
2228 if (error)
2229 goto end_function;
2231 /* find the flow data structure that was just used for creating new flow
2232 - its id should be default */
2233 flow_data_ptr = sep_find_flow_context(sep, SEP_TEMP_FLOW_ID);
2234 if (flow_data_ptr == NULL) {
/* no flow with the temporary id - error would otherwise be 0 here */
error = -EINVAL;
2235 goto end_function;
}
2237 /* set flow id */
2238 flow_data_ptr->flow_id = flow_id;
2240 end_function:
2241 dbg("SEP Driver:<-------- sep_set_flow_id_handler end\n");
2242 return error;
2249 static int sep_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
2251 int error = 0;
2252 struct sep_device *sep = filp->private_data;
2254 dbg("------------>SEP Driver: ioctl start\n");
2256 edbg("SEP Driver: cmd is %x\n", cmd);
2258 /* check that the command is for sep device */
2259 if (_IOC_TYPE(cmd) != SEP_IOC_MAGIC_NUMBER)
2260 error = -ENOTTY;
2262 switch (cmd) {
2263 case SEP_IOCSENDSEPCOMMAND:
2264 /* send command to SEP */
2265 sep_send_command_handler(sep);
2266 edbg("SEP Driver: after sep_send_command_handler\n");
2267 break;
2268 case SEP_IOCSENDSEPRPLYCOMMAND:
2269 /* send reply command to SEP */
2270 sep_send_reply_command_handler(sep);
2271 break;
2272 case SEP_IOCALLOCDATAPOLL:
2273 /* allocate data pool */
2274 error = sep_allocate_data_pool_memory_handler(sep, arg);
2275 break;
2276 case SEP_IOCWRITEDATAPOLL:
2277 /* write data into memory pool */
2278 error = sep_write_into_data_pool_handler(sep, arg);
2279 break;
2280 case SEP_IOCREADDATAPOLL:
2281 /* read data from data pool into application memory */
2282 error = sep_read_from_data_pool_handler(sep, arg);
2283 break;
2284 case SEP_IOCCREATESYMDMATABLE:
2285 /* create dma table for synchronous operation */
2286 error = sep_create_sync_dma_tables_handler(sep, arg);
2287 break;
2288 case SEP_IOCCREATEFLOWDMATABLE:
2289 /* create flow dma tables */
2290 error = sep_create_flow_dma_tables_handler(sep, arg);
2291 break;
2292 case SEP_IOCFREEDMATABLEDATA:
2293 /* free the pages */
2294 error = sep_free_dma_table_data_handler(sep);
2295 break;
2296 case SEP_IOCSETFLOWID:
2297 /* set flow id */
2298 error = sep_set_flow_id_handler(sep, arg);
2299 break;
2300 case SEP_IOCADDFLOWTABLE:
2301 /* add tables to the dynamic flow */
2302 error = sep_add_flow_tables_handler(sep, arg);
2303 break;
2304 case SEP_IOCADDFLOWMESSAGE:
2305 /* add message of add tables to flow */
2306 error = sep_add_flow_tables_message_handler(sep, arg);
2307 break;
2308 case SEP_IOCSEPSTART:
2309 /* start command to sep */
2310 error = sep_start_handler(sep);
2311 break;
2312 case SEP_IOCSEPINIT:
2313 /* init command to sep */
2314 error = sep_init_handler(sep, arg);
2315 break;
2316 case SEP_IOCGETSTATICPOOLADDR:
2317 /* get the physical and virtual addresses of the static pool */
2318 error = sep_get_static_pool_addr_handler(sep, arg);
2319 break;
2320 case SEP_IOCENDTRANSACTION:
2321 error = sep_end_transaction_handler(sep, arg);
2322 break;
2323 case SEP_IOCREALLOCCACHERES:
2324 error = sep_realloc_cache_resident_handler(sep, arg);
2325 break;
2326 case SEP_IOCGETMAPPEDADDROFFSET:
2327 error = sep_get_physical_mapped_offset_handler(sep, arg);
2328 break;
2329 case SEP_IOCGETIME:
2330 error = sep_get_time_handler(sep, arg);
2331 break;
2332 default:
2333 error = -ENOTTY;
2334 break;
2336 dbg("SEP Driver:<-------- ioctl end\n");
2337 return error;
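#if 0
/*
 * Illustrative user-space sketch, not part of the driver: the minimal
 * synchronous transaction implied by the ioctl set above.  Passing 0 as
 * the unused ioctl argument and the availability of sep_driver_api.h to
 * user space are assumptions for this example; the caller is assumed to
 * have placed its message in the mmap()ed shared area beforehand.
 */
#include <poll.h>
#include <sys/ioctl.h>
#include "sep_driver_api.h"

static int example_transaction(int fd)
{
	struct pollfd pfd = { .fd = fd, .events = POLLIN };

	/* hand the message over to SEP */
	if (ioctl(fd, SEP_IOCSENDSEPCOMMAND, 0) != 0)
		return -1;
	/* sleep until the reply interrupt wakes the driver's wait queue */
	if (poll(&pfd, 1, -1) < 0)
		return -1;
	/* close the transaction */
	return ioctl(fd, SEP_IOCENDTRANSACTION, 0);
}
#endif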
2342 #if !SEP_DRIVER_POLLING_MODE
2344 /* handler for flow done interrupt */
2346 static void sep_flow_done_handler(struct work_struct *work)
2348 struct sep_flow_context_t *flow_data_ptr;
2350 /* obtain the mutex */
2351 mutex_lock(&sep_mutex);
2353 /* get the pointer to context */
2354 flow_data_ptr = (struct sep_flow_context_t *) work;
2356 /* free all the current input tables in sep */
2357 sep_deallocated_flow_tables(&flow_data_ptr->input_tables_in_process);
2359 /* free all the current output tables in SEP (if needed) */
2360 if (flow_data_ptr->output_tables_in_process.physical_address != 0xffffffff)
2361 sep_deallocated_flow_tables(&flow_data_ptr->output_tables_in_process);
2363 /* check if we have additional tables to be sent to SEP; only the
2364 input flag needs to be checked */
2365 if (flow_data_ptr->input_tables_flag) {
2366 /* copy the stored message into the shared RAM and signal SEP */
2367 memcpy((void *) sep->shared_area, (void *) flow_data_ptr->message, flow_data_ptr->message_size_in_bytes);
2369 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, 0x2);
2371 mutex_unlock(&sep_mutex);
/*
2374 interrupt handler function
*/
2376 static irqreturn_t sep_inthandler(int irq, void *dev_id)
2378 irqreturn_t int_error;
2379 unsigned long reg_val;
2380 unsigned long flow_id;
2381 struct sep_flow_context_t *flow_context_ptr;
2382 struct sep_device *sep = dev_id;
2384 int_error = IRQ_HANDLED;
2386 /* read the IRR register to check if this is SEP interrupt */
2387 reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
2388 edbg("SEP Interrupt - reg is %08lx\n", reg_val);
2390 /* check if this is the flow interrupt */
2391 if (0 /*reg_val & (0x1 << 11) */ ) {
2392 /* read GPR0 to find out which flow is done */
2393 flow_id = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
2395 /* find the context of the flow */
2396 flow_context_ptr = sep_find_flow_context(sep, flow_id >> 28);
2397 if (flow_context_ptr == NULL)
2398 goto end_function_with_error;
2400 /* queue the work */
2401 INIT_WORK(&flow_context_ptr->flow_wq, sep_flow_done_handler);
2402 queue_work(sep->flow_wq, &flow_context_ptr->flow_wq);
2404 } else {
2405 /* check if this is reply interrupt from SEP */
2406 if (reg_val & (0x1 << 13)) {
2407 /* update the counter of reply messages */
2408 sep->reply_ct++;
2409 /* wake up the waiting process */
2410 wake_up(&sep_event);
2411 } else {
2412 int_error = IRQ_NONE;
2413 goto end_function;
2416 end_function_with_error:
2417 /* clear the interrupt */
2418 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
2419 end_function:
2420 return int_error;
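/*
 * Note: bit 13 of the IRR is the SEP-to-host GPR2 interrupt, the only
 * source left unmasked in the IMR during probe; the flow-done branch
 * above is currently compiled out.
 */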
2423 #endif
2427 #if 0
2429 static void sep_wait_busy(struct sep_device *sep)
2431 u32 reg;
2433 do {
2434 reg = sep_read_reg(sep, HW_HOST_SEP_BUSY_REG_ADDR);
2435 } while (reg);
/*
2439 PATCH for configuring the DMA to single burst instead of multi-burst
*/
2441 static void sep_configure_dma_burst(struct sep_device *sep)
2443 #define HW_AHB_RD_WR_BURSTS_REG_ADDR 0x0E10UL
2445 dbg("SEP Driver:<-------- sep_configure_dma_burst start \n");
2447 /* request access to registers from SEP */
2448 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
2450 dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (write reg) \n");
2452 sep_wait_busy(sep);
2454 dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (while(revVal) wait loop) \n");
2456 /* set the DMA burst register to single burst */
2457 sep_write_reg(sep, HW_AHB_RD_WR_BURSTS_REG_ADDR, 0x0UL);
2459 /* release the sep busy */
2460 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x0UL);
2461 sep_wait_busy(sep);
2463 dbg("SEP Driver:<-------- sep_configure_dma_burst done \n");
2467 #endif
/*
2470 Function that is activated on the successful probe of the SEP device
*/
2472 static int __devinit sep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2474 int error = 0;
2475 struct sep_device *sep;
2476 int counter;
2477 int size; /* size of memory for allocation */
2479 edbg("Sep pci probe starting\n");
2480 if (sep_dev != NULL) {
2481 dev_warn(&pdev->dev, "only one SEP supported.\n");
2482 return -EBUSY;
2485 /* enable the device */
2486 error = pci_enable_device(pdev);
2487 if (error) {
2488 edbg("error enabling pci device\n");
2489 goto end_function;
2492 /* set the pci dev pointer */
2493 sep_dev = &sep_instance;
2494 sep = &sep_instance;
2496 edbg("sep->shared_area = %lx\n", (unsigned long) &sep->shared_area);
2497 /* transaction counter that coordinates the transactions between SEP
2498 and HOST */
2499 sep->send_ct = 0;
2500 /* counter for the messages from sep */
2501 sep->reply_ct = 0;
2502 /* counter for the number of bytes allocated in the pool
2503 for the current transaction */
2504 sep->data_pool_bytes_allocated = 0;
2506 /* calculate the total size for allocation */
2507 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2508 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
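/*
 * Illustrative layout note: the offsets in sep_driver_config.h are
 * assumed to stack these regions back to back in the order they are
 * summed above -
 *
 *   message | synchronic DMA tables | data pool | flow DMA tables |
 *   static area | system data
 *
 * The config header remains the authoritative source for the offsets.
 */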
2510 /* allocate the shared area */
2511 if (sep_map_and_alloc_shared_area(sep, size)) {
2512 error = -ENOMEM;
2513 /* allocation failed */
2514 goto end_function_error;
2516 /* now set the memory regions */
2517 sep->message_shared_area_addr = sep->shared_area;
2519 edbg("SEP Driver: sep->message_shared_area_addr is %p\n", sep->message_shared_area_addr);
2521 #if (SEP_DRIVER_RECONFIG_MESSAGE_AREA == 1)
2522 /* send the new SHARED MESSAGE AREA to the SEP */
2523 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_area_bus);
2525 /* poll for SEP response */
2526 retVal = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2527 while (retVal != 0xffffffff && retVal != sep->shared_area_bus)
2528 retVal = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2530 /* check the return value (register) */
2531 if (retVal != sep->shared_area_bus) {
2532 error = -ENOMEM;
2533 goto end_function_deallocate_sep_shared_area;
2535 #endif
2536 /* init the flow contexts */
2537 for (counter = 0; counter < SEP_DRIVER_NUM_FLOWS; counter++)
2538 sep->flows[counter].flow_id = SEP_FREE_FLOW_ID;
2540 sep->flow_wq = create_singlethread_workqueue("sepflowwq");
2541 if (sep->flow_wq == NULL) {
2542 error = -ENOMEM;
2543 edbg("sep_driver:flow queue creation failed\n");
2544 goto end_function_deallocate_sep_shared_area;
2546 edbg("SEP Driver: create flow workqueue \n");
2547 /* load the rom code */
2548 sep_load_rom_code(sep);
2550 sep->pdev = pci_dev_get(pdev);
2552 /* get the io memory start address */
2553 sep->io_bus = pci_resource_start(pdev, 0);
2554 if (!sep->io_bus) {
2555 edbg("SEP Driver error pci resource start\n");
2556 goto end_function_deallocate_sep_shared_area;
2559 /* get the io memory end address */
2560 sep->io_end_bus = pci_resource_end(pdev, 0);
2561 if (!sep->io_end_bus) {
2562 edbg("SEP Driver error pci resource end\n");
2563 goto end_function_deallocate_sep_shared_area;
2566 sep->io_memory_size = sep->io_end_bus - sep->io_bus + 1;
2568 edbg("SEP Driver:io_bus is %08lx\n", sep->io_bus);
2570 edbg("SEP Driver:io_memory_end_physical_address is %08lx\n", sep->io_end_bus);
2572 edbg("SEP Driver:io_memory_size is %08lx\n", sep->io_memory_size);
2574 sep->io_addr = ioremap_nocache(sep->io_bus, sep->io_memory_size);
2575 if (!sep->io_addr) {
2576 edbg("SEP Driver error ioremap of io memory\n");
2577 goto end_function_deallocate_sep_shared_area;
2580 edbg("SEP Driver:io_addr is %p\n", sep->io_addr);
2582 sep->reg_addr = (void __iomem *) sep->io_addr;
2584 /* set up system base address and shared memory location */
2586 sep->rar_addr = kmalloc(2 * SEP_RAR_IO_MEM_REGION_SIZE, GFP_KERNEL);
2588 if (!sep->rar_addr) {
2589 edbg("SEP Driver:can't kmalloc rar\n");
2590 goto end_function_uniomap;
2592 /* FIXME */
2593 sep->rar_bus = __pa(sep->rar_addr);
2595 edbg("SEP Driver:rar_physical is %08llx\n", (unsigned long long)sep->rar_bus);
2596 edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
2598 #if !SEP_DRIVER_POLLING_MODE
2600 edbg("SEP Driver: about to write IMR and ICR REG_ADDR\n");
2602 /* clear ICR register */
2603 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
2605 /* set the IMR register - open only GPR 2 */
2606 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
2608 edbg("SEP Driver: about to call request_irq\n");
2609 /* get the interrupt line */
2610 error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED, "sep_driver", sep);
2611 if (error)
2612 goto end_function_free_res;
2614 goto end_function;
2615 edbg("SEP Driver: about to write IMR REG_ADDR");
2617 /* set the IMR register - open only GPR 2 */
2618 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
2620 end_function_free_res:
2621 kfree(sep->rar_addr);
2622 #endif /* SEP_DRIVER_POLLING_MODE */
2623 end_function_uniomap:
2624 iounmap(sep->io_addr);
2625 end_function_deallocate_sep_shared_area:
2626 /* de-allocate shared area */
2627 sep_unmap_and_free_shared_area(sep, size);
2628 end_function_error:
2629 sep_dev = NULL;
2630 end_function:
2631 return error;
2634 static struct pci_device_id sep_pci_id_tbl[] = {
2635 {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080c)},
{0}
};
2639 MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);
2641 /* field for registering driver to PCI device */
2642 static struct pci_driver sep_pci_driver = {
2643 .name = "sep_sec_driver",
2644 .id_table = sep_pci_id_tbl,
2645 .probe = sep_probe
2646 /* FIXME: remove handler */
};
2649 /* major and minor device numbers */
2650 static dev_t sep_devno;
2652 /* the files operations structure of the driver */
2653 static struct file_operations sep_file_operations = {
2654 .owner = THIS_MODULE,
2655 .ioctl = sep_ioctl,
2656 .poll = sep_poll,
2657 .open = sep_open,
2658 .release = sep_release,
2659 .mmap = sep_mmap,
};
2663 /* cdev struct of the driver */
2664 static struct cdev sep_cdev;
/*
2667 this function registers the driver to the file system
*/
2669 static int sep_register_driver_to_fs(void)
2671 int ret_val = alloc_chrdev_region(&sep_devno, 0, 1, "sep_sec_driver");
2672 if (ret_val) {
2673 edbg("sep_driver:major number allocation failed, retval is %d\n", ret_val);
2674 goto end_function;
2677 /* init cdev */
2678 cdev_init(&sep_cdev, &sep_file_operations);
2679 sep_cdev.owner = THIS_MODULE;
2681 /* register the driver with the kernel */
2682 ret_val = cdev_add(&sep_cdev, sep_devno, 1);
2684 if (ret_val) {
2685 edbg("sep_driver:cdev_add failed, retval is %d\n", ret_val);
2686 goto end_function_unregister_devnum;
2689 goto end_function;
2691 end_function_unregister_devnum:
2693 /* unregister dev numbers */
2694 unregister_chrdev_region(sep_devno, 1);
2696 end_function:
2697 return ret_val;
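/*
 * Usage note (illustrative): the major number is allocated dynamically
 * by alloc_chrdev_region() above, so the device node has to be created
 * by udev or by hand, e.g.
 *
 *	major=$(awk '$2 == "sep_sec_driver" {print $1}' /proc/devices)
 *	mknod /dev/sep c $major 0
 *
 * The node name /dev/sep is an assumption for the example.
 */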
2701 /*--------------------------------------------------------------
2702 init function
2703 ----------------------------------------------------------------*/
2704 static int __init sep_init(void)
2706 int ret_val = 0;
2707 dbg("SEP Driver:-------->Init start\n");
2708 /* FIXME: Probe can occur before we are ready to survive a probe */
2709 ret_val = pci_register_driver(&sep_pci_driver);
2710 if (ret_val) {
2711 edbg("sep_driver:pci_register_driver failed, ret_val is %d\n", ret_val);
2712 goto end_function_unregister_from_fs;
2714 /* register driver to fs */
2715 ret_val = sep_register_driver_to_fs();
2716 if (ret_val)
2717 goto end_function_unregister_pci;
2718 goto end_function;
2719 end_function_unregister_pci:
2720 pci_unregister_driver(&sep_pci_driver);
2721 end_function_unregister_from_fs:
2722 /* unregister from fs */
2723 cdev_del(&sep_cdev);
2724 /* unregister dev numbers */
2725 unregister_chrdev_region(sep_devno, 1);
2726 end_function:
2727 dbg("SEP Driver:<-------- Init end\n");
2728 return ret_val;
2732 /*-------------------------------------------------------------
2733 exit function
2734 --------------------------------------------------------------*/
2735 static void __exit sep_exit(void)
2737 int size;
2739 dbg("SEP Driver:--------> Exit start\n");
2741 /* unregister from fs */
2742 cdev_del(&sep_cdev);
2743 /* unregister dev numbers */
2744 unregister_chrdev_region(sep_devno, 1);
2745 /* calculate the total size for de-allocation */
2746 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2747 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2748 /* FIXME: We need to do this in the unload for the device */
2749 /* free shared area */
2750 if (sep_dev) {
2751 sep_unmap_and_free_shared_area(sep_dev, size);
2752 edbg("SEP Driver: free pages SEP SHARED AREA \n");
2753 iounmap((void *) sep_dev->reg_addr);
2754 edbg("SEP Driver: iounmap \n");
2756 edbg("SEP Driver: release_mem_region \n");
2757 dbg("SEP Driver:<-------- Exit end\n");
2761 module_init(sep_init);
2762 module_exit(sep_exit);
2764 MODULE_LICENSE("GPL");