/*
 * Staging: sep: fix 2 warnings
 * drivers/staging/sep/sep_driver.c
 * (gitweb blob 67c7d2ca7ca06a9d061438028150905c7d7f6c19)
 */
1 /*
3 * sep_driver.c - Security Processor Driver main group of functions
5 * Copyright(c) 2009 Intel Corporation. All rights reserved.
6 * Copyright(c) 2009 Discretix. All rights reserved.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; either version 2 of the License, or (at your option)
11 * any later version.
13 * This program is distributed in the hope that it will be useful, but WITHOUT
14 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
15 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
16 * more details.
18 * You should have received a copy of the GNU General Public License along with
19 * this program; if not, write to the Free Software Foundation, Inc., 59
20 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
22 * CONTACTS:
24 * Mark Allyn mark.a.allyn@intel.com
26 * CHANGES:
28 * 2009.06.26 Initial publish
32 #include <linux/init.h>
33 #include <linux/module.h>
34 #include <linux/fs.h>
35 #include <linux/cdev.h>
36 #include <linux/kdev_t.h>
37 #include <linux/mutex.h>
38 #include <linux/mm.h>
39 #include <linux/poll.h>
40 #include <linux/wait.h>
41 #include <linux/sched.h>
42 #include <linux/pci.h>
43 #include <linux/firmware.h>
44 #include <asm/ioctl.h>
45 #include <linux/ioport.h>
46 #include <asm/io.h>
47 #include <linux/interrupt.h>
48 #include <linux/pagemap.h>
49 #include <asm/cacheflush.h>
50 #include "sep_driver_hw_defs.h"
51 #include "sep_driver_config.h"
52 #include "sep_driver_api.h"
53 #include "sep_dev.h"
#if SEP_DRIVER_ARM_DEBUG_MODE

#define CRYS_SEP_ROM_length			0x4000
#define CRYS_SEP_ROM_start_address		0x8000C000UL
#define CRYS_SEP_ROM_start_address_offset	0xC000UL
#define SEP_ROM_BANK_register			0x80008420UL
#define SEP_ROM_BANK_register_offset		0x8420UL

/*
 * THESE 2 definitions are specific to the board - must be
 * defined during integration
 *
 * FIX: the generic 0x82000000 value was defined and then immediately
 * redefined with the board value below, causing a macro-redefinition
 * warning; keep only the board-specific definition.
 */
#define SEP_RAR_IO_MEM_REGION_START_ADDRESS	0xFF0D0000

/* 2M size */

/**
 * sep_load_rom_code - load the SEP ROM image into the device
 * @sep: SEP device being initialised
 *
 * Writes the CRYS_SEP_ROM image into the SEP bank by bank, resets the
 * SEP and then polls GPR3 until the ROM boot reports a status code,
 * which is logged.
 */
static void sep_load_rom_code(struct sep_device *sep)
{
	/* Index variables */
	unsigned long i, k, j;
	u32 reg;
	u32 error;
	u32 warning;

	/* Loading ROM from SEP_ROM_image.h file */
	k = sizeof(CRYS_SEP_ROM);

	edbg("SEP Driver: DX_CC_TST_SepRomLoader start\n");

	edbg("SEP Driver: k is %lu\n", k);
	edbg("SEP Driver: sep->reg_addr is %p\n", sep->reg_addr);
	/* FIX: the offset is an integer constant, not a pointer - cast for %p */
	edbg("SEP Driver: CRYS_SEP_ROM_start_address_offset is %p\n",
	     (void *)CRYS_SEP_ROM_start_address_offset);

	for (i = 0; i < 4; i++) {
		/* write bank */
		sep_write_reg(sep, SEP_ROM_BANK_register_offset, i);

		for (j = 0; j < CRYS_SEP_ROM_length / 4; j++) {
			sep_write_reg(sep, CRYS_SEP_ROM_start_address_offset + 4 * j, CRYS_SEP_ROM[i * 0x1000 + j]);

			k = k - 4;

			/* whole image written: force both loops to terminate */
			if (k == 0) {
				j = CRYS_SEP_ROM_length;
				i = 4;
			}
		}
	}

	/* reset the SEP */
	sep_write_reg(sep, HW_HOST_SEP_SW_RST_REG_ADDR, 0x1);

	/* poll for SEP ROM boot finish.
	   FIX: re-read the register on every iteration; a single read
	   followed by "while (!reg);" spins forever when the first read
	   returns 0 */
	do
		reg = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
	while (!reg);

	edbg("SEP Driver: ROM polling ended\n");

	switch (reg) {
	case 0x1:
		/* fatal error - read error status from GPR0.
		   FIX: report the value; 'error' was previously set but
		   never used (compiler warning) */
		error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
		edbg("SEP Driver: ROM polling case 1 (error %u)\n", error);
		break;
	case 0x4:
		/* Cold boot ended successfully */
	case 0x8:
		/* Warmboot ended successfully */
	case 0x10:
		/* ColdWarm boot ended successfully */
		error = 0;
		/* fall through - also report the first-phase status */
	case 0x2:
		/* Boot First Phase ended.
		   FIX: report the value; 'warning' was previously set but
		   never used (compiler warning) */
		warning = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
		edbg("SEP Driver: boot warning status %u\n", warning);
		/* fall through */
	case 0x20:
		edbg("SEP Driver: ROM polling case %d\n", reg);
		break;
	}
}

#else
static void sep_load_rom_code(struct sep_device *sep) { }
#endif				/* SEP_DRIVER_ARM_DEBUG_MODE */
/*----------------------------------------
	DEFINES
-----------------------------------------*/

/* base bus address of the system area and size of the RAR I/O region */
#define BASE_ADDRESS_FOR_SYSTEM 0xfffc0000
#define SEP_RAR_IO_MEM_REGION_SIZE 0x40000

/*--------------------------------------------
	GLOBAL variables
--------------------------------------------*/

/* debug messages level */
static int debug;
module_param(debug, int , 0);
MODULE_PARM_DESC(debug, "Flag to enable SEP debug messages");

/* Keep this a single static object for now to keep the conversion easy */
static struct sep_device sep_instance;
static struct sep_device *sep_dev = &sep_instance;

/* mutex for the access to the internals of the sep driver */
static DEFINE_MUTEX(sep_mutex);

/* wait queue head (event) of the driver */
static DECLARE_WAIT_QUEUE_HEAD(sep_event);
175 * sep_load_firmware - copy firmware cache/resident
176 * @sep: device we are loading
178 * This functions copies the cache and resident from their source
179 * location into destination shared memory.
182 static int sep_load_firmware(struct sep_device *sep)
184 const struct firmware *fw;
185 char *cache_name = "cache.image.bin";
186 char *res_name = "resident.image.bin";
187 int error;
189 edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
190 edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
192 /* load cache */
193 error = request_firmware(&fw, cache_name, &sep->pdev->dev);
194 if (error) {
195 edbg("SEP Driver:cant request cache fw\n");
196 return error;
198 edbg("SEP Driver:cache %08Zx@%p\n", fw->size, (void *) fw->data);
200 memcpy(sep->rar_addr, (void *)fw->data, fw->size);
201 sep->cache_size = fw->size;
202 release_firmware(fw);
204 sep->resident_bus = sep->rar_bus + sep->cache_size;
205 sep->resident_addr = sep->rar_addr + sep->cache_size;
207 /* load resident */
208 error = request_firmware(&fw, res_name, &sep->pdev->dev);
209 if (error) {
210 edbg("SEP Driver:cant request res fw\n");
211 return error;
213 edbg("sep: res %08Zx@%p\n", fw->size, (void *)fw->data);
215 memcpy(sep->resident_addr, (void *) fw->data, fw->size);
216 sep->resident_size = fw->size;
217 release_firmware(fw);
219 edbg("sep: resident v %p b %08llx cache v %p b %08llx\n",
220 sep->resident_addr, (unsigned long long)sep->resident_bus,
221 sep->rar_addr, (unsigned long long)sep->rar_bus);
222 return 0;
226 * sep_map_and_alloc_shared_area - allocate shared block
227 * @sep: security processor
228 * @size: size of shared area
230 * Allocate a shared buffer in host memory that can be used by both the
231 * kernel and also the hardware interface via DMA.
234 static int sep_map_and_alloc_shared_area(struct sep_device *sep,
235 unsigned long size)
237 /* shared_addr = ioremap_nocache(0xda00000,shared_area_size); */
238 sep->shared_addr = dma_alloc_coherent(&sep->pdev->dev, size,
239 &sep->shared_bus, GFP_KERNEL);
241 if (!sep->shared_addr) {
242 edbg("sep_driver :shared memory dma_alloc_coherent failed\n");
243 return -ENOMEM;
245 /* set the bus address of the shared area */
246 edbg("sep: shared_addr %ld bytes @%p (bus %08llx)\n",
247 size, sep->shared_addr, (unsigned long long)sep->shared_bus);
248 return 0;
252 * sep_unmap_and_free_shared_area - free shared block
253 * @sep: security processor
255 * Free the shared area allocated to the security processor. The
256 * processor must have finished with this and any final posted
257 * writes cleared before we do so.
259 static void sep_unmap_and_free_shared_area(struct sep_device *sep, int size)
261 dma_free_coherent(&sep->pdev->dev, size,
262 sep->shared_addr, sep->shared_bus);
266 * sep_shared_virt_to_bus - convert bus/virt addresses
268 * Returns the bus address inside the shared area according
269 * to the virtual address.
272 static dma_addr_t sep_shared_virt_to_bus(struct sep_device *sep,
273 void *virt_address)
275 dma_addr_t pa = sep->shared_bus + (virt_address - sep->shared_addr);
276 edbg("sep: virt to bus b %08llx v %p\n",
277 (unsigned long long)pa, virt_address);
278 return pa;
282 * sep_shared_bus_to_virt - convert bus/virt addresses
284 * Returns virtual address inside the shared area according
285 * to the bus address.
288 static void *sep_shared_bus_to_virt(struct sep_device *sep,
289 dma_addr_t bus_address)
291 return sep->shared_addr + (bus_address - sep->shared_bus);
296 * sep_try_open - attempt to open a SEP device
297 * @sep: device to attempt to open
299 * Atomically attempt to get ownership of a SEP device.
300 * Returns 1 if the device was opened, 0 on failure.
303 static int sep_try_open(struct sep_device *sep)
305 if (!test_and_set_bit(0, &sep->in_use))
306 return 1;
307 return 0;
311 * sep_open - device open method
312 * @inode: inode of sep device
313 * @filp: file handle to sep device
315 * Open method for the SEP device. Called when userspace opens
316 * the SEP device node. Must also release the memory data pool
317 * allocations.
319 * Returns zero on success otherwise an error code.
322 static int sep_open(struct inode *inode, struct file *filp)
324 if (sep_dev == NULL)
325 return -ENODEV;
327 /* check the blocking mode */
328 if (filp->f_flags & O_NDELAY) {
329 if (sep_try_open(sep_dev) == 0)
330 return -EAGAIN;
331 } else
332 if (wait_event_interruptible(sep_event, sep_try_open(sep_dev)) < 0)
333 return -EINTR;
335 /* Bind to the device, we only have one which makes it easy */
336 filp->private_data = sep_dev;
337 /* release data pool allocations */
338 sep_dev->data_pool_bytes_allocated = 0;
339 return 0;
344 * sep_release - close a SEP device
345 * @inode: inode of SEP device
346 * @filp: file handle being closed
348 * Called on the final close of a SEP device. As the open protects against
349 * multiple simultaenous opens that means this method is called when the
350 * final reference to the open handle is dropped.
353 static int sep_release(struct inode *inode, struct file *filp)
355 struct sep_device *sep = filp->private_data;
356 #if 0 /*!SEP_DRIVER_POLLING_MODE */
357 /* close IMR */
358 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);
359 /* release IRQ line */
360 free_irq(SEP_DIRVER_IRQ_NUM, sep);
362 #endif
363 /* Ensure any blocked open progresses */
364 clear_bit(0, &sep->in_use);
365 wake_up(&sep_event);
366 return 0;
369 /*---------------------------------------------------------------
370 map function - this functions maps the message shared area
371 -----------------------------------------------------------------*/
372 static int sep_mmap(struct file *filp, struct vm_area_struct *vma)
374 dma_addr_t bus_addr;
375 struct sep_device *sep = filp->private_data;
377 dbg("-------->SEP Driver: mmap start\n");
379 /* check that the size of the mapped range is as the size of the message
380 shared area */
381 if ((vma->vm_end - vma->vm_start) > SEP_DRIVER_MMMAP_AREA_SIZE) {
382 edbg("SEP Driver mmap requested size is more than allowed\n");
383 printk(KERN_WARNING "SEP Driver mmap requested size is more \
384 than allowed\n");
385 printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_end);
386 printk(KERN_WARNING "SEP Driver vma->vm_end is %08lx\n", vma->vm_start);
387 return -EAGAIN;
390 edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr);
392 /* get bus address */
393 bus_addr = sep->shared_bus;
395 edbg("SEP Driver: phys_addr is %08llx\n", (unsigned long long)bus_addr);
397 if (remap_pfn_range(vma, vma->vm_start, bus_addr >> PAGE_SHIFT, vma->vm_end - vma->vm_start, vma->vm_page_prot)) {
398 edbg("SEP Driver remap_page_range failed\n");
399 printk(KERN_WARNING "SEP Driver remap_page_range failed\n");
400 return -EAGAIN;
403 dbg("SEP Driver:<-------- mmap end\n");
405 return 0;
409 /*-----------------------------------------------
410 poll function
411 *----------------------------------------------*/
412 static unsigned int sep_poll(struct file *filp, poll_table * wait)
414 unsigned long count;
415 unsigned int mask = 0;
416 unsigned long retval = 0; /* flow id */
417 struct sep_device *sep = filp->private_data;
419 dbg("---------->SEP Driver poll: start\n");
422 #if SEP_DRIVER_POLLING_MODE
424 while (sep->send_ct != (retval & 0x7FFFFFFF)) {
425 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
427 for (count = 0; count < 10 * 4; count += 4)
428 edbg("Poll Debug Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES + count)));
431 sep->reply_ct++;
432 #else
433 /* add the event to the polling wait table */
434 poll_wait(filp, &sep_event, wait);
436 #endif
438 edbg("sep->send_ct is %lu\n", sep->send_ct);
439 edbg("sep->reply_ct is %lu\n", sep->reply_ct);
441 /* check if the data is ready */
442 if (sep->send_ct == sep->reply_ct) {
443 for (count = 0; count < 12 * 4; count += 4)
444 edbg("Sep Mesg Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + count)));
446 for (count = 0; count < 10 * 4; count += 4)
447 edbg("Debug Data Word %lu of the message is %lu\n", count, *((unsigned long *) (sep->shared_addr + 0x1800 + count)));
449 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR2_REG_ADDR);
450 edbg("retval is %lu\n", retval);
451 /* check if the this is sep reply or request */
452 if (retval >> 31) {
453 edbg("SEP Driver: sep request in\n");
454 /* request */
455 mask |= POLLOUT | POLLWRNORM;
456 } else {
457 edbg("SEP Driver: sep reply in\n");
458 mask |= POLLIN | POLLRDNORM;
461 dbg("SEP Driver:<-------- poll exit\n");
462 return mask;
466 * sep_time_address - address in SEP memory of time
467 * @sep: SEP device we want the address from
469 * Return the address of the two dwords in memory used for time
470 * setting.
473 static u32 *sep_time_address(struct sep_device *sep)
475 return sep->shared_addr + SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES;
479 * sep_set_time - set the SEP time
480 * @sep: the SEP we are setting the time for
482 * Calculates time and sets it at the predefined address.
483 * Called with the sep mutex held.
485 static unsigned long sep_set_time(struct sep_device *sep)
487 struct timeval time;
488 u32 *time_addr; /* address of time as seen by the kernel */
491 dbg("sep:sep_set_time start\n");
493 do_gettimeofday(&time);
495 /* set value in the SYSTEM MEMORY offset */
496 time_addr = sep_time_address(sep);
498 time_addr[0] = SEP_TIME_VAL_TOKEN;
499 time_addr[1] = time.tv_sec;
501 edbg("SEP Driver:time.tv_sec is %lu\n", time.tv_sec);
502 edbg("SEP Driver:time_addr is %p\n", time_addr);
503 edbg("SEP Driver:sep->shared_addr is %p\n", sep->shared_addr);
505 return time.tv_sec;
509 * sep_dump_message - dump the message that is pending
510 * @sep: sep device
512 * Dump out the message pending in the shared message area
515 static void sep_dump_message(struct sep_device *sep)
517 int count;
518 for (count = 0; count < 12 * 4; count += 4)
519 edbg("Word %d of the message is %u\n", count, *((u32 *) (sep->shared_addr + count)));
523 * sep_send_command_handler - kick off a command
524 * @sep: sep being signalled
526 * This function raises interrupt to SEP that signals that is has a new
527 * command from the host
530 static void sep_send_command_handler(struct sep_device *sep)
532 dbg("sep:sep_send_command_handler start\n");
534 mutex_lock(&sep_mutex);
535 sep_set_time(sep);
537 /* FIXME: flush cache */
538 flush_cache_all();
540 sep_dump_message(sep);
541 /* update counter */
542 sep->send_ct++;
543 /* send interrupt to SEP */
544 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);
545 dbg("SEP Driver:<-------- sep_send_command_handler end\n");
546 mutex_unlock(&sep_mutex);
547 return;
551 * sep_send_reply_command_handler - kick off a command reply
552 * @sep: sep being signalled
554 * This function raises interrupt to SEP that signals that is has a new
555 * command from the host
558 static void sep_send_reply_command_handler(struct sep_device *sep)
560 dbg("sep:sep_send_reply_command_handler start\n");
562 /* flash cache */
563 flush_cache_all();
565 sep_dump_message(sep);
567 mutex_lock(&sep_mutex);
568 sep->send_ct++; /* update counter */
569 /* send the interrupt to SEP */
570 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, sep->send_ct);
571 /* update both counters */
572 sep->send_ct++;
573 sep->reply_ct++;
574 mutex_unlock(&sep_mutex);
575 dbg("sep: sep_send_reply_command_handler end\n");
579 This function handles the allocate data pool memory request
580 This function returns calculates the bus address of the
581 allocated memory, and the offset of this area from the mapped address.
582 Therefore, the FVOs in user space can calculate the exact virtual
583 address of this allocated memory
585 static int sep_allocate_data_pool_memory_handler(struct sep_device *sep,
586 unsigned long arg)
588 int error;
589 struct sep_driver_alloc_t command_args;
591 dbg("SEP Driver:--------> sep_allocate_data_pool_memory_handler start\n");
593 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_alloc_t));
594 if (error)
595 goto end_function;
597 /* allocate memory */
598 if ((sep->data_pool_bytes_allocated + command_args.num_bytes) > SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
599 error = -ENOMEM;
600 goto end_function;
603 /* set the virtual and bus address */
604 command_args.offset = SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
605 command_args.phys_address = sep->shared_bus + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES + sep->data_pool_bytes_allocated;
607 /* write the memory back to the user space */
608 error = copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_alloc_t));
609 if (error)
610 goto end_function;
612 /* set the allocation */
613 sep->data_pool_bytes_allocated += command_args.num_bytes;
615 end_function:
616 dbg("SEP Driver:<-------- sep_allocate_data_pool_memory_handler end\n");
617 return error;
621 This function handles write into allocated data pool command
623 static int sep_write_into_data_pool_handler(struct sep_device *sep, unsigned long arg)
625 int error;
626 void *virt_address;
627 unsigned long va;
628 unsigned long app_in_address;
629 unsigned long num_bytes;
630 void *data_pool_area_addr;
632 dbg("SEP Driver:--------> sep_write_into_data_pool_handler start\n");
634 /* get the application address */
635 error = get_user(app_in_address, &(((struct sep_driver_write_t *) arg)->app_address));
636 if (error)
637 goto end_function;
639 /* get the virtual kernel address address */
640 error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
641 if (error)
642 goto end_function;
643 virt_address = (void *)va;
645 /* get the number of bytes */
646 error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
647 if (error)
648 goto end_function;
650 /* calculate the start of the data pool */
651 data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
654 /* check that the range of the virtual kernel address is correct */
655 if (virt_address < data_pool_area_addr || virt_address > (data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES)) {
656 error = -EINVAL;
657 goto end_function;
659 /* copy the application data */
660 error = copy_from_user(virt_address, (void *) app_in_address, num_bytes);
661 end_function:
662 dbg("SEP Driver:<-------- sep_write_into_data_pool_handler end\n");
663 return error;
667 this function handles the read from data pool command
669 static int sep_read_from_data_pool_handler(struct sep_device *sep, unsigned long arg)
671 int error;
672 /* virtual address of dest application buffer */
673 unsigned long app_out_address;
674 /* virtual address of the data pool */
675 unsigned long va;
676 void *virt_address;
677 unsigned long num_bytes;
678 void *data_pool_area_addr;
680 dbg("SEP Driver:--------> sep_read_from_data_pool_handler start\n");
682 /* get the application address */
683 error = get_user(app_out_address, &(((struct sep_driver_write_t *) arg)->app_address));
684 if (error)
685 goto end_function;
687 /* get the virtual kernel address address */
688 error = get_user(va, &(((struct sep_driver_write_t *) arg)->datapool_address));
689 if (error)
690 goto end_function;
691 virt_address = (void *)va;
693 /* get the number of bytes */
694 error = get_user(num_bytes, &(((struct sep_driver_write_t *) arg)->num_bytes));
695 if (error)
696 goto end_function;
698 /* calculate the start of the data pool */
699 data_pool_area_addr = sep->shared_addr + SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES;
701 /* FIXME: These are incomplete all over the driver: what about + len
702 and when doing that also overflows */
703 /* check that the range of the virtual kernel address is correct */
704 if (virt_address < data_pool_area_addr || virt_address > data_pool_area_addr + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES) {
705 error = -EINVAL;
706 goto end_function;
709 /* copy the application data */
710 error = copy_to_user((void *) app_out_address, virt_address, num_bytes);
711 end_function:
712 dbg("SEP Driver:<-------- sep_read_from_data_pool_handler end\n");
713 return error;
/*
  This function releases all the application virtual buffer physical pages,
  that were previously locked, marking them dirty first when the buffer
  was written to. Always frees the page array. Returns 0.
*/
static int sep_free_dma_pages(struct page **page_array_ptr, unsigned long num_pages, unsigned long dirtyFlag)
{
	unsigned long count;

	if (dirtyFlag) {
		for (count = 0; count < num_pages; count++) {
			/* the out array was written, therefore the data was changed */
			if (!PageReserved(page_array_ptr[count]))
				SetPageDirty(page_array_ptr[count]);
			page_cache_release(page_array_ptr[count]);
		}
	} else {
		/* free in pages - the data was only read, therefore no update was done
		   on those pages */
		for (count = 0; count < num_pages; count++)
			page_cache_release(page_array_ptr[count]);
	}

	/* free the array; FIX: kfree(NULL) is a no-op, the NULL guard
	   was redundant */
	kfree(page_array_ptr);

	return 0;
}
746 This function locks all the physical pages of the kernel virtual buffer
747 and construct a basic lli array, where each entry holds the physical
748 page address and the size that application data holds in this physical pages
750 static int sep_lock_kernel_pages(struct sep_device *sep,
751 unsigned long kernel_virt_addr,
752 unsigned long data_size,
753 unsigned long *num_pages_ptr,
754 struct sep_lli_entry_t **lli_array_ptr,
755 struct page ***page_array_ptr)
757 int error = 0;
758 /* the the page of the end address of the user space buffer */
759 unsigned long end_page;
760 /* the page of the start address of the user space buffer */
761 unsigned long start_page;
762 /* the range in pages */
763 unsigned long num_pages;
764 struct sep_lli_entry_t *lli_array;
765 /* next kernel address to map */
766 unsigned long next_kernel_address;
767 unsigned long count;
769 dbg("SEP Driver:--------> sep_lock_kernel_pages start\n");
771 /* set start and end pages and num pages */
772 end_page = (kernel_virt_addr + data_size - 1) >> PAGE_SHIFT;
773 start_page = kernel_virt_addr >> PAGE_SHIFT;
774 num_pages = end_page - start_page + 1;
776 edbg("SEP Driver: kernel_virt_addr is %08lx\n", kernel_virt_addr);
777 edbg("SEP Driver: data_size is %lu\n", data_size);
778 edbg("SEP Driver: start_page is %lx\n", start_page);
779 edbg("SEP Driver: end_page is %lx\n", end_page);
780 edbg("SEP Driver: num_pages is %lu\n", num_pages);
782 lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
783 if (!lli_array) {
784 edbg("SEP Driver: kmalloc for lli_array failed\n");
785 error = -ENOMEM;
786 goto end_function;
789 /* set the start address of the first page - app data may start not at
790 the beginning of the page */
791 lli_array[0].physical_address = (unsigned long) virt_to_phys((unsigned long *) kernel_virt_addr);
793 /* check that not all the data is in the first page only */
794 if ((PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK))) >= data_size)
795 lli_array[0].block_size = data_size;
796 else
797 lli_array[0].block_size = PAGE_SIZE - (kernel_virt_addr & (~PAGE_MASK));
799 /* debug print */
800 dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
802 /* advance the address to the start of the next page */
803 next_kernel_address = (kernel_virt_addr & PAGE_MASK) + PAGE_SIZE;
805 /* go from the second page to the prev before last */
806 for (count = 1; count < (num_pages - 1); count++) {
807 lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
808 lli_array[count].block_size = PAGE_SIZE;
810 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
811 next_kernel_address += PAGE_SIZE;
814 /* if more then 1 pages locked - then update for the last page size needed */
815 if (num_pages > 1) {
816 /* update the address of the last page */
817 lli_array[count].physical_address = (unsigned long) virt_to_phys((unsigned long *) next_kernel_address);
819 /* set the size of the last page */
820 lli_array[count].block_size = (kernel_virt_addr + data_size) & (~PAGE_MASK);
822 if (lli_array[count].block_size == 0) {
823 dbg("app_virt_addr is %08lx\n", kernel_virt_addr);
824 dbg("data_size is %lu\n", data_size);
825 while (1);
828 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
830 /* set output params */
831 *lli_array_ptr = lli_array;
832 *num_pages_ptr = num_pages;
833 *page_array_ptr = 0;
834 end_function:
835 dbg("SEP Driver:<-------- sep_lock_kernel_pages end\n");
836 return 0;
840 This function locks all the physical pages of the application virtual buffer
841 and construct a basic lli array, where each entry holds the physical page
842 address and the size that application data holds in this physical pages
844 static int sep_lock_user_pages(struct sep_device *sep,
845 unsigned long app_virt_addr,
846 unsigned long data_size,
847 unsigned long *num_pages_ptr,
848 struct sep_lli_entry_t **lli_array_ptr,
849 struct page ***page_array_ptr)
851 int error = 0;
852 /* the the page of the end address of the user space buffer */
853 unsigned long end_page;
854 /* the page of the start address of the user space buffer */
855 unsigned long start_page;
856 /* the range in pages */
857 unsigned long num_pages;
858 struct page **page_array;
859 struct sep_lli_entry_t *lli_array;
860 unsigned long count;
861 int result;
863 dbg("SEP Driver:--------> sep_lock_user_pages start\n");
865 /* set start and end pages and num pages */
866 end_page = (app_virt_addr + data_size - 1) >> PAGE_SHIFT;
867 start_page = app_virt_addr >> PAGE_SHIFT;
868 num_pages = end_page - start_page + 1;
870 edbg("SEP Driver: app_virt_addr is %08lx\n", app_virt_addr);
871 edbg("SEP Driver: data_size is %lu\n", data_size);
872 edbg("SEP Driver: start_page is %lu\n", start_page);
873 edbg("SEP Driver: end_page is %lu\n", end_page);
874 edbg("SEP Driver: num_pages is %lu\n", num_pages);
876 /* allocate array of pages structure pointers */
877 page_array = kmalloc(sizeof(struct page *) * num_pages, GFP_ATOMIC);
878 if (!page_array) {
879 edbg("SEP Driver: kmalloc for page_array failed\n");
881 error = -ENOMEM;
882 goto end_function;
885 lli_array = kmalloc(sizeof(struct sep_lli_entry_t) * num_pages, GFP_ATOMIC);
886 if (!lli_array) {
887 edbg("SEP Driver: kmalloc for lli_array failed\n");
889 error = -ENOMEM;
890 goto end_function_with_error1;
893 /* convert the application virtual address into a set of physical */
894 down_read(&current->mm->mmap_sem);
895 result = get_user_pages(current, current->mm, app_virt_addr, num_pages, 1, 0, page_array, 0);
896 up_read(&current->mm->mmap_sem);
898 /* check the number of pages locked - if not all then exit with error */
899 if (result != num_pages) {
900 dbg("SEP Driver: not all pages locked by get_user_pages\n");
902 error = -ENOMEM;
903 goto end_function_with_error2;
906 /* flush the cache */
907 for (count = 0; count < num_pages; count++)
908 flush_dcache_page(page_array[count]);
910 /* set the start address of the first page - app data may start not at
911 the beginning of the page */
912 lli_array[0].physical_address = ((unsigned long) page_to_phys(page_array[0])) + (app_virt_addr & (~PAGE_MASK));
914 /* check that not all the data is in the first page only */
915 if ((PAGE_SIZE - (app_virt_addr & (~PAGE_MASK))) >= data_size)
916 lli_array[0].block_size = data_size;
917 else
918 lli_array[0].block_size = PAGE_SIZE - (app_virt_addr & (~PAGE_MASK));
920 /* debug print */
921 dbg("lli_array[0].physical_address is %08lx, lli_array[0].block_size is %lu\n", lli_array[0].physical_address, lli_array[0].block_size);
923 /* go from the second page to the prev before last */
924 for (count = 1; count < (num_pages - 1); count++) {
925 lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
926 lli_array[count].block_size = PAGE_SIZE;
928 edbg("lli_array[%lu].physical_address is %08lx, lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
931 /* if more then 1 pages locked - then update for the last page size needed */
932 if (num_pages > 1) {
933 /* update the address of the last page */
934 lli_array[count].physical_address = (unsigned long) page_to_phys(page_array[count]);
936 /* set the size of the last page */
937 lli_array[count].block_size = (app_virt_addr + data_size) & (~PAGE_MASK);
939 if (lli_array[count].block_size == 0) {
940 dbg("app_virt_addr is %08lx\n", app_virt_addr);
941 dbg("data_size is %lu\n", data_size);
942 while (1);
944 edbg("lli_array[%lu].physical_address is %08lx, \
945 lli_array[%lu].block_size is %lu\n", count, lli_array[count].physical_address, count, lli_array[count].block_size);
948 /* set output params */
949 *lli_array_ptr = lli_array;
950 *num_pages_ptr = num_pages;
951 *page_array_ptr = page_array;
952 goto end_function;
954 end_function_with_error2:
955 /* release the cache */
956 for (count = 0; count < num_pages; count++)
957 page_cache_release(page_array[count]);
958 kfree(lli_array);
959 end_function_with_error1:
960 kfree(page_array);
961 end_function:
962 dbg("SEP Driver:<-------- sep_lock_user_pages end\n");
963 return 0;
968 this function calculates the size of data that can be inserted into the lli
969 table from this array the condition is that either the table is full
970 (all etnries are entered), or there are no more entries in the lli array
972 static unsigned long sep_calculate_lli_table_max_size(struct sep_lli_entry_t *lli_in_array_ptr, unsigned long num_array_entries)
974 unsigned long table_data_size = 0;
975 unsigned long counter;
977 /* calculate the data in the out lli table if till we fill the whole
978 table or till the data has ended */
979 for (counter = 0; (counter < (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP - 1)) && (counter < num_array_entries); counter++)
980 table_data_size += lli_in_array_ptr[counter].block_size;
981 return table_data_size;
/*
 * sep_build_lli_table - build one lli table from the given lli array
 * @lli_array_ptr: source array of (physical address, block size) entries
 * @lli_table_ptr: destination table (in the shared area)
 * @num_processed_entries_ptr: in/out - count of array entries fully consumed
 * @num_table_entries_ptr: out - entries written, including the info entry
 * @table_data_size: exact number of data bytes this table must describe
 *
 * Copies entries until exactly table_data_size bytes are covered.  If the
 * last entry overshoots, it is truncated in the table and the remainder is
 * left in the array (address advanced, size reduced) for the next table.
 * A terminating info entry (0xffffffff / 0) is appended.
 */
static void sep_build_lli_table(struct sep_lli_entry_t *lli_array_ptr, struct sep_lli_entry_t *lli_table_ptr, unsigned long *num_processed_entries_ptr, unsigned long *num_table_entries_ptr, unsigned long table_data_size)
{
	/* data bytes described by the table so far */
	unsigned long curr_table_data_size;
	/* counter of lli array entry */
	unsigned long array_counter;

	dbg("SEP Driver:--------> sep_build_lli_table start\n");

	/* init current table data size and lli array entry counter */
	curr_table_data_size = 0;
	array_counter = 0;
	/* start at 1 to account for the terminating info entry */
	*num_table_entries_ptr = 1;

	edbg("SEP Driver:table_data_size is %lu\n", table_data_size);

	/* fill the table till table size reaches the needed amount */
	while (curr_table_data_size < table_data_size) {
		/* update the number of entries in table */
		(*num_table_entries_ptr)++;

		lli_table_ptr->physical_address = lli_array_ptr[array_counter].physical_address;
		lli_table_ptr->block_size = lli_array_ptr[array_counter].block_size;
		curr_table_data_size += lli_table_ptr->block_size;

		edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
		edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
		edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);

		/* check for overflow of the table data */
		if (curr_table_data_size > table_data_size) {
			edbg("SEP Driver:curr_table_data_size > table_data_size\n");

			/* update the size of block in the table */
			lli_table_ptr->block_size -= (curr_table_data_size - table_data_size);

			/* update the physical address in the lli array */
			lli_array_ptr[array_counter].physical_address += lli_table_ptr->block_size;

			/* update the block size left in the lli array */
			lli_array_ptr[array_counter].block_size = (curr_table_data_size - table_data_size);
		} else
			/* entry fully consumed - advance to the next entry
			   in the lli_array */
			array_counter++;

		edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
		edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);

		/* move to the next entry in table */
		lli_table_ptr++;
	}

	/* set the info entry to default */
	lli_table_ptr->physical_address = 0xffffffff;
	lli_table_ptr->block_size = 0;

	edbg("SEP Driver:lli_table_ptr is %08lx\n", (unsigned long) lli_table_ptr);
	edbg("SEP Driver:lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);
	edbg("SEP Driver:lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);

	/* set the output parameter */
	*num_processed_entries_ptr += array_counter;

	edbg("SEP Driver:*num_processed_entries_ptr is %lu\n", *num_processed_entries_ptr);
	dbg("SEP Driver:<-------- sep_build_lli_table end\n");
	return;
}
/*
 * sep_debug_print_lli_tables - walk the chain of lli tables and dump them
 * @sep: device context (used only for bus-to-virtual translation)
 * @lli_table_ptr: kernel-virtual pointer to the first table
 * @num_table_entries: entry count of the first table (including info entry)
 * @table_data_size: data byte count of the first table
 *
 * Debug-only helper.  Follows each table's info entry (whose address field
 * links to the next table and whose block_size packs count<<24 | size) until
 * the 0xffffffff terminator is reached.
 */
static void sep_debug_print_lli_tables(struct sep_device *sep, struct sep_lli_entry_t *lli_table_ptr, unsigned long num_table_entries, unsigned long table_data_size)
{
	unsigned long table_count;
	unsigned long entries_count;

	dbg("SEP Driver:--------> sep_debug_print_lli_tables start\n");

	table_count = 1;
	while ((unsigned long) lli_table_ptr != 0xffffffff) {
		edbg("SEP Driver: lli table %08lx, table_data_size is %lu\n", table_count, table_data_size);
		edbg("SEP Driver: num_table_entries is %lu\n", num_table_entries);

		/* print entries of the table (without info entry) */
		for (entries_count = 0; entries_count < num_table_entries; entries_count++, lli_table_ptr++) {
			edbg("SEP Driver:lli_table_ptr address is %08lx\n", (unsigned long) lli_table_ptr);
			edbg("SEP Driver:phys address is %08lx block size is %lu\n", lli_table_ptr->physical_address, lli_table_ptr->block_size);
		}

		/* point to the info entry (last entry printed by the loop) */
		lli_table_ptr--;

		edbg("SEP Driver:phys lli_table_ptr->block_size is %lu\n", lli_table_ptr->block_size);
		edbg("SEP Driver:phys lli_table_ptr->physical_address is %08lx\n", lli_table_ptr->physical_address);

		/* unpack next table's size (low 24 bits) and entry count
		   (high 8 bits) from the info entry */
		table_data_size = lli_table_ptr->block_size & 0xffffff;
		num_table_entries = (lli_table_ptr->block_size >> 24) & 0xff;
		lli_table_ptr = (struct sep_lli_entry_t *)
		    (lli_table_ptr->physical_address);

		edbg("SEP Driver:phys table_data_size is %lu num_table_entries is %lu lli_table_ptr is%lu\n", table_data_size, num_table_entries, (unsigned long) lli_table_ptr);

		/* the link is a bus address; translate back to virtual
		   unless it is the chain terminator */
		if ((unsigned long) lli_table_ptr != 0xffffffff)
			lli_table_ptr = (struct sep_lli_entry_t *) sep_shared_bus_to_virt(sep, (unsigned long) lli_table_ptr);

		table_count++;
	}
	dbg("SEP Driver:<-------- sep_debug_print_lli_tables end\n");
}
/*
 * sep_prepare_input_dma_table - build the input-only DMA table chain
 * @sep: device context
 * @app_virt_addr: virtual address of the caller's buffer
 * @data_size: bytes to process
 * @block_size: cipher/hash block size; each table covers a multiple of it
 * @lli_table_ptr: out - bus address of the first table
 * @num_entries_ptr: out - entry count of the first table
 * @table_data_size_ptr: out - data bytes covered by the first table
 * @isKernelVirtualAddress: true if app_virt_addr is a kernel address
 *
 * Used for one-direction synchronous operations (HASH).  Locks the buffer's
 * pages, then builds chained lli tables in the shared area, linking each
 * table's info entry to the next.  Returns 0 on success or the error from
 * the page-locking helper.
 */
static int sep_prepare_input_dma_table(struct sep_device *sep,
				unsigned long app_virt_addr,
				unsigned long data_size,
				unsigned long block_size,
				unsigned long *lli_table_ptr,
				unsigned long *num_entries_ptr,
				unsigned long *table_data_size_ptr,
				bool isKernelVirtualAddress)
{
	/* pointer to the info entry of the table - the last entry */
	struct sep_lli_entry_t *info_entry_ptr;
	/* array of lli entries built from the locked pages */
	struct sep_lli_entry_t *lli_array_ptr;
	/* points to the first entry to be processed in the lli_in_array */
	unsigned long current_entry;
	/* num entries in the virtual buffer */
	unsigned long sep_lli_entries;
	/* lli table pointer */
	struct sep_lli_entry_t *in_lli_table_ptr;
	/* the total data in one table */
	unsigned long table_data_size;
	/* number of entries in lli table */
	unsigned long num_entries_in_table;
	/* next table address */
	void *lli_table_alloc_addr;
	unsigned long result;

	dbg("SEP Driver:--------> sep_prepare_input_dma_table start\n");

	edbg("SEP Driver:data_size is %lu\n", data_size);
	edbg("SEP Driver:block_size is %lu\n", block_size);

	/* initialize the pages pointers */
	sep->in_page_array = 0;
	sep->in_num_pages = 0;

	if (data_size == 0) {
		/* special case - created 2 entries table with zero data */
		in_lli_table_ptr = (struct sep_lli_entry_t *) (sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES);
		/* FIXME: Should the entry below not be for _bus */
		in_lli_table_ptr->physical_address = (unsigned long)sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
		in_lli_table_ptr->block_size = 0;

		in_lli_table_ptr++;
		in_lli_table_ptr->physical_address = 0xFFFFFFFF;
		in_lli_table_ptr->block_size = 0;

		*lli_table_ptr = sep->shared_bus + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;
		*num_entries_ptr = 2;
		*table_data_size_ptr = 0;

		goto end_function;
	}

	/* check if the pages are in Kernel Virtual Address layout */
	if (isKernelVirtualAddress == true)
		/* lock the pages of the kernel buffer and translate them to pages */
		result = sep_lock_kernel_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);
	else
		/* lock the pages of the user buffer and translate them to pages */
		result = sep_lock_user_pages(sep, app_virt_addr, data_size, &sep->in_num_pages, &lli_array_ptr, &sep->in_page_array);

	/* NOTE(review): this early return skips the end-of-function dbg
	   trace; harmless, but inconsistent with the goto style above */
	if (result)
		return result;

	edbg("SEP Driver:output sep->in_num_pages is %lu\n", sep->in_num_pages);

	current_entry = 0;
	info_entry_ptr = 0;
	sep_lli_entries = sep->in_num_pages;

	/* initiate to point after the message area */
	lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;

	/* loop till all the entries in in array are not processed */
	while (current_entry < sep_lli_entries) {
		/* set the new input and output tables */
		in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		/* calculate the maximum size of data for input table */
		table_data_size = sep_calculate_lli_table_max_size(&lli_array_ptr[current_entry], (sep_lli_entries - current_entry));

		/* now calculate the table size so that it will be module block size */
		table_data_size = (table_data_size / block_size) * block_size;

		edbg("SEP Driver:output table_data_size is %lu\n", table_data_size);

		/* construct input lli table */
		sep_build_lli_table(&lli_array_ptr[current_entry], in_lli_table_ptr, &current_entry, &num_entries_in_table, table_data_size);

		if (info_entry_ptr == 0) {
			/* first table - set the output parameters to
			   physical (bus) addresses */
			*lli_table_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
			*num_entries_ptr = num_entries_in_table;
			*table_data_size_ptr = table_data_size;

			edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_ptr);
		} else {
			/* update the info entry of the previous in table
			   to link to this one */
			info_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
			info_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);
		}

		/* save the pointer to the info entry of the current tables */
		info_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
	}

	/* print input tables */
	sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
				   sep_shared_bus_to_virt(sep, *lli_table_ptr), *num_entries_ptr, *table_data_size_ptr);

	/* free the array of the pages */
	kfree(lli_array_ptr);
end_function:
	dbg("SEP Driver:<-------- sep_prepare_input_dma_table end\n");
	return 0;
}
/*
 * sep_construct_dma_tables_from_lli - build paired input/output table chains
 * @sep: device context
 * @lli_in_array: lli entries for the input buffer
 * @sep_in_lli_entries: number of input entries
 * @lli_out_array: lli entries for the output buffer
 * @sep_out_lli_entries: number of output entries
 * @block_size: cipher block size; every table covers a multiple of it
 * @lli_table_in_ptr/@lli_table_out_ptr: out - bus addresses of first tables
 * @in_num_entries_ptr/@out_num_entries_ptr: out - first tables' entry counts
 * @table_data_size_ptr: out - data bytes covered by the first table pair
 *
 * For symmetric operations (AES/DES): builds matched input and output table
 * chains in the shared area, each pair covering the same (block-aligned)
 * amount of data.  Always returns 0.
 */
static int sep_construct_dma_tables_from_lli(struct sep_device *sep,
				      struct sep_lli_entry_t *lli_in_array,
				      unsigned long sep_in_lli_entries,
				      struct sep_lli_entry_t *lli_out_array,
				      unsigned long sep_out_lli_entries,
				      unsigned long block_size, unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr)
{
	/* points to the area where next lli table can be allocated: keep void *
	   as there is pointer scaling to fix otherwise */
	void *lli_table_alloc_addr;
	/* input lli table */
	struct sep_lli_entry_t *in_lli_table_ptr;
	/* output lli table */
	struct sep_lli_entry_t *out_lli_table_ptr;
	/* pointer to the info entry of the table - the last entry */
	struct sep_lli_entry_t *info_in_entry_ptr;
	/* pointer to the info entry of the table - the last entry */
	struct sep_lli_entry_t *info_out_entry_ptr;
	/* points to the first entry to be processed in the lli_in_array */
	unsigned long current_in_entry;
	/* points to the first entry to be processed in the lli_out_array */
	unsigned long current_out_entry;
	/* max size of the input table */
	unsigned long in_table_data_size;
	/* max size of the output table */
	unsigned long out_table_data_size;
	/* flag that signifies if this is the first tables built from the arrays */
	unsigned long first_table_flag;
	/* the data size that should be in table */
	unsigned long table_data_size;
	/* number of entries in the input table */
	unsigned long num_entries_in_table;
	/* number of entries in the output table */
	unsigned long num_entries_out_table;

	dbg("SEP Driver:--------> sep_construct_dma_tables_from_lli start\n");

	/* initiate to point after the message area */
	lli_table_alloc_addr = sep->shared_addr + SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_OFFSET_IN_BYTES;

	current_in_entry = 0;
	current_out_entry = 0;
	/* NOTE(review): first_table_flag is set but never read here */
	first_table_flag = 1;
	info_in_entry_ptr = 0;
	info_out_entry_ptr = 0;

	/* loop till all the entries in in array are not processed */
	while (current_in_entry < sep_in_lli_entries) {
		/* set the new input and output tables */
		in_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		/* set the first output tables */
		out_lli_table_ptr = (struct sep_lli_entry_t *) lli_table_alloc_addr;

		lli_table_alloc_addr += sizeof(struct sep_lli_entry_t) * SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP;

		/* calculate the maximum size of data for input table */
		in_table_data_size = sep_calculate_lli_table_max_size(&lli_in_array[current_in_entry], (sep_in_lli_entries - current_in_entry));

		/* calculate the maximum size of data for output table */
		out_table_data_size = sep_calculate_lli_table_max_size(&lli_out_array[current_out_entry], (sep_out_lli_entries - current_out_entry));

		edbg("SEP Driver:in_table_data_size is %lu\n", in_table_data_size);
		edbg("SEP Driver:out_table_data_size is %lu\n", out_table_data_size);

		/* check where the data is smallest - both tables must cover
		   the same amount of data */
		table_data_size = in_table_data_size;
		if (table_data_size > out_table_data_size)
			table_data_size = out_table_data_size;

		/* now calculate the table size so that it will be module block size */
		table_data_size = (table_data_size / block_size) * block_size;

		dbg("SEP Driver:table_data_size is %lu\n", table_data_size);

		/* construct input lli table */
		sep_build_lli_table(&lli_in_array[current_in_entry], in_lli_table_ptr, &current_in_entry, &num_entries_in_table, table_data_size);

		/* construct output lli table */
		sep_build_lli_table(&lli_out_array[current_out_entry], out_lli_table_ptr, &current_out_entry, &num_entries_out_table, table_data_size);

		/* if info entry is null - this is the first table built */
		if (info_in_entry_ptr == 0) {
			/* set the output parameters to physical addresses */
			*lli_table_in_ptr = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
			*in_num_entries_ptr = num_entries_in_table;
			*lli_table_out_ptr = sep_shared_virt_to_bus(sep, out_lli_table_ptr);
			*out_num_entries_ptr = num_entries_out_table;
			*table_data_size_ptr = table_data_size;

			edbg("SEP Driver:output lli_table_in_ptr is %08lx\n", *lli_table_in_ptr);
			edbg("SEP Driver:output lli_table_out_ptr is %08lx\n", *lli_table_out_ptr);
		} else {
			/* update the info entry of the previous in table */
			info_in_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, in_lli_table_ptr);
			info_in_entry_ptr->block_size = ((num_entries_in_table) << 24) | (table_data_size);

			/* update the info entry of the previous out table */
			info_out_entry_ptr->physical_address = sep_shared_virt_to_bus(sep, out_lli_table_ptr);
			info_out_entry_ptr->block_size = ((num_entries_out_table) << 24) | (table_data_size);
		}

		/* save the pointer to the info entry of the current tables */
		info_in_entry_ptr = in_lli_table_ptr + num_entries_in_table - 1;
		info_out_entry_ptr = out_lli_table_ptr + num_entries_out_table - 1;

		edbg("SEP Driver:output num_entries_out_table is %lu\n", (unsigned long) num_entries_out_table);
		edbg("SEP Driver:output info_in_entry_ptr is %lu\n", (unsigned long) info_in_entry_ptr);
		edbg("SEP Driver:output info_out_entry_ptr is %lu\n", (unsigned long) info_out_entry_ptr);
	}

	/* print input tables */
	sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
				   sep_shared_bus_to_virt(sep, *lli_table_in_ptr), *in_num_entries_ptr, *table_data_size_ptr);
	/* print output tables */
	sep_debug_print_lli_tables(sep, (struct sep_lli_entry_t *)
				   sep_shared_bus_to_virt(sep, *lli_table_out_ptr), *out_num_entries_ptr, *table_data_size_ptr);
	dbg("SEP Driver:<-------- sep_construct_dma_tables_from_lli end\n");
	return 0;
}
/*
 * sep_prepare_input_output_dma_table - build input and output DMA tables
 * @sep: device context
 * @app_virt_in_addr: virtual address of the input buffer
 * @app_virt_out_addr: virtual address of the output buffer
 * @data_size: bytes to process
 * @block_size: cipher block size (AES/DES)
 * @lli_table_in_ptr/@lli_table_out_ptr: out - bus addresses of first tables
 * @in_num_entries_ptr/@out_num_entries_ptr: out - first tables' entry counts
 * @table_data_size_ptr: out - data bytes of the first table pair
 * @isKernelVirtualAddress: true if the buffers are kernel addresses
 *
 * Locks both buffers' pages, then delegates table construction to
 * sep_construct_dma_tables_from_lli().  On success it deliberately falls
 * through the error labels so both temporary lli arrays are freed.
 */
static int sep_prepare_input_output_dma_table(struct sep_device *sep,
				       unsigned long app_virt_in_addr,
				       unsigned long app_virt_out_addr,
				       unsigned long data_size,
				       unsigned long block_size,
				       unsigned long *lli_table_in_ptr, unsigned long *lli_table_out_ptr, unsigned long *in_num_entries_ptr, unsigned long *out_num_entries_ptr, unsigned long *table_data_size_ptr, bool isKernelVirtualAddress)
{
	/* array of lli entries for the input pages */
	struct sep_lli_entry_t *lli_in_array;
	/* array of lli entries for the output pages */
	struct sep_lli_entry_t *lli_out_array;
	int result = 0;

	dbg("SEP Driver:--------> sep_prepare_input_output_dma_table start\n");

	/* initialize the pages pointers */
	sep->in_page_array = 0;
	sep->out_page_array = 0;

	/* check if the pages are in Kernel Virtual Address layout */
	if (isKernelVirtualAddress == true) {
		/* lock the pages of the kernel buffer and translate them to pages */
		result = sep_lock_kernel_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
		if (result) {
			edbg("SEP Driver: sep_lock_kernel_pages for input virtual buffer failed\n");
			goto end_function;
		}
	} else {
		/* lock the pages of the user buffer and translate them to pages */
		result = sep_lock_user_pages(sep, app_virt_in_addr, data_size, &sep->in_num_pages, &lli_in_array, &sep->in_page_array);
		if (result) {
			edbg("SEP Driver: sep_lock_user_pages for input virtual buffer failed\n");
			goto end_function;
		}
	}

	/* same for the output buffer */
	if (isKernelVirtualAddress == true) {
		result = sep_lock_kernel_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
		if (result) {
			edbg("SEP Driver: sep_lock_kernel_pages for output virtual buffer failed\n");
			goto end_function_with_error1;
		}
	} else {
		result = sep_lock_user_pages(sep, app_virt_out_addr, data_size, &sep->out_num_pages, &lli_out_array, &sep->out_page_array);
		if (result) {
			edbg("SEP Driver: sep_lock_user_pages for output virtual buffer failed\n");
			goto end_function_with_error1;
		}
	}

	edbg("sep->in_num_pages is %lu\n", sep->in_num_pages);
	edbg("sep->out_num_pages is %lu\n", sep->out_num_pages);
	edbg("SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n", SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP);

	/* call the function that creates tables from the lli arrays */
	result = sep_construct_dma_tables_from_lli(sep, lli_in_array, sep->in_num_pages, lli_out_array, sep->out_num_pages, block_size, lli_table_in_ptr, lli_table_out_ptr, in_num_entries_ptr, out_num_entries_ptr, table_data_size_ptr);
	if (result) {
		edbg("SEP Driver: sep_construct_dma_tables_from_lli failed\n");
		goto end_function_with_error2;
	}

	/* fall through - free the lli entry arrays */
	dbg("in_num_entries_ptr is %08lx\n", *in_num_entries_ptr);
	dbg("out_num_entries_ptr is %08lx\n", *out_num_entries_ptr);
	dbg("table_data_size_ptr is %08lx\n", *table_data_size_ptr);
end_function_with_error2:
	kfree(lli_out_array);
end_function_with_error1:
	kfree(lli_in_array);
end_function:
	dbg("SEP Driver:<-------- sep_prepare_input_output_dma_table end result = %d\n", (int) result);
	return result;
}
1434 this function handles tha request for creation of the DMA table
1435 for the synchronic symmetric operations (AES,DES)
1437 static int sep_create_sync_dma_tables_handler(struct sep_device *sep,
1438 unsigned long arg)
1440 int error;
1441 /* command arguments */
1442 struct sep_driver_build_sync_table_t command_args;
1444 dbg("SEP Driver:--------> sep_create_sync_dma_tables_handler start\n");
1446 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_sync_table_t));
1447 if (error)
1448 goto end_function;
1450 edbg("app_in_address is %08lx\n", command_args.app_in_address);
1451 edbg("app_out_address is %08lx\n", command_args.app_out_address);
1452 edbg("data_size is %lu\n", command_args.data_in_size);
1453 edbg("block_size is %lu\n", command_args.block_size);
1455 /* check if we need to build only input table or input/output */
1456 if (command_args.app_out_address)
1457 /* prepare input and output tables */
1458 error = sep_prepare_input_output_dma_table(sep,
1459 command_args.app_in_address,
1460 command_args.app_out_address,
1461 command_args.data_in_size,
1462 command_args.block_size,
1463 &command_args.in_table_address,
1464 &command_args.out_table_address, &command_args.in_table_num_entries, &command_args.out_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
1465 else
1466 /* prepare input tables */
1467 error = sep_prepare_input_dma_table(sep,
1468 command_args.app_in_address,
1469 command_args.data_in_size, command_args.block_size, &command_args.in_table_address, &command_args.in_table_num_entries, &command_args.table_data_size, command_args.isKernelVirtualAddress);
1471 if (error)
1472 goto end_function;
1473 /* copy to user */
1474 if (copy_to_user((void *) arg, (void *) &command_args, sizeof(struct sep_driver_build_sync_table_t)))
1475 error = -EFAULT;
1476 end_function:
1477 dbg("SEP Driver:<-------- sep_create_sync_dma_tables_handler end\n");
1478 return error;
1482 this function handles the request for freeing dma table for synhronic actions
1484 static int sep_free_dma_table_data_handler(struct sep_device *sep)
1486 dbg("SEP Driver:--------> sep_free_dma_table_data_handler start\n");
1488 /* free input pages array */
1489 sep_free_dma_pages(sep->in_page_array, sep->in_num_pages, 0);
1491 /* free output pages array if needed */
1492 if (sep->out_page_array)
1493 sep_free_dma_pages(sep->out_page_array, sep->out_num_pages, 1);
1495 /* reset all the values */
1496 sep->in_page_array = 0;
1497 sep->out_page_array = 0;
1498 sep->in_num_pages = 0;
1499 sep->out_num_pages = 0;
1500 dbg("SEP Driver:<-------- sep_free_dma_table_data_handler end\n");
1501 return 0;
1505 this function find a space for the new flow dma table
1507 static int sep_find_free_flow_dma_table_space(struct sep_device *sep,
1508 unsigned long **table_address_ptr)
1510 int error = 0;
1511 /* pointer to the id field of the flow dma table */
1512 unsigned long *start_table_ptr;
1513 /* Do not make start_addr unsigned long * unless fixing the offset
1514 computations ! */
1515 void *flow_dma_area_start_addr;
1516 unsigned long *flow_dma_area_end_addr;
1517 /* maximum table size in words */
1518 unsigned long table_size_in_words;
1520 /* find the start address of the flow DMA table area */
1521 flow_dma_area_start_addr = sep->shared_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_OFFSET_IN_BYTES;
1523 /* set end address of the flow table area */
1524 flow_dma_area_end_addr = flow_dma_area_start_addr + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES;
1526 /* set table size in words */
1527 table_size_in_words = SEP_DRIVER_MAX_FLOW_NUM_ENTRIES_IN_TABLE * (sizeof(struct sep_lli_entry_t) / sizeof(long)) + 2;
1529 /* set the pointer to the start address of DMA area */
1530 start_table_ptr = flow_dma_area_start_addr;
1532 /* find the space for the next table */
1533 while (((*start_table_ptr & 0x7FFFFFFF) != 0) && start_table_ptr < flow_dma_area_end_addr)
1534 start_table_ptr += table_size_in_words;
1536 /* check if we reached the end of floa tables area */
1537 if (start_table_ptr >= flow_dma_area_end_addr)
1538 error = -1;
1539 else
1540 *table_address_ptr = start_table_ptr;
1542 return error;
/*
 * sep_prepare_one_flow_dma_table - build a single flow DMA table
 * @sep: device context
 * @virt_buff_addr: virtual address of one buffer
 * @virt_buff_size: its size in bytes
 * @table_data: out - packed descriptor (address + count<<bits | total size)
 * @info_entry_ptr: out - pointer to this table's terminating info entry
 * @flow_data_ptr: flow context (NOTE(review): currently unused here)
 * @isKernelVirtualAddress: true if virt_buff_addr is a kernel address
 *
 * Table slot layout: [num pages][page array ptr][lli entries...][info entry].
 * Writing the page count into the first word marks the slot as taken (see
 * sep_find_free_flow_dma_table_space).
 */
static int sep_prepare_one_flow_dma_table(struct sep_device *sep,
					unsigned long virt_buff_addr,
					unsigned long virt_buff_size,
					struct sep_lli_entry_t *table_data,
					struct sep_lli_entry_t **info_entry_ptr,
					struct sep_flow_context_t *flow_data_ptr,
					bool isKernelVirtualAddress)
{
	int error;
	/* the range in pages */
	unsigned long lli_array_size;
	struct sep_lli_entry_t *lli_array;
	struct sep_lli_entry_t *flow_dma_table_entry_ptr;
	unsigned long *start_dma_table_ptr;
	/* total table data counter */
	unsigned long dma_table_data_count;
	/* pointer that will keep the pointer to the pages of the virtual buffer */
	struct page **page_array_ptr;
	unsigned long entry_count;

	/* find the space for the new table */
	error = sep_find_free_flow_dma_table_space(sep, &start_dma_table_ptr);
	if (error)
		goto end_function;

	/* check if the pages are in Kernel Virtual Address layout */
	if (isKernelVirtualAddress == true)
		/* lock kernel buffer in the memory */
		error = sep_lock_kernel_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);
	else
		/* lock user buffer in the memory */
		error = sep_lock_user_pages(sep, virt_buff_addr, virt_buff_size, &lli_array_size, &lli_array, &page_array_ptr);

	if (error)
		goto end_function;

	/* set the pointer to page array at the beginning of table - this table is
	   now considered taken */
	*start_dma_table_ptr = lli_array_size;

	/* point to the place of the pages pointers of the table */
	start_dma_table_ptr++;

	/* set the pages pointer */
	*start_dma_table_ptr = (unsigned long) page_array_ptr;

	/* set the pointer to the first entry */
	flow_dma_table_entry_ptr = (struct sep_lli_entry_t *) (++start_dma_table_ptr);

	/* now create the entries for table */
	for (dma_table_data_count = entry_count = 0; entry_count < lli_array_size; entry_count++) {
		flow_dma_table_entry_ptr->physical_address = lli_array[entry_count].physical_address;

		flow_dma_table_entry_ptr->block_size = lli_array[entry_count].block_size;

		/* set the total data of a table */
		dma_table_data_count += lli_array[entry_count].block_size;

		flow_dma_table_entry_ptr++;
	}

	/* set the physical address */
	/* NOTE(review): other paths translate shared-area pointers with
	   sep_shared_virt_to_bus(); virt_to_phys() here looks inconsistent -
	   confirm against how the SEP consumes this address */
	table_data->physical_address = virt_to_phys(start_dma_table_ptr);

	/* set the num_entries and total data size */
	table_data->block_size = ((lli_array_size + 1) << SEP_NUM_ENTRIES_OFFSET_IN_BITS) | (dma_table_data_count);

	/* set the info entry */
	flow_dma_table_entry_ptr->physical_address = 0xffffffff;
	flow_dma_table_entry_ptr->block_size = 0;

	/* set the pointer to info entry */
	*info_entry_ptr = flow_dma_table_entry_ptr;

	/* free the array of the lli entries */
	kfree(lli_array);
end_function:
	return error;
}
/*
 * sep_prepare_flow_dma_tables - build a linked list of flow DMA tables
 * @sep: device context
 * @num_virtual_buffers: number of (address, size) pairs to consume
 * @first_buff_addr: user address of the first (address, size) pair
 * @flow_data_ptr: flow context, passed through to the per-table builder
 * @first_table_data_ptr: out - descriptor of the first table
 * @last_table_data_ptr: out - descriptor of the last table
 * @isKernelVirtualAddress: true if the buffers are kernel addresses
 *
 * Builds one table per buffer and chains each previous table's info entry
 * to the next table (with the interrupt flag set).
 */
static int sep_prepare_flow_dma_tables(struct sep_device *sep,
					unsigned long num_virtual_buffers,
					unsigned long first_buff_addr, struct sep_flow_context_t *flow_data_ptr, struct sep_lli_entry_t *first_table_data_ptr, struct sep_lli_entry_t *last_table_data_ptr, bool isKernelVirtualAddress)
{
	int error;
	unsigned long virt_buff_addr;
	unsigned long virt_buff_size;
	struct sep_lli_entry_t table_data;
	struct sep_lli_entry_t *info_entry_ptr;
	struct sep_lli_entry_t *prev_info_entry_ptr;
	unsigned long i;

	/* init vars */
	error = 0;
	prev_info_entry_ptr = 0;

	/* init the first table to default */
	table_data.physical_address = 0xffffffff;
	first_table_data_ptr->physical_address = 0xffffffff;
	table_data.block_size = 0;

	for (i = 0; i < num_virtual_buffers; i++) {
		/* get the virtual buffer address */
		/* NOTE(review): get_user() is given &first_buff_addr - the
		   address of the kernel local - rather than the user pointer
		   it holds; likewise the ++ below advances by 1, not by
		   sizeof(long).  This looks broken; confirm intended
		   semantics before relying on multi-buffer flows */
		error = get_user(virt_buff_addr, &first_buff_addr);
		if (error)
			goto end_function;

		/* get the virtual buffer size */
		first_buff_addr++;
		error = get_user(virt_buff_size, &first_buff_addr);
		if (error)
			goto end_function;

		/* advance the address to point to the next pair of address|size */
		first_buff_addr++;

		/* now prepare the one flow LLI table from the data */
		error = sep_prepare_one_flow_dma_table(sep, virt_buff_addr, virt_buff_size, &table_data, &info_entry_ptr, flow_data_ptr, isKernelVirtualAddress);
		if (error)
			goto end_function;

		if (i == 0) {
			/* if this is the first table - save it to return to the user
			   application */
			*first_table_data_ptr = table_data;

			/* set the pointer to info entry */
			prev_info_entry_ptr = info_entry_ptr;
		} else {
			/* not first table - the previous table info entry should
			   be updated to link to this one */
			prev_info_entry_ptr->block_size = (0x1 << SEP_INT_FLAG_OFFSET_IN_BITS) | (table_data.block_size);

			/* set the pointer to info entry */
			prev_info_entry_ptr = info_entry_ptr;
		}
	}

	/* set the last table data */
	*last_table_data_ptr = table_data;
end_function:
	return error;
}
/*
 * sep_deallocated_flow_tables - free all flow tables chained to the given one
 * @first_table_ptr: descriptor of the first table (address + packed counts)
 *
 * Walks the table chain via each table's info entry, releasing the locked
 * pages recorded in the two header words before each table.
 *
 * NOTE(review): first_table_ptr->physical_address is dereferenced directly
 * as a kernel pointer here; if a caller passes the 0xffffffff default (as
 * sep_create_flow_dma_tables_handler can on an early error) the very first
 * *table_ptr read is wild.  Confirm the addressing scheme and guard.
 */
static void sep_deallocated_flow_tables(struct sep_lli_entry_t *first_table_ptr)
{
	/* id pointer */
	unsigned long *table_ptr;
	/* number of entries in the current table (including info entry) */
	unsigned long num_entries;
	unsigned long num_pages;
	struct page **pages_ptr;
	/* info entry of the current table */
	struct sep_lli_entry_t *info_entry_ptr;

	/* set the pointer to the first table */
	table_ptr = (unsigned long *) first_table_ptr->physical_address;

	/* set the num of entries */
	num_entries = (first_table_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS)
	    & SEP_NUM_ENTRIES_MASK;

	/* go over all the connected tables */
	while (*table_ptr != 0xffffffff) {
		/* get number of pages (first header word before entries) */
		num_pages = *(table_ptr - 2);

		/* get the pointer to the pages (second header word) */
		pages_ptr = (struct page **) (*(table_ptr - 1));

		/* free the pages */
		sep_free_dma_pages(pages_ptr, num_pages, 1);

		/* goto to the info entry */
		info_entry_ptr = ((struct sep_lli_entry_t *) table_ptr) + (num_entries - 1);

		/* follow the link to the next table */
		table_ptr = (unsigned long *) info_entry_ptr->physical_address;
		num_entries = (info_entry_ptr->block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
	}

	return;
}
1743 * sep_find_flow_context - find a flow
1744 * @sep: the SEP we are working with
1745 * @flow_id: flow identifier
1747 * Returns a pointer the matching flow, or NULL if the flow does not
1748 * exist.
1751 static struct sep_flow_context_t *sep_find_flow_context(struct sep_device *sep,
1752 unsigned long flow_id)
1754 int count;
1756 * always search for flow with id default first - in case we
1757 * already started working on the flow there can be no situation
1758 * when 2 flows are with default flag
1760 for (count = 0; count < SEP_DRIVER_NUM_FLOWS; count++) {
1761 if (sep->flows[count].flow_id == flow_id)
1762 return &sep->flows[count];
1764 return NULL;
1769 this function handles the request to create the DMA tables for flow
1771 static int sep_create_flow_dma_tables_handler(struct sep_device *sep,
1772 unsigned long arg)
1774 int error;
1775 struct sep_driver_build_flow_table_t command_args;
1776 /* first table - output */
1777 struct sep_lli_entry_t first_table_data;
1778 /* dma table data */
1779 struct sep_lli_entry_t last_table_data;
1780 /* pointer to the info entry of the previuos DMA table */
1781 struct sep_lli_entry_t *prev_info_entry_ptr;
1782 /* pointer to the flow data strucutre */
1783 struct sep_flow_context_t *flow_context_ptr;
1785 dbg("SEP Driver:--------> sep_create_flow_dma_tables_handler start\n");
1787 /* init variables */
1788 prev_info_entry_ptr = 0;
1789 first_table_data.physical_address = 0xffffffff;
1791 /* find the free structure for flow data */
1792 error = -EINVAL;
1793 flow_context_ptr = sep_find_flow_context(sep, SEP_FREE_FLOW_ID);
1794 if (flow_context_ptr == NULL)
1795 goto end_function;
1797 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_build_flow_table_t));
1798 if (error)
1799 goto end_function;
1801 /* create flow tables */
1802 error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
1803 if (error)
1804 goto end_function_with_error;
1806 /* check if flow is static */
1807 if (!command_args.flow_type)
1808 /* point the info entry of the last to the info entry of the first */
1809 last_table_data = first_table_data;
1811 /* set output params */
1812 command_args.first_table_addr = first_table_data.physical_address;
1813 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
1814 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
1816 /* send the parameters to user application */
1817 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_build_flow_table_t));
1818 if (error)
1819 goto end_function_with_error;
1821 /* all the flow created - update the flow entry with temp id */
1822 flow_context_ptr->flow_id = SEP_TEMP_FLOW_ID;
1824 /* set the processing tables data in the context */
1825 if (command_args.input_output_flag == SEP_DRIVER_IN_FLAG)
1826 flow_context_ptr->input_tables_in_process = first_table_data;
1827 else
1828 flow_context_ptr->output_tables_in_process = first_table_data;
1830 goto end_function;
1832 end_function_with_error:
1833 /* free the allocated tables */
1834 sep_deallocated_flow_tables(&first_table_data);
1835 end_function:
1836 dbg("SEP Driver:<-------- sep_create_flow_dma_tables_handler end\n");
1837 return error;
1841 this function handles add tables to flow
1843 static int sep_add_flow_tables_handler(struct sep_device *sep, unsigned long arg)
1845 int error;
1846 unsigned long num_entries;
1847 struct sep_driver_add_flow_table_t command_args;
1848 struct sep_flow_context_t *flow_context_ptr;
1849 /* first dma table data */
1850 struct sep_lli_entry_t first_table_data;
1851 /* last dma table data */
1852 struct sep_lli_entry_t last_table_data;
1853 /* pointer to the info entry of the current DMA table */
1854 struct sep_lli_entry_t *info_entry_ptr;
1856 dbg("SEP Driver:--------> sep_add_flow_tables_handler start\n");
1858 /* get input parameters */
1859 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_flow_table_t));
1860 if (error)
1861 goto end_function;
1863 /* find the flow structure for the flow id */
1864 flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
1865 if (flow_context_ptr == NULL)
1866 goto end_function;
1868 /* prepare the flow dma tables */
1869 error = sep_prepare_flow_dma_tables(sep, command_args.num_virtual_buffers, command_args.virt_buff_data_addr, flow_context_ptr, &first_table_data, &last_table_data, command_args.isKernelVirtualAddress);
1870 if (error)
1871 goto end_function_with_error;
1873 /* now check if there is already an existing add table for this flow */
1874 if (command_args.inputOutputFlag == SEP_DRIVER_IN_FLAG) {
1875 /* this buffer was for input buffers */
1876 if (flow_context_ptr->input_tables_flag) {
1877 /* add table already exists - add the new tables to the end
1878 of the previous */
1879 num_entries = (flow_context_ptr->last_input_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1881 info_entry_ptr = (struct sep_lli_entry_t *)
1882 (flow_context_ptr->last_input_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
1884 /* connect to list of tables */
1885 *info_entry_ptr = first_table_data;
1887 /* set the first table data */
1888 first_table_data = flow_context_ptr->first_input_table;
1889 } else {
1890 /* set the input flag */
1891 flow_context_ptr->input_tables_flag = 1;
1893 /* set the first table data */
1894 flow_context_ptr->first_input_table = first_table_data;
1896 /* set the last table data */
1897 flow_context_ptr->last_input_table = last_table_data;
1898 } else { /* this is output tables */
1900 /* this buffer was for input buffers */
1901 if (flow_context_ptr->output_tables_flag) {
1902 /* add table already exists - add the new tables to
1903 the end of the previous */
1904 num_entries = (flow_context_ptr->last_output_table.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK;
1906 info_entry_ptr = (struct sep_lli_entry_t *)
1907 (flow_context_ptr->last_output_table.physical_address + (sizeof(struct sep_lli_entry_t) * (num_entries - 1)));
1909 /* connect to list of tables */
1910 *info_entry_ptr = first_table_data;
1912 /* set the first table data */
1913 first_table_data = flow_context_ptr->first_output_table;
1914 } else {
1915 /* set the input flag */
1916 flow_context_ptr->output_tables_flag = 1;
1918 /* set the first table data */
1919 flow_context_ptr->first_output_table = first_table_data;
1921 /* set the last table data */
1922 flow_context_ptr->last_output_table = last_table_data;
1925 /* set output params */
1926 command_args.first_table_addr = first_table_data.physical_address;
1927 command_args.first_table_num_entries = ((first_table_data.block_size >> SEP_NUM_ENTRIES_OFFSET_IN_BITS) & SEP_NUM_ENTRIES_MASK);
1928 command_args.first_table_data_size = (first_table_data.block_size & SEP_TABLE_DATA_SIZE_MASK);
1930 /* send the parameters to user application */
1931 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_add_flow_table_t));
1932 end_function_with_error:
1933 /* free the allocated tables */
1934 sep_deallocated_flow_tables(&first_table_data);
1935 end_function:
1936 dbg("SEP Driver:<-------- sep_add_flow_tables_handler end\n");
1937 return error;
1941 this function add the flow add message to the specific flow
1943 static int sep_add_flow_tables_message_handler(struct sep_device *sep, unsigned long arg)
1945 int error;
1946 struct sep_driver_add_message_t command_args;
1947 struct sep_flow_context_t *flow_context_ptr;
1949 dbg("SEP Driver:--------> sep_add_flow_tables_message_handler start\n");
1951 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_add_message_t));
1952 if (error)
1953 goto end_function;
1955 /* check input */
1956 if (command_args.message_size_in_bytes > SEP_MAX_ADD_MESSAGE_LENGTH_IN_BYTES) {
1957 error = -ENOMEM;
1958 goto end_function;
1961 /* find the flow context */
1962 flow_context_ptr = sep_find_flow_context(sep, command_args.flow_id);
1963 if (flow_context_ptr == NULL)
1964 goto end_function;
1966 /* copy the message into context */
1967 flow_context_ptr->message_size_in_bytes = command_args.message_size_in_bytes;
1968 error = copy_from_user(flow_context_ptr->message, (void *) command_args.message_address, command_args.message_size_in_bytes);
1969 end_function:
1970 dbg("SEP Driver:<-------- sep_add_flow_tables_message_handler end\n");
1971 return error;
1976 this function returns the bus and virtual addresses of the static pool
1978 static int sep_get_static_pool_addr_handler(struct sep_device *sep, unsigned long arg)
1980 int error;
1981 struct sep_driver_static_pool_addr_t command_args;
1983 dbg("SEP Driver:--------> sep_get_static_pool_addr_handler start\n");
1985 /*prepare the output parameters in the struct */
1986 command_args.physical_static_address = sep->shared_bus + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
1987 command_args.virtual_static_address = (unsigned long)sep->shared_addr + SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES;
1989 edbg("SEP Driver:bus_static_address is %08lx, virtual_static_address %08lx\n", command_args.physical_static_address, command_args.virtual_static_address);
1991 /* send the parameters to user application */
1992 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_static_pool_addr_t));
1993 dbg("SEP Driver:<-------- sep_get_static_pool_addr_handler end\n");
1994 return error;
1998 this address gets the offset of the physical address from the start
1999 of the mapped area
2001 static int sep_get_physical_mapped_offset_handler(struct sep_device *sep, unsigned long arg)
2003 int error;
2004 struct sep_driver_get_mapped_offset_t command_args;
2006 dbg("SEP Driver:--------> sep_get_physical_mapped_offset_handler start\n");
2008 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_get_mapped_offset_t));
2009 if (error)
2010 goto end_function;
2012 if (command_args.physical_address < sep->shared_bus) {
2013 error = -EINVAL;
2014 goto end_function;
2017 /*prepare the output parameters in the struct */
2018 command_args.offset = command_args.physical_address - sep->shared_bus;
2020 edbg("SEP Driver:bus_address is %08lx, offset is %lu\n", command_args.physical_address, command_args.offset);
2022 /* send the parameters to user application */
2023 error = copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_get_mapped_offset_t));
2024 end_function:
2025 dbg("SEP Driver:<-------- sep_get_physical_mapped_offset_handler end\n");
2026 return error;
2033 static int sep_start_handler(struct sep_device *sep)
2035 unsigned long reg_val;
2036 unsigned long error = 0;
2038 dbg("SEP Driver:--------> sep_start_handler start\n");
2040 /* wait in polling for message from SEP */
2042 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2043 while (!reg_val);
2045 /* check the value */
2046 if (reg_val == 0x1)
2047 /* fatal error - read error status from GPRO */
2048 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2049 dbg("SEP Driver:<-------- sep_start_handler end\n");
2050 return error;
2054 this function handles the request for SEP initialization
2056 static int sep_init_handler(struct sep_device *sep, unsigned long arg)
2058 unsigned long message_word;
2059 unsigned long *message_ptr;
2060 struct sep_driver_init_t command_args;
2061 unsigned long counter;
2062 unsigned long error;
2063 unsigned long reg_val;
2065 dbg("SEP Driver:--------> sep_init_handler start\n");
2066 error = 0;
2068 error = copy_from_user(&command_args, (void *) arg, sizeof(struct sep_driver_init_t));
2070 dbg("SEP Driver:--------> sep_init_handler - finished copy_from_user \n");
2072 if (error)
2073 goto end_function;
2075 /* PATCH - configure the DMA to single -burst instead of multi-burst */
2076 /*sep_configure_dma_burst(); */
2078 dbg("SEP Driver:--------> sep_init_handler - finished sep_configure_dma_burst \n");
2080 message_ptr = (unsigned long *) command_args.message_addr;
2082 /* set the base address of the SRAM */
2083 sep_write_reg(sep, HW_SRAM_ADDR_REG_ADDR, HW_CC_SRAM_BASE_ADDRESS);
2085 for (counter = 0; counter < command_args.message_size_in_words; counter++, message_ptr++) {
2086 get_user(message_word, message_ptr);
2087 /* write data to SRAM */
2088 sep_write_reg(sep, HW_SRAM_DATA_REG_ADDR, message_word);
2089 edbg("SEP Driver:message_word is %lu\n", message_word);
2090 /* wait for write complete */
2091 sep_wait_sram_write(sep);
2093 dbg("SEP Driver:--------> sep_init_handler - finished getting messages from user space\n");
2094 /* signal SEP */
2095 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x1);
2098 reg_val = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR3_REG_ADDR);
2099 while (!(reg_val & 0xFFFFFFFD));
2101 dbg("SEP Driver:--------> sep_init_handler - finished waiting for reg_val & 0xFFFFFFFD \n");
2103 /* check the value */
2104 if (reg_val == 0x1) {
2105 edbg("SEP Driver:init failed\n");
2107 error = sep_read_reg(sep, 0x8060);
2108 edbg("SEP Driver:sw monitor is %lu\n", error);
2110 /* fatal error - read erro status from GPRO */
2111 error = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR0_REG_ADDR);
2112 edbg("SEP Driver:error is %lu\n", error);
2114 end_function:
2115 dbg("SEP Driver:<-------- sep_init_handler end\n");
2116 return error;
2121 this function handles the request cache and resident reallocation
2123 static int sep_realloc_cache_resident_handler(struct sep_device *sep,
2124 unsigned long arg)
2126 struct sep_driver_realloc_cache_resident_t command_args;
2127 int error;
2129 /* copy cache and resident to the their intended locations */
2130 error = sep_load_firmware(sep);
2131 if (error)
2132 return error;
2134 command_args.new_base_addr = sep->shared_bus;
2136 /* find the new base address according to the lowest address between
2137 cache, resident and shared area */
2138 if (sep->resident_bus < command_args.new_base_addr)
2139 command_args.new_base_addr = sep->resident_bus;
2140 if (sep->rar_bus < command_args.new_base_addr)
2141 command_args.new_base_addr = sep->rar_bus;
2143 /* set the return parameters */
2144 command_args.new_cache_addr = sep->rar_bus;
2145 command_args.new_resident_addr = sep->resident_bus;
2147 /* set the new shared area */
2148 command_args.new_shared_area_addr = sep->shared_bus;
2150 edbg("SEP Driver:command_args.new_shared_addr is %08llx\n", command_args.new_shared_area_addr);
2151 edbg("SEP Driver:command_args.new_base_addr is %08llx\n", command_args.new_base_addr);
2152 edbg("SEP Driver:command_args.new_resident_addr is %08llx\n", command_args.new_resident_addr);
2153 edbg("SEP Driver:command_args.new_rar_addr is %08llx\n", command_args.new_cache_addr);
2155 /* return to user */
2156 if (copy_to_user((void *) arg, &command_args, sizeof(struct sep_driver_realloc_cache_resident_t)))
2157 return -EFAULT;
2158 return 0;
2162 * sep_get_time_handler - time request from user space
2163 * @sep: sep we are to set the time for
2164 * @arg: pointer to user space arg buffer
2166 * This function reports back the time and the address in the SEP
2167 * shared buffer at which it has been placed. (Do we really need this!!!)
2170 static int sep_get_time_handler(struct sep_device *sep, unsigned long arg)
2172 struct sep_driver_get_time_t command_args;
2174 mutex_lock(&sep_mutex);
2175 command_args.time_value = sep_set_time(sep);
2176 command_args.time_physical_address = (unsigned long)sep_time_address(sep);
2177 mutex_unlock(&sep_mutex);
2178 if (copy_to_user((void __user *)arg,
2179 &command_args, sizeof(struct sep_driver_get_time_t)))
2180 return -EFAULT;
2181 return 0;
/*
  This API handles the end transaction request.
  Note: @arg is currently unused; the entire interrupt-mode teardown
  below is compiled out (#if 0), so this is effectively a no-op that
  always succeeds.
*/
static int sep_end_transaction_handler(struct sep_device *sep, unsigned long arg)
{
	dbg("SEP Driver:--------> sep_end_transaction_handler start\n");

#if 0				/*!SEP_DRIVER_POLLING_MODE */
	/* close IMR */
	sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, 0x7FFF);

	/* release IRQ line */
	free_irq(SEP_DIRVER_IRQ_NUM, sep);

	/* lock the sep mutex */
	/* NOTE(review): despite the comment above, this UNLOCKS the mutex -
	   presumably the matching lock is taken elsewhere; verify before
	   ever enabling this block */
	mutex_unlock(&sep_mutex);
#endif

	dbg("SEP Driver:<-------- sep_end_transaction_handler end\n");

	return 0;
}
2210 * sep_set_flow_id_handler - handle flow setting
2211 * @sep: the SEP we are configuring
2212 * @flow_id: the flow we are setting
2214 * This function handler the set flow id command
2216 static int sep_set_flow_id_handler(struct sep_device *sep,
2217 unsigned long flow_id)
2219 int error = 0;
2220 struct sep_flow_context_t *flow_data_ptr;
2222 /* find the flow data structure that was just used for creating new flow
2223 - its id should be default */
2225 mutex_lock(&sep_mutex);
2226 flow_data_ptr = sep_find_flow_context(sep, SEP_TEMP_FLOW_ID);
2227 if (flow_data_ptr)
2228 flow_data_ptr->flow_id = flow_id; /* set flow id */
2229 else
2230 error = -EINVAL;
2231 mutex_unlock(&sep_mutex);
2232 return error;
/*
  ioctl entry point: dispatches each SEP_IOC* command to the matching
  handler above and returns the handler's status to user space.
*/
static int sep_ioctl(struct inode *inode, struct file *filp, unsigned int cmd, unsigned long arg)
{
	int error = 0;
	/* device instance stashed by sep_open() */
	struct sep_device *sep = filp->private_data;

	dbg("------------>SEP Driver: ioctl start\n");

	edbg("SEP Driver: cmd is %x\n", cmd);

	switch (cmd) {
	case SEP_IOCSENDSEPCOMMAND:
		/* send command to SEP */
		sep_send_command_handler(sep);
		edbg("SEP Driver: after sep_send_command_handler\n");
		break;
	case SEP_IOCSENDSEPRPLYCOMMAND:
		/* send reply command to SEP */
		sep_send_reply_command_handler(sep);
		break;
	case SEP_IOCALLOCDATAPOLL:
		/* allocate data pool */
		error = sep_allocate_data_pool_memory_handler(sep, arg);
		break;
	case SEP_IOCWRITEDATAPOLL:
		/* write data into memory pool */
		error = sep_write_into_data_pool_handler(sep, arg);
		break;
	case SEP_IOCREADDATAPOLL:
		/* read data from data pool into application memory */
		error = sep_read_from_data_pool_handler(sep, arg);
		break;
	case SEP_IOCCREATESYMDMATABLE:
		/* create dma table for synhronic operation */
		error = sep_create_sync_dma_tables_handler(sep, arg);
		break;
	case SEP_IOCCREATEFLOWDMATABLE:
		/* create flow dma tables */
		error = sep_create_flow_dma_tables_handler(sep, arg);
		break;
	case SEP_IOCFREEDMATABLEDATA:
		/* free the pages */
		error = sep_free_dma_table_data_handler(sep);
		break;
	case SEP_IOCSETFLOWID:
		/* set flow id - arg is the id itself, not a user pointer */
		error = sep_set_flow_id_handler(sep, (unsigned long)arg);
		break;
	case SEP_IOCADDFLOWTABLE:
		/* add tables to the dynamic flow */
		error = sep_add_flow_tables_handler(sep, arg);
		break;
	case SEP_IOCADDFLOWMESSAGE:
		/* add message of add tables to flow */
		error = sep_add_flow_tables_message_handler(sep, arg);
		break;
	case SEP_IOCSEPSTART:
		/* start command to sep */
		error = sep_start_handler(sep);
		break;
	case SEP_IOCSEPINIT:
		/* init command to sep */
		error = sep_init_handler(sep, arg);
		break;
	case SEP_IOCGETSTATICPOOLADDR:
		/* get the physical and virtual addresses of the static pool */
		error = sep_get_static_pool_addr_handler(sep, arg);
		break;
	case SEP_IOCENDTRANSACTION:
		error = sep_end_transaction_handler(sep, arg);
		break;
	case SEP_IOCREALLOCCACHERES:
		error = sep_realloc_cache_resident_handler(sep, arg);
		break;
	case SEP_IOCGETMAPPEDADDROFFSET:
		error = sep_get_physical_mapped_offset_handler(sep, arg);
		break;
	case SEP_IOCGETIME:
		error = sep_get_time_handler(sep, arg);
		break;
	default:
		/* unknown command number */
		error = -ENOTTY;
		break;
	}
	dbg("SEP Driver:<-------- ioctl end\n");
	return error;
}
#if !SEP_DRIVER_POLLING_MODE
/* handler for flow done interrupt - runs from the flow workqueue,
   queued by sep_inthandler() */
static void sep_flow_done_handler(struct work_struct *work)
{
	struct sep_flow_context_t *flow_data_ptr;

	/* obtain the mutex */
	mutex_lock(&sep_mutex);

	/* get the pointer to context; NOTE(review): this cast is only valid
	   if flow_wq is the FIRST member of struct sep_flow_context_t -
	   confirm against the structure definition */
	flow_data_ptr = (struct sep_flow_context_t *) work;

	/* free all the current input tables in sep */
	sep_deallocated_flow_tables(&flow_data_ptr->input_tables_in_process);

	/* free all the current output tables in SEP (if needed);
	   0xffffffff is the "no table allocated" sentinel */
	if (flow_data_ptr->output_tables_in_process.physical_address != 0xffffffff)
		sep_deallocated_flow_tables(&flow_data_ptr->output_tables_in_process);

	/* check if we have additional tables to be sent to SEP; only the
	   input flag may be checked */
	if (flow_data_ptr->input_tables_flag) {
		/* copy the message to the shared RAM and signal SEP.
		   NOTE(review): the memcpy arguments actually copy FROM the
		   shared area INTO the flow message - the opposite of what
		   this comment claims; confirm the intended direction.
		   NOTE(review): 'sep' is not declared in this scope (the
		   file-level instance is sep_dev); this block only builds
		   with SEP_DRIVER_POLLING_MODE disabled - verify it compiles */
		memcpy((void *) flow_data_ptr->message, (void *) sep->shared_addr, flow_data_ptr->message_size_in_bytes);

		sep_write_reg(sep, HW_HOST_HOST_SEP_GPR2_REG_ADDR, 0x2);
	}
	mutex_unlock(&sep_mutex);
}
/*
  interrupt handler function - shared IRQ; returns IRQ_NONE when the
  interrupt did not originate from the SEP device
*/
static irqreturn_t sep_inthandler(int irq, void *dev_id)
{
	irqreturn_t int_error;
	unsigned long reg_val;
	unsigned long flow_id;
	struct sep_flow_context_t *flow_context_ptr;
	struct sep_device *sep = dev_id;

	int_error = IRQ_HANDLED;

	/* read the IRR register to check if this is SEP interrupt */
	reg_val = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);
	edbg("SEP Interrupt - reg is %08lx\n", reg_val);

	/* check if this is the flow interrupt; the condition is deliberately
	   hard-coded to 0, so the flow-done path below is dead code */
	if (0 /*reg_val & (0x1 << 11) */ ) {
		/* read GPRO to find out the which flow is done */
		flow_id = sep_read_reg(sep, HW_HOST_IRR_REG_ADDR);

		/* find the contex of the flow; the flow id occupies the top
		   four bits of the register value */
		flow_context_ptr = sep_find_flow_context(sep, flow_id >> 28);
		if (flow_context_ptr == NULL)
			goto end_function_with_error;

		/* queue the work */
		INIT_WORK(&flow_context_ptr->flow_wq, sep_flow_done_handler);
		queue_work(sep->flow_wq, &flow_context_ptr->flow_wq);

	} else {
		/* check if this is reply interrupt from SEP (bit 13) */
		if (reg_val & (0x1 << 13)) {
			/* update the counter of reply messages */
			sep->reply_ct++;
			/* wake up the waiting process */
			wake_up(&sep_event);
		} else {
			/* not our interrupt - leave IRR untouched */
			int_error = IRQ_NONE;
			goto end_function;
		}
	}
end_function_with_error:
	/* clear the interrupt; the reply path above also falls through to
	   this write intentionally */
	sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, reg_val);
end_function:
	return int_error;
}

#endif
/* The following two helpers are currently compiled out. They implement
   a workaround that forces the SEP DMA engine into single-burst mode. */
#if 0
/* spin until the SEP busy register drops to zero */
static void sep_wait_busy(struct sep_device *sep)
{
	u32 reg;

	do {
		reg = sep_read_reg(sep, HW_HOST_SEP_BUSY_REG_ADDR);
	} while (reg);
}

/*
  PATCH for configuring the DMA to single burst instead of multi-burst
*/
static void sep_configure_dma_burst(struct sep_device *sep)
{
#define HW_AHB_RD_WR_BURSTS_REG_ADDR 0x0E10UL

	dbg("SEP Driver:<-------- sep_configure_dma_burst start \n");

	/* request access to registers from SEP */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x2);

	dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (write reg) \n");

	sep_wait_busy(sep);

	dbg("SEP Driver:<-------- sep_configure_dma_burst finished request access to registers from SEP (while(revVal) wait loop) \n");

	/* set the DMA burst register to single burst */
	sep_write_reg(sep, HW_AHB_RD_WR_BURSTS_REG_ADDR, 0x0UL);

	/* release the sep busy */
	sep_write_reg(sep, HW_HOST_HOST_SEP_GPR0_REG_ADDR, 0x0UL);
	sep_wait_busy(sep);

	dbg("SEP Driver:<-------- sep_configure_dma_burst done \n");
}

#endif
2452 Function that is activaed on the succesful probe of the SEP device
2454 static int __devinit sep_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2456 int error = 0;
2457 struct sep_device *sep;
2458 int counter;
2459 int size; /* size of memory for allocation */
2461 edbg("Sep pci probe starting\n");
2462 if (sep_dev != NULL) {
2463 dev_warn(&pdev->dev, "only one SEP supported.\n");
2464 return -EBUSY;
2467 /* enable the device */
2468 error = pci_enable_device(pdev);
2469 if (error) {
2470 edbg("error enabling pci device\n");
2471 goto end_function;
2474 /* set the pci dev pointer */
2475 sep_dev = &sep_instance;
2476 sep = &sep_instance;
2478 edbg("sep->shared_addr = %p\n", sep->shared_addr);
2479 /* transaction counter that coordinates the transactions between SEP
2480 and HOST */
2481 sep->send_ct = 0;
2482 /* counter for the messages from sep */
2483 sep->reply_ct = 0;
2484 /* counter for the number of bytes allocated in the pool
2485 for the current transaction */
2486 sep->data_pool_bytes_allocated = 0;
2488 /* calculate the total size for allocation */
2489 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2490 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2492 /* allocate the shared area */
2493 if (sep_map_and_alloc_shared_area(sep, size)) {
2494 error = -ENOMEM;
2495 /* allocation failed */
2496 goto end_function_error;
2498 /* now set the memory regions */
2499 #if (SEP_DRIVER_RECONFIG_MESSAGE_AREA == 1)
2500 /* Note: this test section will need moving before it could ever
2501 work as the registers are not yet mapped ! */
2502 /* send the new SHARED MESSAGE AREA to the SEP */
2503 sep_write_reg(sep, HW_HOST_HOST_SEP_GPR1_REG_ADDR, sep->shared_bus);
2505 /* poll for SEP response */
2506 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2507 while (retval != 0xffffffff && retval != sep->shared_bus)
2508 retval = sep_read_reg(sep, HW_HOST_SEP_HOST_GPR1_REG_ADDR);
2510 /* check the return value (register) */
2511 if (retval != sep->shared_bus) {
2512 error = -ENOMEM;
2513 goto end_function_deallocate_sep_shared_area;
2515 #endif
2516 /* init the flow contextes */
2517 for (counter = 0; counter < SEP_DRIVER_NUM_FLOWS; counter++)
2518 sep->flows[counter].flow_id = SEP_FREE_FLOW_ID;
2520 sep->flow_wq = create_singlethread_workqueue("sepflowwq");
2521 if (sep->flow_wq == NULL) {
2522 error = -ENOMEM;
2523 edbg("sep_driver:flow queue creation failed\n");
2524 goto end_function_deallocate_sep_shared_area;
2526 edbg("SEP Driver: create flow workqueue \n");
2527 sep->pdev = pci_dev_get(pdev);
2529 sep->reg_addr = pci_ioremap_bar(pdev, 0);
2530 if (!sep->reg_addr) {
2531 edbg("sep: ioremap of registers failed.\n");
2532 goto end_function_deallocate_sep_shared_area;
2534 edbg("SEP Driver:reg_addr is %p\n", sep->reg_addr);
2536 /* load the rom code */
2537 sep_load_rom_code(sep);
2539 /* set up system base address and shared memory location */
2540 sep->rar_addr = dma_alloc_coherent(&sep->pdev->dev,
2541 2 * SEP_RAR_IO_MEM_REGION_SIZE,
2542 &sep->rar_bus, GFP_KERNEL);
2544 if (!sep->rar_addr) {
2545 edbg("SEP Driver:can't allocate rar\n");
2546 goto end_function_uniomap;
2550 edbg("SEP Driver:rar_bus is %08llx\n", (unsigned long long)sep->rar_bus);
2551 edbg("SEP Driver:rar_virtual is %p\n", sep->rar_addr);
2553 #if !SEP_DRIVER_POLLING_MODE
2555 edbg("SEP Driver: about to write IMR and ICR REG_ADDR\n");
2557 /* clear ICR register */
2558 sep_write_reg(sep, HW_HOST_ICR_REG_ADDR, 0xFFFFFFFF);
2560 /* set the IMR register - open only GPR 2 */
2561 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
2563 edbg("SEP Driver: about to call request_irq\n");
2564 /* get the interrupt line */
2565 error = request_irq(pdev->irq, sep_inthandler, IRQF_SHARED, "sep_driver", sep);
2566 if (error)
2567 goto end_function_free_res;
2568 return 0;
2569 edbg("SEP Driver: about to write IMR REG_ADDR");
2571 /* set the IMR register - open only GPR 2 */
2572 sep_write_reg(sep, HW_HOST_IMR_REG_ADDR, (~(0x1 << 13)));
2574 end_function_free_res:
2575 dma_free_coherent(&sep->pdev->dev, 2 * SEP_RAR_IO_MEM_REGION_SIZE,
2576 sep->rar_addr, sep->rar_bus);
2577 #endif /* SEP_DRIVER_POLLING_MODE */
2578 end_function_uniomap:
2579 iounmap(sep->reg_addr);
2580 end_function_deallocate_sep_shared_area:
2581 /* de-allocate shared area */
2582 sep_unmap_and_free_shared_area(sep, size);
2583 end_function_error:
2584 sep_dev = NULL;
2585 end_function:
2586 return error;
/* PCI devices claimed by this driver: the Intel SEP security processor */
static struct pci_device_id sep_pci_id_tbl[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x080c)},
	{0}
};

MODULE_DEVICE_TABLE(pci, sep_pci_id_tbl);

/* field for registering driver to PCI device */
static struct pci_driver sep_pci_driver = {
	.name = "sep_sec_driver",
	.id_table = sep_pci_id_tbl,
	.probe = sep_probe
	/* FIXME: remove handler */
};

/* major and minor device numbers */
static dev_t sep_devno;

/* the files operations structure of the driver */
static struct file_operations sep_file_operations = {
	.owner = THIS_MODULE,
	.ioctl = sep_ioctl,
	.poll = sep_poll,
	.open = sep_open,
	.release = sep_release,
	.mmap = sep_mmap,
};

/* cdev struct of the driver */
static struct cdev sep_cdev;
2622 this function registers the driver to the file system
2624 static int sep_register_driver_to_fs(void)
2626 int ret_val = alloc_chrdev_region(&sep_devno, 0, 1, "sep_sec_driver");
2627 if (ret_val) {
2628 edbg("sep: major number allocation failed, retval is %d\n",
2629 ret_val);
2630 return ret_val;
2632 /* init cdev */
2633 cdev_init(&sep_cdev, &sep_file_operations);
2634 sep_cdev.owner = THIS_MODULE;
2636 /* register the driver with the kernel */
2637 ret_val = cdev_add(&sep_cdev, sep_devno, 1);
2638 if (ret_val) {
2639 edbg("sep_driver:cdev_add failed, retval is %d\n", ret_val);
2640 /* unregister dev numbers */
2641 unregister_chrdev_region(sep_devno, 1);
2643 return ret_val;
2647 /*--------------------------------------------------------------
2648 init function
2649 ----------------------------------------------------------------*/
2650 static int __init sep_init(void)
2652 int ret_val = 0;
2653 dbg("SEP Driver:-------->Init start\n");
2654 /* FIXME: Probe can occur before we are ready to survive a probe */
2655 ret_val = pci_register_driver(&sep_pci_driver);
2656 if (ret_val) {
2657 edbg("sep_driver:sep_driver_to_device failed, ret_val is %d\n", ret_val);
2658 goto end_function_unregister_from_fs;
2660 /* register driver to fs */
2661 ret_val = sep_register_driver_to_fs();
2662 if (ret_val)
2663 goto end_function_unregister_pci;
2664 goto end_function;
2665 end_function_unregister_pci:
2666 pci_unregister_driver(&sep_pci_driver);
2667 end_function_unregister_from_fs:
2668 /* unregister from fs */
2669 cdev_del(&sep_cdev);
2670 /* unregister dev numbers */
2671 unregister_chrdev_region(sep_devno, 1);
2672 end_function:
2673 dbg("SEP Driver:<-------- Init end\n");
2674 return ret_val;
2678 /*-------------------------------------------------------------
2679 exit function
2680 --------------------------------------------------------------*/
2681 static void __exit sep_exit(void)
2683 int size;
2685 dbg("SEP Driver:--------> Exit start\n");
2687 /* unregister from fs */
2688 cdev_del(&sep_cdev);
2689 /* unregister dev numbers */
2690 unregister_chrdev_region(sep_devno, 1);
2691 /* calculate the total size for de-allocation */
2692 size = SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES +
2693 SEP_DRIVER_SYNCHRONIC_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES + SEP_DRIVER_FLOW_DMA_TABLES_AREA_SIZE_IN_BYTES + SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES + SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES;
2694 /* FIXME: We need to do this in the unload for the device */
2695 /* free shared area */
2696 if (sep_dev) {
2697 sep_unmap_and_free_shared_area(sep_dev, size);
2698 edbg("SEP Driver: free pages SEP SHARED AREA \n");
2699 iounmap((void *) sep_dev->reg_addr);
2700 edbg("SEP Driver: iounmap \n");
2702 edbg("SEP Driver: release_mem_region \n");
2703 dbg("SEP Driver:<-------- Exit end\n");
/* module entry and exit points */
module_init(sep_init);
module_exit(sep_exit);

MODULE_LICENSE("GPL");