3 * sep_driver.c - Security Processor Driver main group of functions
5 * Copyright(c) 2009,2010 Intel Corporation. All rights reserved.
6 * Contributions(c) 2009,2010 Discretix. All rights reserved.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the Free
10 * Software Foundation; version 2 of the License.
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59
19 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 * Mark Allyn mark.a.allyn@intel.com
24 * Jayant Mangalampalli jayant.mangalampalli@intel.com
28 * 2009.06.26 Initial publish
29 * 2010.09.14 Upgrade to Medfield
32 #include <linux/init.h>
33 #include <linux/module.h>
34 #include <linux/miscdevice.h>
36 #include <linux/cdev.h>
37 #include <linux/kdev_t.h>
38 #include <linux/mutex.h>
39 #include <linux/sched.h>
41 #include <linux/poll.h>
42 #include <linux/wait.h>
43 #include <linux/pci.h>
44 #include <linux/firmware.h>
45 #include <linux/slab.h>
46 #include <linux/ioctl.h>
47 #include <asm/current.h>
48 #include <linux/ioport.h>
50 #include <linux/interrupt.h>
51 #include <linux/pagemap.h>
52 #include <asm/cacheflush.h>
53 #include <linux/sched.h>
54 #include <linux/delay.h>
55 #include <linux/jiffies.h>
56 #include <linux/rar_register.h>
58 #include "sep_driver_hw_defs.h"
59 #include "sep_driver_config.h"
60 #include "sep_driver_api.h"
63 /*----------------------------------------
65 -----------------------------------------*/
67 #define SEP_RAR_IO_MEM_REGION_SIZE 0x40000
69 /*--------------------------------------------
71 --------------------------------------------*/
73 /* Keep this a single static object for now to keep the conversion easy */
/* The single SEP device instance (driver currently supports one device) */
static struct sep_device *sep_dev;
78 * sep_dump_message - dump the message that is pending
81 static void sep_dump_message(struct sep_device
*sep
)
84 u32
*p
= sep
->shared_addr
;
85 for (count
= 0; count
< 12 * 4; count
+= 4)
86 dev_dbg(&sep
->pdev
->dev
, "Word %d of the message is %x\n",
91 * sep_map_and_alloc_shared_area - allocate shared block
92 * @sep: security processor
93 * @size: size of shared area
95 static int sep_map_and_alloc_shared_area(struct sep_device
*sep
)
97 sep
->shared_addr
= dma_alloc_coherent(&sep
->pdev
->dev
,
99 &sep
->shared_bus
, GFP_KERNEL
);
101 if (!sep
->shared_addr
) {
102 dev_warn(&sep
->pdev
->dev
,
103 "shared memory dma_alloc_coherent failed\n");
106 dev_dbg(&sep
->pdev
->dev
,
107 "shared_addr %zx bytes @%p (bus %llx)\n",
108 sep
->shared_size
, sep
->shared_addr
,
109 (unsigned long long)sep
->shared_bus
);
114 * sep_unmap_and_free_shared_area - free shared block
115 * @sep: security processor
117 static void sep_unmap_and_free_shared_area(struct sep_device
*sep
)
119 dma_free_coherent(&sep
->pdev
->dev
, sep
->shared_size
,
120 sep
->shared_addr
, sep
->shared_bus
);
124 * sep_shared_bus_to_virt - convert bus/virt addresses
125 * @sep: pointer to struct sep_device
126 * @bus_address: address to convert
128 * Returns virtual address inside the shared area according
129 * to the bus address.
131 static void *sep_shared_bus_to_virt(struct sep_device
*sep
,
132 dma_addr_t bus_address
)
134 return sep
->shared_addr
+ (bus_address
- sep
->shared_bus
);
138 * open function for the singleton driver
139 * @inode_ptr struct inode *
140 * @file_ptr struct file *
142 * Called when the user opens the singleton device interface
144 static int sep_singleton_open(struct inode
*inode_ptr
, struct file
*file_ptr
)
146 struct sep_device
*sep
;
149 * Get the SEP device structure and use it for the
150 * private_data field in filp for other methods
154 file_ptr
->private_data
= sep
;
156 if (test_and_set_bit(0, &sep
->singleton_access_flag
))
162 * sep_open - device open method
163 * @inode: inode of SEP device
164 * @filp: file handle to SEP device
166 * Open method for the SEP device. Called when userspace opens
167 * the SEP device node.
169 * Returns zero on success otherwise an error code.
171 static int sep_open(struct inode
*inode
, struct file
*filp
)
173 struct sep_device
*sep
;
176 * Get the SEP device structure and use it for the
177 * private_data field in filp for other methods
180 filp
->private_data
= sep
;
182 /* Anyone can open; locking takes place at transaction level */
187 * sep_singleton_release - close a SEP singleton device
188 * @inode: inode of SEP device
189 * @filp: file handle being closed
191 * Called on the final close of a SEP device. As the open protects against
192 * multiple simultaenous opens that means this method is called when the
193 * final reference to the open handle is dropped.
195 static int sep_singleton_release(struct inode
*inode
, struct file
*filp
)
197 struct sep_device
*sep
= filp
->private_data
;
199 clear_bit(0, &sep
->singleton_access_flag
);
204 * sep_request_daemonopen - request daemon open method
205 * @inode: inode of SEP device
206 * @filp: file handle to SEP device
208 * Open method for the SEP request daemon. Called when
209 * request daemon in userspace opens the SEP device node.
211 * Returns zero on success otherwise an error code.
213 static int sep_request_daemon_open(struct inode
*inode
, struct file
*filp
)
215 struct sep_device
*sep
= sep_dev
;
218 filp
->private_data
= sep
;
220 /* There is supposed to be only one request daemon */
221 if (test_and_set_bit(0, &sep
->request_daemon_open
))
227 * sep_request_daemon_release - close a SEP daemon
228 * @inode: inode of SEP device
229 * @filp: file handle being closed
231 * Called on the final close of a SEP daemon.
233 static int sep_request_daemon_release(struct inode
*inode
, struct file
*filp
)
235 struct sep_device
*sep
= filp
->private_data
;
237 dev_dbg(&sep
->pdev
->dev
, "Request daemon release for pid %d\n",
240 /* Clear the request_daemon_open flag */
241 clear_bit(0, &sep
->request_daemon_open
);
246 * sep_req_daemon_send_reply_command_handler - poke the SEP
247 * @sep: struct sep_device *
249 * This function raises interrupt to SEPm that signals that is has a
250 * new command from HOST
252 static int sep_req_daemon_send_reply_command_handler(struct sep_device
*sep
)
254 unsigned long lck_flags
;
256 sep_dump_message(sep
);
258 /* Counters are lockable region */
259 spin_lock_irqsave(&sep
->snd_rply_lck
, lck_flags
);
263 /* Send the interrupt to SEP */
264 sep_write_reg(sep
, HW_HOST_HOST_SEP_GPR2_REG_ADDR
, sep
->send_ct
);
267 spin_unlock_irqrestore(&sep
->snd_rply_lck
, lck_flags
);
269 dev_dbg(&sep
->pdev
->dev
,
270 "sep_req_daemon_send_reply send_ct %lx reply_ct %lx\n",
271 sep
->send_ct
, sep
->reply_ct
);
278 * sep_free_dma_table_data_handler - free DMA table
279 * @sep: pointere to struct sep_device
281 * Handles the request to free DMA table for synchronic actions
283 static int sep_free_dma_table_data_handler(struct sep_device
*sep
)
287 /* Pointer to the current dma_resource struct */
288 struct sep_dma_resource
*dma
;
290 for (dcb_counter
= 0; dcb_counter
< sep
->nr_dcb_creat
; dcb_counter
++) {
291 dma
= &sep
->dma_res_arr
[dcb_counter
];
293 /* Unmap and free input map array */
294 if (dma
->in_map_array
) {
295 for (count
= 0; count
< dma
->in_num_pages
; count
++) {
296 dma_unmap_page(&sep
->pdev
->dev
,
297 dma
->in_map_array
[count
].dma_addr
,
298 dma
->in_map_array
[count
].size
,
301 kfree(dma
->in_map_array
);
304 /* Unmap output map array, DON'T free it yet */
305 if (dma
->out_map_array
) {
306 for (count
= 0; count
< dma
->out_num_pages
; count
++) {
307 dma_unmap_page(&sep
->pdev
->dev
,
308 dma
->out_map_array
[count
].dma_addr
,
309 dma
->out_map_array
[count
].size
,
312 kfree(dma
->out_map_array
);
315 /* Free page cache for output */
316 if (dma
->in_page_array
) {
317 for (count
= 0; count
< dma
->in_num_pages
; count
++) {
318 flush_dcache_page(dma
->in_page_array
[count
]);
319 page_cache_release(dma
->in_page_array
[count
]);
321 kfree(dma
->in_page_array
);
324 if (dma
->out_page_array
) {
325 for (count
= 0; count
< dma
->out_num_pages
; count
++) {
326 if (!PageReserved(dma
->out_page_array
[count
]))
327 SetPageDirty(dma
->out_page_array
[count
]);
328 flush_dcache_page(dma
->out_page_array
[count
]);
329 page_cache_release(dma
->out_page_array
[count
]);
331 kfree(dma
->out_page_array
);
334 /* Reset all the values */
335 dma
->in_page_array
= NULL
;
336 dma
->out_page_array
= NULL
;
337 dma
->in_num_pages
= 0;
338 dma
->out_num_pages
= 0;
339 dma
->in_map_array
= NULL
;
340 dma
->out_map_array
= NULL
;
341 dma
->in_map_num_entries
= 0;
342 dma
->out_map_num_entries
= 0;
345 sep
->nr_dcb_creat
= 0;
346 sep
->num_lli_tables_created
= 0;
352 * sep_request_daemon_mmap - maps the shared area to user space
353 * @filp: pointer to struct file
354 * @vma: pointer to vm_area_struct
356 * Called by the kernel when the daemon attempts an mmap() syscall
359 static int sep_request_daemon_mmap(struct file
*filp
,
360 struct vm_area_struct
*vma
)
362 struct sep_device
*sep
= filp
->private_data
;
363 dma_addr_t bus_address
;
366 if ((vma
->vm_end
- vma
->vm_start
) > SEP_DRIVER_MMMAP_AREA_SIZE
) {
371 /* Get physical address */
372 bus_address
= sep
->shared_bus
;
374 if (remap_pfn_range(vma
, vma
->vm_start
, bus_address
>> PAGE_SHIFT
,
375 vma
->vm_end
- vma
->vm_start
, vma
->vm_page_prot
)) {
377 dev_warn(&sep
->pdev
->dev
, "remap_page_range failed\n");
387 * sep_request_daemon_poll - poll implementation
388 * @sep: struct sep_device * for current SEP device
389 * @filp: struct file * for open file
390 * @wait: poll_table * for poll
392 * Called when our device is part of a poll() or select() syscall
394 static unsigned int sep_request_daemon_poll(struct file
*filp
,
400 unsigned long lck_flags
;
401 struct sep_device
*sep
= filp
->private_data
;
403 poll_wait(filp
, &sep
->event_request_daemon
, wait
);
405 dev_dbg(&sep
->pdev
->dev
, "daemon poll: send_ct is %lx reply ct is %lx\n",
406 sep
->send_ct
, sep
->reply_ct
);
408 spin_lock_irqsave(&sep
->snd_rply_lck
, lck_flags
);
409 /* Check if the data is ready */
410 if (sep
->send_ct
== sep
->reply_ct
) {
411 spin_unlock_irqrestore(&sep
->snd_rply_lck
, lck_flags
);
413 retval2
= sep_read_reg(sep
, HW_HOST_SEP_HOST_GPR2_REG_ADDR
);
414 dev_dbg(&sep
->pdev
->dev
,
415 "daemon poll: data check (GPR2) is %x\n", retval2
);
417 /* Check if PRINT request */
418 if ((retval2
>> 30) & 0x1) {
419 dev_dbg(&sep
->pdev
->dev
, "daemon poll: PRINTF request in\n");
423 /* Check if NVS request */
425 dev_dbg(&sep
->pdev
->dev
, "daemon poll: NVS request in\n");
426 mask
|= POLLPRI
| POLLWRNORM
;
429 spin_unlock_irqrestore(&sep
->snd_rply_lck
, lck_flags
);
430 dev_dbg(&sep
->pdev
->dev
,
431 "daemon poll: no reply received; returning 0\n");
439 * sep_release - close a SEP device
440 * @inode: inode of SEP device
441 * @filp: file handle being closed
443 * Called on the final close of a SEP device.
445 static int sep_release(struct inode
*inode
, struct file
*filp
)
447 struct sep_device
*sep
= filp
->private_data
;
449 dev_dbg(&sep
->pdev
->dev
, "Release for pid %d\n", current
->pid
);
451 mutex_lock(&sep
->sep_mutex
);
452 /* Is this the process that has a transaction open?
453 * If so, lets reset pid_doing_transaction to 0 and
454 * clear the in use flags, and then wake up sep_event
455 * so that other processes can do transactions
457 if (sep
->pid_doing_transaction
== current
->pid
) {
458 clear_bit(SEP_MMAP_LOCK_BIT
, &sep
->in_use_flags
);
459 clear_bit(SEP_SEND_MSG_LOCK_BIT
, &sep
->in_use_flags
);
460 sep_free_dma_table_data_handler(sep
);
461 wake_up(&sep
->event
);
462 sep
->pid_doing_transaction
= 0;
465 mutex_unlock(&sep
->sep_mutex
);
470 * sep_mmap - maps the shared area to user space
471 * @filp: pointer to struct file
472 * @vma: pointer to vm_area_struct
474 * Called on an mmap of our space via the normal SEP device
476 static int sep_mmap(struct file
*filp
, struct vm_area_struct
*vma
)
479 struct sep_device
*sep
= filp
->private_data
;
480 unsigned long error
= 0;
482 /* Set the transaction busy (own the device) */
483 wait_event_interruptible(sep
->event
,
484 test_and_set_bit(SEP_MMAP_LOCK_BIT
,
485 &sep
->in_use_flags
) == 0);
487 if (signal_pending(current
)) {
489 goto end_function_with_error
;
492 * The pid_doing_transaction indicates that this process
493 * now owns the facilities to performa a transaction with
494 * the SEP. While this process is performing a transaction,
495 * no other process who has the SEP device open can perform
496 * any transactions. This method allows more than one process
497 * to have the device open at any given time, which provides
498 * finer granularity for device utilization by multiple
501 mutex_lock(&sep
->sep_mutex
);
502 sep
->pid_doing_transaction
= current
->pid
;
503 mutex_unlock(&sep
->sep_mutex
);
505 /* Zero the pools and the number of data pool alocation pointers */
506 sep
->data_pool_bytes_allocated
= 0;
507 sep
->num_of_data_allocations
= 0;
510 * Check that the size of the mapped range is as the size of the message
513 if ((vma
->vm_end
- vma
->vm_start
) > SEP_DRIVER_MMMAP_AREA_SIZE
) {
515 goto end_function_with_error
;
518 dev_dbg(&sep
->pdev
->dev
, "shared_addr is %p\n", sep
->shared_addr
);
520 /* Get bus address */
521 bus_addr
= sep
->shared_bus
;
523 if (remap_pfn_range(vma
, vma
->vm_start
, bus_addr
>> PAGE_SHIFT
,
524 vma
->vm_end
- vma
->vm_start
, vma
->vm_page_prot
)) {
525 dev_warn(&sep
->pdev
->dev
, "remap_page_range failed\n");
527 goto end_function_with_error
;
531 end_function_with_error
:
533 clear_bit(SEP_MMAP_LOCK_BIT
, &sep
->in_use_flags
);
534 mutex_lock(&sep
->sep_mutex
);
535 sep
->pid_doing_transaction
= 0;
536 mutex_unlock(&sep
->sep_mutex
);
538 /* Raise event for stuck contextes */
540 wake_up(&sep
->event
);
547 * sep_poll - poll handler
548 * @filp: pointer to struct file
549 * @wait: pointer to poll_table
551 * Called by the OS when the kernel is asked to do a poll on
554 static unsigned int sep_poll(struct file
*filp
, poll_table
*wait
)
559 unsigned long lck_flags
;
561 struct sep_device
*sep
= filp
->private_data
;
563 /* Am I the process that owns the transaction? */
564 mutex_lock(&sep
->sep_mutex
);
565 if (current
->pid
!= sep
->pid_doing_transaction
) {
566 dev_dbg(&sep
->pdev
->dev
, "poll; wrong pid\n");
568 mutex_unlock(&sep
->sep_mutex
);
571 mutex_unlock(&sep
->sep_mutex
);
573 /* Check if send command or send_reply were activated previously */
574 if (!test_bit(SEP_SEND_MSG_LOCK_BIT
, &sep
->in_use_flags
)) {
579 /* Add the event to the polling wait table */
580 dev_dbg(&sep
->pdev
->dev
, "poll: calling wait sep_event\n");
582 poll_wait(filp
, &sep
->event
, wait
);
584 dev_dbg(&sep
->pdev
->dev
, "poll: send_ct is %lx reply ct is %lx\n",
585 sep
->send_ct
, sep
->reply_ct
);
587 /* Check if error occurred during poll */
588 retval2
= sep_read_reg(sep
, HW_HOST_SEP_HOST_GPR3_REG_ADDR
);
589 if (retval2
!= 0x0) {
590 dev_warn(&sep
->pdev
->dev
, "poll; poll error %x\n", retval2
);
595 spin_lock_irqsave(&sep
->snd_rply_lck
, lck_flags
);
597 if (sep
->send_ct
== sep
->reply_ct
) {
598 spin_unlock_irqrestore(&sep
->snd_rply_lck
, lck_flags
);
599 retval
= sep_read_reg(sep
, HW_HOST_SEP_HOST_GPR2_REG_ADDR
);
600 dev_dbg(&sep
->pdev
->dev
, "poll: data ready check (GPR2) %x\n",
603 /* Check if printf request */
604 if ((retval
>> 30) & 0x1) {
605 dev_dbg(&sep
->pdev
->dev
, "poll: SEP printf request\n");
606 wake_up(&sep
->event_request_daemon
);
610 /* Check if the this is SEP reply or request */
612 dev_dbg(&sep
->pdev
->dev
, "poll: SEP request\n");
613 wake_up(&sep
->event_request_daemon
);
615 dev_dbg(&sep
->pdev
->dev
, "poll: normal return\n");
616 /* In case it is again by send_reply_comand */
617 clear_bit(SEP_SEND_MSG_LOCK_BIT
, &sep
->in_use_flags
);
618 sep_dump_message(sep
);
619 dev_dbg(&sep
->pdev
->dev
,
620 "poll; SEP reply POLLIN | POLLRDNORM\n");
621 mask
|= POLLIN
| POLLRDNORM
;
624 spin_unlock_irqrestore(&sep
->snd_rply_lck
, lck_flags
);
625 dev_dbg(&sep
->pdev
->dev
,
626 "poll; no reply received; returning mask of 0\n");
635 * sep_time_address - address in SEP memory of time
636 * @sep: SEP device we want the address from
638 * Return the address of the two dwords in memory used for time
641 static u32
*sep_time_address(struct sep_device
*sep
)
643 return sep
->shared_addr
+ SEP_DRIVER_SYSTEM_TIME_MEMORY_OFFSET_IN_BYTES
;
647 * sep_set_time - set the SEP time
648 * @sep: the SEP we are setting the time for
650 * Calculates time and sets it at the predefined address.
651 * Called with the SEP mutex held.
653 static unsigned long sep_set_time(struct sep_device
*sep
)
656 u32
*time_addr
; /* Address of time as seen by the kernel */
659 do_gettimeofday(&time
);
661 /* Set value in the SYSTEM MEMORY offset */
662 time_addr
= sep_time_address(sep
);
664 time_addr
[0] = SEP_TIME_VAL_TOKEN
;
665 time_addr
[1] = time
.tv_sec
;
667 dev_dbg(&sep
->pdev
->dev
, "time.tv_sec is %lu\n", time
.tv_sec
);
668 dev_dbg(&sep
->pdev
->dev
, "time_addr is %p\n", time_addr
);
669 dev_dbg(&sep
->pdev
->dev
, "sep->shared_addr is %p\n", sep
->shared_addr
);
675 * sep_set_caller_id_handler - insert caller id entry
677 * @arg: pointer to struct caller_id_struct
679 * Inserts the data into the caller id table. Note that this function
680 * falls under the ioctl lock
682 static int sep_set_caller_id_handler(struct sep_device
*sep
, unsigned long arg
)
687 struct caller_id_struct command_args
;
689 for (i
= 0; i
< SEP_CALLER_ID_TABLE_NUM_ENTRIES
; i
++) {
690 if (sep
->caller_id_table
[i
].pid
== 0)
694 if (i
== SEP_CALLER_ID_TABLE_NUM_ENTRIES
) {
695 dev_dbg(&sep
->pdev
->dev
, "no more caller id entries left\n");
696 dev_dbg(&sep
->pdev
->dev
, "maximum number is %d\n",
697 SEP_CALLER_ID_TABLE_NUM_ENTRIES
);
703 if (copy_from_user(&command_args
, (void __user
*)arg
,
704 sizeof(command_args
))) {
709 hash
= (void __user
*)(unsigned long)command_args
.callerIdAddress
;
711 if (!command_args
.pid
|| !command_args
.callerIdSizeInBytes
) {
716 dev_dbg(&sep
->pdev
->dev
, "pid is %x\n", command_args
.pid
);
717 dev_dbg(&sep
->pdev
->dev
, "callerIdSizeInBytes is %x\n",
718 command_args
.callerIdSizeInBytes
);
720 if (command_args
.callerIdSizeInBytes
>
721 SEP_CALLER_ID_HASH_SIZE_IN_BYTES
) {
726 sep
->caller_id_table
[i
].pid
= command_args
.pid
;
728 if (copy_from_user(sep
->caller_id_table
[i
].callerIdHash
,
729 hash
, command_args
.callerIdSizeInBytes
))
736 * sep_set_current_caller_id - set the caller id
737 * @sep: pointer to struct_sep_device
739 * Set the caller ID (if it exists) to the SEP. Note that this
740 * function falls under the ioctl lock
742 static int sep_set_current_caller_id(struct sep_device
*sep
)
747 /* Zero the previous value */
748 memset(sep
->shared_addr
+ SEP_CALLER_ID_OFFSET_BYTES
,
749 0, SEP_CALLER_ID_HASH_SIZE_IN_BYTES
);
751 for (i
= 0; i
< SEP_CALLER_ID_TABLE_NUM_ENTRIES
; i
++) {
752 if (sep
->caller_id_table
[i
].pid
== current
->pid
) {
753 dev_dbg(&sep
->pdev
->dev
, "Caller Id found\n");
755 memcpy(sep
->shared_addr
+ SEP_CALLER_ID_OFFSET_BYTES
,
756 (void *)(sep
->caller_id_table
[i
].callerIdHash
),
757 SEP_CALLER_ID_HASH_SIZE_IN_BYTES
);
761 /* Ensure data is in little endian */
762 hash_buf_ptr
= (u32
*)sep
->shared_addr
+
763 SEP_CALLER_ID_OFFSET_BYTES
;
765 for (i
= 0; i
< SEP_CALLER_ID_HASH_SIZE_IN_WORDS
; i
++)
766 hash_buf_ptr
[i
] = cpu_to_le32(hash_buf_ptr
[i
]);
772 * sep_send_command_handler - kick off a command
773 * @sep: SEP being signalled
775 * This function raises interrupt to SEP that signals that is has a new
776 * command from the host
778 * Note that this function does fall under the ioctl lock
780 static int sep_send_command_handler(struct sep_device
*sep
)
782 unsigned long lck_flags
;
785 if (test_and_set_bit(SEP_SEND_MSG_LOCK_BIT
, &sep
->in_use_flags
)) {
791 sep_set_current_caller_id(sep
);
793 sep_dump_message(sep
);
796 spin_lock_irqsave(&sep
->snd_rply_lck
, lck_flags
);
798 spin_unlock_irqrestore(&sep
->snd_rply_lck
, lck_flags
);
800 dev_dbg(&sep
->pdev
->dev
,
801 "sep_send_command_handler send_ct %lx reply_ct %lx\n",
802 sep
->send_ct
, sep
->reply_ct
);
804 /* Send interrupt to SEP */
805 sep_write_reg(sep
, HW_HOST_HOST_SEP_GPR0_REG_ADDR
, 0x2);
812 * sep_allocate_data_pool_memory_handler -allocate pool memory
813 * @sep: pointer to struct sep_device
814 * @arg: pointer to struct alloc_struct
816 * This function handles the allocate data pool memory request
817 * This function returns calculates the bus address of the
818 * allocated memory, and the offset of this area from the mapped address.
819 * Therefore, the FVOs in user space can calculate the exact virtual
820 * address of this allocated memory
822 static int sep_allocate_data_pool_memory_handler(struct sep_device
*sep
,
826 struct alloc_struct command_args
;
828 /* Holds the allocated buffer address in the system memory pool */
831 if (copy_from_user(&command_args
, (void __user
*)arg
,
832 sizeof(struct alloc_struct
))) {
837 /* Allocate memory */
838 if ((sep
->data_pool_bytes_allocated
+ command_args
.num_bytes
) >
839 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES
) {
844 dev_dbg(&sep
->pdev
->dev
,
845 "data pool bytes_allocated: %x\n", (int)sep
->data_pool_bytes_allocated
);
846 dev_dbg(&sep
->pdev
->dev
,
847 "offset: %x\n", SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES
);
848 /* Set the virtual and bus address */
849 command_args
.offset
= SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES
+
850 sep
->data_pool_bytes_allocated
;
852 /* Place in the shared area that is known by the SEP */
853 token_addr
= (u32
*)(sep
->shared_addr
+
854 SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES
+
855 (sep
->num_of_data_allocations
)*2*sizeof(u32
));
857 token_addr
[0] = SEP_DATA_POOL_POINTERS_VAL_TOKEN
;
858 token_addr
[1] = (u32
)sep
->shared_bus
+
859 SEP_DRIVER_DATA_POOL_AREA_OFFSET_IN_BYTES
+
860 sep
->data_pool_bytes_allocated
;
862 /* Write the memory back to the user space */
863 error
= copy_to_user((void *)arg
, (void *)&command_args
,
864 sizeof(struct alloc_struct
));
870 /* Update the allocation */
871 sep
->data_pool_bytes_allocated
+= command_args
.num_bytes
;
872 sep
->num_of_data_allocations
+= 1;
879 * sep_lock_kernel_pages - map kernel pages for DMA
880 * @sep: pointer to struct sep_device
881 * @kernel_virt_addr: address of data buffer in kernel
882 * @data_size: size of data
883 * @lli_array_ptr: lli array
884 * @in_out_flag: input into device or output from device
886 * This function locks all the physical pages of the kernel virtual buffer
887 * and construct a basic lli array, where each entry holds the physical
888 * page address and the size that application data holds in this page
889 * This function is used only during kernel crypto mod calls from within
890 * the kernel (when ioctl is not used)
892 static int sep_lock_kernel_pages(struct sep_device
*sep
,
893 unsigned long kernel_virt_addr
,
895 struct sep_lli_entry
**lli_array_ptr
,
901 struct sep_lli_entry
*lli_array
;
903 struct sep_dma_map
*map_array
;
905 dev_dbg(&sep
->pdev
->dev
, "lock kernel pages kernel_virt_addr is %08lx\n",
906 (unsigned long)kernel_virt_addr
);
907 dev_dbg(&sep
->pdev
->dev
, "data_size is %x\n", data_size
);
909 lli_array
= kmalloc(sizeof(struct sep_lli_entry
), GFP_ATOMIC
);
914 map_array
= kmalloc(sizeof(struct sep_dma_map
), GFP_ATOMIC
);
917 goto end_function_with_error
;
920 map_array
[0].dma_addr
=
921 dma_map_single(&sep
->pdev
->dev
, (void *)kernel_virt_addr
,
922 data_size
, DMA_BIDIRECTIONAL
);
923 map_array
[0].size
= data_size
;
927 * Set the start address of the first page - app data may start not at
928 * the beginning of the page
930 lli_array
[0].bus_address
= (u32
)map_array
[0].dma_addr
;
931 lli_array
[0].block_size
= map_array
[0].size
;
933 dev_dbg(&sep
->pdev
->dev
,
934 "lli_array[0].bus_address is %08lx, lli_array[0].block_size is %x\n",
935 (unsigned long)lli_array
[0].bus_address
,
936 lli_array
[0].block_size
);
938 /* Set the output parameters */
939 if (in_out_flag
== SEP_DRIVER_IN_FLAG
) {
940 *lli_array_ptr
= lli_array
;
941 sep
->dma_res_arr
[sep
->nr_dcb_creat
].in_num_pages
= 1;
942 sep
->dma_res_arr
[sep
->nr_dcb_creat
].in_page_array
= NULL
;
943 sep
->dma_res_arr
[sep
->nr_dcb_creat
].in_map_array
= map_array
;
944 sep
->dma_res_arr
[sep
->nr_dcb_creat
].in_map_num_entries
= 1;
946 *lli_array_ptr
= lli_array
;
947 sep
->dma_res_arr
[sep
->nr_dcb_creat
].out_num_pages
= 1;
948 sep
->dma_res_arr
[sep
->nr_dcb_creat
].out_page_array
= NULL
;
949 sep
->dma_res_arr
[sep
->nr_dcb_creat
].out_map_array
= map_array
;
950 sep
->dma_res_arr
[sep
->nr_dcb_creat
].out_map_num_entries
= 1;
954 end_function_with_error
:
962 * sep_lock_user_pages - lock and map user pages for DMA
963 * @sep: pointer to struct sep_device
964 * @app_virt_addr: user memory data buffer
965 * @data_size: size of data buffer
966 * @lli_array_ptr: lli array
967 * @in_out_flag: input or output to device
969 * This function locks all the physical pages of the application
970 * virtual buffer and construct a basic lli array, where each entry
971 * holds the physical page address and the size that application
972 * data holds in this physical pages
974 static int sep_lock_user_pages(struct sep_device
*sep
,
977 struct sep_lli_entry
**lli_array_ptr
,
984 /* The the page of the end address of the user space buffer */
986 /* The page of the start address of the user space buffer */
988 /* The range in pages */
990 /* Array of pointers to page */
991 struct page
**page_array
;
993 struct sep_lli_entry
*lli_array
;
995 struct sep_dma_map
*map_array
;
996 /* Direction of the DMA mapping for locked pages */
997 enum dma_data_direction dir
;
999 /* Set start and end pages and num pages */
1000 end_page
= (app_virt_addr
+ data_size
- 1) >> PAGE_SHIFT
;
1001 start_page
= app_virt_addr
>> PAGE_SHIFT
;
1002 num_pages
= end_page
- start_page
+ 1;
1004 dev_dbg(&sep
->pdev
->dev
, "lock user pages app_virt_addr is %x\n", app_virt_addr
);
1005 dev_dbg(&sep
->pdev
->dev
, "data_size is %x\n", data_size
);
1006 dev_dbg(&sep
->pdev
->dev
, "start_page is %x\n", start_page
);
1007 dev_dbg(&sep
->pdev
->dev
, "end_page is %x\n", end_page
);
1008 dev_dbg(&sep
->pdev
->dev
, "num_pages is %x\n", num_pages
);
1010 /* Allocate array of pages structure pointers */
1011 page_array
= kmalloc(sizeof(struct page
*) * num_pages
, GFP_ATOMIC
);
1016 map_array
= kmalloc(sizeof(struct sep_dma_map
) * num_pages
, GFP_ATOMIC
);
1018 dev_warn(&sep
->pdev
->dev
, "kmalloc for map_array failed\n");
1020 goto end_function_with_error1
;
1023 lli_array
= kmalloc(sizeof(struct sep_lli_entry
) * num_pages
,
1027 dev_warn(&sep
->pdev
->dev
, "kmalloc for lli_array failed\n");
1029 goto end_function_with_error2
;
1032 /* Convert the application virtual address into a set of physical */
1033 down_read(¤t
->mm
->mmap_sem
);
1034 result
= get_user_pages(current
, current
->mm
, app_virt_addr
,
1036 ((in_out_flag
== SEP_DRIVER_IN_FLAG
) ? 0 : 1),
1037 0, page_array
, NULL
);
1039 up_read(¤t
->mm
->mmap_sem
);
1041 /* Check the number of pages locked - if not all then exit with error */
1042 if (result
!= num_pages
) {
1043 dev_warn(&sep
->pdev
->dev
,
1044 "not all pages locked by get_user_pages\n");
1046 goto end_function_with_error3
;
1049 dev_dbg(&sep
->pdev
->dev
, "get_user_pages succeeded\n");
1052 if (in_out_flag
== SEP_DRIVER_IN_FLAG
)
1053 dir
= DMA_TO_DEVICE
;
1055 dir
= DMA_FROM_DEVICE
;
1058 * Fill the array using page array data and
1059 * map the pages - this action will also flush the cache as needed
1061 for (count
= 0; count
< num_pages
; count
++) {
1062 /* Fill the map array */
1063 map_array
[count
].dma_addr
=
1064 dma_map_page(&sep
->pdev
->dev
, page_array
[count
],
1065 0, PAGE_SIZE
, /*dir*/DMA_BIDIRECTIONAL
);
1067 map_array
[count
].size
= PAGE_SIZE
;
1069 /* Fill the lli array entry */
1070 lli_array
[count
].bus_address
= (u32
)map_array
[count
].dma_addr
;
1071 lli_array
[count
].block_size
= PAGE_SIZE
;
1073 dev_warn(&sep
->pdev
->dev
, "lli_array[%x].bus_address is %08lx, lli_array[%x].block_size is %x\n",
1074 count
, (unsigned long)lli_array
[count
].bus_address
,
1075 count
, lli_array
[count
].block_size
);
1078 /* Check the offset for the first page */
1079 lli_array
[0].bus_address
=
1080 lli_array
[0].bus_address
+ (app_virt_addr
& (~PAGE_MASK
));
1082 /* Check that not all the data is in the first page only */
1083 if ((PAGE_SIZE
- (app_virt_addr
& (~PAGE_MASK
))) >= data_size
)
1084 lli_array
[0].block_size
= data_size
;
1086 lli_array
[0].block_size
=
1087 PAGE_SIZE
- (app_virt_addr
& (~PAGE_MASK
));
1089 dev_dbg(&sep
->pdev
->dev
,
1090 "lli_array[0].bus_address is %08lx, lli_array[0].block_size is %x\n",
1091 (unsigned long)lli_array
[count
].bus_address
,
1092 lli_array
[count
].block_size
);
1094 /* Check the size of the last page */
1095 if (num_pages
> 1) {
1096 lli_array
[num_pages
- 1].block_size
=
1097 (app_virt_addr
+ data_size
) & (~PAGE_MASK
);
1098 if (lli_array
[num_pages
- 1].block_size
== 0)
1099 lli_array
[num_pages
- 1].block_size
= PAGE_SIZE
;
1101 dev_warn(&sep
->pdev
->dev
,
1102 "lli_array[%x].bus_address is "
1103 "%08lx, lli_array[%x].block_size is %x\n",
1105 (unsigned long)lli_array
[num_pages
-1].bus_address
,
1107 lli_array
[num_pages
-1].block_size
);
1110 /* Set output params according to the in_out flag */
1111 if (in_out_flag
== SEP_DRIVER_IN_FLAG
) {
1112 *lli_array_ptr
= lli_array
;
1113 sep
->dma_res_arr
[sep
->nr_dcb_creat
].in_num_pages
= num_pages
;
1114 sep
->dma_res_arr
[sep
->nr_dcb_creat
].in_page_array
= page_array
;
1115 sep
->dma_res_arr
[sep
->nr_dcb_creat
].in_map_array
= map_array
;
1116 sep
->dma_res_arr
[sep
->nr_dcb_creat
].in_map_num_entries
=
1119 *lli_array_ptr
= lli_array
;
1120 sep
->dma_res_arr
[sep
->nr_dcb_creat
].out_num_pages
= num_pages
;
1121 sep
->dma_res_arr
[sep
->nr_dcb_creat
].out_page_array
=
1123 sep
->dma_res_arr
[sep
->nr_dcb_creat
].out_map_array
= map_array
;
1124 sep
->dma_res_arr
[sep
->nr_dcb_creat
].out_map_num_entries
=
1129 end_function_with_error3
:
1130 /* Free lli array */
1133 end_function_with_error2
:
1136 end_function_with_error1
:
1137 /* Free page array */
1145 * u32 sep_calculate_lli_table_max_size - size the LLI table
1146 * @sep: pointer to struct sep_device
1148 * @num_array_entries
1151 * This function calculates the size of data that can be inserted into
1152 * the lli table from this array, such that either the table is full
1153 * (all entries are entered), or there are no more entries in the
1156 static u32
sep_calculate_lli_table_max_size(struct sep_device
*sep
,
1157 struct sep_lli_entry
*lli_in_array_ptr
,
1158 u32 num_array_entries
,
1159 u32
*last_table_flag
)
1162 /* Table data size */
1163 u32 table_data_size
= 0;
1164 /* Data size for the next table */
1165 u32 next_table_data_size
;
1167 *last_table_flag
= 0;
1170 * Calculate the data in the out lli table till we fill the whole
1171 * table or till the data has ended
1174 (counter
< (SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
- 1)) &&
1175 (counter
< num_array_entries
); counter
++)
1176 table_data_size
+= lli_in_array_ptr
[counter
].block_size
;
1179 * Check if we reached the last entry,
1180 * meaning this ia the last table to build,
1181 * and no need to check the block alignment
1183 if (counter
== num_array_entries
) {
1184 /* Set the last table flag */
1185 *last_table_flag
= 1;
1190 * Calculate the data size of the next table.
1191 * Stop if no entries left or if data size is more the DMA restriction
1193 next_table_data_size
= 0;
1194 for (; counter
< num_array_entries
; counter
++) {
1195 next_table_data_size
+= lli_in_array_ptr
[counter
].block_size
;
1196 if (next_table_data_size
>= SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE
)
1201 * Check if the next table data size is less then DMA rstriction.
1202 * if it is - recalculate the current table size, so that the next
1203 * table data size will be adaquete for DMA
1205 if (next_table_data_size
&&
1206 next_table_data_size
< SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE
)
1208 table_data_size
-= (SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE
-
1209 next_table_data_size
);
1212 return table_data_size
;
1216 * sep_build_lli_table - build an lli array for the given table
1217 * @sep: pointer to struct sep_device
1218 * @lli_array_ptr: pointer to lli array
1219 * @lli_table_ptr: pointer to lli table
1220 * @num_processed_entries_ptr: pointer to number of entries
1221 * @num_table_entries_ptr: pointer to number of tables
1222 * @table_data_size: total data size
1224 * Builds ant lli table from the lli_array according to
1225 * the given size of data
1227 static void sep_build_lli_table(struct sep_device
*sep
,
1228 struct sep_lli_entry
*lli_array_ptr
,
1229 struct sep_lli_entry
*lli_table_ptr
,
1230 u32
*num_processed_entries_ptr
,
1231 u32
*num_table_entries_ptr
,
1232 u32 table_data_size
)
1234 /* Current table data size */
1235 u32 curr_table_data_size
;
1236 /* Counter of lli array entry */
1239 /* Init currrent table data size and lli array entry counter */
1240 curr_table_data_size
= 0;
1242 *num_table_entries_ptr
= 1;
1244 dev_dbg(&sep
->pdev
->dev
, "build lli table table_data_size is %x\n", table_data_size
);
1246 /* Fill the table till table size reaches the needed amount */
1247 while (curr_table_data_size
< table_data_size
) {
1248 /* Update the number of entries in table */
1249 (*num_table_entries_ptr
)++;
1251 lli_table_ptr
->bus_address
=
1252 cpu_to_le32(lli_array_ptr
[array_counter
].bus_address
);
1254 lli_table_ptr
->block_size
=
1255 cpu_to_le32(lli_array_ptr
[array_counter
].block_size
);
1257 curr_table_data_size
+= lli_array_ptr
[array_counter
].block_size
;
1259 dev_dbg(&sep
->pdev
->dev
, "lli_table_ptr is %p\n",
1261 dev_dbg(&sep
->pdev
->dev
, "lli_table_ptr->bus_address is %08lx\n",
1262 (unsigned long)lli_table_ptr
->bus_address
);
1263 dev_dbg(&sep
->pdev
->dev
, "lli_table_ptr->block_size is %x\n",
1264 lli_table_ptr
->block_size
);
1266 /* Check for overflow of the table data */
1267 if (curr_table_data_size
> table_data_size
) {
1268 dev_dbg(&sep
->pdev
->dev
,
1269 "curr_table_data_size too large\n");
1271 /* Update the size of block in the table */
1272 lli_table_ptr
->block_size
-=
1273 cpu_to_le32((curr_table_data_size
- table_data_size
));
1275 /* Update the physical address in the lli array */
1276 lli_array_ptr
[array_counter
].bus_address
+=
1277 cpu_to_le32(lli_table_ptr
->block_size
);
1279 /* Update the block size left in the lli array */
1280 lli_array_ptr
[array_counter
].block_size
=
1281 (curr_table_data_size
- table_data_size
);
1283 /* Advance to the next entry in the lli_array */
1286 dev_dbg(&sep
->pdev
->dev
,
1287 "lli_table_ptr->bus_address is %08lx\n",
1288 (unsigned long)lli_table_ptr
->bus_address
);
1289 dev_dbg(&sep
->pdev
->dev
,
1290 "lli_table_ptr->block_size is %x\n",
1291 lli_table_ptr
->block_size
);
1293 /* Move to the next entry in table */
1297 /* Set the info entry to default */
1298 lli_table_ptr
->bus_address
= 0xffffffff;
1299 lli_table_ptr
->block_size
= 0;
1301 /* Set the output parameter */
1302 *num_processed_entries_ptr
+= array_counter
;
1307 * sep_shared_area_virt_to_bus - map shared area to bus address
1308 * @sep: pointer to struct sep_device
1309 * @virt_address: virtual address to convert
1311 * This functions returns the physical address inside shared area according
1312 * to the virtual address. It can be either on the externa RAM device
1313 * (ioremapped), or on the system RAM
1314 * This implementation is for the external RAM
1316 static dma_addr_t
sep_shared_area_virt_to_bus(struct sep_device
*sep
,
1319 dev_dbg(&sep
->pdev
->dev
, "sh virt to phys v %p\n", virt_address
);
1320 dev_dbg(&sep
->pdev
->dev
, "sh virt to phys p %08lx\n",
1322 sep
->shared_bus
+ (virt_address
- sep
->shared_addr
));
1324 return sep
->shared_bus
+ (size_t)(virt_address
- sep
->shared_addr
);
1328 * sep_shared_area_bus_to_virt - map shared area bus address to kernel
1329 * @sep: pointer to struct sep_device
1330 * @bus_address: bus address to convert
1332 * This functions returns the virtual address inside shared area
1333 * according to the physical address. It can be either on the
1334 * externa RAM device (ioremapped), or on the system RAM
1335 * This implementation is for the external RAM
1337 static void *sep_shared_area_bus_to_virt(struct sep_device
*sep
,
1338 dma_addr_t bus_address
)
1340 dev_dbg(&sep
->pdev
->dev
, "shared bus to virt b=%lx v=%lx\n",
1341 (unsigned long)bus_address
, (unsigned long)(sep
->shared_addr
+
1342 (size_t)(bus_address
- sep
->shared_bus
)));
1344 return sep
->shared_addr
+ (size_t)(bus_address
- sep
->shared_bus
);
1348 * sep_debug_print_lli_tables - dump LLI table
1349 * @sep: pointer to struct sep_device
1350 * @lli_table_ptr: pointer to sep_lli_entry
1351 * @num_table_entries: number of entries
1352 * @table_data_size: total data size
1354 * Walk the the list of the print created tables and print all the data
1356 static void sep_debug_print_lli_tables(struct sep_device
*sep
,
1357 struct sep_lli_entry
*lli_table_ptr
,
1358 unsigned long num_table_entries
,
1359 unsigned long table_data_size
)
1361 unsigned long table_count
= 1;
1362 unsigned long entries_count
= 0;
1364 dev_dbg(&sep
->pdev
->dev
, "sep_debug_print_lli_tables start\n");
1366 while ((unsigned long) lli_table_ptr
->bus_address
!= 0xffffffff) {
1367 dev_dbg(&sep
->pdev
->dev
,
1368 "lli table %08lx, table_data_size is %lu\n",
1369 table_count
, table_data_size
);
1370 dev_dbg(&sep
->pdev
->dev
, "num_table_entries is %lu\n",
1373 /* Print entries of the table (without info entry) */
1374 for (entries_count
= 0; entries_count
< num_table_entries
;
1375 entries_count
++, lli_table_ptr
++) {
1377 dev_dbg(&sep
->pdev
->dev
,
1378 "lli_table_ptr address is %08lx\n",
1379 (unsigned long) lli_table_ptr
);
1381 dev_dbg(&sep
->pdev
->dev
,
1382 "phys address is %08lx block size is %x\n",
1383 (unsigned long)lli_table_ptr
->bus_address
,
1384 lli_table_ptr
->block_size
);
1386 /* Point to the info entry */
1389 dev_dbg(&sep
->pdev
->dev
,
1390 "phys lli_table_ptr->block_size is %x\n",
1391 lli_table_ptr
->block_size
);
1393 dev_dbg(&sep
->pdev
->dev
,
1394 "phys lli_table_ptr->physical_address is %08lu\n",
1395 (unsigned long)lli_table_ptr
->bus_address
);
1398 table_data_size
= lli_table_ptr
->block_size
& 0xffffff;
1399 num_table_entries
= (lli_table_ptr
->block_size
>> 24) & 0xff;
1401 dev_dbg(&sep
->pdev
->dev
,
1402 "phys table_data_size is %lu num_table_entries is"
1403 " %lu bus_address is%lu\n", table_data_size
,
1404 num_table_entries
, (unsigned long)lli_table_ptr
->bus_address
);
1406 if ((unsigned long)lli_table_ptr
->bus_address
!= 0xffffffff)
1407 lli_table_ptr
= (struct sep_lli_entry
*)
1408 sep_shared_bus_to_virt(sep
,
1409 (unsigned long)lli_table_ptr
->bus_address
);
1413 dev_dbg(&sep
->pdev
->dev
, "sep_debug_print_lli_tables end\n");
1418 * sep_prepare_empty_lli_table - create a blank LLI table
1419 * @sep: pointer to struct sep_device
1420 * @lli_table_addr_ptr: pointer to lli table
1421 * @num_entries_ptr: pointer to number of entries
1422 * @table_data_size_ptr: point to table data size
1424 * This function creates empty lli tables when there is no data
1426 static void sep_prepare_empty_lli_table(struct sep_device
*sep
,
1427 dma_addr_t
*lli_table_addr_ptr
,
1428 u32
*num_entries_ptr
,
1429 u32
*table_data_size_ptr
)
1431 struct sep_lli_entry
*lli_table_ptr
;
1433 /* Find the area for new table */
1435 (struct sep_lli_entry
*)(sep
->shared_addr
+
1436 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES
+
1437 sep
->num_lli_tables_created
* sizeof(struct sep_lli_entry
) *
1438 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
);
1440 lli_table_ptr
->bus_address
= 0;
1441 lli_table_ptr
->block_size
= 0;
1444 lli_table_ptr
->bus_address
= 0xFFFFFFFF;
1445 lli_table_ptr
->block_size
= 0;
1447 /* Set the output parameter value */
1448 *lli_table_addr_ptr
= sep
->shared_bus
+
1449 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES
+
1450 sep
->num_lli_tables_created
*
1451 sizeof(struct sep_lli_entry
) *
1452 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
;
1454 /* Set the num of entries and table data size for empty table */
1455 *num_entries_ptr
= 2;
1456 *table_data_size_ptr
= 0;
1458 /* Update the number of created tables */
1459 sep
->num_lli_tables_created
++;
1463 * sep_prepare_input_dma_table - prepare input DMA mappings
1464 * @sep: pointer to struct sep_device
1469 * @table_data_size_ptr:
1470 * @is_kva: set for kernel data (kernel cryptio call)
1472 * This function prepares only input DMA table for synhronic symmetric
1474 * Note that all bus addresses that are passed to the SEP
1475 * are in 32 bit format; the SEP is a 32 bit device
1477 static int sep_prepare_input_dma_table(struct sep_device
*sep
,
1478 unsigned long app_virt_addr
,
1481 dma_addr_t
*lli_table_ptr
,
1482 u32
*num_entries_ptr
,
1483 u32
*table_data_size_ptr
,
1487 /* Pointer to the info entry of the table - the last entry */
1488 struct sep_lli_entry
*info_entry_ptr
;
1489 /* Array of pointers to page */
1490 struct sep_lli_entry
*lli_array_ptr
;
1491 /* Points to the first entry to be processed in the lli_in_array */
1492 u32 current_entry
= 0;
1493 /* Num entries in the virtual buffer */
1494 u32 sep_lli_entries
= 0;
1495 /* Lli table pointer */
1496 struct sep_lli_entry
*in_lli_table_ptr
;
1497 /* The total data in one table */
1498 u32 table_data_size
= 0;
1499 /* Flag for last table */
1500 u32 last_table_flag
= 0;
1501 /* Number of entries in lli table */
1502 u32 num_entries_in_table
= 0;
1503 /* Next table address */
1504 void *lli_table_alloc_addr
= 0;
1506 dev_dbg(&sep
->pdev
->dev
, "prepare intput dma table data_size is %x\n", data_size
);
1507 dev_dbg(&sep
->pdev
->dev
, "block_size is %x\n", block_size
);
1509 /* Initialize the pages pointers */
1510 sep
->dma_res_arr
[sep
->nr_dcb_creat
].in_page_array
= NULL
;
1511 sep
->dma_res_arr
[sep
->nr_dcb_creat
].in_num_pages
= 0;
1513 /* Set the kernel address for first table to be allocated */
1514 lli_table_alloc_addr
= (void *)(sep
->shared_addr
+
1515 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES
+
1516 sep
->num_lli_tables_created
* sizeof(struct sep_lli_entry
) *
1517 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
);
1519 if (data_size
== 0) {
1520 /* Special case - create meptu table - 2 entries, zero data */
1521 sep_prepare_empty_lli_table(sep
, lli_table_ptr
,
1522 num_entries_ptr
, table_data_size_ptr
);
1523 goto update_dcb_counter
;
1526 /* Check if the pages are in Kernel Virtual Address layout */
1528 /* Lock the pages in the kernel */
1529 error
= sep_lock_kernel_pages(sep
, app_virt_addr
,
1530 data_size
, &lli_array_ptr
, SEP_DRIVER_IN_FLAG
);
1533 * Lock the pages of the user buffer
1534 * and translate them to pages
1536 error
= sep_lock_user_pages(sep
, app_virt_addr
,
1537 data_size
, &lli_array_ptr
, SEP_DRIVER_IN_FLAG
);
1542 dev_dbg(&sep
->pdev
->dev
, "output sep_in_num_pages is %x\n",
1543 sep
->dma_res_arr
[sep
->nr_dcb_creat
].in_num_pages
);
1546 info_entry_ptr
= NULL
;
1548 sep_lli_entries
= sep
->dma_res_arr
[sep
->nr_dcb_creat
].in_num_pages
;
1550 /* Loop till all the entries in in array are not processed */
1551 while (current_entry
< sep_lli_entries
) {
1553 /* Set the new input and output tables */
1555 (struct sep_lli_entry
*)lli_table_alloc_addr
;
1557 lli_table_alloc_addr
+= sizeof(struct sep_lli_entry
) *
1558 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
;
1560 if (lli_table_alloc_addr
>
1561 ((void *)sep
->shared_addr
+
1562 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES
+
1563 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES
)) {
1566 goto end_function_error
;
1570 /* Update the number of created tables */
1571 sep
->num_lli_tables_created
++;
1573 /* Calculate the maximum size of data for input table */
1574 table_data_size
= sep_calculate_lli_table_max_size(sep
,
1575 &lli_array_ptr
[current_entry
],
1576 (sep_lli_entries
- current_entry
),
1580 * If this is not the last table -
1581 * then align it to the block size
1583 if (!last_table_flag
)
1585 (table_data_size
/ block_size
) * block_size
;
1587 dev_dbg(&sep
->pdev
->dev
, "output table_data_size is %x\n",
1590 /* Construct input lli table */
1591 sep_build_lli_table(sep
, &lli_array_ptr
[current_entry
],
1593 ¤t_entry
, &num_entries_in_table
, table_data_size
);
1595 if (info_entry_ptr
== NULL
) {
1597 /* Set the output parameters to physical addresses */
1598 *lli_table_ptr
= sep_shared_area_virt_to_bus(sep
,
1600 *num_entries_ptr
= num_entries_in_table
;
1601 *table_data_size_ptr
= table_data_size
;
1603 dev_dbg(&sep
->pdev
->dev
,
1604 "output lli_table_in_ptr is %08lx\n",
1605 (unsigned long)*lli_table_ptr
);
1608 /* Update the info entry of the previous in table */
1609 info_entry_ptr
->bus_address
=
1610 sep_shared_area_virt_to_bus(sep
,
1612 info_entry_ptr
->block_size
=
1613 ((num_entries_in_table
) << 24) |
1616 /* Save the pointer to the info entry of the current tables */
1617 info_entry_ptr
= in_lli_table_ptr
+ num_entries_in_table
- 1;
1619 /* Print input tables */
1620 sep_debug_print_lli_tables(sep
, (struct sep_lli_entry
*)
1621 sep_shared_area_bus_to_virt(sep
, *lli_table_ptr
),
1622 *num_entries_ptr
, *table_data_size_ptr
);
1623 /* The array of the pages */
1624 kfree(lli_array_ptr
);
1627 /* Update DCB counter */
1628 sep
->nr_dcb_creat
++;
1632 /* Free all the allocated resources */
1633 kfree(sep
->dma_res_arr
[sep
->nr_dcb_creat
].in_map_array
);
1634 kfree(lli_array_ptr
);
1635 kfree(sep
->dma_res_arr
[sep
->nr_dcb_creat
].in_page_array
);
1642 * sep_construct_dma_tables_from_lli - prepare AES/DES mappings
1643 * @sep: pointer to struct sep_device
1645 * @sep_in_lli_entries:
1647 * @sep_out_lli_entries
1650 * @lli_table_out_ptr
1651 * @in_num_entries_ptr
1652 * @out_num_entries_ptr
1653 * @table_data_size_ptr
1655 * This function creates the input and output DMA tables for
1656 * symmetric operations (AES/DES) according to the block
1657 * size from LLI arays
1658 * Note that all bus addresses that are passed to the SEP
1659 * are in 32 bit format; the SEP is a 32 bit device
1661 static int sep_construct_dma_tables_from_lli(
1662 struct sep_device
*sep
,
1663 struct sep_lli_entry
*lli_in_array
,
1664 u32 sep_in_lli_entries
,
1665 struct sep_lli_entry
*lli_out_array
,
1666 u32 sep_out_lli_entries
,
1668 dma_addr_t
*lli_table_in_ptr
,
1669 dma_addr_t
*lli_table_out_ptr
,
1670 u32
*in_num_entries_ptr
,
1671 u32
*out_num_entries_ptr
,
1672 u32
*table_data_size_ptr
)
1674 /* Points to the area where next lli table can be allocated */
1675 void *lli_table_alloc_addr
= 0;
1676 /* Input lli table */
1677 struct sep_lli_entry
*in_lli_table_ptr
= NULL
;
1678 /* Output lli table */
1679 struct sep_lli_entry
*out_lli_table_ptr
= NULL
;
1680 /* Pointer to the info entry of the table - the last entry */
1681 struct sep_lli_entry
*info_in_entry_ptr
= NULL
;
1682 /* Pointer to the info entry of the table - the last entry */
1683 struct sep_lli_entry
*info_out_entry_ptr
= NULL
;
1684 /* Points to the first entry to be processed in the lli_in_array */
1685 u32 current_in_entry
= 0;
1686 /* Points to the first entry to be processed in the lli_out_array */
1687 u32 current_out_entry
= 0;
1688 /* Max size of the input table */
1689 u32 in_table_data_size
= 0;
1690 /* Max size of the output table */
1691 u32 out_table_data_size
= 0;
1692 /* Flag te signifies if this is the last tables build */
1693 u32 last_table_flag
= 0;
1694 /* The data size that should be in table */
1695 u32 table_data_size
= 0;
1696 /* Number of etnries in the input table */
1697 u32 num_entries_in_table
= 0;
1698 /* Number of etnries in the output table */
1699 u32 num_entries_out_table
= 0;
1701 /* Initiate to point after the message area */
1702 lli_table_alloc_addr
= (void *)(sep
->shared_addr
+
1703 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES
+
1704 (sep
->num_lli_tables_created
*
1705 (sizeof(struct sep_lli_entry
) *
1706 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
)));
1708 /* Loop till all the entries in in array are not processed */
1709 while (current_in_entry
< sep_in_lli_entries
) {
1710 /* Set the new input and output tables */
1712 (struct sep_lli_entry
*)lli_table_alloc_addr
;
1714 lli_table_alloc_addr
+= sizeof(struct sep_lli_entry
) *
1715 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
;
1717 /* Set the first output tables */
1719 (struct sep_lli_entry
*)lli_table_alloc_addr
;
1721 /* Check if the DMA table area limit was overrun */
1722 if ((lli_table_alloc_addr
+ sizeof(struct sep_lli_entry
) *
1723 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
) >
1724 ((void *)sep
->shared_addr
+
1725 SYNCHRONIC_DMA_TABLES_AREA_OFFSET_BYTES
+
1726 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES
)) {
1728 dev_warn(&sep
->pdev
->dev
, "dma table limit overrun\n");
1732 /* Update the number of the lli tables created */
1733 sep
->num_lli_tables_created
+= 2;
1735 lli_table_alloc_addr
+= sizeof(struct sep_lli_entry
) *
1736 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
;
1738 /* Calculate the maximum size of data for input table */
1739 in_table_data_size
=
1740 sep_calculate_lli_table_max_size(sep
,
1741 &lli_in_array
[current_in_entry
],
1742 (sep_in_lli_entries
- current_in_entry
),
1745 /* Calculate the maximum size of data for output table */
1746 out_table_data_size
=
1747 sep_calculate_lli_table_max_size(sep
,
1748 &lli_out_array
[current_out_entry
],
1749 (sep_out_lli_entries
- current_out_entry
),
1752 dev_dbg(&sep
->pdev
->dev
,
1753 "construct tables from lli in_table_data_size is %x\n",
1754 in_table_data_size
);
1756 dev_dbg(&sep
->pdev
->dev
,
1757 "construct tables from lli out_table_data_size is %x\n",
1758 out_table_data_size
);
1760 table_data_size
= in_table_data_size
;
1762 if (!last_table_flag
) {
1764 * If this is not the last table,
1765 * then must check where the data is smallest
1766 * and then align it to the block size
1768 if (table_data_size
> out_table_data_size
)
1769 table_data_size
= out_table_data_size
;
1772 * Now calculate the table size so that
1773 * it will be module block size
1775 table_data_size
= (table_data_size
/ block_size
) *
1779 /* Construct input lli table */
1780 sep_build_lli_table(sep
, &lli_in_array
[current_in_entry
],
1783 &num_entries_in_table
,
1786 /* Construct output lli table */
1787 sep_build_lli_table(sep
, &lli_out_array
[current_out_entry
],
1790 &num_entries_out_table
,
1793 /* If info entry is null - this is the first table built */
1794 if (info_in_entry_ptr
== NULL
) {
1795 /* Set the output parameters to physical addresses */
1797 sep_shared_area_virt_to_bus(sep
, in_lli_table_ptr
);
1799 *in_num_entries_ptr
= num_entries_in_table
;
1801 *lli_table_out_ptr
=
1802 sep_shared_area_virt_to_bus(sep
,
1805 *out_num_entries_ptr
= num_entries_out_table
;
1806 *table_data_size_ptr
= table_data_size
;
1808 dev_dbg(&sep
->pdev
->dev
,
1809 "output lli_table_in_ptr is %08lx\n",
1810 (unsigned long)*lli_table_in_ptr
);
1811 dev_dbg(&sep
->pdev
->dev
,
1812 "output lli_table_out_ptr is %08lx\n",
1813 (unsigned long)*lli_table_out_ptr
);
1815 /* Update the info entry of the previous in table */
1816 info_in_entry_ptr
->bus_address
=
1817 sep_shared_area_virt_to_bus(sep
,
1820 info_in_entry_ptr
->block_size
=
1821 ((num_entries_in_table
) << 24) |
1824 /* Update the info entry of the previous in table */
1825 info_out_entry_ptr
->bus_address
=
1826 sep_shared_area_virt_to_bus(sep
,
1829 info_out_entry_ptr
->block_size
=
1830 ((num_entries_out_table
) << 24) |
1833 dev_dbg(&sep
->pdev
->dev
,
1834 "output lli_table_in_ptr:%08lx %08x\n",
1835 (unsigned long)info_in_entry_ptr
->bus_address
,
1836 info_in_entry_ptr
->block_size
);
1838 dev_dbg(&sep
->pdev
->dev
,
1839 "output lli_table_out_ptr:%08lx %08x\n",
1840 (unsigned long)info_out_entry_ptr
->bus_address
,
1841 info_out_entry_ptr
->block_size
);
1844 /* Save the pointer to the info entry of the current tables */
1845 info_in_entry_ptr
= in_lli_table_ptr
+
1846 num_entries_in_table
- 1;
1847 info_out_entry_ptr
= out_lli_table_ptr
+
1848 num_entries_out_table
- 1;
1850 dev_dbg(&sep
->pdev
->dev
,
1851 "output num_entries_out_table is %x\n",
1852 (u32
)num_entries_out_table
);
1853 dev_dbg(&sep
->pdev
->dev
,
1854 "output info_in_entry_ptr is %lx\n",
1855 (unsigned long)info_in_entry_ptr
);
1856 dev_dbg(&sep
->pdev
->dev
,
1857 "output info_out_entry_ptr is %lx\n",
1858 (unsigned long)info_out_entry_ptr
);
1861 /* Print input tables */
1862 sep_debug_print_lli_tables(sep
,
1863 (struct sep_lli_entry
*)
1864 sep_shared_area_bus_to_virt(sep
, *lli_table_in_ptr
),
1865 *in_num_entries_ptr
,
1866 *table_data_size_ptr
);
1868 /* Print output tables */
1869 sep_debug_print_lli_tables(sep
,
1870 (struct sep_lli_entry
*)
1871 sep_shared_area_bus_to_virt(sep
, *lli_table_out_ptr
),
1872 *out_num_entries_ptr
,
1873 *table_data_size_ptr
);
1879 * sep_prepare_input_output_dma_table - prepare DMA I/O table
1880 * @app_virt_in_addr:
1881 * @app_virt_out_addr:
1884 * @lli_table_in_ptr:
1885 * @lli_table_out_ptr:
1886 * @in_num_entries_ptr:
1887 * @out_num_entries_ptr:
1888 * @table_data_size_ptr:
1889 * @is_kva: set for kernel data; used only for kernel crypto module
1891 * This function builds input and output DMA tables for synhronic
1892 * symmetric operations (AES, DES, HASH). It also checks that each table
1893 * is of the modular block size
1894 * Note that all bus addresses that are passed to the SEP
1895 * are in 32 bit format; the SEP is a 32 bit device
1897 static int sep_prepare_input_output_dma_table(struct sep_device
*sep
,
1898 unsigned long app_virt_in_addr
,
1899 unsigned long app_virt_out_addr
,
1902 dma_addr_t
*lli_table_in_ptr
,
1903 dma_addr_t
*lli_table_out_ptr
,
1904 u32
*in_num_entries_ptr
,
1905 u32
*out_num_entries_ptr
,
1906 u32
*table_data_size_ptr
,
1911 /* Array of pointers of page */
1912 struct sep_lli_entry
*lli_in_array
;
1913 /* Array of pointers of page */
1914 struct sep_lli_entry
*lli_out_array
;
1916 if (data_size
== 0) {
1917 /* Prepare empty table for input and output */
1918 sep_prepare_empty_lli_table(sep
, lli_table_in_ptr
,
1919 in_num_entries_ptr
, table_data_size_ptr
);
1921 sep_prepare_empty_lli_table(sep
, lli_table_out_ptr
,
1922 out_num_entries_ptr
, table_data_size_ptr
);
1924 goto update_dcb_counter
;
1927 /* Initialize the pages pointers */
1928 sep
->dma_res_arr
[sep
->nr_dcb_creat
].in_page_array
= NULL
;
1929 sep
->dma_res_arr
[sep
->nr_dcb_creat
].out_page_array
= NULL
;
1931 /* Lock the pages of the buffer and translate them to pages */
1932 if (is_kva
== true) {
1933 error
= sep_lock_kernel_pages(sep
, app_virt_in_addr
,
1934 data_size
, &lli_in_array
, SEP_DRIVER_IN_FLAG
);
1937 dev_warn(&sep
->pdev
->dev
,
1938 "lock kernel for in failed\n");
1942 error
= sep_lock_kernel_pages(sep
, app_virt_out_addr
,
1943 data_size
, &lli_out_array
, SEP_DRIVER_OUT_FLAG
);
1946 dev_warn(&sep
->pdev
->dev
,
1947 "lock kernel for out failed\n");
1953 error
= sep_lock_user_pages(sep
, app_virt_in_addr
,
1954 data_size
, &lli_in_array
, SEP_DRIVER_IN_FLAG
);
1956 dev_warn(&sep
->pdev
->dev
,
1957 "sep_lock_user_pages for input virtual buffer failed\n");
1961 error
= sep_lock_user_pages(sep
, app_virt_out_addr
,
1962 data_size
, &lli_out_array
, SEP_DRIVER_OUT_FLAG
);
1965 dev_warn(&sep
->pdev
->dev
,
1966 "sep_lock_user_pages for output virtual buffer failed\n");
1967 goto end_function_free_lli_in
;
1971 dev_dbg(&sep
->pdev
->dev
, "prep input output dma table sep_in_num_pages is %x\n",
1972 sep
->dma_res_arr
[sep
->nr_dcb_creat
].in_num_pages
);
1973 dev_dbg(&sep
->pdev
->dev
, "sep_out_num_pages is %x\n",
1974 sep
->dma_res_arr
[sep
->nr_dcb_creat
].out_num_pages
);
1975 dev_dbg(&sep
->pdev
->dev
, "SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP is %x\n",
1976 SEP_DRIVER_ENTRIES_PER_TABLE_IN_SEP
);
1978 /* Call the function that creates table from the lli arrays */
1979 error
= sep_construct_dma_tables_from_lli(sep
, lli_in_array
,
1980 sep
->dma_res_arr
[sep
->nr_dcb_creat
].in_num_pages
,
1982 sep
->dma_res_arr
[sep
->nr_dcb_creat
].out_num_pages
,
1983 block_size
, lli_table_in_ptr
, lli_table_out_ptr
,
1984 in_num_entries_ptr
, out_num_entries_ptr
, table_data_size_ptr
);
1987 dev_warn(&sep
->pdev
->dev
,
1988 "sep_construct_dma_tables_from_lli failed\n");
1989 goto end_function_with_error
;
1992 kfree(lli_out_array
);
1993 kfree(lli_in_array
);
1996 /* Update DCB counter */
1997 sep
->nr_dcb_creat
++;
2001 end_function_with_error
:
2002 kfree(sep
->dma_res_arr
[sep
->nr_dcb_creat
].out_map_array
);
2003 kfree(sep
->dma_res_arr
[sep
->nr_dcb_creat
].out_page_array
);
2004 kfree(lli_out_array
);
2007 end_function_free_lli_in
:
2008 kfree(sep
->dma_res_arr
[sep
->nr_dcb_creat
].in_map_array
);
2009 kfree(sep
->dma_res_arr
[sep
->nr_dcb_creat
].in_page_array
);
2010 kfree(lli_in_array
);
2019 * sep_prepare_input_output_dma_table_in_dcb - prepare control blocks
2020 * @app_in_address: unsigned long; for data buffer in (user space)
2021 * @app_out_address: unsigned long; for data buffer out (user space)
2022 * @data_in_size: u32; for size of data
2023 * @block_size: u32; for block size
2024 * @tail_block_size: u32; for size of tail block
2025 * @isapplet: bool; to indicate external app
2026 * @is_kva: bool; kernel buffer; only used for kernel crypto module
2028 * This function prepares the linked DMA tables and puts the
2029 * address for the linked list of tables inta a DCB (data control
2030 * block) the address of which is known by the SEP hardware
2031 * Note that all bus addresses that are passed to the SEP
2032 * are in 32 bit format; the SEP is a 32 bit device
2034 static int sep_prepare_input_output_dma_table_in_dcb(struct sep_device
*sep
,
2035 unsigned long app_in_address
,
2036 unsigned long app_out_address
,
2039 u32 tail_block_size
,
2046 /* Address of the created DCB table */
2047 struct sep_dcblock
*dcb_table_ptr
= NULL
;
2048 /* The physical address of the first input DMA table */
2049 dma_addr_t in_first_mlli_address
= 0;
2050 /* Number of entries in the first input DMA table */
2051 u32 in_first_num_entries
= 0;
2052 /* The physical address of the first output DMA table */
2053 dma_addr_t out_first_mlli_address
= 0;
2054 /* Number of entries in the first output DMA table */
2055 u32 out_first_num_entries
= 0;
2056 /* Data in the first input/output table */
2057 u32 first_data_size
= 0;
2059 if (sep
->nr_dcb_creat
== SEP_MAX_NUM_SYNC_DMA_OPS
) {
2060 /* No more DCBs to allocate */
2061 dev_warn(&sep
->pdev
->dev
, "no more DCBs available\n");
2066 /* Allocate new DCB */
2067 dcb_table_ptr
= (struct sep_dcblock
*)(sep
->shared_addr
+
2068 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES
+
2069 (sep
->nr_dcb_creat
* sizeof(struct sep_dcblock
)));
2071 /* Set the default values in the DCB */
2072 dcb_table_ptr
->input_mlli_address
= 0;
2073 dcb_table_ptr
->input_mlli_num_entries
= 0;
2074 dcb_table_ptr
->input_mlli_data_size
= 0;
2075 dcb_table_ptr
->output_mlli_address
= 0;
2076 dcb_table_ptr
->output_mlli_num_entries
= 0;
2077 dcb_table_ptr
->output_mlli_data_size
= 0;
2078 dcb_table_ptr
->tail_data_size
= 0;
2079 dcb_table_ptr
->out_vr_tail_pt
= 0;
2081 if (isapplet
== true) {
2083 /* Check if there is enough data for DMA operation */
2084 if (data_in_size
< SEP_DRIVER_MIN_DATA_SIZE_PER_TABLE
) {
2085 if (is_kva
== true) {
2086 memcpy(dcb_table_ptr
->tail_data
,
2087 (void *)app_in_address
, data_in_size
);
2089 if (copy_from_user(dcb_table_ptr
->tail_data
,
2090 (void __user
*)app_in_address
,
2097 dcb_table_ptr
->tail_data_size
= data_in_size
;
2099 /* Set the output user-space address for mem2mem op */
2100 if (app_out_address
)
2101 dcb_table_ptr
->out_vr_tail_pt
=
2102 (aligned_u64
)app_out_address
;
2105 * Update both data length parameters in order to avoid
2106 * second data copy and allow building of empty mlli
2113 if (!app_out_address
) {
2114 tail_size
= data_in_size
% block_size
;
2116 if (tail_block_size
== block_size
)
2117 tail_size
= block_size
;
2124 if (is_kva
== true) {
2125 memcpy(dcb_table_ptr
->tail_data
,
2126 (void *)(app_in_address
+ data_in_size
-
2127 tail_size
), tail_size
);
2129 /* We have tail data - copy it to DCB */
2130 if (copy_from_user(dcb_table_ptr
->tail_data
,
2131 (void *)(app_in_address
+
2132 data_in_size
- tail_size
), tail_size
)) {
2137 if (app_out_address
)
2139 * Calculate the output address
2140 * according to tail data size
2142 dcb_table_ptr
->out_vr_tail_pt
=
2143 (aligned_u64
)app_out_address
+ data_in_size
2146 /* Save the real tail data size */
2147 dcb_table_ptr
->tail_data_size
= tail_size
;
2149 * Update the data size without the tail
2150 * data size AKA data for the dma
2152 data_in_size
= (data_in_size
- tail_size
);
2155 /* Check if we need to build only input table or input/output */
2156 if (app_out_address
) {
2157 /* Prepare input/output tables */
2158 error
= sep_prepare_input_output_dma_table(sep
,
2163 &in_first_mlli_address
,
2164 &out_first_mlli_address
,
2165 &in_first_num_entries
,
2166 &out_first_num_entries
,
2170 /* Prepare input tables */
2171 error
= sep_prepare_input_dma_table(sep
,
2175 &in_first_mlli_address
,
2176 &in_first_num_entries
,
2182 dev_warn(&sep
->pdev
->dev
, "prepare DMA table call failed from prepare DCB call\n");
2186 /* Set the DCB values */
2187 dcb_table_ptr
->input_mlli_address
= in_first_mlli_address
;
2188 dcb_table_ptr
->input_mlli_num_entries
= in_first_num_entries
;
2189 dcb_table_ptr
->input_mlli_data_size
= first_data_size
;
2190 dcb_table_ptr
->output_mlli_address
= out_first_mlli_address
;
2191 dcb_table_ptr
->output_mlli_num_entries
= out_first_num_entries
;
2192 dcb_table_ptr
->output_mlli_data_size
= first_data_size
;
2200 * sep_free_dma_tables_and_dcb - free DMA tables and DCBs
2201 * @sep: pointer to struct sep_device
2202 * @isapplet: indicates external application (used for kernel access)
2203 * @is_kva: indicates kernel addresses (only used for kernel crypto)
2205 * This function frees the DMA tables and DCB
2207 static int sep_free_dma_tables_and_dcb(struct sep_device
*sep
, bool isapplet
,
2213 struct sep_dcblock
*dcb_table_ptr
;
2214 unsigned long pt_hold
;
2217 if (isapplet
== true) {
2218 /* Set pointer to first DCB table */
2219 dcb_table_ptr
= (struct sep_dcblock
*)
2221 SEP_DRIVER_SYSTEM_DCB_MEMORY_OFFSET_IN_BYTES
);
2223 /* Go over each DCB and see if tail pointer must be updated */
2224 for (i
= 0; i
< sep
->nr_dcb_creat
; i
++, dcb_table_ptr
++) {
2225 if (dcb_table_ptr
->out_vr_tail_pt
) {
2226 pt_hold
= (unsigned long)dcb_table_ptr
->out_vr_tail_pt
;
2227 tail_pt
= (void *)pt_hold
;
2228 if (is_kva
== true) {
2230 dcb_table_ptr
->tail_data
,
2231 dcb_table_ptr
->tail_data_size
);
2233 error_temp
= copy_to_user(
2235 dcb_table_ptr
->tail_data
,
2236 dcb_table_ptr
->tail_data_size
);
2239 /* Release the DMA resource */
2246 /* Free the output pages, if any */
2247 sep_free_dma_table_data_handler(sep
);
2253 * sep_get_static_pool_addr_handler - get static pool address
2254 * @sep: pointer to struct sep_device
2256 * This function sets the bus and virtual addresses of the static pool
2258 static int sep_get_static_pool_addr_handler(struct sep_device
*sep
)
2260 u32
*static_pool_addr
= NULL
;
2262 static_pool_addr
= (u32
*)(sep
->shared_addr
+
2263 SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES
);
2265 static_pool_addr
[0] = SEP_STATIC_POOL_VAL_TOKEN
;
2266 static_pool_addr
[1] = (u32
)sep
->shared_bus
+
2267 SEP_DRIVER_STATIC_AREA_OFFSET_IN_BYTES
;
2269 dev_dbg(&sep
->pdev
->dev
, "static pool segment: physical %x\n",
2270 (u32
)static_pool_addr
[1]);
2276 * sep_end_transaction_handler - end transaction
2277 * @sep: pointer to struct sep_device
2279 * This API handles the end transaction request
2281 static int sep_end_transaction_handler(struct sep_device
*sep
)
2283 /* Clear the data pool pointers Token */
2284 memset((void *)(sep
->shared_addr
+
2285 SEP_DRIVER_DATA_POOL_ALLOCATION_OFFSET_IN_BYTES
),
2286 0, sep
->num_of_data_allocations
*2*sizeof(u32
));
2288 /* Check that all the DMA resources were freed */
2289 sep_free_dma_table_data_handler(sep
);
2291 clear_bit(SEP_MMAP_LOCK_BIT
, &sep
->in_use_flags
);
2294 * We are now through with the transaction. Let's
2295 * allow other processes who have the device open
2296 * to perform transactions
2298 mutex_lock(&sep
->sep_mutex
);
2299 sep
->pid_doing_transaction
= 0;
2300 mutex_unlock(&sep
->sep_mutex
);
2301 /* Raise event for stuck contextes */
2302 wake_up(&sep
->event
);
2308 * sep_prepare_dcb_handler - prepare a control block
2309 * @sep: pointer to struct sep_device
2310 * @arg: pointer to user parameters
2312 * This function will retrieve the RAR buffer physical addresses, type
2313 * & size corresponding to the RAR handles provided in the buffers vector.
2315 static int sep_prepare_dcb_handler(struct sep_device
*sep
, unsigned long arg
)
2318 /* Command arguments */
2319 struct build_dcb_struct command_args
;
2321 /* Get the command arguments */
2322 if (copy_from_user(&command_args
, (void __user
*)arg
,
2323 sizeof(struct build_dcb_struct
))) {
2328 dev_dbg(&sep
->pdev
->dev
, "prep dcb handler app_in_address is %08llx\n",
2329 command_args
.app_in_address
);
2330 dev_dbg(&sep
->pdev
->dev
, "app_out_address is %08llx\n",
2331 command_args
.app_out_address
);
2332 dev_dbg(&sep
->pdev
->dev
, "data_size is %x\n",
2333 command_args
.data_in_size
);
2334 dev_dbg(&sep
->pdev
->dev
, "block_size is %x\n",
2335 command_args
.block_size
);
2336 dev_dbg(&sep
->pdev
->dev
, "tail block_size is %x\n",
2337 command_args
.tail_block_size
);
2339 error
= sep_prepare_input_output_dma_table_in_dcb(sep
,
2340 (unsigned long)command_args
.app_in_address
,
2341 (unsigned long)command_args
.app_out_address
,
2342 command_args
.data_in_size
, command_args
.block_size
,
2343 command_args
.tail_block_size
, true, false);
2351 * sep_free_dcb_handler - free control block resources
2352 * @sep: pointer to struct sep_device
2354 * This function frees the DCB resources and updates the needed
2355 * user-space buffers.
2357 static int sep_free_dcb_handler(struct sep_device
*sep
)
2359 return sep_free_dma_tables_and_dcb(sep
, false, false);
2363 * sep_rar_prepare_output_msg_handler - prepare an output message
2364 * @sep: pointer to struct sep_device
2365 * @arg: pointer to user parameters
2367 * This function will retrieve the RAR buffer physical addresses, type
2368 * & size corresponding to the RAR handles provided in the buffers vector.
2370 static int sep_rar_prepare_output_msg_handler(struct sep_device
*sep
,
2375 struct rar_hndl_to_bus_struct command_args
;
2377 dma_addr_t rar_bus
= 0;
2378 /* Holds the RAR address in the system memory offset */
2382 if (copy_from_user(&command_args
, (void __user
*)arg
,
2383 sizeof(command_args
))) {
2388 /* Call to translation function only if user handle is not NULL */
2389 if (command_args
.rar_handle
)
2391 dev_dbg(&sep
->pdev
->dev
, "rar msg; rar_addr_bus = %x\n", (u32
)rar_bus
);
2393 /* Set value in the SYSTEM MEMORY offset */
2394 rar_addr
= (u32
*)(sep
->shared_addr
+
2395 SEP_DRIVER_SYSTEM_RAR_MEMORY_OFFSET_IN_BYTES
);
2397 /* Copy the physical address to the System Area for the SEP */
2398 rar_addr
[0] = SEP_RAR_VAL_TOKEN
;
2399 rar_addr
[1] = rar_bus
;
2406 * sep_ioctl - ioctl api
2407 * @filp: pointer to struct file
2409 * @arg: pointer to argument structure
2411 * Implement the ioctl methods available on the SEP device.
2413 static long sep_ioctl(struct file
*filp
, unsigned int cmd
, unsigned long arg
)
2416 struct sep_device
*sep
= filp
->private_data
;
2418 /* Make sure we own this device */
2419 mutex_lock(&sep
->sep_mutex
);
2420 if ((current
->pid
!= sep
->pid_doing_transaction
) &&
2421 (sep
->pid_doing_transaction
!= 0)) {
2422 dev_dbg(&sep
->pdev
->dev
, "ioctl pid is not owner\n");
2427 mutex_unlock(&sep
->sep_mutex
);
2429 if (_IOC_TYPE(cmd
) != SEP_IOC_MAGIC_NUMBER
)
2432 /* Lock to prevent the daemon to interfere with operation */
2433 mutex_lock(&sep
->ioctl_mutex
);
2436 case SEP_IOCSENDSEPCOMMAND
:
2437 /* Send command to SEP */
2438 error
= sep_send_command_handler(sep
);
2440 case SEP_IOCALLOCDATAPOLL
:
2441 /* Allocate data pool */
2442 error
= sep_allocate_data_pool_memory_handler(sep
, arg
);
2444 case SEP_IOCGETSTATICPOOLADDR
:
2445 /* Inform the SEP the bus address of the static pool */
2446 error
= sep_get_static_pool_addr_handler(sep
);
2448 case SEP_IOCENDTRANSACTION
:
2449 error
= sep_end_transaction_handler(sep
);
2451 case SEP_IOCRARPREPAREMESSAGE
:
2452 error
= sep_rar_prepare_output_msg_handler(sep
, arg
);
2454 case SEP_IOCPREPAREDCB
:
2455 error
= sep_prepare_dcb_handler(sep
, arg
);
2457 case SEP_IOCFREEDCB
:
2458 error
= sep_free_dcb_handler(sep
);
2466 mutex_unlock(&sep
->ioctl_mutex
);
2471 * sep_singleton_ioctl - ioctl api for singleton interface
2472 * @filp: pointer to struct file
2474 * @arg: pointer to argument structure
2476 * Implement the additional ioctls for the singleton device
2478 static long sep_singleton_ioctl(struct file
*filp
, u32 cmd
, unsigned long arg
)
2481 struct sep_device
*sep
= filp
->private_data
;
2483 /* Check that the command is for the SEP device */
2484 if (_IOC_TYPE(cmd
) != SEP_IOC_MAGIC_NUMBER
)
2487 /* Make sure we own this device */
2488 mutex_lock(&sep
->sep_mutex
);
2489 if ((current
->pid
!= sep
->pid_doing_transaction
) &&
2490 (sep
->pid_doing_transaction
!= 0)) {
2491 dev_dbg(&sep
->pdev
->dev
, "singleton ioctl pid is not owner\n");
2492 mutex_unlock(&sep
->sep_mutex
);
2496 mutex_unlock(&sep
->sep_mutex
);
2499 case SEP_IOCTLSETCALLERID
:
2500 mutex_lock(&sep
->ioctl_mutex
);
2501 error
= sep_set_caller_id_handler(sep
, arg
);
2502 mutex_unlock(&sep
->ioctl_mutex
);
2505 error
= sep_ioctl(filp
, cmd
, arg
);
2512 * sep_request_daemon_ioctl - ioctl for daemon
2513 * @filp: pointer to struct file
2515 * @arg: pointer to argument structure
2517 * Called by the request daemon to perform ioctls on the daemon device
2519 static long sep_request_daemon_ioctl(struct file
*filp
, u32 cmd
,
2524 struct sep_device
*sep
= filp
->private_data
;
2526 /* Check that the command is for SEP device */
2527 if (_IOC_TYPE(cmd
) != SEP_IOC_MAGIC_NUMBER
)
2530 /* Only one process can access ioctl at any given time */
2531 mutex_lock(&sep
->ioctl_mutex
);
2534 case SEP_IOCSENDSEPRPLYCOMMAND
:
2535 /* Send reply command to SEP */
2536 error
= sep_req_daemon_send_reply_command_handler(sep
);
2538 case SEP_IOCENDTRANSACTION
:
2540 * End req daemon transaction, do nothing
2541 * will be removed upon update in middleware
2549 mutex_unlock(&sep
->ioctl_mutex
);
2554 * sep_inthandler - interrupt handler
2556 * @dev_id: device id
2558 static irqreturn_t
sep_inthandler(int irq
, void *dev_id
)
2560 irqreturn_t int_error
= IRQ_HANDLED
;
2561 unsigned long lck_flags
;
2562 u32 reg_val
, reg_val2
= 0;
2563 struct sep_device
*sep
= dev_id
;
2565 /* Read the IRR register to check if this is SEP interrupt */
2566 reg_val
= sep_read_reg(sep
, HW_HOST_IRR_REG_ADDR
);
2568 if (reg_val
& (0x1 << 13)) {
2569 /* Lock and update the counter of reply messages */
2570 spin_lock_irqsave(&sep
->snd_rply_lck
, lck_flags
);
2572 spin_unlock_irqrestore(&sep
->snd_rply_lck
, lck_flags
);
2574 dev_dbg(&sep
->pdev
->dev
, "sep int: send_ct %lx reply_ct %lx\n",
2575 sep
->send_ct
, sep
->reply_ct
);
2577 /* Is this printf or daemon request? */
2578 reg_val2
= sep_read_reg(sep
, HW_HOST_SEP_HOST_GPR2_REG_ADDR
);
2579 dev_dbg(&sep
->pdev
->dev
,
2580 "SEP Interrupt - reg2 is %08x\n", reg_val2
);
2582 if ((reg_val2
>> 30) & 0x1) {
2583 dev_dbg(&sep
->pdev
->dev
, "int: printf request\n");
2584 wake_up(&sep
->event_request_daemon
);
2585 } else if (reg_val2
>> 31) {
2586 dev_dbg(&sep
->pdev
->dev
, "int: daemon request\n");
2587 wake_up(&sep
->event_request_daemon
);
2589 dev_dbg(&sep
->pdev
->dev
, "int: SEP reply\n");
2590 wake_up(&sep
->event
);
2593 dev_dbg(&sep
->pdev
->dev
, "int: not SEP interrupt\n");
2594 int_error
= IRQ_NONE
;
2596 if (int_error
== IRQ_HANDLED
)
2597 sep_write_reg(sep
, HW_HOST_ICR_REG_ADDR
, reg_val
);
2603 * sep_reconfig_shared_area - reconfigure shared area
2604 * @sep: pointer to struct sep_device
2606 * Reconfig the shared area between HOST and SEP - needed in case
2607 * the DX_CC_Init function was called before OS loading.
2609 static int sep_reconfig_shared_area(struct sep_device
*sep
)
2613 /* use to limit waiting for SEP */
2614 unsigned long end_time
;
2616 /* Send the new SHARED MESSAGE AREA to the SEP */
2617 dev_dbg(&sep
->pdev
->dev
, "reconfig shared; sending %08llx to sep\n",
2618 (unsigned long long)sep
->shared_bus
);
2620 sep_write_reg(sep
, HW_HOST_HOST_SEP_GPR1_REG_ADDR
, sep
->shared_bus
);
2622 /* Poll for SEP response */
2623 ret_val
= sep_read_reg(sep
, HW_HOST_SEP_HOST_GPR1_REG_ADDR
);
2625 end_time
= jiffies
+ (WAIT_TIME
* HZ
);
2627 while ((time_before(jiffies
, end_time
)) && (ret_val
!= 0xffffffff) &&
2628 (ret_val
!= sep
->shared_bus
))
2629 ret_val
= sep_read_reg(sep
, HW_HOST_SEP_HOST_GPR1_REG_ADDR
);
2631 /* Check the return value (register) */
2632 if (ret_val
!= sep
->shared_bus
) {
2633 dev_warn(&sep
->pdev
->dev
, "could not reconfig shared area\n");
2634 dev_warn(&sep
->pdev
->dev
, "result was %x\n", ret_val
);
2639 dev_dbg(&sep
->pdev
->dev
, "reconfig shared area end\n");
2643 /* File operation for singleton SEP operations */
2644 static const struct file_operations singleton_file_operations
= {
2645 .owner
= THIS_MODULE
,
2646 .unlocked_ioctl
= sep_singleton_ioctl
,
2648 .open
= sep_singleton_open
,
2649 .release
= sep_singleton_release
,
2653 /* File operation for daemon operations */
2654 static const struct file_operations daemon_file_operations
= {
2655 .owner
= THIS_MODULE
,
2656 .unlocked_ioctl
= sep_request_daemon_ioctl
,
2657 .poll
= sep_request_daemon_poll
,
2658 .open
= sep_request_daemon_open
,
2659 .release
= sep_request_daemon_release
,
2660 .mmap
= sep_request_daemon_mmap
,
2663 /* The files operations structure of the driver */
2664 static const struct file_operations sep_file_operations
= {
2665 .owner
= THIS_MODULE
,
2666 .unlocked_ioctl
= sep_ioctl
,
2669 .release
= sep_release
,
2674 * sep_register_driver_with_fs - register misc devices
2675 * @sep: pointer to struct sep_device
2677 * This function registers the driver with the file system
2679 static int sep_register_driver_with_fs(struct sep_device
*sep
)
2683 sep
->miscdev_sep
.minor
= MISC_DYNAMIC_MINOR
;
2684 sep
->miscdev_sep
.name
= SEP_DEV_NAME
;
2685 sep
->miscdev_sep
.fops
= &sep_file_operations
;
2687 sep
->miscdev_singleton
.minor
= MISC_DYNAMIC_MINOR
;
2688 sep
->miscdev_singleton
.name
= SEP_DEV_SINGLETON
;
2689 sep
->miscdev_singleton
.fops
= &singleton_file_operations
;
2691 sep
->miscdev_daemon
.minor
= MISC_DYNAMIC_MINOR
;
2692 sep
->miscdev_daemon
.name
= SEP_DEV_DAEMON
;
2693 sep
->miscdev_daemon
.fops
= &daemon_file_operations
;
2695 ret_val
= misc_register(&sep
->miscdev_sep
);
2697 dev_warn(&sep
->pdev
->dev
, "misc reg fails for SEP %x\n",
2702 ret_val
= misc_register(&sep
->miscdev_singleton
);
2704 dev_warn(&sep
->pdev
->dev
, "misc reg fails for sing %x\n",
2706 misc_deregister(&sep
->miscdev_sep
);
2710 ret_val
= misc_register(&sep
->miscdev_daemon
);
2712 dev_warn(&sep
->pdev
->dev
, "misc reg fails for dmn %x\n",
2714 misc_deregister(&sep
->miscdev_sep
);
2715 misc_deregister(&sep
->miscdev_singleton
);
2724 * sep_probe - probe a matching PCI device
2726 * @end: pci_device_id
2728 * Attempt to set up and configure a SEP device that has been
2729 * discovered by the PCI layer.
2731 static int __devinit
sep_probe(struct pci_dev
*pdev
,
2732 const struct pci_device_id
*ent
)
2735 struct sep_device
*sep
;
2737 if (sep_dev
!= NULL
) {
2738 dev_warn(&pdev
->dev
, "only one SEP supported.\n");
2742 /* Enable the device */
2743 error
= pci_enable_device(pdev
);
2745 dev_warn(&pdev
->dev
, "error enabling pci device\n");
2749 /* Allocate the sep_device structure for this device */
2750 sep_dev
= kzalloc(sizeof(struct sep_device
), GFP_ATOMIC
);
2751 if (sep_dev
== NULL
) {
2752 dev_warn(&pdev
->dev
,
2753 "can't kmalloc the sep_device structure\n");
2755 goto end_function_disable_device
;
2759 * We're going to use another variable for actually
2760 * working with the device; this way, if we have
2761 * multiple devices in the future, it would be easier
2762 * to make appropriate changes
2766 sep
->pdev
= pci_dev_get(pdev
);
2768 init_waitqueue_head(&sep
->event
);
2769 init_waitqueue_head(&sep
->event_request_daemon
);
2770 spin_lock_init(&sep
->snd_rply_lck
);
2771 mutex_init(&sep
->sep_mutex
);
2772 mutex_init(&sep
->ioctl_mutex
);
2774 dev_dbg(&sep
->pdev
->dev
, "sep probe: PCI obtained, device being prepared\n");
2775 dev_dbg(&sep
->pdev
->dev
, "revision is %d\n", sep
->pdev
->revision
);
2777 /* Set up our register area */
2778 sep
->reg_physical_addr
= pci_resource_start(sep
->pdev
, 0);
2779 if (!sep
->reg_physical_addr
) {
2780 dev_warn(&sep
->pdev
->dev
, "Error getting register start\n");
2782 goto end_function_free_sep_dev
;
2785 sep
->reg_physical_end
= pci_resource_end(sep
->pdev
, 0);
2786 if (!sep
->reg_physical_end
) {
2787 dev_warn(&sep
->pdev
->dev
, "Error getting register end\n");
2789 goto end_function_free_sep_dev
;
2792 sep
->reg_addr
= ioremap_nocache(sep
->reg_physical_addr
,
2793 (size_t)(sep
->reg_physical_end
- sep
->reg_physical_addr
+ 1));
2794 if (!sep
->reg_addr
) {
2795 dev_warn(&sep
->pdev
->dev
, "Error getting register virtual\n");
2797 goto end_function_free_sep_dev
;
2800 dev_dbg(&sep
->pdev
->dev
,
2801 "Register area start %llx end %llx virtual %p\n",
2802 (unsigned long long)sep
->reg_physical_addr
,
2803 (unsigned long long)sep
->reg_physical_end
,
2806 /* Allocate the shared area */
2807 sep
->shared_size
= SEP_DRIVER_MESSAGE_SHARED_AREA_SIZE_IN_BYTES
+
2808 SYNCHRONIC_DMA_TABLES_AREA_SIZE_BYTES
+
2809 SEP_DRIVER_DATA_POOL_SHARED_AREA_SIZE_IN_BYTES
+
2810 SEP_DRIVER_STATIC_AREA_SIZE_IN_BYTES
+
2811 SEP_DRIVER_SYSTEM_DATA_MEMORY_SIZE_IN_BYTES
;
2813 if (sep_map_and_alloc_shared_area(sep
)) {
2815 /* Allocation failed */
2816 goto end_function_error
;
2819 /* Clear ICR register */
2820 sep_write_reg(sep
, HW_HOST_ICR_REG_ADDR
, 0xFFFFFFFF);
2822 /* Set the IMR register - open only GPR 2 */
2823 sep_write_reg(sep
, HW_HOST_IMR_REG_ADDR
, (~(0x1 << 13)));
2825 /* Read send/receive counters from SEP */
2826 sep
->reply_ct
= sep_read_reg(sep
, HW_HOST_SEP_HOST_GPR2_REG_ADDR
);
2827 sep
->reply_ct
&= 0x3FFFFFFF;
2828 sep
->send_ct
= sep
->reply_ct
;
2830 /* Get the interrupt line */
2831 error
= request_irq(pdev
->irq
, sep_inthandler
, IRQF_SHARED
,
2835 goto end_function_deallocate_sep_shared_area
;
2837 /* The new chip requires a shared area reconfigure */
2838 if (sep
->pdev
->revision
== 4) { /* Only for new chip */
2839 error
= sep_reconfig_shared_area(sep
);
2841 goto end_function_free_irq
;
2843 /* Finally magic up the device nodes */
2844 /* Register driver with the fs */
2845 error
= sep_register_driver_with_fs(sep
);
2850 end_function_free_irq
:
2851 free_irq(pdev
->irq
, sep
);
2853 end_function_deallocate_sep_shared_area
:
2854 /* De-allocate shared area */
2855 sep_unmap_and_free_shared_area(sep
);
2858 iounmap(sep
->reg_addr
);
2860 end_function_free_sep_dev
:
2861 pci_dev_put(sep_dev
->pdev
);
2865 end_function_disable_device
:
2866 pci_disable_device(pdev
);
2872 static void sep_remove(struct pci_dev
*pdev
)
2874 struct sep_device
*sep
= sep_dev
;
2876 /* Unregister from fs */
2877 misc_deregister(&sep
->miscdev_sep
);
2878 misc_deregister(&sep
->miscdev_singleton
);
2879 misc_deregister(&sep
->miscdev_daemon
);
2882 free_irq(sep
->pdev
->irq
, sep
);
2884 /* Free the shared area */
2885 sep_unmap_and_free_shared_area(sep_dev
);
2886 iounmap((void *) sep_dev
->reg_addr
);
2889 static DEFINE_PCI_DEVICE_TABLE(sep_pci_id_tbl
) = {
2890 {PCI_DEVICE(PCI_VENDOR_ID_INTEL
, MFLD_PCI_DEVICE_ID
)},
2894 MODULE_DEVICE_TABLE(pci
, sep_pci_id_tbl
);
2896 /* Field for registering driver to PCI device */
2897 static struct pci_driver sep_pci_driver
= {
2898 .name
= "sep_sec_driver",
2899 .id_table
= sep_pci_id_tbl
,
2901 .remove
= sep_remove
2906 * sep_init - init function
2908 * Module load time. Register the PCI device driver.
2910 static int __init
sep_init(void)
2912 return pci_register_driver(&sep_pci_driver
);
2917 * sep_exit - called to unload driver
2919 * Drop the misc devices then remove and unmap the various resources
2920 * that are not released by the driver remove method.
2922 static void __exit
sep_exit(void)
2924 pci_unregister_driver(&sep_pci_driver
);
2928 module_init(sep_init
);
2929 module_exit(sep_exit
);
2931 MODULE_LICENSE("GPL");