/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2007 Adaptec, Inc. (aacraid@adaptec.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Abstract: Contains all routines for control of the AFA comm layer
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/blkdev.h>
#include <linux/delay.h> /* ssleep prototype */
#include <linux/kthread.h>
#include <linux/semaphore.h>
#include <asm/uaccess.h>
#include <scsi/scsi_host.h>

#include "aacraid.h"
/**
 *	ioctl_send_fib	-	send a FIB from userspace
 *	@dev:	adapter being processed
 *	@arg:	arguments to the ioctl call
 *
 *	This routine sends a fib to the adapter on behalf of a user level
 *	program.
 */
# define AAC_DEBUG_PREAMBLE	KERN_INFO
# define AAC_DEBUG_POSTAMBLE
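
/*
 * Illustrative sketch only, not part of the driver: a user-space management
 * tool would typically reach ioctl_send_fib() by opening the adapter's
 * character device and issuing FSACTL_SEND_LARGE_FIB with a FIB whose header
 * describes its own size.  The device path, fd and fib variables below are
 * hypothetical.
 *
 *	int fd = open("/dev/aac0", O_RDWR);
 *	struct hw_fib fib;
 *
 *	memset(&fib, 0, sizeof(fib));
 *	// fill in fib.header.Command, fib.header.Size, fib.header.SenderSize
 *	// and the payload before submitting it
 *	if (ioctl(fd, FSACTL_SEND_LARGE_FIB, &fib) < 0)
 *		perror("FSACTL_SEND_LARGE_FIB");
 */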
static int ioctl_send_fib(struct aac_dev * dev, void __user *arg)
{
	struct hw_fib * kfib;
	struct fib *fibptr;
	struct hw_fib * hw_fib = (struct hw_fib *)0;
	dma_addr_t hw_fib_pa = (dma_addr_t)0LL;
	unsigned int size;
	int retval;
	fibptr = aac_fib_alloc(dev);

	kfib = fibptr->hw_fib_va;
	/*
	 *	First copy in the header so that we can check the size field.
	 */
	if (copy_from_user((void *)kfib, arg, sizeof(struct aac_fibhdr))) {
		aac_fib_free(fibptr);
		return -EFAULT;
	}
	/*
	 *	Since we copy based on the fib header size, make sure that we
	 *	will not overrun the buffer when we copy the memory. Return
	 *	an error if we would.
	 */
	size = le16_to_cpu(kfib->header.Size) + sizeof(struct aac_fibhdr);
	if (size < le16_to_cpu(kfib->header.SenderSize))
		size = le16_to_cpu(kfib->header.SenderSize);
	if (size > dev->max_fib_size) {
		dma_addr_t daddr;

		kfib = pci_alloc_consistent(dev->pdev, size, &daddr);
		/* Highjack the hw_fib */
		hw_fib = fibptr->hw_fib_va;
		hw_fib_pa = fibptr->hw_fib_pa;
		fibptr->hw_fib_va = kfib;
		fibptr->hw_fib_pa = daddr;
		memset(((char *)kfib) + dev->max_fib_size, 0, size - dev->max_fib_size);
		memcpy(kfib, hw_fib, dev->max_fib_size);
	}
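
	/*
	 * From this point fibptr carries the larger DMA-coherent buffer in
	 * place of its preallocated FIB; the saved hw_fib/hw_fib_pa values
	 * are restored on the cleanup path below once the command completes.
	 */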
	if (copy_from_user(kfib, arg, size)) {
		retval = -EFAULT;
		goto cleanup;
	}
	if (kfib->header.Command == cpu_to_le16(TakeABreakPt)) {
		aac_adapter_interrupt(dev);
		/*
		 * Since we didn't really send a fib, zero out the state to allow
		 * cleanup code not to assert.
		 */
		kfib->header.XferState = 0;
	} else {
		retval = aac_fib_send(le16_to_cpu(kfib->header.Command), fibptr,
				le16_to_cpu(kfib->header.Size), FsaNormal,
				1, 1, NULL, NULL);
		if (aac_fib_complete(fibptr) != 0) {
			retval = -EINVAL;
			goto cleanup;
		}
	}
	/*
	 *	Make sure that the size returned by the adapter (which includes
	 *	the header) is less than or equal to the size of a fib, so we
	 *	don't corrupt application data. Then copy that size to the user
	 *	buffer. (Don't try to add the header information again, since it
	 *	was already included by the adapter.)
	 */
	retval = 0;
	if (copy_to_user(arg, (void *)kfib, size))
		retval = -EFAULT;
cleanup:
	if (hw_fib) {
		pci_free_consistent(dev->pdev, size, kfib, fibptr->hw_fib_pa);
		fibptr->hw_fib_pa = hw_fib_pa;
		fibptr->hw_fib_va = hw_fib;
	}
	if (retval != -ERESTARTSYS)
		aac_fib_free(fibptr);
	return retval;
}
/**
 *	open_getadapter_fib	-	create a fib context for the caller
 *
 *	This routine allocates a new AdapterFibContext, adds it to the
 *	adapter's fib context list and returns the opaque handle for it
 *	to the user.
 */
static int open_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
	struct aac_fib_context * fibctx;
	int status;

	fibctx = kmalloc(sizeof(struct aac_fib_context), GFP_KERNEL);
	if (fibctx == NULL) {
		status = -ENOMEM;
	} else {
		unsigned long flags;
		struct list_head * entry;
		struct aac_fib_context * context;

		fibctx->type = FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT;
		fibctx->size = sizeof(struct aac_fib_context);
		/*
		 *	Yes yes, I know this could be an index, but we have a
		 *	better guarantee of uniqueness for the locked loop below.
		 *	Without the aid of a persistent history, this also helps
		 *	reduce the chance that the opaque context would be reused.
		 */
		fibctx->unique = (u32)((ulong)fibctx & 0xFFFFFFFF);
		/*
		 *	Initialize the mutex used to wait for the next AIF.
		 */
		init_MUTEX_LOCKED(&fibctx->wait_sem);
		/*
		 *	Initialize the fibs and set the count of fibs on
		 *	the list to 0.
		 */
		INIT_LIST_HEAD(&fibctx->fib_list);
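		/* Time stamp the context, in seconds; next_getadapter_fib()
		 * refreshes this each time the context is polled. */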
		fibctx->jiffies = jiffies/HZ;
		/*
		 *	Now add this context onto the adapter's
		 *	AdapterFibContext list.
		 */
		spin_lock_irqsave(&dev->fib_lock, flags);
		/* Ensure that we have a unique identifier */
		entry = dev->fib_list.next;
		while (entry != &dev->fib_list) {
			context = list_entry(entry, struct aac_fib_context, next);
			if (context->unique == fibctx->unique) {
				/* Not unique (32 bits) */
				fibctx->unique++;
				entry = dev->fib_list.next;
			} else {
				entry = entry->next;
			}
		}
		list_add_tail(&fibctx->next, &dev->fib_list);
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(arg, &fibctx->unique,
				sizeof(fibctx->unique))) {
			status = -EFAULT;
		} else {
			status = 0;
		}
	}
	return status;
}
/**
 *	next_getadapter_fib	-	get the next fib
 *	@dev: adapter to use
 *	@arg: ioctl argument
 *
 *	This routine will get the next Fib, if available, from the AdapterFibContext
 *	passed in from the user.
 */
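
/*
 * Illustrative sketch only, not part of the driver: user space consumes
 * adapter-initiated FIBs (AIFs) by opening a context, repeatedly fetching
 * from it and finally closing it.  "fd", "buf" and "handle_aif" are
 * hypothetical, and struct fib_ioctl may carry fields beyond the two used
 * here.
 *
 *	u32 ctx;
 *	struct fib_ioctl f;
 *	struct hw_fib buf;
 *
 *	ioctl(fd, FSACTL_OPEN_GET_ADAPTER_FIB, &ctx);
 *	f.fibctx = ctx;
 *	f.fib = (char *)&buf;
 *	while (ioctl(fd, FSACTL_GET_NEXT_ADAPTER_FIB, &f) == 0)
 *		handle_aif(&buf);
 *	ioctl(fd, FSACTL_CLOSE_GET_ADAPTER_FIB, ctx);
 */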
static int next_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
	struct fib_ioctl f;
	struct fib *fib;
	struct aac_fib_context *fibctx;
	int status;
	struct list_head * entry;
	unsigned long flags;
	if (copy_from_user((void *)&f, arg, sizeof(struct fib_ioctl)))
		return -EFAULT;
	/*
	 *	Verify that the HANDLE passed in was a valid AdapterFibContext
	 *
	 *	Search the list of AdapterFibContext addresses on the adapter
	 *	to be sure this is a valid address
	 */
	spin_lock_irqsave(&dev->fib_lock, flags);
	entry = dev->fib_list.next;
	fibctx = NULL;
	while (entry != &dev->fib_list) {
		fibctx = list_entry(entry, struct aac_fib_context, next);
		/*
		 *	Extract the AdapterFibContext from the Input parameters.
		 */
		if (fibctx->unique == f.fibctx) { /* We found a winner */
			break;
		}
		entry = entry->next;
		fibctx = NULL;
	}
	if (!fibctx) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		dprintk ((KERN_INFO "Fib Context not found\n"));
		return -EINVAL;
	}
	if ((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
	    (fibctx->size != sizeof(struct aac_fib_context))) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		dprintk ((KERN_INFO "Fib Context corrupt?\n"));
		return -EINVAL;
	}
	/*
	 *	If there are no fibs to send back, then either wait or return
	 *	-EAGAIN
	 */
	if (!list_empty(&fibctx->fib_list)) {
		/*
		 *	Pull the next fib from the fibs
		 */
		entry = fibctx->fib_list.next;
		list_del(entry);
		fib = list_entry(entry, struct fib, fiblink);
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		if (copy_to_user(f.fib, fib->hw_fib_va, sizeof(struct hw_fib))) {
			kfree(fib->hw_fib_va);
			kfree(fib);
			return -EFAULT;
		}
		/*
		 *	Free the space occupied by this copy of the fib.
		 */
		kfree(fib->hw_fib_va);
		kfree(fib);
	} else {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		/* If someone killed the AIF aacraid thread, restart it */
		status = !dev->aif_thread;
		if (status && !dev->in_reset && dev->queues && dev->fsa_dev) {
			/* Be paranoid, be very paranoid! */
			kthread_stop(dev->thread);
			dev->thread = kthread_run(aac_command_thread, dev, dev->name);
		}
		if (down_interruptible(&fibctx->wait_sem) < 0) {
			status = -ERESTARTSYS;
		} else {
			/* Lock again and retry */
			spin_lock_irqsave(&dev->fib_lock, flags);
	fibctx->jiffies = jiffies/HZ;
	return status;
}
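
/**
 *	aac_close_fib_context	-	tear down a user fib context
 *	@dev: adapter the context belongs to
 *	@fibctx: context to tear down
 *
 *	Frees any FIBs still queued on the context, removes the context from
 *	the adapter's AdapterFibContext list and releases its memory.
 */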
int aac_close_fib_context(struct aac_dev * dev, struct aac_fib_context * fibctx)
{
	struct fib *fib;
	/*
	 *	First free any FIBs that have not been consumed.
	 */
	while (!list_empty(&fibctx->fib_list)) {
		struct list_head * entry;
		/*
		 *	Pull the next fib from the fibs
		 */
		entry = fibctx->fib_list.next;
		list_del(entry);
		fib = list_entry(entry, struct fib, fiblink);
		/*
		 *	Free the space occupied by this copy of the fib.
		 */
		kfree(fib->hw_fib_va);
		kfree(fib);
	}
	/*
	 *	Remove the Context from the AdapterFibContext List
	 */
	list_del(&fibctx->next);
	/*
	 *	Free the space occupied by the Context
	 */
	kfree(fibctx);
	return 0;
}
/**
 *	close_getadapter_fib	-	close down user fib context
 *	@dev: adapter
 *	@arg: ioctl arguments
 *
 *	This routine will close down the fibctx passed in from the user.
 */
static int close_getadapter_fib(struct aac_dev * dev, void __user *arg)
{
	struct aac_fib_context *fibctx;
	int status;
	unsigned long flags;
	struct list_head * entry;
	/*
	 *	Verify that the HANDLE passed in was a valid AdapterFibContext
	 *
	 *	Search the list of AdapterFibContext addresses on the adapter
	 *	to be sure this is a valid address
	 */
	entry = dev->fib_list.next;
	fibctx = NULL;
	while (entry != &dev->fib_list) {
		fibctx = list_entry(entry, struct aac_fib_context, next);
		/*
		 *	Extract the fibctx from the input parameters
		 */
		if (fibctx->unique == (u32)(uintptr_t)arg) /* We found a winner */
			break;
		entry = entry->next;
		fibctx = NULL;
	}

	if (!fibctx)
		return 0; /* Already gone */
	if ((fibctx->type != FSAFS_NTC_GET_ADAPTER_FIB_CONTEXT) ||
	    (fibctx->size != sizeof(struct aac_fib_context)))
		return -EINVAL;
	spin_lock_irqsave(&dev->fib_lock, flags);
	status = aac_close_fib_context(dev, fibctx);
	spin_unlock_irqrestore(&dev->fib_lock, flags);
	return status;
}
/**
 *	check_revision	-	report the driver version to userspace
 *	@dev: adapter
 *	@arg: ioctl arguments
 *
 *	This routine returns the driver version.
 *	Under Linux, there have been no version incompatibilities, so this is
 *	simple!
 */
static int check_revision(struct aac_dev *dev, void __user *arg)
{
	struct revision response;
	char *driver_version = aac_driver_version;
	u32 version;
	version = (simple_strtol(driver_version,
		&driver_version, 10) << 24) | 0x00000400;
	version += simple_strtol(driver_version + 1, &driver_version, 10) << 16;
	version += simple_strtol(driver_version + 1, NULL, 10);
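	/*
	 * Worked example (hypothetical version string): "1.1.5" encodes as
	 * (1 << 24) | 0x00000400 = 0x01000400, then += (1 << 16) giving
	 * 0x01010400, then += 5 giving 0x01010405.
	 */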
	response.version = cpu_to_le32(version);
# ifdef AAC_DRIVER_BUILD
	response.build = cpu_to_le32(AAC_DRIVER_BUILD);
# else
	response.build = cpu_to_le32(9999);
# endif
	if (copy_to_user(arg, &response, sizeof(response)))
		return -EFAULT;
	return 0;
}
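
/**
 *	aac_send_raw_srb	-	send a user supplied SRB to the adapter
 *	@dev: adapter
 *	@arg: user buffer holding a struct user_aac_srb followed by its reply
 *
 *	Copies the user's SRB and scatter/gather list into a FIB, maps (and,
 *	for writes, copies in) the user data buffers, sends the command to
 *	the adapter and copies the reply and any read data back to user space.
 */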
static int aac_send_raw_srb(struct aac_dev * dev, void __user * arg)
{
	struct fib *srbfib;
	int status;
	struct aac_srb *srbcmd = NULL;
	struct user_aac_srb *user_srbcmd = NULL;
	struct user_aac_srb __user *user_srb = arg;
	struct aac_srb_reply __user *user_reply;
	struct aac_srb_reply *reply;
	u32 fibsize = 0;
	u32 flags = 0;
	s32 rcode = 0;
	u32 data_dir;
	void __user *sg_user[32];
	void *sg_list[32];
	u32 sg_indx = 0;
	u32 byte_count = 0;
	u32 actual_fibsize64, actual_fibsize = 0;
	int i;
	if (dev->in_reset) {
		dprintk((KERN_DEBUG"aacraid: send raw srb -EBUSY\n"));
		return -EBUSY;
	}
	if (!capable(CAP_SYS_ADMIN)) {
		dprintk((KERN_DEBUG"aacraid: No permission to send raw srb\n"));
		return -EPERM;
	}
	/*
	 *	Allocate and initialize a Fib then setup a SRB command
	 */
	if (!(srbfib = aac_fib_alloc(dev))) {
		return -ENOMEM;
	}
	aac_fib_init(srbfib);

	srbcmd = (struct aac_srb *) fib_data(srbfib);
	memset(sg_list, 0, sizeof(sg_list)); /* cleanup may take issue */
	if (copy_from_user(&fibsize, &user_srb->count, sizeof(u32))) {
		dprintk((KERN_DEBUG"aacraid: Could not copy data size from user\n"));
		rcode = -EFAULT;
		goto cleanup;
	}
	if (fibsize > (dev->max_fib_size - sizeof(struct aac_fibhdr))) {
		rcode = -EINVAL;
		goto cleanup;
	}
	user_srbcmd = kmalloc(fibsize, GFP_KERNEL);
	if (!user_srbcmd) {
		dprintk((KERN_DEBUG"aacraid: Could not make a copy of the srb\n"));
		rcode = -ENOMEM;
		goto cleanup;
	}
	if (copy_from_user(user_srbcmd, user_srb, fibsize)) {
		dprintk((KERN_DEBUG"aacraid: Could not copy srb from user\n"));
		rcode = -EFAULT;
		goto cleanup;
	}
	user_reply = arg + fibsize;

	flags = user_srbcmd->flags; /* from user in cpu order */
	// Fix up srb for endian and force some values
	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);	// Force this
	srbcmd->channel  = cpu_to_le32(user_srbcmd->channel);
	srbcmd->id       = cpu_to_le32(user_srbcmd->id);
	srbcmd->lun      = cpu_to_le32(user_srbcmd->lun);
	srbcmd->timeout  = cpu_to_le32(user_srbcmd->timeout);
	srbcmd->flags    = cpu_to_le32(flags);
	srbcmd->retry_limit = 0; // Obsolete parameter
	srbcmd->cdb_size = cpu_to_le32(user_srbcmd->cdb_size);
	memcpy(srbcmd->cdb, user_srbcmd->cdb, sizeof(srbcmd->cdb));
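
	/*
	 * Derive the DMA direction for the data buffers from the caller's
	 * SRB_DataIn/SRB_DataOut flags; it is passed to pci_map_single()
	 * when the scatter/gather entries are mapped below.
	 */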
	switch (flags & (SRB_DataIn | SRB_DataOut)) {
	case SRB_DataOut:
		data_dir = DMA_TO_DEVICE;
		break;
	case (SRB_DataIn | SRB_DataOut):
		data_dir = DMA_BIDIRECTIONAL;
		break;
	case SRB_DataIn:
		data_dir = DMA_FROM_DEVICE;
		break;
	default:
		data_dir = DMA_NONE;
	}
	if (user_srbcmd->sg.count > ARRAY_SIZE(sg_list)) {
		dprintk((KERN_DEBUG"aacraid: too many sg entries %d\n",
		  user_srbcmd->sg.count));
		rcode = -EINVAL;
		goto cleanup;
	}
	actual_fibsize = sizeof(struct aac_srb) - sizeof(struct sgentry) +
		((user_srbcmd->sg.count & 0xff) * sizeof(struct sgentry));
	actual_fibsize64 = actual_fibsize + (user_srbcmd->sg.count & 0xff) *
	  (sizeof(struct sgentry64) - sizeof(struct sgentry));
	/* User made a mistake - should not continue */
	if ((actual_fibsize != fibsize) && (actual_fibsize64 != fibsize)) {
		dprintk((KERN_DEBUG"aacraid: Bad Size specified in "
		  "Raw SRB command calculated fibsize=%lu;%lu "
		  "user_srbcmd->sg.count=%d aac_srb=%lu sgentry=%lu;%lu "
		  "issued fibsize=%d\n",
		  actual_fibsize, actual_fibsize64, user_srbcmd->sg.count,
		  sizeof(struct aac_srb), sizeof(struct sgentry),
		  sizeof(struct sgentry64), fibsize));
		rcode = -EINVAL;
		goto cleanup;
	}
	if ((data_dir == DMA_NONE) && user_srbcmd->sg.count) {
		dprintk((KERN_DEBUG"aacraid: SG with no direction specified in Raw SRB command\n"));
		rcode = -EINVAL;
		goto cleanup;
	}
	if (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64) {
		struct user_sgmap64 *upsg = (struct user_sgmap64 *)&user_srbcmd->sg;
		struct sgmap64 *psg = (struct sgmap64 *)&srbcmd->sg;
		/*
		 * This should also catch if user used the 32 bit sgmap
		 */
		if (actual_fibsize64 == fibsize) {
			actual_fibsize = actual_fibsize64;
			for (i = 0; i < upsg->count; i++) {
				u64 addr;
				void *p;
				if (upsg->sg[i].count >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					rcode = -EINVAL;
					goto cleanup;
				}
				/* Does this really need to be GFP_DMA? */
				p = kmalloc(upsg->sg[i].count, GFP_KERNEL|__GFP_DMA);
				if (!p) {
					dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
					  upsg->sg[i].count, i, upsg->count));
					rcode = -ENOMEM;
					goto cleanup;
				}
				addr = (u64)upsg->sg[i].addr[0];
				addr += ((u64)upsg->sg[i].addr[1]) << 32;
				sg_user[i] = (void __user *)(uintptr_t)addr;
				sg_list[i] = p; // save so we can clean up later
				sg_indx = i;
				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i],
							   upsg->sg[i].count)) {
						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p,
						      upsg->sg[i].count, data_dir);

				psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
				psg->sg[i].addr[1] = cpu_to_le32(addr >> 32);
				byte_count += upsg->sg[i].count;
				psg->sg[i].count = cpu_to_le32(upsg->sg[i].count);
			}
		} else {
			struct user_sgmap *usg;
			usg = kmalloc(actual_fibsize - sizeof(struct aac_srb)
			  + sizeof(struct sgmap), GFP_KERNEL);
			if (!usg) {
				dprintk((KERN_DEBUG"aacraid: Allocation error in Raw SRB command\n"));
				rcode = -ENOMEM;
				goto cleanup;
			}
			memcpy(usg, upsg, actual_fibsize - sizeof(struct aac_srb)
			  + sizeof(struct sgmap));
			actual_fibsize = actual_fibsize64;
			for (i = 0; i < usg->count; i++) {
				u64 addr;
				void *p;
				if (usg->sg[i].count >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					kfree(usg);
					rcode = -EINVAL;
					goto cleanup;
				}
				/* Does this really need to be GFP_DMA? */
				p = kmalloc(usg->sg[i].count, GFP_KERNEL|__GFP_DMA);
				if (!p) {
					dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
					  usg->sg[i].count, i, usg->count));
					kfree(usg);
					rcode = -ENOMEM;
					goto cleanup;
				}
				sg_user[i] = (void __user *)(uintptr_t)usg->sg[i].addr;
				sg_list[i] = p; // save so we can clean up later
				sg_indx = i;
				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i],
							   upsg->sg[i].count)) {
						kfree(usg);
						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p,
						      usg->sg[i].count, data_dir);

				psg->sg[i].addr[0] = cpu_to_le32(addr & 0xffffffff);
				psg->sg[i].addr[1] = cpu_to_le32(addr >> 32);
				byte_count += usg->sg[i].count;
				psg->sg[i].count = cpu_to_le32(usg->sg[i].count);
			}
			kfree(usg);
		}
		srbcmd->count = cpu_to_le32(byte_count);
		psg->count = cpu_to_le32(sg_indx+1);
		status = aac_fib_send(ScsiPortCommand64, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
	} else {
		struct user_sgmap *upsg = &user_srbcmd->sg;
		struct sgmap *psg = &srbcmd->sg;
		if (actual_fibsize64 == fibsize) {
			struct user_sgmap64 *usg = (struct user_sgmap64 *)upsg;
			for (i = 0; i < upsg->count; i++) {
				u64 addr;
				void *p;
				if (usg->sg[i].count >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					rcode = -EINVAL;
					goto cleanup;
				}
				/* Does this really need to be GFP_DMA? */
				p = kmalloc(usg->sg[i].count, GFP_KERNEL|__GFP_DMA);
				if (!p) {
					dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
					  usg->sg[i].count, i, usg->count));
					rcode = -ENOMEM;
					goto cleanup;
				}
				addr = (u64)usg->sg[i].addr[0];
				addr += ((u64)usg->sg[i].addr[1]) << 32;
				sg_user[i] = (void __user *)addr;
				sg_list[i] = p; // save so we can clean up later
				sg_indx = i;
				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i],
							   usg->sg[i].count)) {
						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p,
						      usg->sg[i].count, data_dir);

				psg->sg[i].addr = cpu_to_le32(addr & 0xffffffff);
				byte_count += usg->sg[i].count;
				psg->sg[i].count = cpu_to_le32(usg->sg[i].count);
			}
		} else {
			for (i = 0; i < upsg->count; i++) {
				dma_addr_t addr;
				void *p;
				if (upsg->sg[i].count >
				    ((dev->adapter_info.options &
				     AAC_OPT_NEW_COMM) ?
				      (dev->scsi_host_ptr->max_sectors << 9) :
				      65536)) {
					rcode = -EINVAL;
					goto cleanup;
				}
				p = kmalloc(upsg->sg[i].count, GFP_KERNEL);
				if (!p) {
					dprintk((KERN_DEBUG"aacraid: Could not allocate SG buffer - size = %d buffer number %d of %d\n",
					  upsg->sg[i].count, i, upsg->count));
					rcode = -ENOMEM;
					goto cleanup;
				}
				sg_user[i] = (void __user *)(uintptr_t)upsg->sg[i].addr;
				sg_list[i] = p; // save so we can clean up later
				sg_indx = i;
				if (flags & SRB_DataOut) {
					if (copy_from_user(p, sg_user[i],
							   upsg->sg[i].count)) {
						dprintk((KERN_DEBUG"aacraid: Could not copy sg data from user\n"));
						rcode = -EFAULT;
						goto cleanup;
					}
				}
				addr = pci_map_single(dev->pdev, p,
						      upsg->sg[i].count, data_dir);

				psg->sg[i].addr = cpu_to_le32(addr);
				byte_count += upsg->sg[i].count;
				psg->sg[i].count = cpu_to_le32(upsg->sg[i].count);
			}
		}
		srbcmd->count = cpu_to_le32(byte_count);
		psg->count = cpu_to_le32(sg_indx+1);
		status = aac_fib_send(ScsiPortCommand, srbfib, actual_fibsize, FsaNormal, 1, 1, NULL, NULL);
	}
	if (status == -ERESTARTSYS) {
		rcode = -ERESTARTSYS;
		goto cleanup;
	}

	if (status != 0) {
		dprintk((KERN_DEBUG"aacraid: Could not send raw srb fib to hba\n"));
		rcode = -ENXIO;
		goto cleanup;
	}
	if (flags & SRB_DataIn) {
		for (i = 0; i <= sg_indx; i++) {
			byte_count = le32_to_cpu(
			  (dev->adapter_info.options & AAC_OPT_SGMAP_HOST64)
			    ? ((struct sgmap64 *)&srbcmd->sg)->sg[i].count
			    : srbcmd->sg.sg[i].count);
			if (copy_to_user(sg_user[i], sg_list[i], byte_count)) {
				dprintk((KERN_DEBUG"aacraid: Could not copy sg data to user\n"));
				rcode = -EFAULT;
				goto cleanup;
			}
		}
	}
	reply = (struct aac_srb_reply *) fib_data(srbfib);
	if (copy_to_user(user_reply, reply, sizeof(struct aac_srb_reply))) {
		dprintk((KERN_DEBUG"aacraid: Could not copy reply to user\n"));
		rcode = -EFAULT;
		goto cleanup;
	}
cleanup:
	kfree(user_srbcmd);
	for (i = 0; i <= sg_indx; i++) {
		kfree(sg_list[i]);
	}
	if (rcode != -ERESTARTSYS) {
		aac_fib_complete(srbfib);
		aac_fib_free(srbfib);
	}
	return rcode;
}
struct aac_pci_info {
	u32 bus;
	u32 slot;
};
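
/**
 *	aac_get_pci_info	-	report the controller's PCI location
 *	@dev: adapter
 *	@arg: user buffer that receives a struct aac_pci_info
 *
 *	Copies the adapter's PCI bus number and slot back to user space.
 */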
static int aac_get_pci_info(struct aac_dev * dev, void __user *arg)
{
	struct aac_pci_info pci_info;

	pci_info.bus = dev->pdev->bus->number;
	pci_info.slot = PCI_SLOT(dev->pdev->devfn);
	if (copy_to_user(arg, &pci_info, sizeof(struct aac_pci_info))) {
		dprintk((KERN_DEBUG "aacraid: Could not copy pci info\n"));
		return -EFAULT;
	}
	return 0;
}
int aac_do_ioctl(struct aac_dev * dev, int cmd, void __user *arg)
{
	int status;
	/*
	 *	HBA gets first crack
	 */
	status = aac_dev_ioctl(dev, cmd, arg);
	if (status != -ENOTTY)
		return status;

	switch (cmd) {
	case FSACTL_MINIPORT_REV_CHECK:
		status = check_revision(dev, arg);
		break;
	case FSACTL_SEND_LARGE_FIB:
		status = ioctl_send_fib(dev, arg);
		break;
	case FSACTL_OPEN_GET_ADAPTER_FIB:
		status = open_getadapter_fib(dev, arg);
		break;
	case FSACTL_GET_NEXT_ADAPTER_FIB:
		status = next_getadapter_fib(dev, arg);
		break;
	case FSACTL_CLOSE_GET_ADAPTER_FIB:
		status = close_getadapter_fib(dev, arg);
		break;
	case FSACTL_SEND_RAW_SRB:
		status = aac_send_raw_srb(dev, arg);
		break;
	case FSACTL_GET_PCI_INFO:
		status = aac_get_pci_info(dev, arg);
		break;