/*
 *	Adaptec AAC series RAID controller driver
 *	(c) Copyright 2001 Red Hat Inc.
 *
 * based on the old aacraid driver that is..
 * Adaptec aacraid device driver for Linux.
 *
 * Copyright (c) 2000-2010 Adaptec, Inc.
 *               2010-2015 PMC-Sierra, Inc. (aacraid@pmc-sierra.com)
 *               2016-2017 Microsemi Corp. (aacraid@microsemi.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING.  If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Abstract: Contain all routines that are required for FSA host/adapter
 *    communication.
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/crash_dump.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/blkdev.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/semaphore.h>
#include <linux/bcd.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>

#include "aacraid.h"
/**
 *	fib_map_alloc		-	allocate the fib objects
 *	@dev: Adapter to allocate for
 *
 *	Allocate and map the shared PCI space for the FIB blocks used to
 *	talk to the Adaptec firmware.
 */
static int fib_map_alloc(struct aac_dev *dev)
{
	if (dev->max_fib_size > AAC_MAX_NATIVE_SIZE)
		dev->max_cmd_size = AAC_MAX_NATIVE_SIZE;
	else
		dev->max_cmd_size = dev->max_fib_size;
	if (dev->max_fib_size < AAC_MAX_NATIVE_SIZE) {
		dev->max_cmd_size = AAC_MAX_NATIVE_SIZE;
	} else {
		dev->max_cmd_size = dev->max_fib_size;
	}

	dprintk((KERN_INFO
	  "allocate hardware fibs dma_alloc_coherent(%p, %d * (%d + %d), %p)\n",
	  &dev->pdev->dev, dev->max_cmd_size, dev->scsi_host_ptr->can_queue,
	  AAC_NUM_MGT_FIB, &dev->hw_fib_pa));
	dev->hw_fib_va = dma_alloc_coherent(&dev->pdev->dev,
		(dev->max_cmd_size + sizeof(struct aac_fib_xporthdr))
		* (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) + (ALIGN32 - 1),
		&dev->hw_fib_pa, GFP_KERNEL);
	if (dev->hw_fib_va == NULL)
		return -ENOMEM;
	return 0;
}
/**
 *	aac_fib_map_free		-	free the fib objects
 *	@dev: Adapter to free
 *
 *	Free the PCI mappings and the memory allocated for FIB blocks
 *	on this adapter.
 */
void aac_fib_map_free(struct aac_dev *dev)
{
	size_t alloc_size;
	size_t fib_size;
	int num_fibs;

	if (!dev->hw_fib_va || !dev->max_cmd_size)
		return;

	num_fibs = dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB;
	fib_size = dev->max_fib_size + sizeof(struct aac_fib_xporthdr);
	alloc_size = fib_size * num_fibs + ALIGN32 - 1;

	dma_free_coherent(&dev->pdev->dev, alloc_size, dev->hw_fib_va,
			  dev->hw_fib_pa);

	dev->hw_fib_va = NULL;
	dev->hw_fib_pa = 0;
}
void aac_fib_vector_assign(struct aac_dev *dev)
{
	u32 i = 0;
	u32 vector = 1;
	struct fib *fibptr = NULL;

	for (i = 0, fibptr = &dev->fibs[i];
	    i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
	    i++, fibptr++) {
		if ((dev->max_msix == 1) ||
		    (i > ((dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1)
			- dev->vector_cap))) {
			fibptr->vector_no = 0;
		} else {
			fibptr->vector_no = vector;
			vector++;
			if (vector == dev->max_msix)
				vector = 1;
		}
	}
}
/**
 *	aac_fib_setup	-	setup the fibs
 *	@dev: Adapter to set up
 *
 *	Allocate the PCI space for the fibs, map it and then initialise the
 *	fib area, the unmapped fib data and also the free list
 */
int aac_fib_setup(struct aac_dev *dev)
{
	struct fib *fibptr;
	struct hw_fib *hw_fib;
	dma_addr_t hw_fib_pa;
	int i;
	u32 max_cmds;

	while (((i = fib_map_alloc(dev)) == -ENOMEM)
	    && (dev->scsi_host_ptr->can_queue > (64 - AAC_NUM_MGT_FIB))) {
		max_cmds = (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB) >> 1;
		dev->scsi_host_ptr->can_queue = max_cmds - AAC_NUM_MGT_FIB;
		if (dev->comm_interface != AAC_COMM_MESSAGE_TYPE3)
			dev->init->r7.max_io_commands = cpu_to_le32(max_cmds);
	}
	if (i < 0)
		return -ENOMEM;

	memset(dev->hw_fib_va, 0,
		(dev->max_cmd_size + sizeof(struct aac_fib_xporthdr)) *
		(dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB));

	/* 32 byte alignment for PMC */
	hw_fib_pa = (dev->hw_fib_pa + (ALIGN32 - 1)) & ~(ALIGN32 - 1);
	hw_fib = (struct hw_fib *)((unsigned char *)dev->hw_fib_va +
		(hw_fib_pa - dev->hw_fib_pa));

	/* add Xport header */
	hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
		sizeof(struct aac_fib_xporthdr));
	hw_fib_pa += sizeof(struct aac_fib_xporthdr);

	/*
	 *	Initialise the fibs
	 */
	for (i = 0, fibptr = &dev->fibs[i];
	    i < (dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB);
	    i++, fibptr++)
	{
		fibptr->flags = 0;
		fibptr->size = sizeof(struct fib);
		fibptr->dev = dev;
		fibptr->hw_fib_va = hw_fib;
		fibptr->data = (void *) fibptr->hw_fib_va->data;
		fibptr->next = fibptr + 1;	/* Forward chain the fibs */
		sema_init(&fibptr->event_wait, 0);
		spin_lock_init(&fibptr->event_lock);
		hw_fib->header.XferState = cpu_to_le32(0xffffffff);
		hw_fib->header.SenderSize =
			cpu_to_le16(dev->max_fib_size);	/* ?? max_cmd_size */
		fibptr->hw_fib_pa = hw_fib_pa;
		fibptr->hw_sgl_pa = hw_fib_pa +
			offsetof(struct aac_hba_cmd_req, sge[2]);
		/*
		 * one element is for the ptr to the separate sg list,
		 * second element for 32 byte alignment
		 */
		fibptr->hw_error_pa = hw_fib_pa +
			offsetof(struct aac_native_hba, resp.resp_bytes[0]);

		hw_fib = (struct hw_fib *)((unsigned char *)hw_fib +
			dev->max_cmd_size + sizeof(struct aac_fib_xporthdr));
		hw_fib_pa = hw_fib_pa +
			dev->max_cmd_size + sizeof(struct aac_fib_xporthdr);
	}

	/*
	 *	Assign vector numbers to fibs
	 */
	aac_fib_vector_assign(dev);

	/*
	 *	Add the fib chain to the free list
	 */
	dev->fibs[dev->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB - 1].next = NULL;
	/*
	 *	Set 8 fibs aside for management tools
	 */
	dev->free_fib = &dev->fibs[dev->scsi_host_ptr->can_queue];
	return 0;
}
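
/*
 * Layout note (illustrative sketch, not part of the driver): after the
 * 32 byte alignment fix-up above, the coherent region set up by
 * fib_map_alloc()/aac_fib_setup() repeats one slot per command, each slot
 * being a struct aac_fib_xporthdr immediately followed by max_cmd_size
 * bytes of hw_fib / native HBA command space.  A hypothetical helper that
 * restates the same pointer arithmetic would look like:
 *
 *	static inline struct hw_fib *slot_to_hw_fib(struct aac_dev *dev,
 *						    unsigned int slot)
 *	{
 *		unsigned long base = (unsigned long)dev->hw_fib_va +
 *			(ALIGN(dev->hw_fib_pa, ALIGN32) - dev->hw_fib_pa);
 *		unsigned long stride = dev->max_cmd_size +
 *			sizeof(struct aac_fib_xporthdr);
 *
 *		return (struct hw_fib *)(base + slot * stride +
 *			sizeof(struct aac_fib_xporthdr));
 *	}
 *
 * slot_to_hw_fib() does not exist in this file; aac_fib_setup() performs
 * the walk inline while forward-chaining dev->fibs[].
 */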
/**
 *	aac_fib_alloc_tag-allocate a fib using tags
 *	@dev: Adapter to allocate the fib for
 *	@scmd: SCSI command
 *
 *	Allocate a fib from the adapter fib pool using tags
 *	from the blk layer.
 */

struct fib *aac_fib_alloc_tag(struct aac_dev *dev, struct scsi_cmnd *scmd)
{
	struct fib *fibptr;

	fibptr = &dev->fibs[scmd->request->tag];
	/*
	 *	Null out fields that depend on being zero at the start of
	 *	each I/O
	 */
	fibptr->hw_fib_va->header.XferState = 0;
	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
	fibptr->callback_data = NULL;
	fibptr->callback = NULL;

	return fibptr;
}
/**
 *	aac_fib_alloc	-	allocate a fib
 *	@dev: Adapter to allocate the fib for
 *
 *	Allocate a fib from the adapter fib pool. If the pool is empty we
 *	return NULL.
 */

struct fib *aac_fib_alloc(struct aac_dev *dev)
{
	struct fib *fibptr;
	unsigned long flags;

	spin_lock_irqsave(&dev->fib_lock, flags);
	fibptr = dev->free_fib;
	if (!fibptr) {
		spin_unlock_irqrestore(&dev->fib_lock, flags);
		return fibptr;
	}
	dev->free_fib = fibptr->next;
	spin_unlock_irqrestore(&dev->fib_lock, flags);
	/*
	 *	Set the proper node type code and node byte size
	 */
	fibptr->type = FSAFS_NTC_FIB_CONTEXT;
	fibptr->size = sizeof(struct fib);
	/*
	 *	Null out fields that depend on being zero at the start of
	 *	each I/O
	 */
	fibptr->hw_fib_va->header.XferState = 0;
	fibptr->flags = 0;
	fibptr->callback = NULL;
	fibptr->callback_data = NULL;

	return fibptr;
}
/**
 *	aac_fib_free	-	free a fib
 *	@fibptr: fib to free up
 *
 *	Frees up a fib and places it on the appropriate queue
 */

void aac_fib_free(struct fib *fibptr)
{
	unsigned long flags;

	if (fibptr->done == 2)
		return;

	spin_lock_irqsave(&fibptr->dev->fib_lock, flags);
	if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
		aac_config.fib_timeouts++;
	if (!(fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) &&
	    fibptr->hw_fib_va->header.XferState != 0) {
		printk(KERN_WARNING "aac_fib_free, XferState != 0, fibptr = 0x%p, XferState = 0x%x\n",
			(void *)fibptr,
			le32_to_cpu(fibptr->hw_fib_va->header.XferState));
	}
	fibptr->next = fibptr->dev->free_fib;
	fibptr->dev->free_fib = fibptr;
	spin_unlock_irqrestore(&fibptr->dev->fib_lock, flags);
}
/**
 *	aac_fib_init	-	initialise a fib
 *	@fibptr: The fib to initialize
 *
 *	Set up the generic fib fields ready for use
 */

void aac_fib_init(struct fib *fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib_va;

	memset(&hw_fib->header, 0, sizeof(struct aac_fibhdr));
	hw_fib->header.StructType = FIB_MAGIC;
	hw_fib->header.Size = cpu_to_le16(fibptr->dev->max_fib_size);
	hw_fib->header.XferState = cpu_to_le32(HostOwned | FibInitialized | FibEmpty | FastResponseCapable);
	hw_fib->header.u.ReceiverFibAddress = cpu_to_le32(fibptr->hw_fib_pa);
	hw_fib->header.SenderSize = cpu_to_le16(fibptr->dev->max_fib_size);
}
/**
 *	fib_deallocate		-	deallocate a fib
 *	@fibptr: fib to deallocate
 *
 *	Will deallocate and return to the free pool the FIB pointed to by the
 *	caller.
 */

static void fib_dealloc(struct fib *fibptr)
{
	struct hw_fib *hw_fib = fibptr->hw_fib_va;

	hw_fib->header.XferState = 0;
}
/*
 *	Communication primitives define and support the queuing method we use to
 *	support host to adapter communication. All queue accesses happen through
 *	these routines and are the only routines which have knowledge of
 *	how these queues are implemented.
 */
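
/*
 * Sketch of the queue discipline used below (illustrative only): each
 * queue is a circular array of aac_entry slots whose producer and
 * consumer indexes live in adapter-visible little-endian memory.
 * aac_get_entry() claims the slot at the producer index, wrapping it to
 * the front at ADAP_NORM_CMD_ENTRIES / ADAP_NORM_RESP_ENTRIES, and
 * reports "full" when the incremented producer would meet the consumer:
 *
 *	u32 prod = le32_to_cpu(*q->headers.producer);
 *	u32 cons = le32_to_cpu(*q->headers.consumer);
 *
 *	if (prod >= q->entries)		// i.e. ADAP_NORM_*_ENTRIES
 *		prod = 0;		// wrap to the front
 *	if (prod + 1 == cons)
 *		return 0;		// queue full, no entry
 *	*entry = q->base + prod;
 *	return 1;
 *
 * The real routine additionally folds in the interrupt-moderation check
 * on the first two outstanding entries.
 */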
/**
 *	aac_get_entry		-	get a queue entry
 *	@dev: Adapter
 *	@qid: Queue Number
 *	@entry: Entry return
 *	@index: Index return
 *	@nonotify: notification control
 *
 *	With a priority the routine returns a queue entry if the queue has free entries. If the queue
 *	is full(no free entries) then no entry is returned and the function returns 0 otherwise 1 is
 *	returned.
 */

static int aac_get_entry (struct aac_dev * dev, u32 qid, struct aac_entry **entry, u32 * index, unsigned long *nonotify)
{
	struct aac_queue * q;
	unsigned long idx;

	/*
	 *	All of the queues wrap when they reach the end, so we check
	 *	to see if they have reached the end and if they have we just
	 *	set the index back to zero. This is a wrap. You could or off
	 *	the high bits in all updates but this is a bit faster I think.
	 */

	q = &dev->queues->queue[qid];

	idx = *index = le32_to_cpu(*(q->headers.producer));
	/* Interrupt Moderation, only interrupt for first two entries */
	if (idx != le32_to_cpu(*(q->headers.consumer))) {
		if (--idx == 0) {
			if (qid == AdapNormCmdQueue)
				idx = ADAP_NORM_CMD_ENTRIES;
			else
				idx = ADAP_NORM_RESP_ENTRIES;
		}
		if (idx != le32_to_cpu(*(q->headers.consumer)))
			*nonotify = 1;
	}

	if (qid == AdapNormCmdQueue) {
		if (*index >= ADAP_NORM_CMD_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	} else {
		if (*index >= ADAP_NORM_RESP_ENTRIES)
			*index = 0; /* Wrap to front of the Producer Queue. */
	}

	/* Queue is full */
	if ((*index + 1) == le32_to_cpu(*(q->headers.consumer))) {
		printk(KERN_WARNING "Queue %d full, %u outstanding.\n",
				qid, atomic_read(&q->numpending));
		return 0;
	} else {
		*entry = q->base + *index;
		return 1;
	}
}
/**
 *	aac_queue_get		-	get the next free QE
 *	@dev: Adapter
 *	@index: Returned index
 *	@priority: Priority of fib
 *	@fib: Fib to associate with the queue entry
 *	@wait: Wait if queue full
 *	@fibptr: Driver fib object to go with fib
 *	@nonotify: Don't notify the adapter
 *
 *	Gets the next free QE off the requested priority adapter command
 *	queue and associates the Fib with the QE. The QE represented by
 *	index is ready to insert on the queue when this routine returns
 *	success.
 */

int aac_queue_get(struct aac_dev * dev, u32 * index, u32 qid, struct hw_fib * hw_fib, int wait, struct fib * fibptr, unsigned long *nonotify)
{
	struct aac_entry * entry = NULL;
	int map = 0;

	if (qid == AdapNormCmdQueue) {
		/*  if no entries wait for some if caller wants to */
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
			printk(KERN_ERR "GetEntries failed\n");
		}
		/*
		 *	Setup queue entry with a command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		map = 1;
	} else {
		while (!aac_get_entry(dev, qid, &entry, index, nonotify)) {
			/* if no entries wait for some if caller wants to */
		}
		/*
		 *	Setup queue entry with command, status and fib mapped
		 */
		entry->size = cpu_to_le32(le16_to_cpu(hw_fib->header.Size));
		entry->addr = hw_fib->header.SenderFibAddress;
			/* Restore adapters pointer to the FIB */
		hw_fib->header.u.ReceiverFibAddress = hw_fib->header.SenderFibAddress;	/* Let the adapter know where to find its data */
		map = 0;
	}
	/*
	 *	If MapFib is true then we need to map the Fib and put pointers
	 *	in the queue entry.
	 */
	if (map)
		entry->addr = cpu_to_le32(fibptr->hw_fib_pa);
	return 0;
}
/*
 *	Define the highest level of host to adapter communication routines.
 *	These routines will support host to adapter FS communication. These
 *	routines have no knowledge of the communication method used. This level
 *	sends and receives FIBs. This level has no knowledge of how these FIBs
 *	get passed back and forth.
 */
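
/*
 * Typical synchronous call sequence (sketch only; it loosely mirrors the
 * CT_PAUSE_IO request issued from aac_reset_adapter() later in this
 * file).  A caller allocates a fib, initialises the header, fills the
 * payload obtained from fib_data(), and calls aac_fib_send() with
 * wait/reply set so the routine sleeps until the adapter answers:
 *
 *	struct fib *fibptr = aac_fib_alloc(dev);
 *	struct aac_pause *cmd;
 *	int status;
 *
 *	if (!fibptr)
 *		return -ENOMEM;
 *	aac_fib_init(fibptr);
 *	cmd = (struct aac_pause *)fib_data(fibptr);
 *	cmd->command = cpu_to_le32(VM_ContainerConfig);
 *	cmd->type = cpu_to_le32(CT_PAUSE_IO);
 *	status = aac_fib_send(ContainerCommand, fibptr,
 *			      sizeof(struct aac_pause), FsaNormal,
 *			      1, 1, NULL, NULL);
 *	if (status >= 0)
 *		aac_fib_complete(fibptr);
 *	if (status != -ERESTARTSYS)
 *		aac_fib_free(fibptr);
 *
 * The payload layout is specific to that one command; other callers
 * substitute their own structures but follow the same alloc/init/send/
 * complete/free pattern.
 */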
/**
 *	aac_fib_send	-	send a fib to the adapter
 *	@command: Command to send
 *	@fibptr: The fib
 *	@size: Size of fib data area
 *	@priority: Priority of Fib
 *	@wait: Async/sync select
 *	@reply: True if a reply is wanted
 *	@callback: Called with reply
 *	@callback_data: Passed to callback
 *
 *	Sends the requested FIB to the adapter and optionally will wait for a
 *	response FIB. If the caller does not wish to wait for a response then
 *	an event to wait on must be supplied. This event will be set when a
 *	response FIB is received from the adapter.
 */
int aac_fib_send(u16 command, struct fib *fibptr, unsigned long size,
		int priority, int wait, int reply, fib_callback callback,
		void *callback_data)
{
	struct aac_dev * dev = fibptr->dev;
	struct hw_fib * hw_fib = fibptr->hw_fib_va;
	unsigned long flags = 0;
	unsigned long mflags = 0;
	unsigned long sflags = 0;

	if (!(hw_fib->header.XferState & cpu_to_le32(HostOwned)))
		return -EBUSY;

	if (hw_fib->header.XferState & cpu_to_le32(AdapterProcessed))
		return -EINVAL;

	/*
	 *	There are 5 cases with the wait and response requested flags.
	 *	The only invalid cases are if the caller requests to wait and
	 *	does not request a response and if the caller does not want a
	 *	response and the Fib is not allocated from pool. If a response
	 *	is not requested the Fib will just be deallocated by the DPC
	 *	routine when the response comes back from the adapter. No
	 *	further processing will be done besides deleting the Fib. We
	 *	will have a debug mode where the adapter can notify the host
	 *	it had a problem and the host can log that fact.
	 */
	if (wait && !reply) {
		return -EINVAL;
	} else if (!wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(Async | ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.AsyncSent);
	} else if (!wait && !reply) {
		hw_fib->header.XferState |= cpu_to_le32(NoResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NoResponseSent);
	} else if (wait && reply) {
		hw_fib->header.XferState |= cpu_to_le32(ResponseExpected);
		FIB_COUNTER_INCREMENT(aac_config.NormalSent);
	}
	/*
	 *	Map the fib into 32bits by using the fib number
	 */

	hw_fib->header.SenderFibAddress =
		cpu_to_le32(((u32)(fibptr - dev->fibs)) << 2);

	/* use the same shifted value for handle to be compatible
	 * with the new native hba command handle
	 */
	hw_fib->header.Handle =
		cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);

	/*
	 *	Set FIB state to indicate where it came from and if we want a
	 *	response from the adapter. Also load the command from the
	 *	caller.
	 *
	 *	Map the hw fib pointer as a 32bit value
	 */
	hw_fib->header.Command = cpu_to_le16(command);
	hw_fib->header.XferState |= cpu_to_le32(SentFromHost);
	/*
	 *	Set the size of the Fib we want to send to the adapter
	 */
	hw_fib->header.Size = cpu_to_le16(sizeof(struct aac_fibhdr) + size);
	if (le16_to_cpu(hw_fib->header.Size) > le16_to_cpu(hw_fib->header.SenderSize)) {
		return -EMSGSIZE;
	}
	/*
	 *	Get a queue entry, connect the FIB to it and notify
	 *	the adapter a command is ready.
	 */
	hw_fib->header.XferState |= cpu_to_le32(NormalPriority);

	/*
	 *	Fill in the Callback and CallbackContext if we are not
	 *	going to wait.
	 */
	if (!wait) {
		fibptr->callback = callback;
		fibptr->callback_data = callback_data;
		fibptr->flags = FIB_CONTEXT_FLAG;
	}

	fibptr->done = 0;

	FIB_COUNTER_INCREMENT(aac_config.FibsSent);

	dprintk((KERN_DEBUG "Fib contents:.\n"));
	dprintk((KERN_DEBUG "  Command = %d.\n", le32_to_cpu(hw_fib->header.Command)));
	dprintk((KERN_DEBUG "  SubCommand = %d.\n", le32_to_cpu(((struct aac_query_mount *)fib_data(fibptr))->command)));
	dprintk((KERN_DEBUG "  XferState = %x.\n", le32_to_cpu(hw_fib->header.XferState)));
	dprintk((KERN_DEBUG "  hw_fib va being sent=%p\n", fibptr->hw_fib_va));
	dprintk((KERN_DEBUG "  hw_fib pa being sent=%lx\n", (ulong)fibptr->hw_fib_pa));
	dprintk((KERN_DEBUG "  fib being sent=%p\n", fibptr));

	if (!dev->queues)
		return -EBUSY;

	if (wait) {
		spin_lock_irqsave(&dev->manage_lock, mflags);
		if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
			printk(KERN_INFO "No management Fibs Available:%d\n",
						dev->management_fib_count);
			spin_unlock_irqrestore(&dev->manage_lock, mflags);
			return -EBUSY;
		}
		dev->management_fib_count++;
		spin_unlock_irqrestore(&dev->manage_lock, mflags);
		spin_lock_irqsave(&fibptr->event_lock, flags);
	}

	if (dev->sync_mode) {
		if (wait)
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
		spin_lock_irqsave(&dev->sync_lock, sflags);
		if (dev->sync_fib) {
			list_add_tail(&fibptr->fiblink, &dev->sync_fib_list);
			spin_unlock_irqrestore(&dev->sync_lock, sflags);
		} else {
			dev->sync_fib = fibptr;
			spin_unlock_irqrestore(&dev->sync_lock, sflags);
			aac_adapter_sync_cmd(dev, SEND_SYNCHRONOUS_FIB,
				(u32)fibptr->hw_fib_pa, 0, 0, 0, 0, 0,
				NULL, NULL, NULL, NULL, NULL);
		}
		if (wait) {
			fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
			if (down_interruptible(&fibptr->event_wait)) {
				fibptr->flags &= ~FIB_CONTEXT_FLAG_WAIT;
				return -EFAULT;
			}
			return 0;
		}
		return -EINPROGRESS;
	}

	if (aac_adapter_deliver(fibptr) != 0) {
		printk(KERN_ERR "aac_fib_send: returned -EBUSY\n");
		if (wait) {
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			spin_lock_irqsave(&dev->manage_lock, mflags);
			dev->management_fib_count--;
			spin_unlock_irqrestore(&dev->manage_lock, mflags);
		}
		return -EBUSY;
	}

	/*
	 *	If the caller wanted us to wait for response wait now.
	 */

	if (wait) {
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		/* Only set for first known interruptable command */
		if (wait < 0) {
			/*
			 * *VERY* Dangerous to time out a command, the
			 * assumption is made that we have no hope of
			 * functioning because an interrupt routing or other
			 * hardware failure has occurred.
			 */
			unsigned long timeout = jiffies + (180 * HZ); /* 3 minutes */
			while (down_trylock(&fibptr->event_wait)) {
				int blink;
				if (time_is_before_eq_jiffies(timeout)) {
					struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue];
					atomic_dec(&q->numpending);
					if (wait == -1) {
						printk(KERN_ERR "aacraid: aac_fib_send: first asynchronous command timed out.\n"
						  "Usually a result of a PCI interrupt routing problem;\n"
						  "update mother board BIOS or consider utilizing one of\n"
						  "the SAFE mode kernel options (acpi, apic etc)\n");
					}
					return -ETIMEDOUT;
				}

				if (unlikely(pci_channel_offline(dev->pdev)))
					return -EFAULT;

				if ((blink = aac_adapter_check_health(dev)) > 0) {
					if (wait == -1) {
						printk(KERN_ERR "aacraid: aac_fib_send: adapter blinkLED 0x%x.\n"
						  "Usually a result of a serious unrecoverable hardware problem\n",
						  blink);
					}
					return -EFAULT;
				}
				/*
				 * Allow other processes / CPUS to use core
				 */
				schedule();
			}
		} else if (down_interruptible(&fibptr->event_wait)) {
			/* Do nothing ... satisfy
			 * down_interruptible must_check */
		}

		spin_lock_irqsave(&fibptr->event_lock, flags);
		if (fibptr->done == 0) {
			fibptr->done = 2; /* Tell interrupt we aborted */
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			return -ERESTARTSYS;
		}
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		BUG_ON(fibptr->done == 0);

		if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
			return -ETIMEDOUT;
		return 0;
	}
	/*
	 *	If the user does not want a response then return success,
	 *	otherwise wait for the response.
	 */
	if (reply)
		return -EINPROGRESS;
	else
		return 0;
}
int aac_hba_send(u8 command, struct fib *fibptr, fib_callback callback,
		void *callback_data)
{
	struct aac_dev *dev = fibptr->dev;
	int wait;
	unsigned long flags = 0;
	unsigned long mflags = 0;

	fibptr->flags = (FIB_CONTEXT_FLAG | FIB_CONTEXT_FLAG_NATIVE_HBA);
	if (callback) {
		wait = 0;
		fibptr->callback = callback;
		fibptr->callback_data = callback_data;
	} else
		wait = 1;


	if (command == HBA_IU_TYPE_SCSI_CMD_REQ) {
		struct aac_hba_cmd_req *hbacmd =
			(struct aac_hba_cmd_req *)fibptr->hw_fib_va;

		hbacmd->iu_type = command;
		/* bit1 of request_id must be 0 */
		hbacmd->request_id =
			cpu_to_le32((((u32)(fibptr - dev->fibs)) << 2) + 1);
		fibptr->flags |= FIB_CONTEXT_FLAG_SCSI_CMD;
	} else if (command != HBA_IU_TYPE_SCSI_TM_REQ)
		return -EINVAL;


	if (wait) {
		spin_lock_irqsave(&dev->manage_lock, mflags);
		if (dev->management_fib_count >= AAC_NUM_MGT_FIB) {
			spin_unlock_irqrestore(&dev->manage_lock, mflags);
			return -EBUSY;
		}
		dev->management_fib_count++;
		spin_unlock_irqrestore(&dev->manage_lock, mflags);
		spin_lock_irqsave(&fibptr->event_lock, flags);
	}

	if (aac_adapter_deliver(fibptr) != 0) {
		if (wait) {
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			spin_lock_irqsave(&dev->manage_lock, mflags);
			dev->management_fib_count--;
			spin_unlock_irqrestore(&dev->manage_lock, mflags);
		}
		return -EBUSY;
	}
	FIB_COUNTER_INCREMENT(aac_config.NativeSent);

	if (wait) {

		spin_unlock_irqrestore(&fibptr->event_lock, flags);

		if (unlikely(pci_channel_offline(dev->pdev)))
			return -EFAULT;

		fibptr->flags |= FIB_CONTEXT_FLAG_WAIT;
		if (down_interruptible(&fibptr->event_wait))
			fibptr->done = 2;
		fibptr->flags &= ~(FIB_CONTEXT_FLAG_WAIT);

		spin_lock_irqsave(&fibptr->event_lock, flags);
		if ((fibptr->done == 0) || (fibptr->done == 2)) {
			fibptr->done = 2; /* Tell interrupt we aborted */
			spin_unlock_irqrestore(&fibptr->event_lock, flags);
			return -ERESTARTSYS;
		}
		spin_unlock_irqrestore(&fibptr->event_lock, flags);
		WARN_ON(fibptr->done == 0);

		if (unlikely(fibptr->flags & FIB_CONTEXT_FLAG_TIMED_OUT))
			return -ETIMEDOUT;

		return 0;
	}

	return -EINPROGRESS;
}
/**
 *	aac_consumer_get	-	get the top of the queue
 *	@dev: Adapter
 *	@q: Queue
 *	@entry: Return entry
 *
 *	Will return a pointer to the entry on the top of the queue requested that
 *	we are a consumer of, and return the address of the queue entry. It does
 *	not change the state of the queue.
 */

int aac_consumer_get(struct aac_dev * dev, struct aac_queue * q, struct aac_entry **entry)
{
	u32 index;
	int status;

	if (le32_to_cpu(*q->headers.producer) == le32_to_cpu(*q->headers.consumer)) {
		status = 0;
	} else {
		/*
		 *	The consumer index must be wrapped if we have reached
		 *	the end of the queue, else we just use the entry
		 *	pointed to by the header index
		 */
		if (le32_to_cpu(*q->headers.consumer) >= q->entries)
			index = 0;
		else
			index = le32_to_cpu(*q->headers.consumer);
		*entry = q->base + index;
		status = 1;
	}
	return status;
}
/**
 *	aac_consumer_free	-	free consumer entry
 *	@dev: Adapter
 *	@q: Queue
 *	@qid: Queue ident
 *
 *	Frees up the current top of the queue we are a consumer of. If the
 *	queue was full notify the producer that the queue is no longer full.
 */

void aac_consumer_free(struct aac_dev * dev, struct aac_queue *q, u32 qid)
{
	int wasfull = 0;
	u32 notify;

	if ((le32_to_cpu(*q->headers.producer)+1) == le32_to_cpu(*q->headers.consumer))
		wasfull = 1;

	if (le32_to_cpu(*q->headers.consumer) >= q->entries)
		*q->headers.consumer = cpu_to_le32(1);
	else
		le32_add_cpu(q->headers.consumer, 1);

	if (wasfull) {
		switch (qid) {

		case HostNormCmdQueue:
			notify = HostNormCmdNotFull;
			break;
		case HostNormRespQueue:
			notify = HostNormRespNotFull;
			break;
		default:
			BUG();
			return;
		}
		aac_adapter_notify(dev, notify);
	}
}
/**
 *	aac_fib_adapter_complete	-	complete adapter issued fib
 *	@fibptr: fib to complete
 *	@size: size of fib
 *
 *	Will do all necessary work to complete a FIB that was sent from
 *	the adapter.
 */

int aac_fib_adapter_complete(struct fib *fibptr, unsigned short size)
{
	struct hw_fib * hw_fib = fibptr->hw_fib_va;
	struct aac_dev * dev = fibptr->dev;
	struct aac_queue * q;
	unsigned long nointr = 0;
	unsigned long qflags;

	if (dev->comm_interface == AAC_COMM_MESSAGE_TYPE1 ||
	    dev->comm_interface == AAC_COMM_MESSAGE_TYPE2 ||
	    dev->comm_interface == AAC_COMM_MESSAGE_TYPE3) {
		kfree(hw_fib);
		return 0;
	}

	if (hw_fib->header.XferState == 0) {
		if (dev->comm_interface == AAC_COMM_MESSAGE)
			kfree(hw_fib);
		return 0;
	}
	/*
	 *	If we plan to do anything check the structure type first.
	 */
	if (hw_fib->header.StructType != FIB_MAGIC &&
	    hw_fib->header.StructType != FIB_MAGIC2 &&
	    hw_fib->header.StructType != FIB_MAGIC2_64) {
		if (dev->comm_interface == AAC_COMM_MESSAGE)
			kfree(hw_fib);
		return -EINVAL;
	}
	/*
	 *	This block handles the case where the adapter had sent us a
	 *	command and we have finished processing the command. We
	 *	call completeFib when we are done processing the command
	 *	and want to send a response back to the adapter. This will
	 *	send the completed cdb to the adapter.
	 */
	if (hw_fib->header.XferState & cpu_to_le32(SentFromAdapter)) {
		if (dev->comm_interface == AAC_COMM_MESSAGE) {
			kfree(hw_fib);
		} else {
			u32 index;
			hw_fib->header.XferState |= cpu_to_le32(HostProcessed);
			if (size) {
				size += sizeof(struct aac_fibhdr);
				if (size > le16_to_cpu(hw_fib->header.SenderSize))
					return -EMSGSIZE;
				hw_fib->header.Size = cpu_to_le16(size);
			}
			q = &dev->queues->queue[AdapNormRespQueue];
			spin_lock_irqsave(q->lock, qflags);
			aac_queue_get(dev, &index, AdapNormRespQueue, hw_fib, 1, NULL, &nointr);
			*(q->headers.producer) = cpu_to_le32(index + 1);
			spin_unlock_irqrestore(q->lock, qflags);
			if (!(nointr & (int)aac_config.irq_mod))
				aac_adapter_notify(dev, AdapNormRespQueue);
		}
	} else {
		printk(KERN_WARNING "aac_fib_adapter_complete: "
			"Unknown xferstate detected.\n");
		BUG();
	}
	return 0;
}
/**
 *	aac_fib_complete	-	fib completion handler
 *	@fib: FIB to complete
 *
 *	Will do all necessary work to complete a FIB.
 */

int aac_fib_complete(struct fib *fibptr)
{
	struct hw_fib * hw_fib = fibptr->hw_fib_va;

	if (fibptr->flags & FIB_CONTEXT_FLAG_NATIVE_HBA) {
		fib_dealloc(fibptr);
		return 0;
	}

	/*
	 *	Check for a fib which has already been completed or with a
	 *	status wait timeout
	 */

	if (hw_fib->header.XferState == 0 || fibptr->done == 2)
		return 0;
	/*
	 *	If we plan to do anything check the structure type first.
	 */

	if (hw_fib->header.StructType != FIB_MAGIC &&
	    hw_fib->header.StructType != FIB_MAGIC2 &&
	    hw_fib->header.StructType != FIB_MAGIC2_64)
		return -EINVAL;

	/*
	 *	This block completes a cdb which originated on the host and we
	 *	just need to deallocate the cdb or reinit it. At this point the
	 *	command is complete that we had sent to the adapter and this
	 *	cdb could be reused.
	 */

	if ((hw_fib->header.XferState & cpu_to_le32(SentFromHost)) &&
		(hw_fib->header.XferState & cpu_to_le32(AdapterProcessed)))
	{
		fib_dealloc(fibptr);
	} else if (hw_fib->header.XferState & cpu_to_le32(SentFromHost)) {
		/*
		 *	This handles the case when the host has aborted the I/O
		 *	to the adapter because the adapter is not responding
		 */
		fib_dealloc(fibptr);
	} else if (hw_fib->header.XferState & cpu_to_le32(HostOwned)) {
		fib_dealloc(fibptr);
	} else {
		BUG();
	}
	return 0;
}
/**
 *	aac_printf	-	handle printf from firmware
 *	@dev: Adapter
 *	@val: Message info
 *
 *	Print a message passed to us by the controller firmware on the
 *	Adaptec board
 */

void aac_printf(struct aac_dev *dev, u32 val)
{
	char *cp = dev->printfbuf;
	if (dev->printf_enabled)
	{
		int length = val & 0xffff;
		int level = (val >> 16) & 0xffff;

		/*
		 *	The size of the printfbuf is set in port.c
		 *	There is no variable or define for it
		 */
		if (length > 255)
			length = 255;
		if (cp[length] != 0)
			cp[length] = 0;
		if (level == LOG_AAC_HIGH_ERROR)
			printk(KERN_WARNING "%s:%s", dev->name, cp);
		else
			printk(KERN_INFO "%s:%s", dev->name, cp);
	}
	memset(cp, 0, 256);
}
static inline int aac_aif_data(struct aac_aifcmd *aifcmd, uint32_t index)
{
	return le32_to_cpu(((__le32 *)aifcmd->data)[index]);
}

static void aac_handle_aif_bu(struct aac_dev *dev, struct aac_aifcmd *aifcmd)
{
	switch (aac_aif_data(aifcmd, 1)) {
	case AifBuCacheDataLoss:
		if (aac_aif_data(aifcmd, 2))
			dev_info(&dev->pdev->dev, "Backup unit had cache data loss - [%d]\n",
					aac_aif_data(aifcmd, 2));
		else
			dev_info(&dev->pdev->dev, "Backup Unit had cache data loss\n");
		break;
	case AifBuCacheDataRecover:
		if (aac_aif_data(aifcmd, 2))
			dev_info(&dev->pdev->dev, "DDR cache data recovered successfully - [%d]\n",
					aac_aif_data(aifcmd, 2));
		else
			dev_info(&dev->pdev->dev, "DDR cache data recovered successfully\n");
		break;
	}
}
/**
 *	aac_handle_aif		-	Handle a message from the firmware
 *	@dev: Which adapter this fib is from
 *	@fibptr: Pointer to fibptr from adapter
 *
 *	This routine handles a driver notify fib from the adapter and
 *	dispatches it to the appropriate routine for handling.
 */
#define AIF_SNIFF_TIMEOUT	(500*HZ)
static void aac_handle_aif(struct aac_dev * dev, struct fib * fibptr)
{
	struct hw_fib * hw_fib = fibptr->hw_fib_va;
	struct aac_aifcmd * aifcmd = (struct aac_aifcmd *)hw_fib->data;
	u32 channel, id, lun, container;
	struct scsi_device *device;
	enum {
		NOTHING,
		DELETE,
		ADD,
		CHANGE
	} device_config_needed = NOTHING;

	/* Sniff for container changes */

	if (!dev || !dev->fsa_dev)
		return;
	container = channel = id = lun = (u32)-1;

	/*
	 *	We have set this up to try and minimize the number of
	 * re-configures that take place. As a result of this when
	 * certain AIF's come in we will set a flag waiting for another
	 * type of AIF before setting the re-config flag.
	 */
	switch (le32_to_cpu(aifcmd->command)) {
	case AifCmdDriverNotify:
		switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
		case AifRawDeviceRemove:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if ((container >> 28)) {
				container = (u32)-1;
				break;
			}
			channel = (container >> 24) & 0xF;
			if (channel >= dev->maximum_num_channels) {
				container = (u32)-1;
				break;
			}
			id = container & 0xFFFF;
			if (id >= dev->maximum_num_physicals) {
				container = (u32)-1;
				break;
			}
			lun = (container >> 16) & 0xFF;
			container = (u32)-1;
			channel = aac_phys_to_logical(channel);
			device_config_needed = DELETE;
			break;

		/*
		 *	Morph or Expand complete
		 */
		case AifDenMorphComplete:
		case AifDenVolumeExtendComplete:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;

			/*
			 *	Find the scsi_device associated with the SCSI
			 * address. Make sure we have the right array, and if
			 * so set the flag to initiate a new re-config once we
			 * see an AifEnConfigChange AIF come through.
			 */

			if ((dev != NULL) && (dev->scsi_host_ptr != NULL)) {
				device = scsi_device_lookup(dev->scsi_host_ptr,
					CONTAINER_TO_CHANNEL(container),
					CONTAINER_TO_ID(container),
					CONTAINER_TO_LUN(container));
				if (device) {
					dev->fsa_dev[container].config_needed = CHANGE;
					dev->fsa_dev[container].config_waiting_on = AifEnConfigChange;
					dev->fsa_dev[container].config_waiting_stamp = jiffies;
					scsi_device_put(device);
				}
			}
		}

		/*
		 *	If we are waiting on something and this happens to be
		 * that thing then set the re-configure flag.
		 */
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;

	case AifCmdEventNotify:
		switch (le32_to_cpu(((__le32 *)aifcmd->data)[0])) {
		case AifEnBatteryEvent:
			dev->cache_protected =
				(((__le32 *)aifcmd->data)[1] == cpu_to_le32(3));
			break;
		/*
		 *	Add an Array.
		 */
		case AifEnAddContainer:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			dev->fsa_dev[container].config_needed = ADD;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		/*
		 *	Delete an Array.
		 */
		case AifEnDeleteContainer:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			dev->fsa_dev[container].config_needed = DELETE;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		/*
		 *	Container change detected. If we currently are not
		 * waiting on something else, setup to wait on a Config Change.
		 */
		case AifEnContainerChange:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if (container >= dev->maximum_num_containers)
				break;
			if (dev->fsa_dev[container].config_waiting_on &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				break;
			dev->fsa_dev[container].config_needed = CHANGE;
			dev->fsa_dev[container].config_waiting_on =
				AifEnConfigChange;
			dev->fsa_dev[container].config_waiting_stamp = jiffies;
			break;

		case AifEnConfigChange:
			break;

		case AifEnAddJBOD:
		case AifEnDeleteJBOD:
			container = le32_to_cpu(((__le32 *)aifcmd->data)[1]);
			if ((container >> 28)) {
				container = (u32)-1;
				break;
			}
			channel = (container >> 24) & 0xF;
			if (channel >= dev->maximum_num_channels) {
				container = (u32)-1;
				break;
			}
			id = container & 0xFFFF;
			if (id >= dev->maximum_num_physicals) {
				container = (u32)-1;
				break;
			}
			lun = (container >> 16) & 0xFF;
			container = (u32)-1;
			channel = aac_phys_to_logical(channel);
			device_config_needed =
			  (((__le32 *)aifcmd->data)[0] ==
			    cpu_to_le32(AifEnAddJBOD)) ? ADD : DELETE;
			if (device_config_needed == ADD) {
				device = scsi_device_lookup(dev->scsi_host_ptr,
					channel, id, lun);
				if (device) {
					scsi_remove_device(device);
					scsi_device_put(device);
				}
			}
			break;

		case AifEnEnclosureManagement:
			/*
			 * If in JBOD mode, automatic exposure of new
			 * physical target to be suppressed until configured.
			 */
			if (dev->jbod)
				break;
			switch (le32_to_cpu(((__le32 *)aifcmd->data)[3])) {
			case EM_DRIVE_INSERTION:
			case EM_DRIVE_REMOVAL:
			case EM_SES_DRIVE_INSERTION:
			case EM_SES_DRIVE_REMOVAL:
				container = le32_to_cpu(
					((__le32 *)aifcmd->data)[2]);
				if ((container >> 28)) {
					container = (u32)-1;
					break;
				}
				channel = (container >> 24) & 0xF;
				if (channel >= dev->maximum_num_channels) {
					container = (u32)-1;
					break;
				}
				id = container & 0xFFFF;
				lun = (container >> 16) & 0xFF;
				container = (u32)-1;
				if (id >= dev->maximum_num_physicals) {
					/* legacy dev_t ? */
					if ((0x2000 <= id) || lun || channel ||
					  ((channel = (id >> 7) & 0x3F) >=
					  dev->maximum_num_channels))
						break;
					lun = (id >> 4) & 7;
					id &= 0xf;
				}
				channel = aac_phys_to_logical(channel);
				device_config_needed =
				  ((((__le32 *)aifcmd->data)[3]
				    == cpu_to_le32(EM_DRIVE_INSERTION)) ||
				    (((__le32 *)aifcmd->data)[3]
				    == cpu_to_le32(EM_SES_DRIVE_INSERTION))) ?
				  ADD : DELETE;
				break;
			}
			break;
		case AifBuManagerEvent:
			aac_handle_aif_bu(dev, aifcmd);
			break;
		}

		/*
		 *	If we are waiting on something and this happens to be
		 * that thing then set the re-configure flag.
		 */
		if (container != (u32)-1) {
			if (container >= dev->maximum_num_containers)
				break;
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		} else for (container = 0;
		    container < dev->maximum_num_containers; ++container) {
			if ((dev->fsa_dev[container].config_waiting_on ==
			    le32_to_cpu(*(__le32 *)aifcmd->data)) &&
			 time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT))
				dev->fsa_dev[container].config_waiting_on = 0;
		}
		break;

	case AifCmdJobProgress:
		/*
		 *	These are job progress AIF's. When a Clear is being
		 * done on a container it is initially created then hidden from
		 * the OS. When the clear completes we don't get a config
		 * change so we monitor the job status complete on a clear then
		 * wait for a container change.
		 */

		if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
		    (((__le32 *)aifcmd->data)[6] == ((__le32 *)aifcmd->data)[5] ||
		     ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsSuccess))) {
			for (container = 0;
			    container < dev->maximum_num_containers;
			    ++container) {
				/*
				 * Stomp on all config sequencing for all
				 * containers?
				 */
				dev->fsa_dev[container].config_waiting_on =
					AifEnContainerChange;
				dev->fsa_dev[container].config_needed = ADD;
				dev->fsa_dev[container].config_waiting_stamp =
					jiffies;
			}
		}
		if (((__le32 *)aifcmd->data)[1] == cpu_to_le32(AifJobCtrZero) &&
		    ((__le32 *)aifcmd->data)[6] == 0 &&
		    ((__le32 *)aifcmd->data)[4] == cpu_to_le32(AifJobStsRunning)) {
			for (container = 0;
			    container < dev->maximum_num_containers;
			    ++container) {
				/*
				 * Stomp on all config sequencing for all
				 * containers?
				 */
				dev->fsa_dev[container].config_waiting_on =
					AifEnContainerChange;
				dev->fsa_dev[container].config_needed = DELETE;
				dev->fsa_dev[container].config_waiting_stamp =
					jiffies;
			}
		}
		break;
	}

	container = 0;
retry_next:
	if (device_config_needed == NOTHING)
		for (; container < dev->maximum_num_containers; ++container) {
			if ((dev->fsa_dev[container].config_waiting_on == 0) &&
			    (dev->fsa_dev[container].config_needed != NOTHING) &&
			    time_before(jiffies, dev->fsa_dev[container].config_waiting_stamp + AIF_SNIFF_TIMEOUT)) {
				device_config_needed =
					dev->fsa_dev[container].config_needed;
				dev->fsa_dev[container].config_needed = NOTHING;
				channel = CONTAINER_TO_CHANNEL(container);
				id = CONTAINER_TO_ID(container);
				lun = CONTAINER_TO_LUN(container);
				break;
			}
		}
	if (device_config_needed == NOTHING)
		return;

	/*
	 *	If we decided that a re-configuration needs to be done,
	 * schedule it here on the way out the door, please close the door
	 * behind you.
	 */

	/*
	 *	Find the scsi_device associated with the SCSI address,
	 * and mark it as changed, invalidating the cache. This deals
	 * with changes to existing device IDs.
	 */

	if (!dev || !dev->scsi_host_ptr)
		return;
	/*
	 * force reload of disk info via aac_probe_container
	 */
	if ((channel == CONTAINER_CHANNEL) &&
	    (device_config_needed != NOTHING)) {
		if (dev->fsa_dev[container].valid == 1)
			dev->fsa_dev[container].valid = 2;
		aac_probe_container(dev, container);
	}
	device = scsi_device_lookup(dev->scsi_host_ptr, channel, id, lun);
	if (device) {
		switch (device_config_needed) {
		case DELETE:
#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
			scsi_remove_device(device);
#else
			if (scsi_device_online(device)) {
				scsi_device_set_state(device, SDEV_OFFLINE);
				sdev_printk(KERN_INFO, device,
					"Device offlined - %s\n",
					(channel == CONTAINER_CHANNEL) ?
						"array deleted" :
						"enclosure services event");
			}
#endif
			break;
		case ADD:
			if (!scsi_device_online(device)) {
				sdev_printk(KERN_INFO, device,
					"Device online - %s\n",
					(channel == CONTAINER_CHANNEL) ?
						"array created" :
						"enclosure services event");
				scsi_device_set_state(device, SDEV_RUNNING);
			}
			/* FALLTHRU */
		case CHANGE:
			if ((channel == CONTAINER_CHANNEL)
			    && (!dev->fsa_dev[container].valid)) {
#if (defined(AAC_DEBUG_INSTRUMENT_AIF_DELETE))
				scsi_remove_device(device);
#else
				if (!scsi_device_online(device))
					break;
				scsi_device_set_state(device, SDEV_OFFLINE);
				sdev_printk(KERN_INFO, device,
					"Device offlined - %s\n",
					"array failed");
#endif
				break;
			}
			scsi_rescan_device(&device->sdev_gendev);
			break;

		default:
			break;
		}
		scsi_device_put(device);
		device_config_needed = NOTHING;
	}
	if (device_config_needed == ADD)
		scsi_add_device(dev->scsi_host_ptr, channel, id, lun);
	if (channel == CONTAINER_CHANNEL) {
		container++;
		device_config_needed = NOTHING;
		goto retry_next;
	}
}
static int _aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
{
	int index, quirks;
	int retval;
	struct Scsi_Host *host;
	struct scsi_device *dev;
	struct scsi_cmnd *command;
	struct scsi_cmnd *command_list;
	int jafo = 0;
	int bled;
	u64 dmamask;
	int num_of_fibs = 0;

	/*
	 * Assumptions:
	 *	- host is locked, unless called by the aacraid thread.
	 *	  (a matter of convenience, due to legacy issues surrounding
	 *	  eh_host_adapter_reset).
	 *	- in_reset is asserted, so no new i/o is getting to the
	 *	  card.
	 *	- The card is dead, or will be very shortly ;-/ so no new
	 *	  commands are completing in the interrupt service.
	 */
	host = aac->scsi_host_ptr;
	scsi_block_requests(host);
	aac_adapter_disable_int(aac);
	if (aac->thread->pid != current->pid) {
		spin_unlock_irq(host->host_lock);
		kthread_stop(aac->thread);
		jafo = 1;
	}

	/*
	 *	If a positive health, means in a known DEAD PANIC
	 * state and the adapter could be reset to `try again'.
	 */
	bled = forced ? 0 : aac_adapter_check_health(aac);
	retval = aac_adapter_restart(aac, bled, reset_type);

	if (retval)
		goto out;

	/*
	 *	Loop through the fibs, close the synchronous FIBS
	 */
	retval = 1;
	num_of_fibs = aac->scsi_host_ptr->can_queue + AAC_NUM_MGT_FIB;
	for (index = 0; index < num_of_fibs; index++) {

		struct fib *fib = &aac->fibs[index];
		__le32 XferState = fib->hw_fib_va->header.XferState;
		bool is_response_expected = false;

		if (!(XferState & cpu_to_le32(NoResponseExpected | Async)) &&
		    (XferState & cpu_to_le32(ResponseExpected)))
			is_response_expected = true;

		if (is_response_expected
		    || fib->flags & FIB_CONTEXT_FLAG_WAIT) {
			unsigned long flagv;
			spin_lock_irqsave(&fib->event_lock, flagv);
			up(&fib->event_wait);
			spin_unlock_irqrestore(&fib->event_lock, flagv);
			schedule();
			retval = 0;
		}
	}
	/* Give some extra time for ioctls to complete. */
	if (retval == 0)
		ssleep(2);
	index = aac->cardtype;

	/*
	 * Re-initialize the adapter, first free resources, then carefully
	 * apply the initialization sequence to come back again. Only risk
	 * is a change in Firmware dropping cache, it is assumed the caller
	 * will ensure that i/o is queisced and the card is flushed in that
	 * case.
	 */
	aac_free_irq(aac);
	aac_fib_map_free(aac);
	dma_free_coherent(&aac->pdev->dev, aac->comm_size, aac->comm_addr,
			  aac->comm_phys);
	aac->comm_addr = NULL;
	aac->comm_phys = 0;
	kfree(aac->queues);
	aac->queues = NULL;
	kfree(aac->fsa_dev);
	aac->fsa_dev = NULL;

	dmamask = DMA_BIT_MASK(32);
	quirks = aac_get_driver_ident(index)->quirks;
	if (quirks & AAC_QUIRK_31BIT)
		retval = pci_set_dma_mask(aac->pdev, dmamask);
	else if (!(quirks & AAC_QUIRK_SRC))
		retval = pci_set_dma_mask(aac->pdev, dmamask);
	else
		retval = pci_set_consistent_dma_mask(aac->pdev, dmamask);

	if (quirks & AAC_QUIRK_31BIT && !retval) {
		dmamask = DMA_BIT_MASK(31);
		retval = pci_set_consistent_dma_mask(aac->pdev, dmamask);
	}

	if (retval)
		goto out;

	if ((retval = (*(aac_get_driver_ident(index)->init))(aac)))
		goto out;

	if (jafo) {
		aac->thread = kthread_run(aac_command_thread, aac, "%s",
					  aac->name);
		if (IS_ERR(aac->thread)) {
			retval = PTR_ERR(aac->thread);
			aac->thread = NULL;
			goto out;
		}
	}
	(void)aac_get_adapter_info(aac);
	if ((quirks & AAC_QUIRK_34SG) && (host->sg_tablesize > 34)) {
		host->sg_tablesize = 34;
		host->max_sectors = (host->sg_tablesize * 8) + 112;
	}
	if ((quirks & AAC_QUIRK_17SG) && (host->sg_tablesize > 17)) {
		host->sg_tablesize = 17;
		host->max_sectors = (host->sg_tablesize * 8) + 112;
	}
	aac_get_config_status(aac, 1);
	aac_get_containers(aac);
	/*
	 * This is where the assumption that the Adapter is quiesced
	 * is important.
	 */
	command_list = NULL;
	__shost_for_each_device(dev, host) {
		unsigned long flags;
		spin_lock_irqsave(&dev->list_lock, flags);
		list_for_each_entry(command, &dev->cmd_list, list)
			if (command->SCp.phase == AAC_OWNER_FIRMWARE) {
				command->SCp.buffer = (struct scatterlist *)command_list;
				command_list = command;
			}
		spin_unlock_irqrestore(&dev->list_lock, flags);
	}
	while ((command = command_list)) {
		command_list = (struct scsi_cmnd *)command->SCp.buffer;
		command->SCp.buffer = NULL;
		command->result = DID_OK << 16
		  | COMMAND_COMPLETE << 8
		  | SAM_STAT_TASK_SET_FULL;
		command->SCp.phase = AAC_OWNER_ERROR_HANDLER;
		command->scsi_done(command);
	}
	/*
	 * Any Device that was already marked offline needs to be marked
	 * running
	 */
	__shost_for_each_device(dev, host) {
		if (!scsi_device_online(dev))
			scsi_device_set_state(dev, SDEV_RUNNING);
	}
	retval = 0;

out:
	aac->in_reset = 0;
	scsi_unblock_requests(host);

	/*
	 * Issue bus rescan to catch any configuration that might have
	 * occurred
	 */
	if (!retval && !is_kdump_kernel()) {
		dev_info(&aac->pdev->dev, "Scheduling bus rescan\n");
		aac_schedule_safw_scan_worker(aac);
	}

	if (jafo) {
		spin_lock_irq(host->host_lock);
	}
	return retval;
}
int aac_reset_adapter(struct aac_dev *aac, int forced, u8 reset_type)
{
	unsigned long flagv = 0;
	int retval;
	struct Scsi_Host *host;
	int bled;

	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
		return -EBUSY;

	if (aac->in_reset) {
		spin_unlock_irqrestore(&aac->fib_lock, flagv);
		return -EBUSY;
	}
	aac->in_reset = 1;
	spin_unlock_irqrestore(&aac->fib_lock, flagv);

	/*
	 * Wait for all commands to complete to this specific
	 * target (block maximum 60 seconds). Although not necessary,
	 * it does make us a good storage citizen.
	 */
	host = aac->scsi_host_ptr;
	scsi_block_requests(host);

	/* Quiesce build, flush cache, write through mode */
	if (forced < 2)
		aac_send_shutdown(aac);
	spin_lock_irqsave(host->host_lock, flagv);
	bled = forced ? forced :
			(aac_check_reset != 0 && aac_check_reset != 1);
	retval = _aac_reset_adapter(aac, bled, reset_type);
	spin_unlock_irqrestore(host->host_lock, flagv);

	if ((forced < 2) && (retval == -ENODEV)) {
		/* Unwind aac_send_shutdown() IOP_RESET unsupported/disabled */
		struct fib *fibctx = aac_fib_alloc(aac);
		if (fibctx) {
			struct aac_pause *cmd;
			int status;

			aac_fib_init(fibctx);

			cmd = (struct aac_pause *) fib_data(fibctx);

			cmd->command = cpu_to_le32(VM_ContainerConfig);
			cmd->type = cpu_to_le32(CT_PAUSE_IO);
			cmd->timeout = cpu_to_le32(1);
			cmd->min = cpu_to_le32(1);
			cmd->noRescan = cpu_to_le32(1);
			cmd->count = cpu_to_le32(0);

			status = aac_fib_send(ContainerCommand,
				  fibctx,
				  sizeof(struct aac_pause),
				  FsaNormal,
				  -2 /* Timeout silently */, 1,
				  NULL, NULL);

			if (status >= 0)
				aac_fib_complete(fibctx);
			/* FIB should be freed only after getting
			 * the response from the F/W */
			if (status != -ERESTARTSYS)
				aac_fib_free(fibctx);
		}
	}

	return retval;
}
int aac_check_health(struct aac_dev * aac)
{
	int BlinkLED;
	unsigned long time_now, flagv = 0;
	struct list_head * entry;

	/* Extending the scope of fib_lock slightly to protect aac->in_reset */
	if (spin_trylock_irqsave(&aac->fib_lock, flagv) == 0)
		return 0;

	if (aac->in_reset || !(BlinkLED = aac_adapter_check_health(aac))) {
		spin_unlock_irqrestore(&aac->fib_lock, flagv);
		return 0; /* OK */
	}

	aac->in_reset = 1;

	/* Fake up an AIF:
	 *	aac_aifcmd.command = AifCmdEventNotify = 1
	 *	aac_aifcmd.seqnum = 0xFFFFFFFF
	 *	aac_aifcmd.data[0] = AifEnExpEvent = 23
	 *	aac_aifcmd.data[1] = AifExeFirmwarePanic = 3
	 *	aac.aifcmd.data[2] = AifHighPriority = 3
	 *	aac.aifcmd.data[3] = BlinkLED
	 */

	time_now = jiffies/HZ;
	entry = aac->fib_list.next;

	/*
	 * For each Context that is on the
	 * fibctxList, make a copy of the
	 * fib, and then set the event to wake up the
	 * thread that is waiting for it.
	 */
	while (entry != &aac->fib_list) {
		/*
		 * Extract the fibctx
		 */
		struct aac_fib_context *fibctx = list_entry(entry, struct aac_fib_context, next);
		struct hw_fib * hw_fib;
		struct fib * fib;
		/*
		 * Check if the queue is getting
		 * backlogged
		 */
		if (fibctx->count > 20) {
			/*
			 * It's *not* jiffies folks,
			 * but jiffies / HZ, so do not
			 * panic ...
			 */
			u32 time_last = fibctx->jiffies;
			/*
			 * Has it been > 2 minutes
			 * since the last read off
			 * the queue?
			 */
			if ((time_now - time_last) > aif_timeout) {
				entry = entry->next;
				aac_close_fib_context(aac, fibctx);
				continue;
			}
		}
		/*
		 * Warning: no sleep allowed while
		 * holding spinlock
		 */
		hw_fib = kzalloc(sizeof(struct hw_fib), GFP_ATOMIC);
		fib = kzalloc(sizeof(struct fib), GFP_ATOMIC);
		if (fib && hw_fib) {
			struct aac_aifcmd * aif;

			fib->hw_fib_va = hw_fib;
			fib->dev = aac;
			aac_fib_init(fib);
			fib->type = FSAFS_NTC_FIB_CONTEXT;
			fib->size = sizeof(struct fib);
			fib->data = hw_fib->data;
			aif = (struct aac_aifcmd *)hw_fib->data;
			aif->command = cpu_to_le32(AifCmdEventNotify);
			aif->seqnum = cpu_to_le32(0xFFFFFFFF);
			((__le32 *)aif->data)[0] = cpu_to_le32(AifEnExpEvent);
			((__le32 *)aif->data)[1] = cpu_to_le32(AifExeFirmwarePanic);
			((__le32 *)aif->data)[2] = cpu_to_le32(AifHighPriority);
			((__le32 *)aif->data)[3] = cpu_to_le32(BlinkLED);

			/*
			 * Put the FIB onto the
			 * fibctx's fibs
			 */
			list_add_tail(&fib->fiblink, &fibctx->fib_list);
			fibctx->count++;
			/*
			 * Set the event to wake up the
			 * thread that is waiting.
			 */
			up(&fibctx->wait_sem);
		} else {
			printk(KERN_WARNING "aifd: didn't allocate NewFib.\n");
			kfree(fib);
			kfree(hw_fib);
		}
		entry = entry->next;
	}

	spin_unlock_irqrestore(&aac->fib_lock, flagv);

	if (BlinkLED < 0) {
		printk(KERN_ERR "%s: Host adapter is dead (or got a PCI error) %d\n",
				aac->name, BlinkLED);
		goto out;
	}

	printk(KERN_ERR "%s: Host adapter BLINK LED 0x%x\n", aac->name, BlinkLED);

out:
	aac->in_reset = 0;
	return BlinkLED;
}
static inline int is_safw_raid_volume(struct aac_dev *aac, int bus, int target)
{
	return bus == CONTAINER_CHANNEL && target < aac->maximum_num_containers;
}

static struct scsi_device *aac_lookup_safw_scsi_device(struct aac_dev *dev,
								int bus,
								int target)
{
	if (bus != CONTAINER_CHANNEL)
		bus = aac_phys_to_logical(bus);

	return scsi_device_lookup(dev->scsi_host_ptr, bus, target, 0);
}

static int aac_add_safw_device(struct aac_dev *dev, int bus, int target)
{
	if (bus != CONTAINER_CHANNEL)
		bus = aac_phys_to_logical(bus);

	return scsi_add_device(dev->scsi_host_ptr, bus, target, 0);
}

static void aac_put_safw_scsi_device(struct scsi_device *sdev)
{
	if (sdev)
		scsi_device_put(sdev);
}

static void aac_remove_safw_device(struct aac_dev *dev, int bus, int target)
{
	struct scsi_device *sdev;

	sdev = aac_lookup_safw_scsi_device(dev, bus, target);
	scsi_remove_device(sdev);
	aac_put_safw_scsi_device(sdev);
}

static inline int aac_is_safw_scan_count_equal(struct aac_dev *dev,
	int bus, int target)
{
	return dev->hba_map[bus][target].scan_counter == dev->scan_counter;
}

static int aac_is_safw_target_valid(struct aac_dev *dev, int bus, int target)
{
	if (is_safw_raid_volume(dev, bus, target))
		return dev->fsa_dev[target].valid;
	else
		return aac_is_safw_scan_count_equal(dev, bus, target);
}

static int aac_is_safw_device_exposed(struct aac_dev *dev, int bus, int target)
{
	int is_exposed = 0;
	struct scsi_device *sdev;

	sdev = aac_lookup_safw_scsi_device(dev, bus, target);
	if (sdev)
		is_exposed = 1;
	aac_put_safw_scsi_device(sdev);

	return is_exposed;
}

static int aac_update_safw_host_devices(struct aac_dev *dev)
{
	int i;
	int bus;
	int target;
	int is_exposed = 0;
	int rcode = 0;

	rcode = aac_setup_safw_adapter(dev);
	if (unlikely(rcode < 0)) {
		goto out;
	}

	for (i = 0; i < AAC_BUS_TARGET_LOOP; i++) {

		bus = get_bus_number(i);
		target = get_target_number(i);

		is_exposed = aac_is_safw_device_exposed(dev, bus, target);

		if (aac_is_safw_target_valid(dev, bus, target) && !is_exposed)
			aac_add_safw_device(dev, bus, target);
		else if (!aac_is_safw_target_valid(dev, bus, target) &&
								is_exposed)
			aac_remove_safw_device(dev, bus, target);
	}
out:
	return rcode;
}

static int aac_scan_safw_host(struct aac_dev *dev)
{
	int rcode = 0;

	rcode = aac_update_safw_host_devices(dev);
	if (rcode)
		aac_schedule_safw_scan_worker(dev);

	return rcode;
}

int aac_scan_host(struct aac_dev *dev)
{
	int rcode = 0;

	mutex_lock(&dev->scan_mutex);
	if (dev->sa_firmware)
		rcode = aac_scan_safw_host(dev);
	else
		scsi_scan_host(dev->scsi_host_ptr);
	mutex_unlock(&dev->scan_mutex);

	return rcode;
}
/**
 *	aac_handle_sa_aif	-	Handle a message from the firmware
 *	@dev: Which adapter this fib is from
 *	@fibptr: Pointer to fibptr from adapter
 *
 *	This routine handles a driver notify fib from the adapter and
 *	dispatches it to the appropriate routine for handling.
 */
static void aac_handle_sa_aif(struct aac_dev *dev, struct fib *fibptr)
{
	int i;
	u32 events = 0;

	if (fibptr->hbacmd_size & SA_AIF_HOTPLUG)
		events = SA_AIF_HOTPLUG;
	else if (fibptr->hbacmd_size & SA_AIF_HARDWARE)
		events = SA_AIF_HARDWARE;
	else if (fibptr->hbacmd_size & SA_AIF_PDEV_CHANGE)
		events = SA_AIF_PDEV_CHANGE;
	else if (fibptr->hbacmd_size & SA_AIF_LDEV_CHANGE)
		events = SA_AIF_LDEV_CHANGE;
	else if (fibptr->hbacmd_size & SA_AIF_BPSTAT_CHANGE)
		events = SA_AIF_BPSTAT_CHANGE;
	else if (fibptr->hbacmd_size & SA_AIF_BPCFG_CHANGE)
		events = SA_AIF_BPCFG_CHANGE;

	switch (events) {
	case SA_AIF_HOTPLUG:
	case SA_AIF_HARDWARE:
	case SA_AIF_PDEV_CHANGE:
	case SA_AIF_LDEV_CHANGE:
	case SA_AIF_BPCFG_CHANGE:

		aac_scan_host(dev);

		break;

	case SA_AIF_BPSTAT_CHANGE:
		/* currently do nothing */
		break;
	}

	for (i = 1; i <= 10; ++i) {
		events = src_readl(dev, MUnit.IDR);
		if (events & (1<<23)) {
			pr_warn(" AIF not cleared by firmware - %d/%d)\n",
				i, 10);
			ssleep(1);
		}
	}
}
static int get_fib_count(struct aac_dev *dev)
{
	unsigned int num = 0;
	struct list_head *entry;
	unsigned long flagv;

	/*
	 * Warning: no sleep allowed while
	 * holding spinlock. We take the estimate
	 * and pre-allocate a set of fibs outside the
	 * lock.
	 */
	num = le32_to_cpu(dev->init->r7.adapter_fibs_size)
			/ sizeof(struct hw_fib); /* some extra */
	spin_lock_irqsave(&dev->fib_lock, flagv);
	entry = dev->fib_list.next;
	while (entry != &dev->fib_list) {
		entry = entry->next;
		++num;
	}
	spin_unlock_irqrestore(&dev->fib_lock, flagv);

	return num;
}
static int fillup_pools(struct aac_dev *dev, struct hw_fib **hw_fib_pool,
						struct fib **fib_pool,
						unsigned int num)
{
	struct hw_fib **hw_fib_p;
	struct fib **fib_p;

	hw_fib_p = hw_fib_pool;
	fib_p = fib_pool;
	while (hw_fib_p < &hw_fib_pool[num]) {
		*(hw_fib_p) = kmalloc(sizeof(struct hw_fib), GFP_KERNEL);
		if (!(*(hw_fib_p++))) {
			--hw_fib_p;
			break;
		}

		*(fib_p) = kmalloc(sizeof(struct fib), GFP_KERNEL);
		if (!(*(fib_p++))) {
			kfree(*(--hw_fib_p));
			break;
		}
	}

	/*
	 * Get the actual number of allocated fibs
	 */
	num = hw_fib_p - hw_fib_pool;
	return num;
}
static void wakeup_fibctx_threads(struct aac_dev *dev,
						struct hw_fib **hw_fib_pool,
						struct fib **fib_pool,
						struct fib *fib,
						struct hw_fib *hw_fib,
						unsigned int num)
{
	unsigned long flagv;
	struct list_head *entry;
	struct hw_fib **hw_fib_p;
	struct fib **fib_p;
	u32 time_now, time_last;
	struct hw_fib *hw_newfib;
	struct fib *newfib;
	struct aac_fib_context *fibctx;

	time_now = jiffies/HZ;
	spin_lock_irqsave(&dev->fib_lock, flagv);
	entry = dev->fib_list.next;
	/*
	 * For each Context that is on the
	 * fibctxList, make a copy of the
	 * fib, and then set the event to wake up the
	 * thread that is waiting for it.
	 */

	hw_fib_p = hw_fib_pool;
	fib_p = fib_pool;
	while (entry != &dev->fib_list) {
		/*
		 * Extract the fibctx
		 */
		fibctx = list_entry(entry, struct aac_fib_context,
				next);
		/*
		 * Check if the queue is getting
		 * backlogged
		 */
		if (fibctx->count > 20) {
			/*
			 * It's *not* jiffies folks,
			 * but jiffies / HZ so do not
			 * panic ...
			 */
			time_last = fibctx->jiffies;
			/*
			 * Has it been > 2 minutes
			 * since the last read off
			 * the queue?
			 */
			if ((time_now - time_last) > aif_timeout) {
				entry = entry->next;
				aac_close_fib_context(dev, fibctx);
				continue;
			}
		}
		/*
		 * Warning: no sleep allowed while
		 * holding spinlock
		 */
		if (hw_fib_p >= &hw_fib_pool[num]) {
			pr_warn("aifd: didn't allocate NewFib\n");
			entry = entry->next;
			continue;
		}

		hw_newfib = *hw_fib_p;
		*(hw_fib_p++) = NULL;
		newfib = *fib_p;
		*(fib_p++) = NULL;
		/*
		 * Make the copy of the FIB
		 */
		memcpy(hw_newfib, hw_fib, sizeof(struct hw_fib));
		memcpy(newfib, fib, sizeof(struct fib));
		newfib->hw_fib_va = hw_newfib;
		/*
		 * Put the FIB onto the
		 * fibctx's fibs
		 */
		list_add_tail(&newfib->fiblink, &fibctx->fib_list);
		fibctx->count++;
		/*
		 * Set the event to wake up the
		 * thread that is waiting.
		 */
		up(&fibctx->wait_sem);

		entry = entry->next;
	}
	/*
	 * Set the status of this FIB
	 */
	*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
	aac_fib_adapter_complete(fib, sizeof(u32));
	spin_unlock_irqrestore(&dev->fib_lock, flagv);
}

static void aac_process_events(struct aac_dev *dev)
{
	struct hw_fib *hw_fib;
	struct fib *fib;
	unsigned long flags;
	spinlock_t *t_lock;

	t_lock = dev->queues->queue[HostNormCmdQueue].lock;
	spin_lock_irqsave(t_lock, flags);

	while (!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) {
		struct list_head *entry;
		struct aac_aifcmd *aifcmd;
		unsigned int num;

		struct hw_fib **hw_fib_pool, **hw_fib_p;
		struct fib **fib_pool, **fib_p;

		set_current_state(TASK_RUNNING);

		entry = dev->queues->queue[HostNormCmdQueue].cmdq.next;
		list_del(entry);

		t_lock = dev->queues->queue[HostNormCmdQueue].lock;
		spin_unlock_irqrestore(t_lock, flags);

		fib = list_entry(entry, struct fib, fiblink);
		hw_fib = fib->hw_fib_va;
		if (dev->sa_firmware) {
			/* Thor AIF */
			aac_handle_sa_aif(dev, fib);
			aac_fib_adapter_complete(fib, (u16)sizeof(u32));
			goto free_fib;
		}
		/*
		 * We will process the FIB here or pass it to a
		 * worker thread that is TBD. We Really can't
		 * do anything at this point since we don't have
		 * anything defined for this thread to do.
		 */
		memset(fib, 0, sizeof(struct fib));
		fib->type = FSAFS_NTC_FIB_CONTEXT;
		fib->size = sizeof(struct fib);
		fib->hw_fib_va = hw_fib;
		fib->data = hw_fib->data;
		fib->dev = dev;
		/*
		 * We only handle AifRequest fibs from the adapter.
		 */

		aifcmd = (struct aac_aifcmd *) hw_fib->data;
		if (aifcmd->command == cpu_to_le32(AifCmdDriverNotify)) {
			/* Handle Driver Notify Events */
			aac_handle_aif(dev, fib);
			*(__le32 *)hw_fib->data = cpu_to_le32(ST_OK);
			aac_fib_adapter_complete(fib, (u16)sizeof(u32));
			goto free_fib;
		}
		/*
		 * The u32 here is important and intended. We are using
		 * 32bit wrapping time to fit the adapter field
		 */

		/* Sniff events */
		if (aifcmd->command == cpu_to_le32(AifCmdEventNotify)
		 || aifcmd->command == cpu_to_le32(AifCmdJobProgress)) {
			aac_handle_aif(dev, fib);
		}

		/*
		 * get number of fibs to process
		 */
		num = get_fib_count(dev);
		if (!num)
			goto free_fib;

		hw_fib_pool = kmalloc_array(num, sizeof(struct hw_fib *),
						GFP_KERNEL);
		if (!hw_fib_pool)
			goto free_fib;

		fib_pool = kmalloc_array(num, sizeof(struct fib *), GFP_KERNEL);
		if (!fib_pool)
			goto free_hw_fib_pool;

		/*
		 * Fill up fib pointer pools with actual fibs
		 * and hw_fibs
		 */
		num = fillup_pools(dev, hw_fib_pool, fib_pool, num);
		if (!num)
			goto free_mem;

		/*
		 * wakeup the thread that is waiting for
		 * the response from fw (ioctl)
		 */
		wakeup_fibctx_threads(dev, hw_fib_pool, fib_pool,
				      fib, hw_fib, num);

free_mem:
		/* Free up the remaining resources */
		hw_fib_p = hw_fib_pool;
		fib_p = fib_pool;
		while (hw_fib_p < &hw_fib_pool[num]) {
			kfree(*hw_fib_p);
			kfree(*fib_p);
			++fib_p;
			++hw_fib_p;
		}
		kfree(fib_pool);
free_hw_fib_pool:
		kfree(hw_fib_pool);
free_fib:
		kfree(fib);
		t_lock = dev->queues->queue[HostNormCmdQueue].lock;
		spin_lock_irqsave(t_lock, flags);
	}
	/*
	 * There are no more AIF's
	 */
	t_lock = dev->queues->queue[HostNormCmdQueue].lock;
	spin_unlock_irqrestore(t_lock, flags);
}
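
/*
 * Locking pattern in aac_process_events(): the HostNormCmdQueue lock
 * only protects the cmdq list itself. It is dropped as soon as an
 * entry has been unlinked, because everything that follows (GFP_KERNEL
 * allocations, firmware completions, copying FIBs to readers) may
 * sleep, and it is re-taken before testing the list again on the next
 * loop iteration.
 */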

static int aac_send_wellness_command(struct aac_dev *dev, char *wellness_str,
						u32 datasize)
{
	struct aac_srb *srbcmd;
	struct sgmap64 *sg64;
	dma_addr_t addr;
	char *dma_buf;
	struct fib *fibptr;
	int ret = -ENOMEM;
	u32 vbus, vid;

	fibptr = aac_fib_alloc(dev);
	if (!fibptr)
		goto out;

	dma_buf = dma_alloc_coherent(&dev->pdev->dev, datasize, &addr,
				     GFP_KERNEL);
	if (!dma_buf)
		goto fib_free_out;

	aac_fib_init(fibptr);

	vbus = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_bus);
	vid = (u32)le16_to_cpu(dev->supplement_adapter_info.virt_device_target);

	srbcmd = (struct aac_srb *)fib_data(fibptr);

	srbcmd->function = cpu_to_le32(SRBF_ExecuteScsi);
	srbcmd->channel = cpu_to_le32(vbus);
	srbcmd->id = cpu_to_le32(vid);
	srbcmd->lun = 0;
	srbcmd->flags = cpu_to_le32(SRB_DataOut);
	srbcmd->timeout = cpu_to_le32(10);
	srbcmd->retry_limit = 0;
	srbcmd->cdb_size = cpu_to_le32(12);
	srbcmd->count = cpu_to_le32(datasize);

	memset(srbcmd->cdb, 0, sizeof(srbcmd->cdb));
	srbcmd->cdb[0] = BMIC_OUT;
	srbcmd->cdb[6] = WRITE_HOST_WELLNESS;
	memcpy(dma_buf, (char *)wellness_str, datasize);

	sg64 = (struct sgmap64 *)&srbcmd->sg;
	sg64->count = cpu_to_le32(1);
	sg64->sg[0].addr[1] = cpu_to_le32((u32)(((addr) >> 16) >> 16));
	sg64->sg[0].addr[0] = cpu_to_le32((u32)(addr & 0xffffffff));
	sg64->sg[0].count = cpu_to_le32(datasize);

	ret = aac_fib_send(ScsiPortCommand64, fibptr, sizeof(struct aac_srb),
				FsaNormal, 1, 1, NULL, NULL);

	dma_free_coherent(&dev->pdev->dev, datasize, dma_buf, addr);

	/*
	 * Do not set XferState to zero unless
	 * receives a response from F/W
	 */
	if (ret >= 0)
		aac_fib_complete(fibptr);

	/*
	 * FIB should be freed only after
	 * getting the response from the F/W
	 */
	if (ret != -ERESTARTSYS)
		goto fib_free_out;

out:
	return ret;
fib_free_out:
	aac_fib_free(fibptr);
	goto out;
}
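
/*
 * aac_send_wellness_command() pushes the wellness string to the
 * firmware as a SCSI pass-through: a BMIC_OUT CDB with the
 * WRITE_HOST_WELLNESS sub-command, addressed to the adapter's virtual
 * device bus/target and carried by a single 64-bit scatter/gather
 * element that points at the coherent DMA buffer holding the string.
 * The FIB is deliberately not freed on -ERESTARTSYS, since the
 * firmware may still respond to (and write into) it later.
 */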

int aac_send_safw_hostttime(struct aac_dev *dev, struct timespec64 *now)
{
	struct tm cur_tm;
	char wellness_str[] = "<HW>TD\010\0\0\0\0\0\0\0\0\0DW\0\0ZZ";
	u32 datasize = sizeof(wellness_str);
	time64_t local_time;
	int ret = -ENODEV;

	if (!dev->sa_firmware)
		goto out;

	local_time = (now->tv_sec - (sys_tz.tz_minuteswest * 60));
	time64_to_tm(local_time, 0, &cur_tm);
	cur_tm.tm_mon += 1;
	cur_tm.tm_year += 1900;
	wellness_str[8] = bin2bcd(cur_tm.tm_hour);
	wellness_str[9] = bin2bcd(cur_tm.tm_min);
	wellness_str[10] = bin2bcd(cur_tm.tm_sec);
	wellness_str[12] = bin2bcd(cur_tm.tm_mon);
	wellness_str[13] = bin2bcd(cur_tm.tm_mday);
	wellness_str[14] = bin2bcd(cur_tm.tm_year / 100);
	wellness_str[15] = bin2bcd(cur_tm.tm_year % 100);

	ret = aac_send_wellness_command(dev, wellness_str, datasize);

out:
	return ret;
}
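
/*
 * Layout of the "<HW>TD...ZZ" wellness time record built above, as the
 * code fills it in: byte 8 = hour, 9 = minute, 10 = second, 12 = month,
 * 13 = day of month, 14 = century, 15 = year within century, all BCD.
 * As an illustration (not taken from the driver), 2024-07-09 13:05:30
 * local time would be encoded as 0x13, 0x05, 0x30 at bytes 8-10 and
 * 0x07, 0x09, 0x20, 0x24 at bytes 12-15.
 */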

int aac_send_hosttime(struct aac_dev *dev, struct timespec64 *now)
{
	int ret = -ENOMEM;
	struct fib *fibptr;
	__le32 *info;

	fibptr = aac_fib_alloc(dev);
	if (!fibptr)
		goto out;

	aac_fib_init(fibptr);
	info = (__le32 *)fib_data(fibptr);
	*info = cpu_to_le32(now->tv_sec); /* overflow in y2106 */
	ret = aac_fib_send(SendHostTime, fibptr, sizeof(*info), FsaNormal,
					1, 1, NULL, NULL);

	/*
	 * Do not set XferState to zero unless
	 * receives a response from F/W
	 */
	if (ret >= 0)
		aac_fib_complete(fibptr);

	/*
	 * FIB should be freed only after
	 * getting the response from the F/W
	 */
	if (ret != -ERESTARTSYS)
		aac_fib_free(fibptr);

out:
	return ret;
}
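
/*
 * For non-sa_firmware adapters the host time is sent as a raw 32-bit
 * seconds count in a SendHostTime FIB, hence the y2106 overflow note.
 * As in aac_send_wellness_command(), aac_fib_complete() is only called
 * once aac_fib_send() has returned without error, and the FIB is kept
 * alive on -ERESTARTSYS because the firmware may still complete it.
 */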

/**
 * aac_command_thread - command processing thread
 * @dev: Adapter to monitor
 *
 * Waits on the commandready event in its queue. When the event gets set
 * it will pull FIBs off its queue. It will continue to pull FIBs off
 * until the queue is empty. When the queue is empty it will wait for
 * more FIBs.
 */
int aac_command_thread(void *data)
{
	struct aac_dev *dev = data;
	DECLARE_WAITQUEUE(wait, current);
	unsigned long next_jiffies = jiffies + HZ;
	unsigned long next_check_jiffies = next_jiffies;
	long difference = HZ;

	/*
	 * We can only have one thread per adapter for AIF's.
	 */
	if (dev->aif_thread)
		return -EINVAL;

	/*
	 * Let the DPC know it has a place to send the AIF's to.
	 */
	dev->aif_thread = 1;
	add_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	set_current_state(TASK_INTERRUPTIBLE);
	dprintk ((KERN_INFO "aac_command_thread start\n"));
	while (1) {

		aac_process_events(dev);

		/*
		 * Background activity
		 */
		if ((time_before(next_check_jiffies,next_jiffies))
		 && ((difference = next_check_jiffies - jiffies) <= 0)) {
			next_check_jiffies = next_jiffies;
			if (aac_adapter_check_health(dev) == 0) {
				difference = ((long)(unsigned)check_interval)
					   * HZ;
				next_check_jiffies = jiffies + difference;
			} else if (!dev->queues)
				break;
		}
		if (!time_before(next_check_jiffies,next_jiffies)
		 && ((difference = next_jiffies - jiffies) <= 0)) {
			struct timespec64 now;
			int ret;

			/* Don't even try to talk to adapter if it's sick */
			ret = aac_adapter_check_health(dev);
			if (ret || !dev->queues)
				break;
			next_check_jiffies = jiffies
					   + ((long)(unsigned)check_interval)
					   * HZ;
			ktime_get_real_ts64(&now);

			/* Synchronize our watches */
			if (((NSEC_PER_SEC - (NSEC_PER_SEC / HZ)) > now.tv_nsec)
			 && (now.tv_nsec > (NSEC_PER_SEC / HZ)))
				difference = HZ + HZ / 2 -
					     now.tv_nsec / (NSEC_PER_SEC / HZ);
			else {
				if (now.tv_nsec > NSEC_PER_SEC / 2)
					++now.tv_sec;

				if (dev->sa_firmware)
					ret =
					aac_send_safw_hostttime(dev, &now);
				else
					ret = aac_send_hosttime(dev, &now);

				difference = (long)(unsigned)update_interval*HZ;
			}
			next_jiffies = jiffies + difference;
			if (time_before(next_check_jiffies,next_jiffies))
				difference = next_check_jiffies - jiffies;
		}
		if (difference <= 0)
			difference = 1;
		set_current_state(TASK_INTERRUPTIBLE);

		if (kthread_should_stop())
			break;

		/*
		 * we probably want usleep_range() here instead of the
		 * jiffies computation
		 */
		schedule_timeout(difference);

		if (kthread_should_stop())
			break;
	}
	if (dev->queues)
		remove_wait_queue(&dev->queues->queue[HostNormCmdQueue].cmdready, &wait);
	dev->aif_thread = 0;
	return 0;
}
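
/*
 * Lifecycle sketch (illustrative, not a quote of the init code): the
 * AIF thread is normally created once per adapter with kthread_run()
 * and torn down with kthread_stop(), which is what the
 * kthread_should_stop() checks in the loop above cooperate with.
 * Roughly:
 *
 *	task = kthread_run(aac_command_thread, aac, "%s", aac->name);
 *	if (IS_ERR(task))
 *		return PTR_ERR(task);
 *	...
 *	kthread_stop(task);
 */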

int aac_acquire_irq(struct aac_dev *dev)
{
	int i;
	int j;
	int ret = 0;

	if (!dev->sync_mode && dev->msi_enabled && dev->max_msix > 1) {
		for (i = 0; i < dev->max_msix; i++) {
			dev->aac_msix[i].vector_no = i;
			dev->aac_msix[i].dev = dev;
			if (request_irq(pci_irq_vector(dev->pdev, i),
					dev->a_ops.adapter_intr,
					0, "aacraid", &(dev->aac_msix[i]))) {
				printk(KERN_ERR "%s%d: Failed to register IRQ for vector %d.\n",
						dev->name, dev->id, i);
				for (j = 0 ; j < i ; j++)
					free_irq(pci_irq_vector(dev->pdev, j),
						 &(dev->aac_msix[j]));
				pci_disable_msix(dev->pdev);
				ret = -1;
			}
		}
	} else {
		dev->aac_msix[0].vector_no = 0;
		dev->aac_msix[0].dev = dev;

		if (request_irq(dev->pdev->irq, dev->a_ops.adapter_intr,
			IRQF_SHARED, "aacraid",
			&(dev->aac_msix[0])) < 0) {
			if (dev->msi)
				pci_disable_msi(dev->pdev);
			printk(KERN_ERR "%s%d: Interrupt unavailable.\n",
					dev->name, dev->id);
			ret = -1;
		}
	}
	return ret;
}
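
/*
 * In MSI-X mode aac_acquire_irq() registers one handler per vector,
 * each with its own dev->aac_msix[i] cookie so the interrupt handler
 * can tell which vector fired. If any request_irq() fails, the vectors
 * registered so far are freed and MSI-X is disabled; the function then
 * reports failure with a bare -1 (historical convention in this
 * driver) rather than a standard errno.
 */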

void aac_free_irq(struct aac_dev *dev)
{
	int i;
	int cpu;

	cpu = cpumask_first(cpu_online_mask);
	if (aac_is_src(dev)) {
		if (dev->max_msix > 1) {
			for (i = 0; i < dev->max_msix; i++)
				free_irq(pci_irq_vector(dev->pdev, i),
					 &(dev->aac_msix[i]));
		} else {
			free_irq(dev->pdev->irq, &(dev->aac_msix[0]));
		}
	} else {
		free_irq(dev->pdev->irq, dev);
	}
	if (dev->msi)
		pci_disable_msi(dev->pdev);
	else if (dev->max_msix > 1)
		pci_disable_msix(dev->pdev);
}
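
/*
 * aac_free_irq() mirrors aac_acquire_irq(): every vector that was
 * requested must be freed with the same cookie before MSI or MSI-X is
 * disabled. The non-SRC branch frees dev->pdev->irq with 'dev' as the
 * cookie because, on those older controllers, the interrupt appears to
 * be registered by the controller-specific init code rather than by
 * aac_acquire_irq().
 */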