/*
*******************************************************************************
**       FILE NAME  : arcmsr_hba.c
**       Description: SCSI RAID Device Driver for
**                    ARECA RAID Host adapter
*******************************************************************************
** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved
**
**     Web site: www.areca.com.tw
**       E-mail: support@areca.com.tw
**
** This program is free software; you can redistribute it and/or modify
** it under the terms of the GNU General Public License version 2 as
** published by the Free Software Foundation.
** This program is distributed in the hope that it will be useful,
** but WITHOUT ANY WARRANTY; without even the implied warranty of
** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
** GNU General Public License for more details.
*******************************************************************************
** Redistribution and use in source and binary forms, with or without
** modification, are permitted provided that the following conditions
** are met:
** 1. Redistributions of source code must retain the above copyright
**    notice, this list of conditions and the following disclaimer.
** 2. Redistributions in binary form must reproduce the above copyright
**    notice, this list of conditions and the following disclaimer in the
**    documentation and/or other materials provided with the distribution.
** 3. The name of the author may not be used to endorse or promote products
**    derived from this software without specific prior written permission.
**
** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
** DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
** (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************
** For history of changes, see Documentation/scsi/ChangeLog.arcmsr
**     Firmware Specification, see Documentation/scsi/arcmsr_spec.txt
*******************************************************************************
*/
#include <linux/module.h>
#include <linux/reboot.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/timer.h>
#include <linux/slab.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsicam.h>
#include "arcmsr.h"
MODULE_AUTHOR("Nick Cheng <support@areca.com.tw>");
MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/16xx/1880) SATA/SAS RAID Host Bus Adapter");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_VERSION(ARCMSR_DRIVER_VERSION);
static int sleeptime = 10;
static int retrycount = 30;
wait_queue_head_t wait_q;
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
	struct scsi_cmnd *cmd);
static int arcmsr_iop_confirm(struct AdapterControlBlock *acb);
static int arcmsr_abort(struct scsi_cmnd *);
static int arcmsr_bus_reset(struct scsi_cmnd *);
static int arcmsr_bios_param(struct scsi_device *sdev,
	struct block_device *bdev, sector_t capacity, int *info);
static int arcmsr_queue_command(struct scsi_cmnd *cmd,
	void (*done) (struct scsi_cmnd *));
static int arcmsr_probe(struct pci_dev *pdev,
	const struct pci_device_id *id);
static void arcmsr_remove(struct pci_dev *pdev);
static void arcmsr_shutdown(struct pci_dev *pdev);
static void arcmsr_iop_init(struct AdapterControlBlock *acb);
static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb);
static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb);
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb);
static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb);
static void arcmsr_request_device_map(unsigned long pacb);
static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb);
static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb);
static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb);
static void arcmsr_message_isr_bh_fn(struct work_struct *work);
static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb);
static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb);
static void arcmsr_hbc_message_isr(struct AdapterControlBlock *pACB);
static void arcmsr_hardware_reset(struct AdapterControlBlock *acb);
static const char *arcmsr_info(struct Scsi_Host *);
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb);
static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev,
	int queue_depth, int reason)
{
	if (reason != SCSI_QDEPTH_DEFAULT)
		return -EOPNOTSUPP;

	if (queue_depth > ARCMSR_MAX_CMD_PERLUN)
		queue_depth = ARCMSR_MAX_CMD_PERLUN;
	scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth);
	return queue_depth;
}
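
/*
 * SCSI mid-layer entry points and limits advertised for every supported
 * Areca adapter family; per-host values such as max_sectors and
 * sg_tablesize may be raised later from the firmware configuration.
 */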
static struct scsi_host_template arcmsr_scsi_host_template = {
	.module			= THIS_MODULE,
	.name			= "ARCMSR ARECA SATA/SAS RAID Controller"
				  ARCMSR_DRIVER_VERSION,
	.info			= arcmsr_info,
	.queuecommand		= arcmsr_queue_command,
	.eh_abort_handler	= arcmsr_abort,
	.eh_bus_reset_handler	= arcmsr_bus_reset,
	.bios_param		= arcmsr_bios_param,
	.change_queue_depth	= arcmsr_adjust_disk_queue_depth,
	.can_queue		= ARCMSR_MAX_FREECCB_NUM,
	.this_id		= ARCMSR_SCSI_INITIATOR_ID,
	.sg_tablesize		= ARCMSR_DEFAULT_SG_ENTRIES,
	.max_sectors		= ARCMSR_MAX_XFER_SECTORS_C,
	.cmd_per_lun		= ARCMSR_MAX_CMD_PERLUN,
	.use_clustering		= ENABLE_CLUSTERING,
	.shost_attrs		= arcmsr_host_attrs,
};

static struct pci_device_id arcmsr_device_id_table[] = {
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681)},
	{PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880)},
	{0, 0},	/* Terminating entry */
};
MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table);

static struct pci_driver arcmsr_pci_driver = {
	.name		= "arcmsr",
	.id_table	= arcmsr_device_id_table,
	.probe		= arcmsr_probe,
	.remove		= arcmsr_remove,
	.shutdown	= arcmsr_shutdown,
};
/*
****************************************************************************
****************************************************************************
*/
int arcmsr_sleep_for_bus_reset(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *shost = NULL;
	int i, isleep;

	shost = cmd->device->host;
	isleep = sleeptime / 10;
	for (i = 0; i < isleep; i++) {
		msleep(10000);
	}
	isleep = sleeptime % 10;
	if (isleep > 0)
		msleep(isleep * 1000);
	printk(KERN_NOTICE "wake-up\n");
	return 0;
}

static void arcmsr_free_hbb_mu(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
	case ACB_ADAPTER_TYPE_C:
		break;
	case ACB_ADAPTER_TYPE_B:{
		dma_free_coherent(&acb->pdev->dev,
			sizeof(struct MessageUnit_B),
			acb->pmuB, acb->dma_coherent_handle_hbb_mu);
		}
	}
}
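
/*
 * Map the controller's register/message window: BAR 0 for type A,
 * BARs 0 and 2 for type B, and BAR 1 (non-cached) for type C (ARC-1880).
 */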
static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
{
	struct pci_dev *pdev = acb->pdev;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:{
		acb->pmuA = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
		if (!acb->pmuA) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail\n", acb->host->host_no);
			return false;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_B:{
		void __iomem *mem_base0, *mem_base1;

		mem_base0 = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
		if (!mem_base0) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail\n", acb->host->host_no);
			return false;
		}
		mem_base1 = ioremap(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2));
		if (!mem_base1) {
			iounmap(mem_base0);
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail\n", acb->host->host_no);
			return false;
		}
		acb->mem_base0 = mem_base0;
		acb->mem_base1 = mem_base1;
		break;
	}
	case ACB_ADAPTER_TYPE_C:{
		acb->pmuC = ioremap_nocache(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
		if (!acb->pmuC) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail\n", acb->host->host_no);
			return false;
		}
		if (readl(&acb->pmuC->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
			writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &acb->pmuC->outbound_doorbell_clear);/*clear interrupt*/
			return true;
		}
		break;
	}
	}
	return true;
}

static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:{
		iounmap(acb->pmuA);
		}
		break;
	case ACB_ADAPTER_TYPE_B:{
		iounmap(acb->mem_base0);
		iounmap(acb->mem_base1);
		}
		break;
	case ACB_ADAPTER_TYPE_C:{
		iounmap(acb->pmuC);
		}
	}
}

static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
{
	irqreturn_t handle_state;
	struct AdapterControlBlock *acb = dev_id;

	handle_state = arcmsr_interrupt(acb);
	return handle_state;
}
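
/*
 * Report a disk geometry for the BIOS: use the on-disk partition table
 * when one is readable, otherwise fall back to a capacity-based
 * heads/sectors heuristic.
 */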
static int arcmsr_bios_param(struct scsi_device *sdev,
		struct block_device *bdev, sector_t capacity, int *geom)
{
	int ret, heads, sectors, cylinders, total_capacity;
	unsigned char *buffer;/* return copy of block device's partition table */

	buffer = scsi_bios_ptable(bdev);
	if (buffer) {
		ret = scsi_partsize(buffer, capacity, &geom[2], &geom[0], &geom[1]);
		kfree(buffer);
		if (ret != -1)
			return ret;
	}
	total_capacity = capacity;
	heads = 64;
	sectors = 32;
	cylinders = total_capacity / (heads * sectors);
	if (cylinders > 1024) {
		heads = 255;
		sectors = 63;
		cylinders = total_capacity / (heads * sectors);
	}
	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;
	return 0;
}
static void arcmsr_define_adapter_type(struct AdapterControlBlock *acb)
{
	struct pci_dev *pdev = acb->pdev;
	u16 dev_id;

	pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id);
	acb->dev_id = dev_id;
	switch (dev_id) {
	case 0x1880: {
		acb->adapter_type = ACB_ADAPTER_TYPE_C;
		}
		break;
	case 0x1201: {
		acb->adapter_type = ACB_ADAPTER_TYPE_B;
		}
		break;

	default: acb->adapter_type = ACB_ADAPTER_TYPE_A;
	}
}

static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	uint32_t Index;
	uint8_t Retries = 0x00;

	do {
		for (Index = 0; Index < 100; Index++) {
			if (readl(&reg->outbound_intstatus) &
					ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
				writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT,
					&reg->outbound_intstatus);
				return true;
			}
			msleep(10);
		}/*max 1 second*/
	} while (Retries++ < 20);/*max 20 sec*/
	return false;
}

static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	uint32_t Index;
	uint8_t Retries = 0x00;

	do {
		for (Index = 0; Index < 100; Index++) {
			if (readl(reg->iop2drv_doorbell)
				& ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
				writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN,
					reg->iop2drv_doorbell);
				writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
				return true;
			}
			msleep(10);
		}/*max 1 second*/
	} while (Retries++ < 20);/*max 20 sec*/
	return false;
}

static uint8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC;
	unsigned char Retries = 0x00;
	uint32_t Index;

	do {
		for (Index = 0; Index < 100; Index++) {
			if (readl(&phbcmu->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
				writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &phbcmu->outbound_doorbell_clear);/*clear interrupt*/
				return true;
			}
			msleep(10);
		}/*max 1 second*/
	} while (Retries++ < 20); /*max 20 sec*/
	return false;
}
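
/*
 * Ask the IOP to flush its write cache and wait for the message-0
 * acknowledgement, retrying up to 30 times before giving up.
 */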
static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	int retry_count = 30;

	writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
	do {
		if (arcmsr_hba_wait_msgint_ready(acb))
			break;
		else {
			retry_count--;
			printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' "
				"timeout, retry count down = %d\n",
				acb->host->host_no, retry_count);
		}
	} while (retry_count != 0);
}

static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	int retry_count = 30;

	writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell);
	do {
		if (arcmsr_hbb_wait_msgint_ready(acb))
			break;
		else {
			retry_count--;
			printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' "
				"timeout, retry count down = %d\n",
				acb->host->host_no, retry_count);
		}
	} while (retry_count != 0);
}

static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;
	int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */

	writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0);
	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
	do {
		if (arcmsr_hbc_wait_msgint_ready(pACB)) {
			break;
		} else {
			retry_count--;
			printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' "
				"timeout, retry count down = %d\n",
				pACB->host->host_no, retry_count);
		}
	} while (retry_count != 0);
}

static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		arcmsr_flush_hba_cache(acb);
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		arcmsr_flush_hbb_cache(acb);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		arcmsr_flush_hbc_cache(acb);
		}
	}
}
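
/*
 * Carve the command control blocks (CCBs) out of a single DMA-coherent
 * allocation.  Each CCB is rounded up to a 32-byte boundary; type A/B
 * controllers are handed the CDB physical address shifted right by 5
 * (matching the << 5 applied when completions are read back), while
 * type C posts the full address.
 */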
static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb)
{
	struct pci_dev *pdev = acb->pdev;
	void *dma_coherent;
	dma_addr_t dma_coherent_handle;
	struct CommandControlBlock *ccb_tmp;
	int i = 0, j = 0;
	dma_addr_t cdb_phyaddr;
	unsigned long roundup_ccbsize = 0, offset;
	unsigned long max_xfer_len;
	unsigned long max_sg_entrys;
	uint32_t firm_config_version;

	for (i = 0; i < ARCMSR_MAX_TARGETID; i++)
		for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++)
			acb->devstate[i][j] = ARECA_RAID_GONE;

	max_xfer_len = ARCMSR_MAX_XFER_LEN;
	max_sg_entrys = ARCMSR_DEFAULT_SG_ENTRIES;
	firm_config_version = acb->firm_cfg_version;
	if ((firm_config_version & 0xFF) >= 3) {
		max_xfer_len = (ARCMSR_CDB_SG_PAGE_LENGTH << ((firm_config_version >> 8) & 0xFF)) * 1024;/* max 4M byte */
		max_sg_entrys = (max_xfer_len/4096);
	}
	acb->host->max_sectors = max_xfer_len/512;
	acb->host->sg_tablesize = max_sg_entrys;
	roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32);
	acb->uncache_size = roundup_ccbsize * ARCMSR_MAX_FREECCB_NUM + 32;
	dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL);
	if (!dma_coherent) {
		printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no);
		return -ENOMEM;
	}
	acb->dma_coherent = dma_coherent;
	acb->dma_coherent_handle = dma_coherent_handle;
	memset(dma_coherent, 0, acb->uncache_size);
	offset = roundup((unsigned long)dma_coherent, 32) - (unsigned long)dma_coherent;
	dma_coherent_handle = dma_coherent_handle + offset;
	dma_coherent = (struct CommandControlBlock *)dma_coherent + offset;
	ccb_tmp = dma_coherent;
	acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle;
	for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
		cdb_phyaddr = dma_coherent_handle + offsetof(struct CommandControlBlock, arcmsr_cdb);
		ccb_tmp->cdb_phyaddr_pattern = ((acb->adapter_type == ACB_ADAPTER_TYPE_C) ? cdb_phyaddr : (cdb_phyaddr >> 5));
		acb->pccb_pool[i] = ccb_tmp;
		ccb_tmp->acb = acb;
		INIT_LIST_HEAD(&ccb_tmp->list);
		list_add_tail(&ccb_tmp->list, &acb->ccb_free_list);
		ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize);
		dma_coherent_handle = dma_coherent_handle + roundup_ccbsize;
	}
	return 0;
}
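
/*
 * Bottom half for the "get config" message interrupt: compare the
 * device map reported by the firmware with the cached copy and
 * hot-add or remove SCSI devices for every bit that changed.
 */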
static void arcmsr_message_isr_bh_fn(struct work_struct *work)
{
	struct AdapterControlBlock *acb = container_of(work, struct AdapterControlBlock, arcmsr_do_message_isr_bh);

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {

		struct MessageUnit_A __iomem *reg = acb->pmuA;
		char *acb_dev_map = (char *)acb->device_map;
		uint32_t __iomem *signature = (uint32_t __iomem *)(&reg->message_rwbuffer[0]);
		char __iomem *devicemap = (char __iomem *)(&reg->message_rwbuffer[21]);
		int target, lun;
		struct scsi_device *psdev;
		char diff;

		atomic_inc(&acb->rq_map_token);
		if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
			for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
				diff = (*acb_dev_map)^readb(devicemap);
				if (diff != 0) {
					char temp;
					*acb_dev_map = readb(devicemap);
					temp = *acb_dev_map;
					for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
						if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
							scsi_add_device(acb->host, 0, target, lun);
						} else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) {
							psdev = scsi_device_lookup(acb->host, 0, target, lun);
							if (psdev != NULL) {
								scsi_remove_device(psdev);
								scsi_device_put(psdev);
							}
						}
						temp >>= 1;
						diff >>= 1;
					}
				}
				devicemap++;
				acb_dev_map++;
			}
		}
		break;
	}

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		char *acb_dev_map = (char *)acb->device_map;
		uint32_t __iomem *signature = (uint32_t __iomem *)(&reg->message_rwbuffer[0]);
		char __iomem *devicemap = (char __iomem *)(&reg->message_rwbuffer[21]);
		int target, lun;
		struct scsi_device *psdev;
		char diff;

		atomic_inc(&acb->rq_map_token);
		if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
			for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
				diff = (*acb_dev_map)^readb(devicemap);
				if (diff != 0) {
					char temp;
					*acb_dev_map = readb(devicemap);
					temp = *acb_dev_map;
					for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
						if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
							scsi_add_device(acb->host, 0, target, lun);
						} else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) {
							psdev = scsi_device_lookup(acb->host, 0, target, lun);
							if (psdev != NULL) {
								scsi_remove_device(psdev);
								scsi_device_put(psdev);
							}
						}
						temp >>= 1;
						diff >>= 1;
					}
				}
				devicemap++;
				acb_dev_map++;
			}
		}
		break;
	}

	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C *reg = acb->pmuC;
		char *acb_dev_map = (char *)acb->device_map;
		uint32_t __iomem *signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]);
		char __iomem *devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]);
		int target, lun;
		struct scsi_device *psdev;
		char diff;

		atomic_inc(&acb->rq_map_token);
		if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) {
			for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) {
				diff = (*acb_dev_map)^readb(devicemap);
				if (diff != 0) {
					char temp;
					*acb_dev_map = readb(devicemap);
					temp = *acb_dev_map;
					for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) {
						if ((temp & 0x01) == 1 && (diff & 0x01) == 1) {
							scsi_add_device(acb->host, 0, target, lun);
						} else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) {
							psdev = scsi_device_lookup(acb->host, 0, target, lun);
							if (psdev != NULL) {
								scsi_remove_device(psdev);
								scsi_device_put(psdev);
							}
						}
						temp >>= 1;
						diff >>= 1;
					}
				}
				devicemap++;
				acb_dev_map++;
			}
		}
	}
	}
}
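
/*
 * PCI probe: enable the device, allocate the Scsi_Host, map the
 * register window, read the firmware specification, build the CCB
 * pool, register the host and IRQ, and start the periodic device-map
 * timer.  Failures unwind through the labels at the end.
 */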
static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct Scsi_Host *host;
	struct AdapterControlBlock *acb;
	uint8_t bus, dev_fun;
	int error;

	error = pci_enable_device(pdev);
	if (error) {
		return -ENODEV;
	}
	host = scsi_host_alloc(&arcmsr_scsi_host_template, sizeof(struct AdapterControlBlock));
	if (!host) {
		goto pci_disable_dev;
	}
	error = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (error) {
		error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (error) {
			printk(KERN_WARNING
			       "scsi%d: No suitable DMA mask available\n",
			       host->host_no);
			goto scsi_host_release;
		}
	}
	init_waitqueue_head(&wait_q);
	bus = pdev->bus->number;
	dev_fun = pdev->devfn;
	acb = (struct AdapterControlBlock *) host->hostdata;
	memset(acb, 0, sizeof(struct AdapterControlBlock));
	acb->pdev = pdev;
	acb->host = host;
	host->max_lun = ARCMSR_MAX_TARGETLUN;
	host->max_id = ARCMSR_MAX_TARGETID;		/*16:8*/
	host->max_cmd_len = 16;	 			/*this is issue of 64bit LBA ,over 2T byte*/
	host->can_queue = ARCMSR_MAX_FREECCB_NUM;	/* max simultaneous cmds */
	host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN;
	host->this_id = ARCMSR_SCSI_INITIATOR_ID;
	host->unique_id = (bus << 8) | dev_fun;
	pci_set_drvdata(pdev, host);
	pci_set_master(pdev);
	error = pci_request_regions(pdev, "arcmsr");
	if (error) {
		goto scsi_host_release;
	}
	spin_lock_init(&acb->eh_lock);
	spin_lock_init(&acb->ccblist_lock);
	acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED |
			ACB_F_MESSAGE_RQBUFFER_CLEARED |
			ACB_F_MESSAGE_WQBUFFER_READED);
	acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER;
	INIT_LIST_HEAD(&acb->ccb_free_list);
	arcmsr_define_adapter_type(acb);
	error = arcmsr_remap_pciregion(acb);
	if (!error) {
		goto pci_release_regs;
	}
	error = arcmsr_get_firmware_spec(acb);
	if (!error) {
		goto unmap_pci_region;
	}
	error = arcmsr_alloc_ccb_pool(acb);
	if (error) {
		goto free_hbb_mu;
	}
	arcmsr_iop_init(acb);
	error = scsi_add_host(host, &pdev->dev);
	if (error) {
		goto RAID_controller_stop;
	}
	error = request_irq(pdev->irq, arcmsr_do_interrupt, IRQF_SHARED, "arcmsr", acb);
	if (error) {
		goto scsi_host_remove;
	}
	host->irq = pdev->irq;
	scsi_scan_host(host);
	INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn);
	atomic_set(&acb->rq_map_token, 16);
	atomic_set(&acb->ante_token_value, 16);
	acb->fw_flag = FW_NORMAL;
	init_timer(&acb->eternal_timer);
	acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
	acb->eternal_timer.data = (unsigned long) acb;
	acb->eternal_timer.function = &arcmsr_request_device_map;
	add_timer(&acb->eternal_timer);
	if (arcmsr_alloc_sysfs_attr(acb))
		goto out_free_sysfs;
	return 0;
out_free_sysfs:
scsi_host_remove:
	scsi_remove_host(host);
RAID_controller_stop:
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	arcmsr_free_ccb_pool(acb);
free_hbb_mu:
	arcmsr_free_hbb_mu(acb);
unmap_pci_region:
	arcmsr_unmap_pciregion(acb);
pci_release_regs:
	pci_release_regions(pdev);
scsi_host_release:
	scsi_host_put(host);
pci_disable_dev:
	pci_disable_device(pdev);
	return -ENODEV;
}
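
/*
 * Tell the IOP to abort every outstanding command and wait for the
 * acknowledgement; one helper per adapter family.
 */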
static uint8_t arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;

	writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
	if (!arcmsr_hba_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'abort all outstanding command' timeout\n",
			acb->host->host_no);
		return false;
	}
	return true;
}

static uint8_t arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;

	writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell);
	if (!arcmsr_hbb_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'abort all outstanding command' timeout\n",
			acb->host->host_no);
		return false;
	}
	return true;
}

static uint8_t arcmsr_abort_hbc_allcmd(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;

	writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0);
	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
	if (!arcmsr_hbc_wait_msgint_ready(pACB)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'abort all outstanding command' timeout\n",
			pACB->host->host_no);
		return false;
	}
	return true;
}

static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb)
{
	uint8_t rtnval = 0;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		rtnval = arcmsr_abort_hba_allcmd(acb);
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		rtnval = arcmsr_abort_hbb_allcmd(acb);
		}
		break;

	case ACB_ADAPTER_TYPE_C: {
		rtnval = arcmsr_abort_hbc_allcmd(acb);
		}
	}
	return rtnval;
}

static bool arcmsr_hbb_enable_driver_mode(struct AdapterControlBlock *pacb)
{
	struct MessageUnit_B *reg = pacb->pmuB;

	writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell);
	if (!arcmsr_hbb_wait_msgint_ready(pacb)) {
		printk(KERN_ERR "arcmsr%d: can't set driver mode.\n", pacb->host->host_no);
		return false;
	}
	return true;
}

static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb)
{
	struct scsi_cmnd *pcmd = ccb->pcmd;

	scsi_dma_unmap(pcmd);
}

static void arcmsr_ccb_complete(struct CommandControlBlock *ccb)
{
	struct AdapterControlBlock *acb = ccb->acb;
	struct scsi_cmnd *pcmd = ccb->pcmd;
	unsigned long flags;

	atomic_dec(&acb->ccboutstandingcount);
	arcmsr_pci_unmap_dma(ccb);
	ccb->startdone = ARCMSR_CCB_DONE;
	spin_lock_irqsave(&acb->ccblist_lock, flags);
	list_add_tail(&ccb->list, &acb->ccb_free_list);
	spin_unlock_irqrestore(&acb->ccblist_lock, flags);
	pcmd->scsi_done(pcmd);
}
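
/*
 * Copy the auto-sense data returned in the ARCMSR CDB into the SCSI
 * command's sense buffer.
 */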
static void arcmsr_report_sense_info(struct CommandControlBlock *ccb)
{
	struct scsi_cmnd *pcmd = ccb->pcmd;
	struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer;

	pcmd->result = DID_OK << 16;
	if (sensebuffer) {
		int sense_data_length =
			sizeof(struct SENSE_DATA) < SCSI_SENSE_BUFFERSIZE
			? sizeof(struct SENSE_DATA) : SCSI_SENSE_BUFFERSIZE;
		memset(sensebuffer, 0, SCSI_SENSE_BUFFERSIZE);
		memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length);
		sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS;
		sensebuffer->Valid = 1;
	}
}

static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb)
{
	u32 orig_mask = 0;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		orig_mask = readl(&reg->outbound_intmask);
		writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE,
			&reg->outbound_intmask);
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		orig_mask = readl(reg->iop2drv_doorbell_mask);
		writel(0, reg->iop2drv_doorbell_mask);
		}
		break;
	case ACB_ADAPTER_TYPE_C:{
		struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
		/* disable all outbound interrupt */
		orig_mask = readl(&reg->host_int_mask); /* disable outbound message0 int */
		writel(orig_mask|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
		}
		break;
	}
	return orig_mask;
}
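
/*
 * Translate the adapter's per-command DeviceStatus into a SCSI result
 * and complete the CCB, updating the cached RAID device state.
 */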
static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb,
		struct CommandControlBlock *ccb, bool error)
{
	uint8_t id, lun;

	id = ccb->pcmd->device->id;
	lun = ccb->pcmd->device->lun;
	if (!error) {
		if (acb->devstate[id][lun] == ARECA_RAID_GONE)
			acb->devstate[id][lun] = ARECA_RAID_GOOD;
		ccb->pcmd->result = DID_OK << 16;
		arcmsr_ccb_complete(ccb);
	} else {
		switch (ccb->arcmsr_cdb.DeviceStatus) {
		case ARCMSR_DEV_SELECT_TIMEOUT: {
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_NO_CONNECT << 16;
			arcmsr_ccb_complete(ccb);
			}
			break;

		case ARCMSR_DEV_ABORTED:

		case ARCMSR_DEV_INIT_FAIL: {
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_BAD_TARGET << 16;
			arcmsr_ccb_complete(ccb);
			}
			break;

		case ARCMSR_DEV_CHECK_CONDITION: {
			acb->devstate[id][lun] = ARECA_RAID_GOOD;
			arcmsr_report_sense_info(ccb);
			arcmsr_ccb_complete(ccb);
			}
			break;

		default:
			printk(KERN_NOTICE
				"arcmsr%d: scsi id = %d lun = %d isr got command error done, "
				"but got unknown DeviceStatus = 0x%x\n",
				acb->host->host_no,
				id,
				lun,
				ccb->arcmsr_cdb.DeviceStatus);
			acb->devstate[id][lun] = ARECA_RAID_GONE;
			ccb->pcmd->result = DID_NO_CONNECT << 16;
			arcmsr_ccb_complete(ccb);
			break;
		}
	}
}
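
/*
 * Complete one entry taken from the done queue.  Aborted or foreign
 * CCBs are reported and dropped; normal completions are forwarded to
 * arcmsr_report_ccb_state().
 */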
static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error)
{
	int id, lun;

	if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
		if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
			struct scsi_cmnd *abortcmd = pCCB->pcmd;
			if (abortcmd) {
				id = abortcmd->device->id;
				lun = abortcmd->device->lun;
				abortcmd->result |= DID_ABORT << 16;
				arcmsr_ccb_complete(pCCB);
				printk(KERN_NOTICE "arcmsr%d: pCCB ='0x%p' isr got aborted command\n",
					acb->host->host_no, pCCB);
			}
			return;
		}
		printk(KERN_NOTICE "arcmsr%d: isr got an illegal ccb command "
				"done acb = '0x%p' "
				"ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x"
				" ccboutstandingcount = %d\n",
				acb->host->host_no,
				acb,
				pCCB,
				pCCB->acb,
				pCCB->startdone,
				atomic_read(&acb->ccboutstandingcount));
		return;
	}
	arcmsr_report_ccb_state(acb, pCCB, error);
}

static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb)
{
	int i = 0;
	uint32_t flag_ccb;
	struct ARCMSR_CDB *pARCMSR_CDB;
	bool error;
	struct CommandControlBlock *pCCB;

	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		uint32_t outbound_intstatus;
		outbound_intstatus = readl(&reg->outbound_intstatus) &
					acb->outbound_int_enable;
		/*clear and abort all outbound posted Q*/
		writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
		while (((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF)
				&& (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
			pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
			pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
			error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
			arcmsr_drain_donequeue(acb, pCCB, error);
		}
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		/*clear all outbound posted Q*/
		writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, &reg->iop2drv_doorbell); /* clear doorbell interrupt */
		for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) {
			if ((flag_ccb = readl(&reg->done_qbuffer[i])) != 0) {
				writel(0, &reg->done_qbuffer[i]);
				pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/
				pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
				error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
				arcmsr_drain_donequeue(acb, pCCB, error);
			}
			reg->post_qbuffer[i] = 0;
		}
		reg->doneq_index = 0;
		reg->postq_index = 0;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C *reg = acb->pmuC;
		struct ARCMSR_CDB *pARCMSR_CDB;
		uint32_t flag_ccb, ccb_cdb_phy;
		bool error;
		struct CommandControlBlock *pCCB;
		while ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) {
			/*need to do*/
			flag_ccb = readl(&reg->outbound_queueport_low);
			ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
			pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+ccb_cdb_phy);/*frame must be 32 bytes aligned*/
			pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
			error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
			arcmsr_drain_donequeue(acb, pCCB, error);
		}
		}
	}
}
static void arcmsr_remove(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *) host->hostdata;
	int poll_count = 0;

	arcmsr_free_sysfs_attr(acb);
	scsi_remove_host(host);
	flush_scheduled_work();
	del_timer_sync(&acb->eternal_timer);
	arcmsr_disable_outbound_ints(acb);
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
	acb->acb_flags |= ACB_F_SCSISTOPADAPTER;
	acb->acb_flags &= ~ACB_F_IOP_INITED;

	for (poll_count = 0; poll_count < ARCMSR_MAX_OUTSTANDING_CMD; poll_count++) {
		if (!atomic_read(&acb->ccboutstandingcount))
			break;
		arcmsr_interrupt(acb);/* FIXME: need spinlock */
		msleep(25);
	}

	if (atomic_read(&acb->ccboutstandingcount)) {
		int i;

		arcmsr_abort_allcmd(acb);
		arcmsr_done4abort_postqueue(acb);
		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
			struct CommandControlBlock *ccb = acb->pccb_pool[i];
			if (ccb->startdone == ARCMSR_CCB_START) {
				ccb->startdone = ARCMSR_CCB_ABORTED;
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb);
			}
		}
	}
	free_irq(pdev->irq, acb);
	arcmsr_free_ccb_pool(acb);
	arcmsr_free_hbb_mu(acb);
	arcmsr_unmap_pciregion(acb);
	pci_release_regions(pdev);
	scsi_host_put(host);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static void arcmsr_shutdown(struct pci_dev *pdev)
{
	struct Scsi_Host *host = pci_get_drvdata(pdev);
	struct AdapterControlBlock *acb =
		(struct AdapterControlBlock *)host->hostdata;

	del_timer_sync(&acb->eternal_timer);
	arcmsr_disable_outbound_ints(acb);
	flush_scheduled_work();
	arcmsr_stop_adapter_bgrb(acb);
	arcmsr_flush_adapter_cache(acb);
}

static int arcmsr_module_init(void)
{
	int error = 0;

	error = pci_register_driver(&arcmsr_pci_driver);
	return error;
}

static void arcmsr_module_exit(void)
{
	pci_unregister_driver(&arcmsr_pci_driver);
}
module_init(arcmsr_module_init);
module_exit(arcmsr_module_exit);

static void arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb,
						u32 intmask_org)
{
	u32 mask;

	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
			     ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE|
			     ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
		writel(mask, &reg->outbound_intmask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK |
			ARCMSR_IOP2DRV_DATA_READ_OK |
			ARCMSR_IOP2DRV_CDB_DONE |
			ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
		writel(mask, reg->iop2drv_doorbell_mask);
		acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C *reg = acb->pmuC;
		mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK | ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK|ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
		writel(intmask_org & mask, &reg->host_int_mask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
		}
	}
}
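
/*
 * Build the ARCMSR CDB for a SCSI command: copy the CDB bytes, map the
 * scatter/gather list and emit SG32 or SG64 entries depending on
 * whether the segment sits above the 4 GB boundary.
 */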
static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
	struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
{
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
	int8_t *psge = (int8_t *)&arcmsr_cdb->u;
	__le32 address_lo, address_hi;
	int arccdbsize = 0x30;
	__le32 length = 0;
	int i;
	int nseg;
	struct scatterlist *sg;

	ccb->pcmd = pcmd;
	memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
	arcmsr_cdb->TargetID = pcmd->device->id;
	arcmsr_cdb->LUN = pcmd->device->lun;
	arcmsr_cdb->Function = 1;
	arcmsr_cdb->Context = 0;
	memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);

	nseg = scsi_dma_map(pcmd);
	if (unlikely(nseg > acb->host->sg_tablesize || nseg < 0))
		return FAILED;
	scsi_for_each_sg(pcmd, sg, nseg, i) {
		/* Get the physical address of the current data pointer */
		length = cpu_to_le32(sg_dma_len(sg));
		address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
		address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
		if (address_hi == 0) {
			struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;

			pdma_sg->address = address_lo;
			pdma_sg->length = length;
			psge += sizeof(struct SG32ENTRY);
			arccdbsize += sizeof(struct SG32ENTRY);
		} else {
			struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;

			pdma_sg->addresshigh = address_hi;
			pdma_sg->address = address_lo;
			pdma_sg->length = length|cpu_to_le32(IS_SG64_ADDR);
			psge += sizeof(struct SG64ENTRY);
			arccdbsize += sizeof(struct SG64ENTRY);
		}
	}
	arcmsr_cdb->sgcount = (uint8_t)nseg;
	arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
	arcmsr_cdb->msgPages = arccdbsize/0x100 + (arccdbsize % 0x100 ? 1 : 0);
	if (arccdbsize > 256)
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
	if (pcmd->cmnd[0] == WRITE_6 || pcmd->cmnd[0] == WRITE_10 || pcmd->cmnd[0] == WRITE_12) {
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
	}
	ccb->arc_cdb_size = arccdbsize;
	return SUCCESS;
}
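
/*
 * Hand a built CCB to the controller: type A writes the (shifted) CDB
 * address to the inbound queue port, type B stores it into the next
 * post_qbuffer slot and rings the doorbell, and type C posts the
 * address together with the encoded CDB size.
 */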
static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
{
	uint32_t cdb_phyaddr_pattern = ccb->cdb_phyaddr_pattern;
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;

	atomic_inc(&acb->ccboutstandingcount);
	ccb->startdone = ARCMSR_CCB_START;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;

		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
			writel(cdb_phyaddr_pattern | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
				&reg->inbound_queueport);
		else
			writel(cdb_phyaddr_pattern, &reg->inbound_queueport);
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		uint32_t ending_index, index = reg->postq_index;

		ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
		writel(0, &reg->post_qbuffer[ending_index]);
		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
			writel(cdb_phyaddr_pattern | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
				&reg->post_qbuffer[index]);
		} else {
			writel(cdb_phyaddr_pattern, &reg->post_qbuffer[index]);
		}
		index++;
		index %= ARCMSR_MAX_HBB_POSTQUEUE;/*if last index number set it to 0 */
		reg->postq_index = index;
		writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)acb->pmuC;
		uint32_t ccb_post_stamp, arc_cdb_size;

		arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
		ccb_post_stamp = (cdb_phyaddr_pattern | ((arc_cdb_size - 1) >> 6) | 1);
		if (acb->cdb_phyaddr_hi32) {
			writel(acb->cdb_phyaddr_hi32, &phbcmu->inbound_queueport_high);
			writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
		} else {
			writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
		}
		}
	}
}

static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;

	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
	if (!arcmsr_hba_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'stop adapter background rebuild' timeout\n",
			acb->host->host_no);
	}
}

static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;

	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell);

	if (!arcmsr_hbb_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'stop adapter background rebuild' timeout\n",
			acb->host->host_no);
	}
}

static void arcmsr_stop_hbc_bgrb(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;

	pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
	if (!arcmsr_hbc_wait_msgint_ready(pACB)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'stop adapter background rebuild' timeout\n",
			pACB->host->host_no);
	}
}

static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		arcmsr_stop_hba_bgrb(acb);
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		arcmsr_stop_hbb_bgrb(acb);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		arcmsr_stop_hbc_bgrb(acb);
		}
	}
}

static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
{
	dma_free_coherent(&acb->pdev->dev, acb->uncache_size, acb->dma_coherent, acb->dma_coherent_handle);
}

void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
		}
	}
}

static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		/*
		** push inbound doorbell tell iop, driver data write ok
		** and wait reply on next hwinterrupt for next Qbuffer post
		*/
		writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK, &reg->inbound_doorbell);
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		/*
		** push inbound doorbell tell iop, driver data write ok
		** and wait reply on next hwinterrupt for next Qbuffer post
		*/
		writel(ARCMSR_DRV2IOP_DATA_WRITE_OK, reg->drv2iop_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		/*
		** push inbound doorbell tell iop, driver data write ok
		** and wait reply on next hwinterrupt for next Qbuffer post
		*/
		writel(ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK, &reg->inbound_doorbell);
		}
	}
}

struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
{
	struct QBUFFER __iomem *qbuffer = NULL;

	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer;
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)acb->pmuC;
		qbuffer = (struct QBUFFER __iomem *)&phbcmu->message_rbuffer;
		}
	}
	return qbuffer;
}

static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb)
{
	struct QBUFFER __iomem *pqbuffer = NULL;

	switch (acb->adapter_type) {

	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		pqbuffer = (struct QBUFFER __iomem *) &reg->message_wbuffer;
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
		pqbuffer = (struct QBUFFER __iomem *)&reg->message_wbuffer;
		}
	}
	return pqbuffer;
}
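
/*
 * The IOP has placed ioctl data in its Qbuffer: copy it byte by byte
 * into the driver's circular rqbuffer if there is room, otherwise
 * remember the overflow so it can be drained later.
 */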
static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
{
	struct QBUFFER __iomem *prbuffer;
	struct QBUFFER *pQbuffer;
	uint8_t __iomem *iop_data;
	int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;

	rqbuf_lastindex = acb->rqbuf_lastindex;
	rqbuf_firstindex = acb->rqbuf_firstindex;
	prbuffer = arcmsr_get_iop_rqbuffer(acb);
	iop_data = (uint8_t __iomem *)prbuffer->data;
	iop_len = prbuffer->data_len;
	my_empty_len = (rqbuf_firstindex - rqbuf_lastindex - 1) & (ARCMSR_MAX_QBUFFER - 1);

	if (my_empty_len >= iop_len) {
		while (iop_len > 0) {
			pQbuffer = (struct QBUFFER *)&acb->rqbuffer[rqbuf_lastindex];
			memcpy(pQbuffer, iop_data, 1);
			rqbuf_lastindex++;
			rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
			iop_data++;
			iop_len--;
		}
		acb->rqbuf_lastindex = rqbuf_lastindex;
		arcmsr_iop_message_read(acb);
	} else {
		acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
	}
}

static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
{
	acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
	if (acb->wqbuf_firstindex != acb->wqbuf_lastindex) {
		uint8_t *pQbuffer;
		struct QBUFFER __iomem *pwbuffer;
		uint8_t __iomem *iop_data;
		int32_t allxfer_len = 0;

		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
		pwbuffer = arcmsr_get_iop_wqbuffer(acb);
		iop_data = (uint8_t __iomem *)pwbuffer->data;

		while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex) &&
			(allxfer_len < 124)) {
			pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex];
			memcpy(iop_data, pQbuffer, 1);
			acb->wqbuf_firstindex++;
			acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
			iop_data++;
			allxfer_len++;
		}
		pwbuffer->data_len = allxfer_len;

		arcmsr_iop_message_wrote(acb);
	}

	if (acb->wqbuf_firstindex == acb->wqbuf_lastindex) {
		acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
	}
}

static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_A __iomem *reg = acb->pmuA;

	outbound_doorbell = readl(&reg->outbound_doorbell);
	writel(outbound_doorbell, &reg->outbound_doorbell);
	if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
		arcmsr_iop2drv_data_wrote_handle(acb);
	}

	if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
		arcmsr_iop2drv_data_read_handle(acb);
	}
}

static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *pACB)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;
	/*
	*******************************************************************
	**  Maybe here we need to check wrqbuffer_lock is locked or not
	**  DOORBELL: din! don!
	**  check if there is any mail that needs to be packed from firmware
	*******************************************************************
	*/
	outbound_doorbell = readl(&reg->outbound_doorbell);
	writel(outbound_doorbell, &reg->outbound_doorbell_clear);/*clear interrupt*/
	if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
		arcmsr_iop2drv_data_wrote_handle(pACB);
	}
	if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) {
		arcmsr_iop2drv_data_read_handle(pACB);
	}
	if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
		arcmsr_hbc_message_isr(pACB);    /* messenger of "driver to iop commands" */
	}
}

static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
{
	uint32_t flag_ccb;
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	struct ARCMSR_CDB *pARCMSR_CDB;
	struct CommandControlBlock *pCCB;
	bool error;

	while ((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) {
		pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/
		pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_drain_donequeue(acb, pCCB, error);
	}
}

static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
{
	uint32_t index;
	uint32_t flag_ccb;
	struct MessageUnit_B *reg = acb->pmuB;
	struct ARCMSR_CDB *pARCMSR_CDB;
	struct CommandControlBlock *pCCB;
	bool error;

	index = reg->doneq_index;
	while ((flag_ccb = readl(&reg->done_qbuffer[index])) != 0) {
		writel(0, &reg->done_qbuffer[index]);
		pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/
		pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_drain_donequeue(acb, pCCB, error);
		index++;
		index %= ARCMSR_MAX_HBB_POSTQUEUE;
		reg->doneq_index = index;
	}
}

static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_C *phbcmu;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct CommandControlBlock *ccb;
	uint32_t flag_ccb, ccb_cdb_phy, throttling = 0;
	bool error;

	phbcmu = (struct MessageUnit_C *)acb->pmuC;
	/* areca cdb command done */
	/* Use correct offset and size for syncing */

	while (readl(&phbcmu->host_int_status) &
			ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
		/* check if command done with no error*/
		flag_ccb = readl(&phbcmu->outbound_queueport_low);
		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);/*frame must be 32 bytes aligned*/
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
		ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
		/* check if command done with no error */
		arcmsr_drain_donequeue(acb, ccb, error);
		if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
			writel(ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING, &phbcmu->inbound_doorbell);
			break;
		}
		throttling++;
	}
}

/*
**********************************************************************************
** Handle a message interrupt
**
** The only message interrupt we expect is in response to a query for the current adapter config.
** We want this in order to compare the drivemap so that we can detect newly-attached drives.
**********************************************************************************
*/
static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A *reg = acb->pmuA;
	/*clear interrupt and message state*/
	writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus);
	schedule_work(&acb->arcmsr_do_message_isr_bh);
}

static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;

	/*clear interrupt and message state*/
	writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
	schedule_work(&acb->arcmsr_do_message_isr_bh);
}

/*
**********************************************************************************
** Handle a message interrupt
**
** The only message interrupt we expect is in response to a query for the
** current adapter config.
** We want this in order to compare the drivemap so that we can detect newly-attached drives.
**********************************************************************************
*/
static void arcmsr_hbc_message_isr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_C *reg = acb->pmuC;
	/*clear interrupt and message state*/
	writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);
	schedule_work(&acb->arcmsr_do_message_isr_bh);
}

static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb)
{
	uint32_t outbound_intstatus;
	struct MessageUnit_A __iomem *reg = acb->pmuA;

	outbound_intstatus = readl(&reg->outbound_intstatus) &
		acb->outbound_int_enable;
	if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) {
		return 1;
	}
	writel(outbound_intstatus, &reg->outbound_intstatus);
	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
		arcmsr_hba_doorbell_isr(acb);
	}
	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
		arcmsr_hba_postqueue_isr(acb);
	}
	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
		/* messenger of "driver to iop commands" */
		arcmsr_hba_message_isr(acb);
	}
	return 0;
}

static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_B *reg = acb->pmuB;

	outbound_doorbell = readl(reg->iop2drv_doorbell) &
				acb->outbound_int_enable;
	if (!outbound_doorbell)
		return 1;

	writel(~outbound_doorbell, reg->iop2drv_doorbell);
	/*in case the last action of doorbell interrupt clearance is cached,
	this action can push HW to write down the clear bit*/
	readl(reg->iop2drv_doorbell);
	writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
	if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
		arcmsr_iop2drv_data_wrote_handle(acb);
	}
	if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) {
		arcmsr_iop2drv_data_read_handle(acb);
	}
	if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
		arcmsr_hbb_postqueue_isr(acb);
	}
	if (outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
		/* messenger of "driver to iop commands" */
		arcmsr_hbb_message_isr(acb);
	}
	return 0;
}

static int arcmsr_handle_hbc_isr(struct AdapterControlBlock *pACB)
{
	uint32_t host_interrupt_status;
	struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC;
	/*
	*********************************************
	**   check outbound intstatus
	*********************************************
	*/
	host_interrupt_status = readl(&phbcmu->host_int_status);
	if (!host_interrupt_status) {
		/*it must be share irq*/
		return 1;
	}
	/* MU ioctl transfer doorbell interrupts*/
	if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) {
		arcmsr_hbc_doorbell_isr(pACB);   /* messenger of "ioctl message read write" */
	}
	/* MU post queue interrupts*/
	if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
		arcmsr_hbc_postqueue_isr(pACB);  /* messenger of "scsi commands" */
	}
	return 0;
}
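
/*
 * Top-level interrupt handler: dispatch to the per-family ISR and
 * report IRQ_NONE when the hardware shows no pending work (shared
 * interrupt line).
 */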
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		if (arcmsr_handle_hba_isr(acb)) {
			return IRQ_NONE;
		}
		}
		break;

	case ACB_ADAPTER_TYPE_B: {
		if (arcmsr_handle_hbb_isr(acb)) {
			return IRQ_NONE;
		}
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		if (arcmsr_handle_hbc_isr(acb)) {
			return IRQ_NONE;
		}
		}
	}
	return IRQ_HANDLED;
}

static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
{
	if (acb) {
		/* stop adapter background rebuild */
		if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
			uint32_t intmask_org;
			acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
			intmask_org = arcmsr_disable_outbound_ints(acb);
			arcmsr_stop_adapter_bgrb(acb);
			arcmsr_flush_adapter_cache(acb);
			arcmsr_enable_outbound_ints(acb, intmask_org);
		}
	}
}

void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb)
{
	int32_t wqbuf_firstindex, wqbuf_lastindex;
	uint8_t *pQbuffer;
	struct QBUFFER __iomem *pwbuffer;
	uint8_t __iomem *iop_data;
	int32_t allxfer_len = 0;

	pwbuffer = arcmsr_get_iop_wqbuffer(acb);
	iop_data = (uint8_t __iomem *)pwbuffer->data;
	if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
		wqbuf_firstindex = acb->wqbuf_firstindex;
		wqbuf_lastindex = acb->wqbuf_lastindex;
		while ((wqbuf_firstindex != wqbuf_lastindex) && (allxfer_len < 124)) {
			pQbuffer = &acb->wqbuffer[wqbuf_firstindex];
			memcpy(iop_data, pQbuffer, 1);
			wqbuf_firstindex++;
			wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
			iop_data++;
			allxfer_len++;
		}
		acb->wqbuf_firstindex = wqbuf_firstindex;
		pwbuffer->data_len = allxfer_len;
		arcmsr_iop_message_wrote(acb);
	}
}
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
		struct scsi_cmnd *cmd)
{
	struct CMD_MESSAGE_FIELD *pcmdmessagefld;
	int retvalue = 0, transfer_len = 0;
	char *buffer;
	struct scatterlist *sg;
	uint32_t controlcode = (uint32_t) cmd->cmnd[5] << 24 |
		(uint32_t) cmd->cmnd[6] << 16 |
		(uint32_t) cmd->cmnd[7] << 8 |
		(uint32_t) cmd->cmnd[8];
		/* 4 bytes: Areca io control code */
	sg = scsi_sglist(cmd);
	buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
	if (scsi_sg_count(cmd) > 1) {
		retvalue = ARCMSR_MESSAGE_FAIL;
		goto message_out;
	}
	transfer_len += sg->length;

	if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
		retvalue = ARCMSR_MESSAGE_FAIL;
		goto message_out;
	}
	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
	switch (controlcode) {

	case ARCMSR_MESSAGE_READ_RQBUFFER: {
		unsigned char *ver_addr;
		uint8_t *pQbuffer, *ptmpQbuffer;
		int32_t allxfer_len = 0;

		ver_addr = kmalloc(1032, GFP_ATOMIC);
		if (!ver_addr) {
			retvalue = ARCMSR_MESSAGE_FAIL;
			goto message_out;
		}

		ptmpQbuffer = ver_addr;
		while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
			&& (allxfer_len < 1031)) {
			pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
			memcpy(ptmpQbuffer, pQbuffer, 1);
			acb->rqbuf_firstindex++;
			acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
			ptmpQbuffer++;
			allxfer_len++;
		}
		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {

			struct QBUFFER __iomem *prbuffer;
			uint8_t __iomem *iop_data;
			int32_t iop_len;

			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			prbuffer = arcmsr_get_iop_rqbuffer(acb);
			iop_data = prbuffer->data;
			iop_len = readl(&prbuffer->data_len);
			while (iop_len > 0) {
				acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
				acb->rqbuf_lastindex++;
				acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
				iop_data++;
				iop_len--;
			}
			arcmsr_iop_message_read(acb);
		}
		memcpy(pcmdmessagefld->messagedatabuffer, ver_addr, allxfer_len);
		pcmdmessagefld->cmdmessage.Length = allxfer_len;
		if (acb->fw_flag == FW_DEADLOCK) {
			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		} else {
			pcmdmessagefld->cmdmessage.ReturnCode = ARCMSR_MESSAGE_RETURNCODE_OK;
		}
		kfree(ver_addr);
		}
		break;

	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
		unsigned char *ver_addr;
		int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
		uint8_t *pQbuffer, *ptmpuserbuffer;

		ver_addr = kmalloc(1032, GFP_ATOMIC);
		if (!ver_addr) {
			retvalue = ARCMSR_MESSAGE_FAIL;
			goto message_out;
		}
		if (acb->fw_flag == FW_DEADLOCK) {
			pcmdmessagefld->cmdmessage.ReturnCode =
			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		} else {
			pcmdmessagefld->cmdmessage.ReturnCode =
			ARCMSR_MESSAGE_RETURNCODE_OK;
		}
		ptmpuserbuffer = ver_addr;
		user_len = pcmdmessagefld->cmdmessage.Length;
		memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
		wqbuf_lastindex = acb->wqbuf_lastindex;
		wqbuf_firstindex = acb->wqbuf_firstindex;
		if (wqbuf_lastindex != wqbuf_firstindex) {
			struct SENSE_DATA *sensebuffer =
				(struct SENSE_DATA *)cmd->sense_buffer;
			arcmsr_post_ioctldata2iop(acb);
			/* has error report sensedata */
			sensebuffer->ErrorCode = 0x70;
			sensebuffer->SenseKey = ILLEGAL_REQUEST;
			sensebuffer->AdditionalSenseLength = 0x0A;
			sensebuffer->AdditionalSenseCode = 0x20;
			sensebuffer->Valid = 1;
			retvalue = ARCMSR_MESSAGE_FAIL;
		} else {
			my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
				&(ARCMSR_MAX_QBUFFER - 1);
			if (my_empty_len >= user_len) {
				while (user_len > 0) {
					pQbuffer =
					&acb->wqbuffer[acb->wqbuf_lastindex];
					memcpy(pQbuffer, ptmpuserbuffer, 1);
					acb->wqbuf_lastindex++;
					acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
					ptmpuserbuffer++;
					user_len--;
				}
				if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
					acb->acb_flags &=
						~ACB_F_MESSAGE_WQBUFFER_CLEARED;
					arcmsr_post_ioctldata2iop(acb);
				}
			} else {
				/* has error report sensedata */
				struct SENSE_DATA *sensebuffer =
					(struct SENSE_DATA *)cmd->sense_buffer;
				sensebuffer->ErrorCode = 0x70;
				sensebuffer->SenseKey = ILLEGAL_REQUEST;
				sensebuffer->AdditionalSenseLength = 0x0A;
				sensebuffer->AdditionalSenseCode = 0x20;
				sensebuffer->Valid = 1;
				retvalue = ARCMSR_MESSAGE_FAIL;
			}
		}
		kfree(ver_addr);
		}
		break;

	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
		uint8_t *pQbuffer = acb->rqbuffer;
		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			arcmsr_iop_message_read(acb);
		}
		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
		acb->rqbuf_firstindex = 0;
		acb->rqbuf_lastindex = 0;
		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
		if (acb->fw_flag == FW_DEADLOCK) {
			pcmdmessagefld->cmdmessage.ReturnCode =
			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		} else {
			pcmdmessagefld->cmdmessage.ReturnCode =
			ARCMSR_MESSAGE_RETURNCODE_OK;
		}
		}
		break;

	case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
		uint8_t *pQbuffer = acb->wqbuffer;
		if (acb->fw_flag == FW_DEADLOCK) {
			pcmdmessagefld->cmdmessage.ReturnCode =
			ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		} else {
			pcmdmessagefld->cmdmessage.ReturnCode =
			ARCMSR_MESSAGE_RETURNCODE_OK;
		}

		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			arcmsr_iop_message_read(acb);
		}
		acb->acb_flags |=
			(ACB_F_MESSAGE_WQBUFFER_CLEARED |
			ACB_F_MESSAGE_WQBUFFER_READED);
		acb->wqbuf_firstindex = 0;
		acb->wqbuf_lastindex = 0;
		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
		}
		break;

	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
		uint8_t *pQbuffer;

		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			arcmsr_iop_message_read(acb);
		}
		acb->acb_flags |=
			(ACB_F_MESSAGE_WQBUFFER_CLEARED
			| ACB_F_MESSAGE_RQBUFFER_CLEARED
1948 | ACB_F_MESSAGE_RQBUFFER_CLEARED
1949 | ACB_F_MESSAGE_WQBUFFER_READED
);
1950 acb
->rqbuf_firstindex
= 0;
1951 acb
->rqbuf_lastindex
= 0;
1952 acb
->wqbuf_firstindex
= 0;
1953 acb
->wqbuf_lastindex
= 0;
1954 pQbuffer
= acb
->rqbuffer
;
1955 memset(pQbuffer
, 0, sizeof(struct QBUFFER
));
1956 pQbuffer
= acb
->wqbuffer
;
1957 memset(pQbuffer
, 0, sizeof(struct QBUFFER
));
1958 if(acb
->fw_flag
== FW_DEADLOCK
) {
1959 pcmdmessagefld
->cmdmessage
.ReturnCode
=
1960 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON
;
1962 pcmdmessagefld
->cmdmessage
.ReturnCode
=
1963 ARCMSR_MESSAGE_RETURNCODE_OK
;
1968 case ARCMSR_MESSAGE_RETURN_CODE_3F
: {
1969 if(acb
->fw_flag
== FW_DEADLOCK
) {
1970 pcmdmessagefld
->cmdmessage
.ReturnCode
=
1971 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON
;
1973 pcmdmessagefld
->cmdmessage
.ReturnCode
=
1974 ARCMSR_MESSAGE_RETURNCODE_3F
;
1978 case ARCMSR_MESSAGE_SAY_HELLO
: {
1979 int8_t *hello_string
= "Hello! I am ARCMSR";
1980 if(acb
->fw_flag
== FW_DEADLOCK
) {
1981 pcmdmessagefld
->cmdmessage
.ReturnCode
=
1982 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON
;
1984 pcmdmessagefld
->cmdmessage
.ReturnCode
=
1985 ARCMSR_MESSAGE_RETURNCODE_OK
;
1987 memcpy(pcmdmessagefld
->messagedatabuffer
, hello_string
1988 , (int16_t)strlen(hello_string
));
1992 case ARCMSR_MESSAGE_SAY_GOODBYE
:
1993 if(acb
->fw_flag
== FW_DEADLOCK
) {
1994 pcmdmessagefld
->cmdmessage
.ReturnCode
=
1995 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON
;
1997 arcmsr_iop_parking(acb
);
2000 case ARCMSR_MESSAGE_FLUSH_ADAPTER_CACHE
:
2001 if(acb
->fw_flag
== FW_DEADLOCK
) {
2002 pcmdmessagefld
->cmdmessage
.ReturnCode
=
2003 ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON
;
2005 arcmsr_flush_adapter_cache(acb
);
2009 retvalue
= ARCMSR_MESSAGE_FAIL
;
2012 sg
= scsi_sglist(cmd
);
2013 kunmap_atomic(buffer
- sg
->offset
, KM_IRQ0
);
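/*
** Editor's note: illustrative sketch only, kept out of the build. It shows
** the same big-endian packing of CDB bytes 5..8 into the Areca io control
** code that arcmsr_iop_message_xfer() performs above; the helper name is
** hypothetical.
*/
#if 0
static uint32_t arcmsr_cdb_controlcode_example(const uint8_t *cdb)
{
	return ((uint32_t)cdb[5] << 24) | ((uint32_t)cdb[6] << 16) |
		((uint32_t)cdb[7] << 8) | (uint32_t)cdb[8];
}
#endif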
2017 static struct CommandControlBlock *arcmsr_get_freeccb(struct AdapterControlBlock *acb)
2019 	struct list_head *head = &acb->ccb_free_list;
2020 	struct CommandControlBlock *ccb = NULL;
2021 	unsigned long flags;
2022 	spin_lock_irqsave(&acb->ccblist_lock, flags);
2023 	if (!list_empty(head)) {
2024 		ccb = list_entry(head->next, struct CommandControlBlock, list);
2025 		list_del_init(&ccb->list);
2027 		spin_unlock_irqrestore(&acb->ccblist_lock, flags);
2030 	spin_unlock_irqrestore(&acb->ccblist_lock, flags);
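/*
** Editor's note (added comment): arcmsr_get_freeccb() above pops the next
** free CommandControlBlock from acb->ccb_free_list under ccblist_lock and
** detaches it with list_del_init(); the lock is dropped on both the hit
** path and the empty-list path.
*/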
2034 static void arcmsr_handle_virtual_command(struct AdapterControlBlock *acb,
2035 	struct scsi_cmnd *cmd)
2037 	switch (cmd->cmnd[0]) {
2039 		unsigned char inqdata[36];
2041 		struct scatterlist *sg;
2043 		if (cmd->device->lun) {
2044 			cmd->result = (DID_TIME_OUT << 16);
2045 			cmd->scsi_done(cmd);
2048 		inqdata[0] = TYPE_PROCESSOR;
2049 		/* Periph Qualifier & Periph Dev Type */
2051 		/* rem media bit & Dev Type Modifier */
2053 		/* ISO, ECMA, & ANSI versions */
2055 		/* length of additional data */
2056 		strncpy(&inqdata[8], "Areca ", 8);
2057 		/* Vendor Identification */
2058 		strncpy(&inqdata[16], "RAID controller ", 16);
2059 		/* Product Identification */
2060 		strncpy(&inqdata[32], "R001", 4); /* Product Revision */
2062 		sg = scsi_sglist(cmd);
2063 		buffer = kmap_atomic(sg_page(sg), KM_IRQ0) + sg->offset;
2065 		memcpy(buffer, inqdata, sizeof(inqdata));
2066 		sg = scsi_sglist(cmd);
2067 		kunmap_atomic(buffer - sg->offset, KM_IRQ0);
2069 		cmd->scsi_done(cmd);
2074 		if (arcmsr_iop_message_xfer(acb, cmd))
2075 			cmd->result = (DID_ERROR << 16);
2076 		cmd->scsi_done(cmd);
2080 		cmd->scsi_done(cmd);
2084 static int arcmsr_queue_command(struct scsi_cmnd *cmd,
2085 	void (* done)(struct scsi_cmnd *))
2087 	struct Scsi_Host *host = cmd->device->host;
2088 	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
2089 	struct CommandControlBlock *ccb;
2090 	int target = cmd->device->id;
2091 	int lun = cmd->device->lun;
2092 	uint8_t scsicmd = cmd->cmnd[0];
2093 	cmd->scsi_done = done;
2094 	cmd->host_scribble = NULL;
2096 	if ((scsicmd == SYNCHRONIZE_CACHE) || (scsicmd == SEND_DIAGNOSTIC)) {
2097 		if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
2098 			cmd->result = (DID_NO_CONNECT << 16);
2100 		cmd->scsi_done(cmd);
2104 		/* virtual device for iop message transfer */
2105 		arcmsr_handle_virtual_command(acb, cmd);
2108 	if (atomic_read(&acb->ccboutstandingcount) >=
2109 		ARCMSR_MAX_OUTSTANDING_CMD)
2110 		return SCSI_MLQUEUE_HOST_BUSY;
2111 	if ((scsicmd == SCSI_CMD_ARECA_SPECIFIC)) {
2112 		printk(KERN_NOTICE "Receiving SCSI_CMD_ARECA_SPECIFIC command..\n");
2115 	ccb = arcmsr_get_freeccb(acb);
2117 		return SCSI_MLQUEUE_HOST_BUSY;
2118 	if (arcmsr_build_ccb( acb, ccb, cmd) == FAILED) {
2119 		cmd->result = (DID_ERROR << 16) | (RESERVATION_CONFLICT << 1);
2120 		cmd->scsi_done(cmd);
2123 	arcmsr_post_ccb(acb, ccb);
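/*
** Editor's note (added comment): arcmsr_queue_command() above fails
** SYNCHRONIZE_CACHE/SEND_DIAGNOSTIC with DID_NO_CONNECT when the target is
** marked ARECA_RAID_GONE, diverts the virtual message-transfer device to
** arcmsr_handle_virtual_command(), returns SCSI_MLQUEUE_HOST_BUSY when the
** outstanding-CCB limit is reached or no free CCB is available, and
** otherwise builds and posts a CCB to the IOP.
*/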
2127 static bool arcmsr_get_hba_config(struct AdapterControlBlock *acb)
2129 	struct MessageUnit_A __iomem *reg = acb->pmuA;
2130 	char *acb_firm_model = acb->firm_model;
2131 	char *acb_firm_version = acb->firm_version;
2132 	char *acb_device_map = acb->device_map;
2133 	char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]);
2134 	char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]);
2135 	char __iomem *iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]);
2137 	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
2138 	if (!arcmsr_hba_wait_msgint_ready(acb)) {
2139 		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
2140 			miscellaneous data' timeout \n", acb->host->host_no);
2145 	*acb_firm_model = readb(iop_firm_model);
2153 	*acb_firm_version = readb(iop_firm_version);
2161 	*acb_device_map = readb(iop_device_map);
2166 	printk(KERN_NOTICE "Areca RAID Controller%d: F/W %s & Model %s\n",
2170 	acb->signature = readl(&reg->message_rwbuffer[0]);
2171 	acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
2172 	acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
2173 	acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
2174 	acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
2175 	acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
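/*
** Editor's note (added comment): per the reads above, the type A
** message_rwbuffer words used by "get config" are: [0] signature,
** [1] firm_request_len, [2] firm_numbers_queue, [3] firm_sdram_size,
** [4] firm_hd_channels, [25] firm_cfg_version; words 15/17/21 carry the
** firmware model, version and device map strings.
*/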
2178 static bool arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
2180 	struct MessageUnit_B *reg = acb->pmuB;
2181 	struct pci_dev *pdev = acb->pdev;
2183 	dma_addr_t dma_coherent_handle;
2184 	char *acb_firm_model = acb->firm_model;
2185 	char *acb_firm_version = acb->firm_version;
2186 	char *acb_device_map = acb->device_map;
2187 	char __iomem *iop_firm_model;
2188 	/*firm_model,15,60-67*/
2189 	char __iomem *iop_firm_version;
2190 	/*firm_version,17,68-83*/
2191 	char __iomem *iop_device_map;
2192 	/*firm_version,21,84-99*/
2194 	dma_coherent = dma_alloc_coherent(&pdev->dev, sizeof(struct MessageUnit_B), &dma_coherent_handle, GFP_KERNEL);
2196 		printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error for hbb mu\n", acb->host->host_no);
2199 	acb->dma_coherent_handle_hbb_mu = dma_coherent_handle;
2200 	reg = (struct MessageUnit_B *)dma_coherent;
2202 	reg->drv2iop_doorbell = (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_DRV2IOP_DOORBELL);
2203 	reg->drv2iop_doorbell_mask = (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_DRV2IOP_DOORBELL_MASK);
2204 	reg->iop2drv_doorbell = (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_IOP2DRV_DOORBELL);
2205 	reg->iop2drv_doorbell_mask = (uint32_t __iomem *)((unsigned long)acb->mem_base0 + ARCMSR_IOP2DRV_DOORBELL_MASK);
2206 	reg->message_wbuffer = (uint32_t __iomem *)((unsigned long)acb->mem_base1 + ARCMSR_MESSAGE_WBUFFER);
2207 	reg->message_rbuffer = (uint32_t __iomem *)((unsigned long)acb->mem_base1 + ARCMSR_MESSAGE_RBUFFER);
2208 	reg->message_rwbuffer = (uint32_t __iomem *)((unsigned long)acb->mem_base1 + ARCMSR_MESSAGE_RWBUFFER);
2209 	iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]); /*firm_model,15,60-67*/
2210 	iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]); /*firm_version,17,68-83*/
2211 	iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]); /*firm_version,21,84-99*/
2213 	writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
2214 	if (!arcmsr_hbb_wait_msgint_ready(acb)) {
2215 		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
2216 			miscellaneous data' timeout \n", acb->host->host_no);
2221 	*acb_firm_model = readb(iop_firm_model);
2228 	*acb_firm_version = readb(iop_firm_version);
2236 	*acb_device_map = readb(iop_device_map);
2242 	printk(KERN_NOTICE "Areca RAID Controller%d: F/W %s & Model %s\n",
2247 	acb->signature = readl(&reg->message_rwbuffer[1]);
2248 	/*firm_signature,1,00-03*/
2249 	acb->firm_request_len = readl(&reg->message_rwbuffer[2]);
2250 	/*firm_request_len,1,04-07*/
2251 	acb->firm_numbers_queue = readl(&reg->message_rwbuffer[3]);
2252 	/*firm_numbers_queue,2,08-11*/
2253 	acb->firm_sdram_size = readl(&reg->message_rwbuffer[4]);
2254 	/*firm_sdram_size,3,12-15*/
2255 	acb->firm_hd_channels = readl(&reg->message_rwbuffer[5]);
2256 	/*firm_ide_channels,4,16-19*/
2257 	acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
2258 	/*firm_ide_channels,4,16-19*/
2262 static bool arcmsr_get_hbc_config(struct AdapterControlBlock *pACB)
2264 	uint32_t intmask_org, Index, firmware_state = 0;
2265 	struct MessageUnit_C *reg = pACB->pmuC;
2266 	char *acb_firm_model = pACB->firm_model;
2267 	char *acb_firm_version = pACB->firm_version;
2268 	char *iop_firm_model = (char *)(&reg->msgcode_rwbuffer[15]); /*firm_model,15,60-67*/
2269 	char *iop_firm_version = (char *)(&reg->msgcode_rwbuffer[17]); /*firm_version,17,68-83*/
2271 	/* disable all outbound interrupt */
2272 	intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
2273 	writel(intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
2274 	/* wait firmware ready */
2276 		firmware_state = readl(&reg->outbound_msgaddr1);
2277 	} while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
2278 	/* post "get config" instruction */
2279 	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
2280 	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
2281 	/* wait message ready */
2282 	for (Index = 0; Index < 2000; Index++) {
2283 		if (readl(&reg->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
2284 			writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &reg->outbound_doorbell_clear);/*clear interrupt*/
2289 	if (Index >= 2000) {
2290 		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
2291 			miscellaneous data' timeout \n", pACB->host->host_no);
2296 	*acb_firm_model = readb(iop_firm_model);
2303 	*acb_firm_version = readb(iop_firm_version);
2308 	printk(KERN_NOTICE "Areca RAID Controller%d: F/W %s & Model %s\n",
2309 		pACB->host->host_no,
2312 	pACB->firm_request_len = readl(&reg->msgcode_rwbuffer[1]); /*firm_request_len,1,04-07*/
2313 	pACB->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[2]); /*firm_numbers_queue,2,08-11*/
2314 	pACB->firm_sdram_size = readl(&reg->msgcode_rwbuffer[3]); /*firm_sdram_size,3,12-15*/
2315 	pACB->firm_hd_channels = readl(&reg->msgcode_rwbuffer[4]); /*firm_ide_channels,4,16-19*/
2316 	pACB->firm_cfg_version = readl(&reg->msgcode_rwbuffer[25]); /*firm_cfg_version,25,100-103*/
2317 	/*all interrupt service will be enabled at arcmsr_iop_init*/
2320 static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
2322 	if (acb->adapter_type == ACB_ADAPTER_TYPE_A)
2323 		return arcmsr_get_hba_config(acb);
2324 	else if (acb->adapter_type == ACB_ADAPTER_TYPE_B)
2325 		return arcmsr_get_hbb_config(acb);
2327 		return arcmsr_get_hbc_config(acb);
2330 static int arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
2331 	struct CommandControlBlock *poll_ccb)
2333 	struct MessageUnit_A __iomem *reg = acb->pmuA;
2334 	struct CommandControlBlock *ccb;
2335 	struct ARCMSR_CDB *arcmsr_cdb;
2336 	uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
2339 polling_hba_ccb_retry:
2341 	outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable;
2342 	writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
2344 		if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF) {
2350 			if (poll_count > 100){
2354 			goto polling_hba_ccb_retry;
2357 		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
2358 		ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
2359 		poll_ccb_done = (ccb == poll_ccb) ? 1:0;
2360 		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
2361 			if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
2362 				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
2363 					" poll command abort successfully \n"
2364 					, acb->host->host_no
2365 					, ccb->pcmd->device->id
2366 					, ccb->pcmd->device->lun
2368 				ccb->pcmd->result = DID_ABORT << 16;
2369 				arcmsr_ccb_complete(ccb);
2372 			printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
2373 				" command done ccb = '0x%p'"
2374 				"ccboutstandingcount = %d \n"
2375 				, acb->host->host_no
2377 				, atomic_read(&acb->ccboutstandingcount));
2380 		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
2381 		arcmsr_report_ccb_state(acb, ccb, error);
2386 static int arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
2387 	struct CommandControlBlock *poll_ccb)
2389 	struct MessageUnit_B *reg = acb->pmuB;
2390 	struct ARCMSR_CDB *arcmsr_cdb;
2391 	struct CommandControlBlock *ccb;
2392 	uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
2395 polling_hbb_ccb_retry:
2397 	/* clear doorbell interrupt */
2398 	writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
2400 		index = reg->doneq_index;
2401 		if ((flag_ccb = readl(&reg->done_qbuffer[index])) == 0) {
2407 			if (poll_count > 100){
2411 			goto polling_hbb_ccb_retry;
2414 		writel(0, &reg->done_qbuffer[index]);
2416 		/*if last index number set it to 0 */
2417 		index %= ARCMSR_MAX_HBB_POSTQUEUE;
2418 		reg->doneq_index = index;
2419 		/* check if command done with no error*/
2420 		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
2421 		ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
2422 		poll_ccb_done = (ccb == poll_ccb) ? 1:0;
2423 		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
2424 			if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
2425 				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
2426 					" poll command abort successfully \n"
2428 					,ccb->pcmd->device->id
2429 					,ccb->pcmd->device->lun
2431 				ccb->pcmd->result = DID_ABORT << 16;
2432 				arcmsr_ccb_complete(ccb);
2435 			printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
2436 				" command done ccb = '0x%p'"
2437 				"ccboutstandingcount = %d \n"
2438 				, acb->host->host_no
2440 				, atomic_read(&acb->ccboutstandingcount));
2443 		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
2444 		arcmsr_report_ccb_state(acb, ccb, error);
2449 static int arcmsr_polling_hbc_ccbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_ccb)
2451 	struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
2452 	uint32_t flag_ccb, ccb_cdb_phy;
2453 	struct ARCMSR_CDB *arcmsr_cdb;
2455 	struct CommandControlBlock *pCCB;
2456 	uint32_t poll_ccb_done = 0, poll_count = 0;
2458 polling_hbc_ccb_retry:
2461 		if ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) == 0) {
2462 			if (poll_ccb_done) {
2467 			if (poll_count > 100) {
2471 			goto polling_hbc_ccb_retry;
2474 		flag_ccb = readl(&reg->outbound_queueport_low);
2475 		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);
2476 		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);/*frame must be 32 bytes aligned*/
2477 		pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
2478 		poll_ccb_done = (pCCB == poll_ccb) ? 1 : 0;
2479 		/* check if command done with no error*/
2480 		if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) {
2481 			if (pCCB->startdone == ARCMSR_CCB_ABORTED) {
2482 				printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
2483 					" poll command abort successfully \n"
2484 					, acb->host->host_no
2485 					, pCCB->pcmd->device->id
2486 					, pCCB->pcmd->device->lun
2488 				pCCB->pcmd->result = DID_ABORT << 16;
2489 				arcmsr_ccb_complete(pCCB);
2492 			printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb"
2493 				" command done ccb = '0x%p'"
2494 				"ccboutstandingcount = %d \n"
2495 				, acb->host->host_no
2497 				, atomic_read(&acb->ccboutstandingcount));
2500 		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
2501 		arcmsr_report_ccb_state(acb, pCCB, error);
2505 static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb,
2506 	struct CommandControlBlock *poll_ccb)
2509 	switch (acb->adapter_type) {
2511 	case ACB_ADAPTER_TYPE_A: {
2512 		rtn = arcmsr_polling_hba_ccbdone(acb, poll_ccb);
2516 	case ACB_ADAPTER_TYPE_B: {
2517 		rtn = arcmsr_polling_hbb_ccbdone(acb, poll_ccb);
2520 	case ACB_ADAPTER_TYPE_C: {
2521 		rtn = arcmsr_polling_hbc_ccbdone(acb, poll_ccb);
2527 static int arcmsr_iop_confirm(struct AdapterControlBlock *acb)
2529 	uint32_t cdb_phyaddr, cdb_phyaddr_hi32;
2530 	dma_addr_t dma_coherent_handle;
2532 	********************************************************************
2533 	** here we need to tell iop 331 our freeccb.HighPart
2534 	** if freeccb.HighPart is not zero
2535 	********************************************************************
2537 	dma_coherent_handle = acb->dma_coherent_handle;
2538 	cdb_phyaddr = (uint32_t)(dma_coherent_handle);
2539 	cdb_phyaddr_hi32 = (uint32_t)((cdb_phyaddr >> 16) >> 16);
2540 	acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32;
2542 	***********************************************************************
2543 	** if adapter type B, set window of "post command Q"
2544 	***********************************************************************
2546 	switch (acb->adapter_type) {
2548 	case ACB_ADAPTER_TYPE_A: {
2549 		if (cdb_phyaddr_hi32 != 0) {
2550 			struct MessageUnit_A __iomem *reg = acb->pmuA;
2551 			uint32_t intmask_org;
2552 			intmask_org = arcmsr_disable_outbound_ints(acb);
2553 			writel(ARCMSR_SIGNATURE_SET_CONFIG, \
2554 				&reg->message_rwbuffer[0]);
2555 			writel(cdb_phyaddr_hi32, &reg->message_rwbuffer[1]);
2556 			writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, \
2557 				&reg->inbound_msgaddr0);
2558 			if (!arcmsr_hba_wait_msgint_ready(acb)) {
2559 				printk(KERN_NOTICE "arcmsr%d: ""set ccb high \
2560 					part physical address timeout\n",
2561 					acb->host->host_no);
2564 			arcmsr_enable_outbound_ints(acb, intmask_org);
2569 	case ACB_ADAPTER_TYPE_B: {
2570 		unsigned long post_queue_phyaddr;
2571 		uint32_t __iomem *rwbuffer;
2573 		struct MessageUnit_B *reg = acb->pmuB;
2574 		uint32_t intmask_org;
2575 		intmask_org = arcmsr_disable_outbound_ints(acb);
2576 		reg->postq_index = 0;
2577 		reg->doneq_index = 0;
2578 		writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell);
2579 		if (!arcmsr_hbb_wait_msgint_ready(acb)) {
2580 			printk(KERN_NOTICE "arcmsr%d: can not set driver mode\n", \
2581 				acb->host->host_no);
2584 		post_queue_phyaddr = acb->dma_coherent_handle_hbb_mu;
2585 		rwbuffer = reg->message_rwbuffer;
2586 		/* driver "set config" signature */
2587 		writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++);
2588 		/* normal should be zero */
2589 		writel(cdb_phyaddr_hi32, rwbuffer++);
2590 		/* postQ size (256 + 8)*4 */
2591 		writel(post_queue_phyaddr, rwbuffer++);
2592 		/* doneQ size (256 + 8)*4 */
2593 		writel(post_queue_phyaddr + 1056, rwbuffer++);
2594 		/* ccb maxQ size must be --> [(256 + 8)*4]*/
2595 		writel(1056, rwbuffer);
2597 		writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell);
2598 		if (!arcmsr_hbb_wait_msgint_ready(acb)) {
2599 			printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
2600 				timeout \n",acb->host->host_no);
2603 		arcmsr_hbb_enable_driver_mode(acb);
2604 		arcmsr_enable_outbound_ints(acb, intmask_org);
2607 	case ACB_ADAPTER_TYPE_C: {
2608 		if (cdb_phyaddr_hi32 != 0) {
2609 			struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
2611 			if (cdb_phyaddr_hi32 != 0) {
2612 				unsigned char Retries = 0x00;
2614 					printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x \n", acb->adapter_index, cdb_phyaddr_hi32);
2615 				} while (Retries++ < 100);
2617 			writel(ARCMSR_SIGNATURE_SET_CONFIG, &reg->msgcode_rwbuffer[0]);
2618 			writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[1]);
2619 			writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0);
2620 			writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
2621 			if (!arcmsr_hbc_wait_msgint_ready(acb)) {
2622 				printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \
2623 					timeout \n", acb->host->host_no);
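/*
** Editor's note: illustrative sketch only, kept out of the build. The two
** 16-bit shifts used in arcmsr_iop_confirm() above take the upper 32 bits
** of the DMA handle in a way that stays well-defined when dma_addr_t is
** only 32 bits wide; the helper name is hypothetical.
*/
#if 0
static void arcmsr_split_dma_handle_example(dma_addr_t handle,
	uint32_t *lo32, uint32_t *hi32)
{
	*lo32 = (uint32_t)handle;
	*hi32 = (uint32_t)((handle >> 16) >> 16);
}
#endif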
2632 static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb)
2634 	uint32_t firmware_state = 0;
2635 	switch (acb->adapter_type) {
2637 	case ACB_ADAPTER_TYPE_A: {
2638 		struct MessageUnit_A __iomem *reg = acb->pmuA;
2640 			firmware_state = readl(&reg->outbound_msgaddr1);
2641 		} while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0);
2645 	case ACB_ADAPTER_TYPE_B: {
2646 		struct MessageUnit_B *reg = acb->pmuB;
2648 			firmware_state = readl(reg->iop2drv_doorbell);
2649 		} while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0);
2650 		writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
2653 	case ACB_ADAPTER_TYPE_C: {
2654 		struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
2656 			firmware_state = readl(&reg->outbound_msgaddr1);
2657 		} while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
2662 static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb)
2664 	struct MessageUnit_A __iomem *reg = acb->pmuA;
2665 	if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
2668 		acb->fw_flag = FW_NORMAL;
2669 		if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)){
2670 			atomic_set(&acb->rq_map_token, 16);
2672 		atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
2673 		if (atomic_dec_and_test(&acb->rq_map_token))
2675 		writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
2676 		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
2681 static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb)
2683 	struct MessageUnit_B __iomem *reg = acb->pmuB;
2684 	if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){
2687 		acb->fw_flag = FW_NORMAL;
2688 		if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
2689 			atomic_set(&acb->rq_map_token, 16);
2691 		atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
2692 		if(atomic_dec_and_test(&acb->rq_map_token))
2694 		writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
2695 		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
2700 static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb)
2702 	struct MessageUnit_C __iomem *reg = acb->pmuC;
2703 	if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0) || ((acb->acb_flags & ACB_F_ABORT) != 0)) {
2706 		acb->fw_flag = FW_NORMAL;
2707 		if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) {
2708 			atomic_set(&acb->rq_map_token, 16);
2710 		atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token));
2711 		if (atomic_dec_and_test(&acb->rq_map_token))
2713 		writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
2714 		writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
2715 		mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ));
2720 static void arcmsr_request_device_map(unsigned long pacb)
2722 	struct AdapterControlBlock *acb = (struct AdapterControlBlock *)pacb;
2723 	switch (acb->adapter_type) {
2724 	case ACB_ADAPTER_TYPE_A: {
2725 		arcmsr_request_hba_device_map(acb);
2728 	case ACB_ADAPTER_TYPE_B: {
2729 		arcmsr_request_hbb_device_map(acb);
2732 	case ACB_ADAPTER_TYPE_C: {
2733 		arcmsr_request_hbc_device_map(acb);
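/*
** Editor's note (added comment): arcmsr_request_device_map() above is the
** eternal_timer callback. The per-adapter helpers re-arm the timer every
** 6 * HZ, maintain the rq_map_token/ante_token_value heartbeat, and post a
** "get config" message so the driver can pick up device map changes.
*/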
2738 static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb)
2740 	struct MessageUnit_A __iomem *reg = acb->pmuA;
2741 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
2742 	writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0);
2743 	if (!arcmsr_hba_wait_msgint_ready(acb)) {
2744 		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
2745 			rebuild' timeout \n", acb->host->host_no);
2749 static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb)
2751 	struct MessageUnit_B *reg = acb->pmuB;
2752 	acb->acb_flags |= ACB_F_MSG_START_BGRB;
2753 	writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell);
2754 	if (!arcmsr_hbb_wait_msgint_ready(acb)) {
2755 		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
2756 			rebuild' timeout \n", acb->host->host_no);
2760 static void arcmsr_start_hbc_bgrb(struct AdapterControlBlock *pACB)
2762 	struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC;
2763 	pACB->acb_flags |= ACB_F_MSG_START_BGRB;
2764 	writel(ARCMSR_INBOUND_MESG0_START_BGRB, &phbcmu->inbound_msgaddr0);
2765 	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell);
2766 	if (!arcmsr_hbc_wait_msgint_ready(pACB)) {
2767 		printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \
2768 			rebuild' timeout \n", pACB->host->host_no);
2772 static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb)
2774 	switch (acb->adapter_type) {
2775 	case ACB_ADAPTER_TYPE_A:
2776 		arcmsr_start_hba_bgrb(acb);
2778 	case ACB_ADAPTER_TYPE_B:
2779 		arcmsr_start_hbb_bgrb(acb);
2781 	case ACB_ADAPTER_TYPE_C:
2782 		arcmsr_start_hbc_bgrb(acb);
2786 static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb)
2788 	switch (acb->adapter_type) {
2789 	case ACB_ADAPTER_TYPE_A: {
2790 		struct MessageUnit_A __iomem *reg = acb->pmuA;
2791 		uint32_t outbound_doorbell;
2792 		/* empty doorbell Qbuffer if door bell ringed */
2793 		outbound_doorbell = readl(&reg->outbound_doorbell);
2794 		/*clear doorbell interrupt */
2795 		writel(outbound_doorbell, &reg->outbound_doorbell);
2796 		writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
2800 	case ACB_ADAPTER_TYPE_B: {
2801 		struct MessageUnit_B *reg = acb->pmuB;
2802 		/*clear interrupt and message state*/
2803 		writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
2804 		writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
2805 		/* let IOP know data has been read */
2808 	case ACB_ADAPTER_TYPE_C: {
2809 		struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
2810 		uint32_t outbound_doorbell;
2811 		/* empty doorbell Qbuffer if door bell ringed */
2812 		outbound_doorbell = readl(&reg->outbound_doorbell);
2813 		writel(outbound_doorbell, &reg->outbound_doorbell_clear);
2814 		writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
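/*
** Editor's note (added comment): the doorbell queue is drained differently
** per adapter type above: type A acknowledges by writing the value back to
** outbound_doorbell, type B writes ARCMSR_MESSAGE_INT_CLEAR_PATTERN to
** iop2drv_doorbell, and type C writes the value to outbound_doorbell_clear;
** each then tells the IOP that its data has been read.
*/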
2819 static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb)
2821 	switch (acb->adapter_type) {
2822 	case ACB_ADAPTER_TYPE_A:
2824 	case ACB_ADAPTER_TYPE_B:
2826 		struct MessageUnit_B *reg = acb->pmuB;
2827 		writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell);
2828 		if (!arcmsr_hbb_wait_msgint_ready(acb)) {
2829 			printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT");
2834 	case ACB_ADAPTER_TYPE_C:
2840 static void arcmsr_hardware_reset(struct AdapterControlBlock *acb)
2844 	struct MessageUnit_A __iomem *pmuA = acb->pmuA;
2845 	struct MessageUnit_C __iomem *pmuC = acb->pmuC;
2847 	/* backup pci config data */
2848 	printk(KERN_NOTICE "arcmsr%d: executing hw bus reset .....\n", acb->host->host_no);
2849 	for (i = 0; i < 64; i++) {
2850 		pci_read_config_byte(acb->pdev, i, &value[i]);
2852 	/* hardware reset signal */
2853 	if ((acb->dev_id == 0x1680)) {
2854 		writel(ARCMSR_ARC1680_BUS_RESET, &pmuA->reserved1[0]);
2855 	} else if ((acb->dev_id == 0x1880)) {
2858 			writel(0xF, &pmuC->write_sequence);
2859 			writel(0x4, &pmuC->write_sequence);
2860 			writel(0xB, &pmuC->write_sequence);
2861 			writel(0x2, &pmuC->write_sequence);
2862 			writel(0x7, &pmuC->write_sequence);
2863 			writel(0xD, &pmuC->write_sequence);
2864 		} while ((((temp = readl(&pmuC->host_diagnostic)) | ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5));
2865 		writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic);
2867 		pci_write_config_byte(acb->pdev, 0x84, 0x20);
2870 	/* write back pci config data */
2871 	for (i = 0; i < 64; i++) {
2872 		pci_write_config_byte(acb->pdev, i, value[i]);
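/*
** Editor's note (added comment): arcmsr_hardware_reset() above saves PCI
** config bytes 0-63, asserts the reset (ARC1680 via reserved1[0]; ARC1880
** by unlocking host_diagnostic with the 0xF, 0x4, 0xB, 0x2, 0x7, 0xD
** write_sequence and then writing ARCMSR_ARC1880_RESET_ADAPTER; otherwise
** via PCI config offset 0x84), and finally restores the saved config bytes.
*/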
2877 static void arcmsr_iop_init(struct AdapterControlBlock *acb)
2879 	uint32_t intmask_org;
2880 	/* disable all outbound interrupt */
2881 	intmask_org = arcmsr_disable_outbound_ints(acb);
2882 	arcmsr_wait_firmware_ready(acb);
2883 	arcmsr_iop_confirm(acb);
2884 	/*start background rebuild*/
2885 	arcmsr_start_adapter_bgrb(acb);
2886 	/* empty doorbell Qbuffer if door bell ringed */
2887 	arcmsr_clear_doorbell_queue_buffer(acb);
2888 	arcmsr_enable_eoi_mode(acb);
2889 	/* enable outbound Post Queue,outbound doorbell Interrupt */
2890 	arcmsr_enable_outbound_ints(acb, intmask_org);
2891 	acb->acb_flags |= ACB_F_IOP_INITED;
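/*
** Editor's note (added comment): arcmsr_iop_init() above brings the IOP up
** in a fixed order: mask outbound interrupts, wait for firmware ready,
** confirm the CCB/queue addresses, start background rebuild, drain the
** doorbell queue, enable EOI mode, then re-enable outbound interrupts and
** mark the ACB as initialized.
*/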
2894 static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb)
2896 	struct CommandControlBlock *ccb;
2897 	uint32_t intmask_org;
2898 	uint8_t rtnval = 0x00;
2900 	if (atomic_read(&acb->ccboutstandingcount) != 0) {
2901 		/* disable all outbound interrupt */
2902 		intmask_org = arcmsr_disable_outbound_ints(acb);
2903 		/* talk to iop 331 outstanding command aborted */
2904 		rtnval = arcmsr_abort_allcmd(acb);
2905 		/* clear all outbound posted Q */
2906 		arcmsr_done4abort_postqueue(acb);
2907 		for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
2908 			ccb = acb->pccb_pool[i];
2909 			if (ccb->startdone == ARCMSR_CCB_START) {
2910 				arcmsr_ccb_complete(ccb);
2913 		atomic_set(&acb->ccboutstandingcount, 0);
2914 		/* enable all outbound interrupt */
2915 		arcmsr_enable_outbound_ints(acb, intmask_org);
2921 static int arcmsr_bus_reset(struct scsi_cmnd *cmd)
2923 	struct AdapterControlBlock *acb =
2924 		(struct AdapterControlBlock *)cmd->device->host->hostdata;
2925 	uint32_t intmask_org, outbound_doorbell;
2926 	int retry_count = 0;
2928 	acb = (struct AdapterControlBlock *) cmd->device->host->hostdata;
2929 	printk(KERN_ERR "arcmsr: executing bus reset eh.....num_resets = %d, num_aborts = %d \n", acb->num_resets, acb->num_aborts);
2932 	switch(acb->adapter_type){
2933 	case ACB_ADAPTER_TYPE_A:{
2934 		if (acb->acb_flags & ACB_F_BUS_RESET){
2936 			printk(KERN_ERR "arcmsr: there is a bus reset eh proceeding.......\n");
2937 			timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220*HZ);
2942 		acb->acb_flags |= ACB_F_BUS_RESET;
2943 		if (!arcmsr_iop_reset(acb)) {
2944 			struct MessageUnit_A __iomem *reg;
2946 			arcmsr_hardware_reset(acb);
2947 			acb->acb_flags &= ~ACB_F_IOP_INITED;
2949 			arcmsr_sleep_for_bus_reset(cmd);
2950 			if ((readl(&reg->outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) {
2951 				printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d \n", acb->host->host_no, retry_count);
2952 				if (retry_count > retrycount) {
2953 					acb->fw_flag = FW_DEADLOCK;
2954 					printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!! \n", acb->host->host_no);
2960 			acb->acb_flags |= ACB_F_IOP_INITED;
2961 			/* disable all outbound interrupt */
2962 			intmask_org = arcmsr_disable_outbound_ints(acb);
2963 			arcmsr_get_firmware_spec(acb);
2964 			arcmsr_start_adapter_bgrb(acb);
2965 			/* clear Qbuffer if door bell ringed */
2966 			outbound_doorbell = readl(&reg->outbound_doorbell);
2967 			writel(outbound_doorbell, &reg->outbound_doorbell); /*clear interrupt */
2968 			writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
2969 			/* enable outbound Post Queue,outbound doorbell Interrupt */
2970 			arcmsr_enable_outbound_ints(acb, intmask_org);
2971 			atomic_set(&acb->rq_map_token, 16);
2972 			atomic_set(&acb->ante_token_value, 16);
2973 			acb->fw_flag = FW_NORMAL;
2974 			init_timer(&acb->eternal_timer);
2975 			acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
2976 			acb->eternal_timer.data = (unsigned long) acb;
2977 			acb->eternal_timer.function = &arcmsr_request_device_map;
2978 			add_timer(&acb->eternal_timer);
2979 			acb->acb_flags &= ~ACB_F_BUS_RESET;
2981 			printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
2983 			acb->acb_flags &= ~ACB_F_BUS_RESET;
2984 			if (atomic_read(&acb->rq_map_token) == 0) {
2985 				atomic_set(&acb->rq_map_token, 16);
2986 				atomic_set(&acb->ante_token_value, 16);
2987 				acb->fw_flag = FW_NORMAL;
2988 				init_timer(&acb->eternal_timer);
2989 				acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
2990 				acb->eternal_timer.data = (unsigned long) acb;
2991 				acb->eternal_timer.function = &arcmsr_request_device_map;
2992 				add_timer(&acb->eternal_timer);
2994 				atomic_set(&acb->rq_map_token, 16);
2995 				atomic_set(&acb->ante_token_value, 16);
2996 				acb->fw_flag = FW_NORMAL;
2997 				mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
3003 	case ACB_ADAPTER_TYPE_B:{
3004 		acb->acb_flags |= ACB_F_BUS_RESET;
3005 		if (!arcmsr_iop_reset(acb)) {
3006 			acb->acb_flags &= ~ACB_F_BUS_RESET;
3009 			acb->acb_flags &= ~ACB_F_BUS_RESET;
3010 			if (atomic_read(&acb->rq_map_token) == 0) {
3011 				atomic_set(&acb->rq_map_token, 16);
3012 				atomic_set(&acb->ante_token_value, 16);
3013 				acb->fw_flag = FW_NORMAL;
3014 				init_timer(&acb->eternal_timer);
3015 				acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
3016 				acb->eternal_timer.data = (unsigned long) acb;
3017 				acb->eternal_timer.function = &arcmsr_request_device_map;
3018 				add_timer(&acb->eternal_timer);
3020 				atomic_set(&acb->rq_map_token, 16);
3021 				atomic_set(&acb->ante_token_value, 16);
3022 				acb->fw_flag = FW_NORMAL;
3023 				mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
3029 	case ACB_ADAPTER_TYPE_C:{
3030 		if (acb->acb_flags & ACB_F_BUS_RESET) {
3032 			printk(KERN_ERR "arcmsr: there is a bus reset eh proceeding.......\n");
3033 			timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220*HZ);
3038 		acb->acb_flags |= ACB_F_BUS_RESET;
3039 		if (!arcmsr_iop_reset(acb)) {
3040 			struct MessageUnit_C __iomem *reg;
3042 			arcmsr_hardware_reset(acb);
3043 			acb->acb_flags &= ~ACB_F_IOP_INITED;
3045 			arcmsr_sleep_for_bus_reset(cmd);
3046 			if ((readl(&reg->host_diagnostic) & 0x04) != 0) {
3047 				printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d \n", acb->host->host_no, retry_count);
3048 				if (retry_count > retrycount) {
3049 					acb->fw_flag = FW_DEADLOCK;
3050 					printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!! \n", acb->host->host_no);
3056 			acb->acb_flags |= ACB_F_IOP_INITED;
3057 			/* disable all outbound interrupt */
3058 			intmask_org = arcmsr_disable_outbound_ints(acb);
3059 			arcmsr_get_firmware_spec(acb);
3060 			arcmsr_start_adapter_bgrb(acb);
3061 			/* clear Qbuffer if door bell ringed */
3062 			outbound_doorbell = readl(&reg->outbound_doorbell);
3063 			writel(outbound_doorbell, &reg->outbound_doorbell_clear); /*clear interrupt */
3064 			writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
3065 			/* enable outbound Post Queue,outbound doorbell Interrupt */
3066 			arcmsr_enable_outbound_ints(acb, intmask_org);
3067 			atomic_set(&acb->rq_map_token, 16);
3068 			atomic_set(&acb->ante_token_value, 16);
3069 			acb->fw_flag = FW_NORMAL;
3070 			init_timer(&acb->eternal_timer);
3071 			acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ);
3072 			acb->eternal_timer.data = (unsigned long) acb;
3073 			acb->eternal_timer.function = &arcmsr_request_device_map;
3074 			add_timer(&acb->eternal_timer);
3075 			acb->acb_flags &= ~ACB_F_BUS_RESET;
3077 			printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n");
3079 			acb->acb_flags &= ~ACB_F_BUS_RESET;
3080 			if (atomic_read(&acb->rq_map_token) == 0) {
3081 				atomic_set(&acb->rq_map_token, 16);
3082 				atomic_set(&acb->ante_token_value, 16);
3083 				acb->fw_flag = FW_NORMAL;
3084 				init_timer(&acb->eternal_timer);
3085 				acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6*HZ);
3086 				acb->eternal_timer.data = (unsigned long) acb;
3087 				acb->eternal_timer.function = &arcmsr_request_device_map;
3088 				add_timer(&acb->eternal_timer);
3090 				atomic_set(&acb->rq_map_token, 16);
3091 				atomic_set(&acb->ante_token_value, 16);
3092 				acb->fw_flag = FW_NORMAL;
3093 				mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ));
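/*
** Editor's note (added comment): the bus-reset error handler above
** serializes with any reset already in progress, attempts a soft
** arcmsr_iop_reset() first and, when that returns zero, performs
** arcmsr_hardware_reset() followed by a full firmware re-init (get config,
** restart background rebuild, clear doorbells, re-enable interrupts and
** re-arm eternal_timer) before clearing ACB_F_BUS_RESET.
*/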
3103 static int arcmsr_abort_one_cmd(struct AdapterControlBlock *acb,
3104 	struct CommandControlBlock *ccb)
3107 	rtn = arcmsr_polling_ccbdone(acb, ccb);
3111 static int arcmsr_abort(struct scsi_cmnd *cmd)
3113 	struct AdapterControlBlock *acb =
3114 		(struct AdapterControlBlock *)cmd->device->host->hostdata;
3118 		"arcmsr%d: abort device command of scsi id = %d lun = %d \n",
3119 		acb->host->host_no, cmd->device->id, cmd->device->lun);
3120 	acb->acb_flags |= ACB_F_ABORT;
3123 	************************************************
3124 	** the entire interrupt service routine is locked,
3125 	** so we need to handle this as soon as possible and exit
3126 	************************************************
3128 	if (!atomic_read(&acb->ccboutstandingcount))
3131 	for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) {
3132 		struct CommandControlBlock *ccb = acb->pccb_pool[i];
3133 		if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) {
3134 			ccb->startdone = ARCMSR_CCB_ABORTED;
3135 			rtn = arcmsr_abort_one_cmd(acb, ccb);
3139 	acb->acb_flags &= ~ACB_F_ABORT;
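/*
** Editor's note (added comment): arcmsr_abort() above sets ACB_F_ABORT,
** returns early when nothing is outstanding, otherwise scans pccb_pool for
** the CCB owning this scsi_cmnd, marks it ARCMSR_CCB_ABORTED and polls it
** to completion via arcmsr_abort_one_cmd(); the flag is cleared on exit.
*/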
3143 static const char *arcmsr_info(struct Scsi_Host *host)
3145 	struct AdapterControlBlock *acb =
3146 		(struct AdapterControlBlock *) host->hostdata;
3147 	static char buf[256];
3150 	switch (acb->pdev->device) {
3151 	case PCI_DEVICE_ID_ARECA_1110:
3152 	case PCI_DEVICE_ID_ARECA_1200:
3153 	case PCI_DEVICE_ID_ARECA_1202:
3154 	case PCI_DEVICE_ID_ARECA_1210:
3157 	case PCI_DEVICE_ID_ARECA_1120:
3158 	case PCI_DEVICE_ID_ARECA_1130:
3159 	case PCI_DEVICE_ID_ARECA_1160:
3160 	case PCI_DEVICE_ID_ARECA_1170:
3161 	case PCI_DEVICE_ID_ARECA_1201:
3162 	case PCI_DEVICE_ID_ARECA_1220:
3163 	case PCI_DEVICE_ID_ARECA_1230:
3164 	case PCI_DEVICE_ID_ARECA_1260:
3165 	case PCI_DEVICE_ID_ARECA_1270:
3166 	case PCI_DEVICE_ID_ARECA_1280:
3169 	case PCI_DEVICE_ID_ARECA_1380:
3170 	case PCI_DEVICE_ID_ARECA_1381:
3171 	case PCI_DEVICE_ID_ARECA_1680:
3172 	case PCI_DEVICE_ID_ARECA_1681:
3173 	case PCI_DEVICE_ID_ARECA_1880:
3180 	sprintf(buf, "Areca %s Host Adapter RAID Controller%s\n %s",
3181 		type, raid6 ? "( RAID6 capable)" : "",
3182 		ARCMSR_DRIVER_VERSION);