/*
 * Aic94xx SAS/SATA driver sequencer interface.
 *
 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 *
 * Parts of this code adapted from David Chaw's adp94xx_seq.c.
 *
 * This file is licensed under GPLv2.
 *
 * This file is part of the aic94xx driver.
 *
 * The aic94xx driver is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of the
 * License.
 *
 * The aic94xx driver is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with the aic94xx driver; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */
#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <linux/firmware.h>

#include "aic94xx_reg.h"
#include "aic94xx_hwi.h"
#include "aic94xx_seq.h"
#include "aic94xx_dump.h"
/* It takes no more than 0.05 us for an instruction
 * to complete.  So waiting for 1 us should be more than
 * plenty.
 */
#define PAUSE_TRIES 1000
static const struct firmware *sequencer_fw;

static u16 cseq_vecs[CSEQ_NUM_VECS], lseq_vecs[LSEQ_NUM_VECS], mode2_task,
        cseq_idle_loop, lseq_idle_loop;
static u8 *cseq_code, *lseq_code;
static u32 cseq_code_size, lseq_code_size;

static u16 first_scb_site_no = 0xFFFF;
static u16 last_scb_site_no;
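
/* first_scb_site_no and last_scb_site_no are computed while walking the
 * SCB sites in asd_init_scb_sites() below and are then consumed by the
 * CSEQ and LSEQ scratch initialization. */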
/* ---------- Pause/Unpause CSEQ/LSEQ ---------- */

/**
 * asd_pause_cseq - pause the central sequencer
 * @asd_ha: pointer to host adapter structure
 *
 * Return 0 on success, negative on failure.
 */
int asd_pause_cseq(struct asd_ha_struct *asd_ha)
{
        int count = PAUSE_TRIES;
        u32 arp2ctl;

        arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL);
        if (arp2ctl & PAUSED)
                return 0;

        asd_write_reg_dword(asd_ha, CARP2CTL, arp2ctl | EPAUSE);
        do {
                arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL);
                if (arp2ctl & PAUSED)
                        return 0;
                udelay(1);
        } while (--count > 0);

        ASD_DPRINTK("couldn't pause CSEQ\n");
        return -1;
}
/**
 * asd_unpause_cseq - unpause the central sequencer.
 * @asd_ha: pointer to host adapter structure.
 *
 * Return 0 on success, negative on error.
 */
int asd_unpause_cseq(struct asd_ha_struct *asd_ha)
{
        u32 arp2ctl;
        int count = PAUSE_TRIES;

        arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL);
        if (!(arp2ctl & PAUSED))
                return 0;

        asd_write_reg_dword(asd_ha, CARP2CTL, arp2ctl & ~EPAUSE);
        do {
                arp2ctl = asd_read_reg_dword(asd_ha, CARP2CTL);
                if (!(arp2ctl & PAUSED))
                        return 0;
                udelay(1);
        } while (--count > 0);

        ASD_DPRINTK("couldn't unpause the CSEQ\n");
        return -1;
}
/**
 * asd_seq_pause_lseq - pause a link sequencer
 * @asd_ha: pointer to a host adapter structure
 * @lseq: link sequencer of interest
 *
 * Return 0 on success, negative on error.
 */
static inline int asd_seq_pause_lseq(struct asd_ha_struct *asd_ha, int lseq)
{
        u32 arp2ctl;
        int count = PAUSE_TRIES;

        arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
        if (arp2ctl & PAUSED)
                return 0;

        asd_write_reg_dword(asd_ha, LmARP2CTL(lseq), arp2ctl | EPAUSE);
        do {
                arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
                if (arp2ctl & PAUSED)
                        return 0;
                udelay(1);
        } while (--count > 0);

        ASD_DPRINTK("couldn't pause LSEQ %d\n", lseq);
        return -1;
}
/**
 * asd_pause_lseq - pause the link sequencer(s)
 * @asd_ha: pointer to host adapter structure
 * @lseq_mask: mask of link sequencers of interest
 *
 * Return 0 on success, negative on failure.
 */
int asd_pause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask)
{
        int lseq, err = 0;

        for_each_sequencer(lseq_mask, lseq_mask, lseq) {
                err = asd_seq_pause_lseq(asd_ha, lseq);
                if (err)
                        return err;
        }
        return err;
}
/**
 * asd_seq_unpause_lseq - unpause a link sequencer
 * @asd_ha: pointer to host adapter structure
 * @lseq: link sequencer of interest
 *
 * Return 0 on success, negative on error.
 */
static inline int asd_seq_unpause_lseq(struct asd_ha_struct *asd_ha, int lseq)
{
        u32 arp2ctl;
        int count = PAUSE_TRIES;

        arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
        if (!(arp2ctl & PAUSED))
                return 0;

        asd_write_reg_dword(asd_ha, LmARP2CTL(lseq), arp2ctl & ~EPAUSE);
        do {
                arp2ctl = asd_read_reg_dword(asd_ha, LmARP2CTL(lseq));
                if (!(arp2ctl & PAUSED))
                        return 0;
                udelay(1);
        } while (--count > 0);

        ASD_DPRINTK("couldn't unpause LSEQ %d\n", lseq);
        return -1;
}
/**
 * asd_unpause_lseq - unpause the link sequencer(s)
 * @asd_ha: pointer to host adapter structure
 * @lseq_mask: mask of link sequencers of interest
 *
 * Return 0 on success, negative on failure.
 */
int asd_unpause_lseq(struct asd_ha_struct *asd_ha, u8 lseq_mask)
{
        int lseq, err = 0;

        for_each_sequencer(lseq_mask, lseq_mask, lseq) {
                err = asd_seq_unpause_lseq(asd_ha, lseq);
                if (err)
                        return err;
        }
        return err;
}
/* ---------- Downloading CSEQ/LSEQ microcode ---------- */
static int asd_verify_cseq(struct asd_ha_struct *asd_ha, const u8 *_prog,
                           u32 size)
{
        u32 addr = CSEQ_RAM_REG_BASE_ADR;
        const u32 *prog = (u32 *) _prog;
        u32 i;

        for (i = 0; i < size; i += 4, prog++, addr += 4) {
                u32 val = asd_read_reg_dword(asd_ha, addr);

                if (le32_to_cpu(*prog) != val) {
                        asd_printk("%s: cseq verify failed at %u "
                                   "read:0x%x, wanted:0x%x\n",
                                   pci_name(asd_ha->pcidev),
                                   i, val, le32_to_cpu(*prog));
                        return -1;
                }
        }
        ASD_DPRINTK("verified %d bytes, passed\n", size);
        return 0;
}
/**
 * asd_verify_lseq - verify the microcode of a link sequencer
 * @asd_ha: pointer to host adapter structure
 * @_prog: pointer to the microcode
 * @size: size of the microcode in bytes
 * @lseq: link sequencer of interest
 *
 * The link sequencer code is accessed in 4 KB pages, which are selected
 * by setting LmRAMPAGE (bits 8 and 9) of the LmBISTCTL1 register.
 * The 10 KB LSEQm instruction code is mapped, page at a time, at the
 * LmSEQRAM address.
 */
static int asd_verify_lseq(struct asd_ha_struct *asd_ha, const u8 *_prog,
                           u32 size, int lseq)
{
#define LSEQ_CODEPAGE_SIZE 4096
        int pages = (size + LSEQ_CODEPAGE_SIZE - 1) / LSEQ_CODEPAGE_SIZE;
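        /* e.g. the full 10 KB (10240-byte) LSEQ image described above
         * spans 3 such pages: 4096 + 4096 + 2048 bytes. */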
        u32 page;
        const u32 *prog = (u32 *) _prog;

        for (page = 0; page < pages; page++) {
                u32 i;

                asd_write_reg_dword(asd_ha, LmBISTCTL1(lseq),
                                    page << LmRAMPAGE_LSHIFT);
                for (i = 0; size > 0 && i < LSEQ_CODEPAGE_SIZE;
                     i += 4, prog++, size -= 4) {

                        u32 val = asd_read_reg_dword(asd_ha, LmSEQRAM(lseq)+i);

                        if (le32_to_cpu(*prog) != val) {
                                asd_printk("%s: LSEQ%d verify failed "
                                           "page:%d, offs:%d\n",
                                           pci_name(asd_ha->pcidev),
                                           lseq, page, i);
                                return -1;
                        }
                }
        }
        ASD_DPRINTK("LSEQ%d verified %d bytes, passed\n", lseq,
                    (int)((u8 *)prog-_prog));
        return 0;
}
/**
 * asd_verify_seq -- verify CSEQ/LSEQ microcode
 * @asd_ha: pointer to host adapter structure
 * @prog: pointer to microcode
 * @size: size of the microcode
 * @lseq_mask: if 0, verify CSEQ microcode, else mask of LSEQs of interest
 *
 * Return 0 if microcode is correct, negative on mismatch.
 */
static int asd_verify_seq(struct asd_ha_struct *asd_ha, const u8 *prog,
                          u32 size, u8 lseq_mask)
{
        if (lseq_mask == 0)
                return asd_verify_cseq(asd_ha, prog, size);
        else {
                int lseq, err;

                for_each_sequencer(lseq_mask, lseq_mask, lseq) {
                        err = asd_verify_lseq(asd_ha, prog, size, lseq);
                        if (err)
                                return err;
                }
        }

        return 0;
}
#define ASD_DMA_MODE_DOWNLOAD
#ifdef ASD_DMA_MODE_DOWNLOAD
/* This is the size of the CSEQ Mapped instruction page */
#define MAX_DMA_OVLY_COUNT ((1U << 14)-1)
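/* In other words, at most 16383 bytes can be moved per overlay DMA pass;
 * larger sequencer images are split into that many pages by
 * asd_download_seq() below. */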
static int asd_download_seq(struct asd_ha_struct *asd_ha,
                            const u8 * const prog, u32 size, u8 lseq_mask)
{
        u32 comstaten;
        u32 reg;
        int page;
        const int pages = (size + MAX_DMA_OVLY_COUNT - 1) / MAX_DMA_OVLY_COUNT;
        struct asd_dma_tok *token;
        int err = 0;

        if (size % 4) {
                asd_printk("sequencer program not multiple of 4\n");
                return -1;
        }

        asd_pause_cseq(asd_ha);
        asd_pause_lseq(asd_ha, 0xFF);

        /* save, disable and clear interrupts */
        comstaten = asd_read_reg_dword(asd_ha, COMSTATEN);
        asd_write_reg_dword(asd_ha, COMSTATEN, 0);
        asd_write_reg_dword(asd_ha, COMSTAT, COMSTAT_MASK);

        asd_write_reg_dword(asd_ha, CHIMINTEN, RST_CHIMINTEN);
        asd_write_reg_dword(asd_ha, CHIMINT, CHIMINT_MASK);

        token = asd_alloc_coherent(asd_ha, MAX_DMA_OVLY_COUNT, GFP_KERNEL);
        if (!token) {
                asd_printk("out of memory for dma SEQ download\n");
                err = -ENOMEM;
                goto out;
        }
        ASD_DPRINTK("dma-ing %d bytes\n", size);

        for (page = 0; page < pages; page++) {
                int i;
                u32 left = min(size-page*MAX_DMA_OVLY_COUNT,
                               (u32)MAX_DMA_OVLY_COUNT);

                memcpy(token->vaddr, prog + page*MAX_DMA_OVLY_COUNT, left);
                asd_write_reg_addr(asd_ha, OVLYDMAADR, token->dma_handle);
                asd_write_reg_dword(asd_ha, OVLYDMACNT, left);
                reg = !page ? RESETOVLYDMA : 0;
                reg |= (STARTOVLYDMA | OVLYHALTERR);
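                /* Select the download target: a non-zero lseq_mask lands in
                 * bits 15:8 of OVLYDMACTL and addresses the corresponding
                 * link sequencer(s), otherwise OVLYCSEQ selects the central
                 * sequencer. */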
                reg |= (lseq_mask ? (((u32)lseq_mask) << 8) : OVLYCSEQ);
                asd_write_reg_dword(asd_ha, OVLYDMACTL, reg);

                for (i = PAUSE_TRIES*100; i > 0; i--) {
                        u32 dmadone = asd_read_reg_dword(asd_ha, OVLYDMACTL);
                        if (!(dmadone & OVLYDMAACT))
                                break;
                        udelay(1);
                }
        }

        reg = asd_read_reg_dword(asd_ha, COMSTAT);
        if (!(reg & OVLYDMADONE) || (reg & OVLYERR)
            || (asd_read_reg_dword(asd_ha, CHIMINT) & DEVEXCEPT_MASK)){
                asd_printk("%s: error DMA-ing sequencer code\n",
                           pci_name(asd_ha->pcidev));
                err = -ENODEV;
        }

        asd_free_coherent(asd_ha, token);
out:
        asd_write_reg_dword(asd_ha, COMSTATEN, comstaten);
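
        /* "err ? : expr" is the GCC conditional-with-omitted-middle-operand
         * extension: the statement below returns err when it is non-zero,
         * otherwise the result of the post-download verification. */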
        return err ? : asd_verify_seq(asd_ha, prog, size, lseq_mask);
}
#else /* ASD_DMA_MODE_DOWNLOAD */
static int asd_download_seq(struct asd_ha_struct *asd_ha, const u8 *_prog,
                            u32 size, u8 lseq_mask)
{
        int i;
        u32 reg = 0;
        const u32 *prog = (u32 *) _prog;

        if (size % 4) {
                asd_printk("sequencer program not multiple of 4\n");
                return -1;
        }

        asd_pause_cseq(asd_ha);
        asd_pause_lseq(asd_ha, 0xFF);

        reg |= (lseq_mask ? (((u32)lseq_mask) << 8) : OVLYCSEQ);
        reg |= PIOCMODE;

        asd_write_reg_dword(asd_ha, OVLYDMACNT, size);
        asd_write_reg_dword(asd_ha, OVLYDMACTL, reg);

        ASD_DPRINTK("downloading %s sequencer%s in PIO mode...\n",
                    lseq_mask ? "LSEQ" : "CSEQ", lseq_mask ? "s" : "");

        for (i = 0; i < size; i += 4, prog++)
                asd_write_reg_dword(asd_ha, SPIODATA, *prog);

        reg = (reg & ~PIOCMODE) | OVLYHALTERR;
        asd_write_reg_dword(asd_ha, OVLYDMACTL, reg);

        return asd_verify_seq(asd_ha, _prog, size, lseq_mask);
}
#endif /* ASD_DMA_MODE_DOWNLOAD */
/**
 * asd_seq_download_seqs - download the sequencer microcode
 * @asd_ha: pointer to host adapter structure
 *
 * Download the central and link sequencer microcode.
 */
static int asd_seq_download_seqs(struct asd_ha_struct *asd_ha)
{
        int err;

        if (!asd_ha->hw_prof.enabled_phys) {
                asd_printk("%s: no enabled phys!\n", pci_name(asd_ha->pcidev));
                return -ENODEV;
        }

        /* Download the CSEQ */
        ASD_DPRINTK("downloading CSEQ...\n");
        err = asd_download_seq(asd_ha, cseq_code, cseq_code_size, 0);
        if (err) {
                asd_printk("CSEQ download failed:%d\n", err);
                return err;
        }

        /* Download the Link Sequencers code. All of the Link Sequencers
         * microcode can be downloaded at the same time.
         */
        ASD_DPRINTK("downloading LSEQs...\n");
        err = asd_download_seq(asd_ha, lseq_code, lseq_code_size,
                               asd_ha->hw_prof.enabled_phys);
        if (err) {
                /* Try it one at a time */
                u8 lseq;
                u8 lseq_mask = asd_ha->hw_prof.enabled_phys;

                for_each_sequencer(lseq_mask, lseq_mask, lseq) {
                        err = asd_download_seq(asd_ha, lseq_code,
                                               lseq_code_size, 1<<lseq);
                        if (err)
                                break;
                }
        }
        if (err)
                asd_printk("LSEQs download failed:%d\n", err);

        return err;
}
/* ---------- Initializing the chip, chip memory, etc. ---------- */
/**
 * asd_init_cseq_mip - initialize CSEQ mode independent pages 4-7
 * @asd_ha: pointer to host adapter structure
 */
static void asd_init_cseq_mip(struct asd_ha_struct *asd_ha)
{
        /* CSEQ Mode Independent, page 4 setup. */
        asd_write_reg_word(asd_ha, CSEQ_Q_EXE_HEAD, 0xFFFF);
        asd_write_reg_word(asd_ha, CSEQ_Q_EXE_TAIL, 0xFFFF);
        asd_write_reg_word(asd_ha, CSEQ_Q_DONE_HEAD, 0xFFFF);
        asd_write_reg_word(asd_ha, CSEQ_Q_DONE_TAIL, 0xFFFF);
        asd_write_reg_word(asd_ha, CSEQ_Q_SEND_HEAD, 0xFFFF);
        asd_write_reg_word(asd_ha, CSEQ_Q_SEND_TAIL, 0xFFFF);
        asd_write_reg_word(asd_ha, CSEQ_Q_DMA2CHIM_HEAD, 0xFFFF);
        asd_write_reg_word(asd_ha, CSEQ_Q_DMA2CHIM_TAIL, 0xFFFF);
        asd_write_reg_word(asd_ha, CSEQ_Q_COPY_HEAD, 0xFFFF);
        asd_write_reg_word(asd_ha, CSEQ_Q_COPY_TAIL, 0xFFFF);
        asd_write_reg_word(asd_ha, CSEQ_REG0, 0);
        asd_write_reg_word(asd_ha, CSEQ_REG1, 0);
        asd_write_reg_dword(asd_ha, CSEQ_REG2, 0);
        asd_write_reg_byte(asd_ha, CSEQ_LINK_CTL_Q_MAP, 0);
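        /* CCONEXIST has one bit per existing link, so hweight8() of it is
         * the number of links present; that count is packed into both
         * nibbles of CSEQ_MAX_CSEQ_MODE below. */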
        {
                u8 con = asd_read_reg_byte(asd_ha, CCONEXIST);
                u8 val = hweight8(con);
                asd_write_reg_byte(asd_ha, CSEQ_MAX_CSEQ_MODE, (val<<4)|val);
        }
        asd_write_reg_word(asd_ha, CSEQ_FREE_LIST_HACK_COUNT, 0);
        /* CSEQ Mode independent, page 5 setup. */
        asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_QUEUE, 0);
        asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_QUEUE+4, 0);
        asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_COUNT, 0);
        asd_write_reg_dword(asd_ha, CSEQ_EST_NEXUS_REQ_COUNT+4, 0);
        asd_write_reg_word(asd_ha, CSEQ_Q_EST_NEXUS_HEAD, 0xFFFF);
        asd_write_reg_word(asd_ha, CSEQ_Q_EST_NEXUS_TAIL, 0xFFFF);
        asd_write_reg_word(asd_ha, CSEQ_NEED_EST_NEXUS_SCB, 0);
        asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_REQ_HEAD, 0);
        asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_REQ_TAIL, 0);
        asd_write_reg_byte(asd_ha, CSEQ_EST_NEXUS_SCB_OFFSET, 0);

        /* CSEQ Mode independent, page 6 setup. */
        asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_RET_ADDR0, 0);
        asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_RET_ADDR1, 0);
        asd_write_reg_word(asd_ha, CSEQ_INT_ROUT_SCBPTR, 0);
        asd_write_reg_byte(asd_ha, CSEQ_INT_ROUT_MODE, 0);
        asd_write_reg_byte(asd_ha, CSEQ_ISR_SCRATCH_FLAGS, 0);
        asd_write_reg_word(asd_ha, CSEQ_ISR_SAVE_SINDEX, 0);
        asd_write_reg_word(asd_ha, CSEQ_ISR_SAVE_DINDEX, 0);
        asd_write_reg_word(asd_ha, CSEQ_Q_MONIRTT_HEAD, 0xFFFF);
        asd_write_reg_word(asd_ha, CSEQ_Q_MONIRTT_TAIL, 0xFFFF);
        /* Calculate the free scb mask. */
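        /* e.g. assuming 64 KB of command context RAM: 0x10000/128 = 512
         * SCB sites, and the expression below then evaluates to 0xFE once
         * truncated to a byte. */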
        {
                u16 cmdctx = asd_get_cmdctx_size(asd_ha);
                cmdctx = (~((cmdctx/128)-1)) >> 8;
                asd_write_reg_byte(asd_ha, CSEQ_FREE_SCB_MASK, (u8)cmdctx);
        }
        asd_write_reg_word(asd_ha, CSEQ_BUILTIN_FREE_SCB_HEAD,
                           first_scb_site_no);
        asd_write_reg_word(asd_ha, CSEQ_BUILTIN_FREE_SCB_TAIL,
                           last_scb_site_no);
        asd_write_reg_word(asd_ha, CSEQ_EXTENDED_FREE_SCB_HEAD, 0xFFFF);
        asd_write_reg_word(asd_ha, CSEQ_EXTENDED_FREE_SCB_TAIL, 0xFFFF);
        /* CSEQ Mode independent, page 7 setup. */
        asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_QUEUE, 0);
        asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_QUEUE+4, 0);
        asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_COUNT, 0);
        asd_write_reg_dword(asd_ha, CSEQ_EMPTY_REQ_COUNT+4, 0);
        asd_write_reg_word(asd_ha, CSEQ_Q_EMPTY_HEAD, 0xFFFF);
        asd_write_reg_word(asd_ha, CSEQ_Q_EMPTY_TAIL, 0xFFFF);
        asd_write_reg_word(asd_ha, CSEQ_NEED_EMPTY_SCB, 0);
        asd_write_reg_byte(asd_ha, CSEQ_EMPTY_REQ_HEAD, 0);
        asd_write_reg_byte(asd_ha, CSEQ_EMPTY_REQ_TAIL, 0);
        asd_write_reg_byte(asd_ha, CSEQ_EMPTY_SCB_OFFSET, 0);
        asd_write_reg_word(asd_ha, CSEQ_PRIMITIVE_DATA, 0);
        asd_write_reg_dword(asd_ha, CSEQ_TIMEOUT_CONST, 0);
}
/**
 * asd_init_cseq_mdp - initialize CSEQ Mode dependent pages
 * @asd_ha: pointer to host adapter structure
 */
static void asd_init_cseq_mdp(struct asd_ha_struct *asd_ha)
{
        int i;
        int moffs;

        moffs = CSEQ_PAGE_SIZE * 2;
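        /* Each mode's dependent scratch area is two pages wide, so the
         * scratch areas of consecutive modes are CSEQ_PAGE_SIZE * 2 bytes
         * apart. */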
        /* CSEQ Mode dependent, modes 0-7, page 0 setup. */
        for (i = 0; i < 8; i++) {
                asd_write_reg_word(asd_ha, i*moffs+CSEQ_LRM_SAVE_SINDEX, 0);
                asd_write_reg_word(asd_ha, i*moffs+CSEQ_LRM_SAVE_SCBPTR, 0);
                asd_write_reg_word(asd_ha, i*moffs+CSEQ_Q_LINK_HEAD, 0xFFFF);
                asd_write_reg_word(asd_ha, i*moffs+CSEQ_Q_LINK_TAIL, 0xFFFF);
                asd_write_reg_byte(asd_ha, i*moffs+CSEQ_LRM_SAVE_SCRPAGE, 0);
        }

        /* CSEQ Mode dependent, mode 0-7, page 1 and 2 shall be ignored. */

        /* CSEQ Mode dependent, mode 8, page 0 setup. */
        asd_write_reg_word(asd_ha, CSEQ_RET_ADDR, 0xFFFF);
        asd_write_reg_word(asd_ha, CSEQ_RET_SCBPTR, 0);
        asd_write_reg_word(asd_ha, CSEQ_SAVE_SCBPTR, 0);
        asd_write_reg_word(asd_ha, CSEQ_EMPTY_TRANS_CTX, 0);
        asd_write_reg_word(asd_ha, CSEQ_RESP_LEN, 0);
        asd_write_reg_word(asd_ha, CSEQ_TMF_SCBPTR, 0);
        asd_write_reg_word(asd_ha, CSEQ_GLOBAL_PREV_SCB, 0);
        asd_write_reg_word(asd_ha, CSEQ_GLOBAL_HEAD, 0);
        asd_write_reg_word(asd_ha, CSEQ_CLEAR_LU_HEAD, 0);
        asd_write_reg_byte(asd_ha, CSEQ_TMF_OPCODE, 0);
        asd_write_reg_byte(asd_ha, CSEQ_SCRATCH_FLAGS, 0);
        asd_write_reg_word(asd_ha, CSEQ_HSB_SITE, 0);
        asd_write_reg_word(asd_ha, CSEQ_FIRST_INV_SCB_SITE,
                           (u16)last_scb_site_no+1);
        asd_write_reg_word(asd_ha, CSEQ_FIRST_INV_DDB_SITE,
                           (u16)asd_ha->hw_prof.max_ddbs);

        /* CSEQ Mode dependent, mode 8, page 1 setup. */
        asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CLEAR, 0);
        asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CLEAR + 4, 0);
        asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CHECK, 0);
        asd_write_reg_dword(asd_ha, CSEQ_LUN_TO_CHECK + 4, 0);

        /* CSEQ Mode dependent, mode 8, page 2 setup. */
        /* Tell the sequencer the bus address of the first SCB. */
        asd_write_reg_addr(asd_ha, CSEQ_HQ_NEW_POINTER,
                           asd_ha->seq.next_scb.dma_handle);
        ASD_DPRINTK("First SCB dma_handle: 0x%llx\n",
                    (unsigned long long)asd_ha->seq.next_scb.dma_handle);

        /* Tell the sequencer the first Done List entry address. */
        asd_write_reg_addr(asd_ha, CSEQ_HQ_DONE_BASE,
                           asd_ha->seq.actual_dl->dma_handle);

        /* Initialize the Q_DONE_POINTER with the least significant
         * 4 bytes of the first Done List address. */
        asd_write_reg_dword(asd_ha, CSEQ_HQ_DONE_POINTER,
                            ASD_BUSADDR_LO(asd_ha->seq.actual_dl->dma_handle));

        asd_write_reg_byte(asd_ha, CSEQ_HQ_DONE_PASS, ASD_DEF_DL_TOGGLE);

        /* CSEQ Mode dependent, mode 8, page 3 shall be ignored. */
}
/**
 * asd_init_cseq_scratch -- setup and init CSEQ
 * @asd_ha: pointer to host adapter structure
 *
 * Setup and initialize Central sequencers. Initialize the mode
 * independent and dependent scratch page to the default settings.
 */
static void asd_init_cseq_scratch(struct asd_ha_struct *asd_ha)
{
        asd_init_cseq_mip(asd_ha);
        asd_init_cseq_mdp(asd_ha);
}
/**
 * asd_init_lseq_mip -- initialize LSEQ Mode independent pages 0-3
 * @asd_ha: pointer to host adapter structure
 */
static void asd_init_lseq_mip(struct asd_ha_struct *asd_ha, u8 lseq)
{
        int i;

        /* LSEQ Mode independent page 0 setup. */
        asd_write_reg_word(asd_ha, LmSEQ_Q_TGTXFR_HEAD(lseq), 0xFFFF);
        asd_write_reg_word(asd_ha, LmSEQ_Q_TGTXFR_TAIL(lseq), 0xFFFF);
        asd_write_reg_byte(asd_ha, LmSEQ_LINK_NUMBER(lseq), lseq);
        asd_write_reg_byte(asd_ha, LmSEQ_SCRATCH_FLAGS(lseq),
                           ASD_NOTIFY_ENABLE_SPINUP);
        asd_write_reg_dword(asd_ha, LmSEQ_CONNECTION_STATE(lseq),0x08000000);
        asd_write_reg_word(asd_ha, LmSEQ_CONCTL(lseq), 0);
        asd_write_reg_byte(asd_ha, LmSEQ_CONSTAT(lseq), 0);
        asd_write_reg_byte(asd_ha, LmSEQ_CONNECTION_MODES(lseq), 0);
        asd_write_reg_word(asd_ha, LmSEQ_REG1_ISR(lseq), 0);
        asd_write_reg_word(asd_ha, LmSEQ_REG2_ISR(lseq), 0);
        asd_write_reg_word(asd_ha, LmSEQ_REG3_ISR(lseq), 0);
        asd_write_reg_dword(asd_ha, LmSEQ_REG0_ISR(lseq), 0);
        asd_write_reg_dword(asd_ha, LmSEQ_REG0_ISR(lseq)+4, 0);

        /* LSEQ Mode independent page 1 setup. */
        asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR0(lseq), 0xFFFF);
        asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR1(lseq), 0xFFFF);
        asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR2(lseq), 0xFFFF);
        asd_write_reg_word(asd_ha, LmSEQ_EST_NEXUS_SCBPTR3(lseq), 0xFFFF);
        asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE0(lseq), 0);
        asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE1(lseq), 0);
        asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE2(lseq), 0);
        asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_OPCODE3(lseq), 0);
        asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_HEAD(lseq), 0);
        asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_SCB_TAIL(lseq), 0);
        asd_write_reg_byte(asd_ha, LmSEQ_EST_NEXUS_BUF_AVAIL(lseq), 0);
        asd_write_reg_dword(asd_ha, LmSEQ_TIMEOUT_CONST(lseq), 0);
        asd_write_reg_word(asd_ha, LmSEQ_ISR_SAVE_SINDEX(lseq), 0);
        asd_write_reg_word(asd_ha, LmSEQ_ISR_SAVE_DINDEX(lseq), 0);

        /* LSEQ Mode Independent page 2 setup. */
        asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR0(lseq), 0xFFFF);
        asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR1(lseq), 0xFFFF);
        asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR2(lseq), 0xFFFF);
        asd_write_reg_word(asd_ha, LmSEQ_EMPTY_SCB_PTR3(lseq), 0xFFFF);
        asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD0(lseq), 0);
        asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD1(lseq), 0);
        asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD2(lseq), 0);
        asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_OPCD3(lseq), 0);
        asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_HEAD(lseq), 0);
        asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_SCB_TAIL(lseq), 0);
        asd_write_reg_byte(asd_ha, LmSEQ_EMPTY_BUFS_AVAIL(lseq), 0);
        for (i = 0; i < 12; i += 4)
                asd_write_reg_dword(asd_ha, LmSEQ_ATA_SCR_REGS(lseq) + i, 0);

        /* LSEQ Mode Independent page 3 setup. */

        /* Device present timer timeout */
        asd_write_reg_dword(asd_ha, LmSEQ_DEV_PRES_TMR_TOUT_CONST(lseq),
                            ASD_DEV_PRESENT_TIMEOUT);

        /* SATA interlock timer disabled */
        asd_write_reg_dword(asd_ha, LmSEQ_SATA_INTERLOCK_TIMEOUT(lseq),
                            ASD_SATA_INTERLOCK_TIMEOUT);

        /* STP shutdown timer timeout constant, IGNORED by the sequencer. */
        asd_write_reg_dword(asd_ha, LmSEQ_STP_SHUTDOWN_TIMEOUT(lseq),
                            ASD_STP_SHUTDOWN_TIMEOUT);

        asd_write_reg_dword(asd_ha, LmSEQ_SRST_ASSERT_TIMEOUT(lseq),
                            ASD_SRST_ASSERT_TIMEOUT);

        asd_write_reg_dword(asd_ha, LmSEQ_RCV_FIS_TIMEOUT(lseq),
                            ASD_RCV_FIS_TIMEOUT);

        asd_write_reg_dword(asd_ha, LmSEQ_ONE_MILLISEC_TIMEOUT(lseq),
                            ASD_ONE_MILLISEC_TIMEOUT);

        asd_write_reg_dword(asd_ha, LmSEQ_TEN_MS_COMINIT_TIMEOUT(lseq),
                            ASD_TEN_MILLISEC_TIMEOUT);

        asd_write_reg_dword(asd_ha, LmSEQ_SMP_RCV_TIMEOUT(lseq),
                            ASD_SMP_RCV_TIMEOUT);
}
/**
 * asd_init_lseq_mdp -- initialize LSEQ mode dependent pages.
 * @asd_ha: pointer to host adapter structure
 */
static void asd_init_lseq_mdp(struct asd_ha_struct *asd_ha, int lseq)
{
        int i;
        u32 moffs;
        u16 ret_addr[] = {
                0xFFFF,         /* mode 0 */
                0xFFFF,         /* mode 1 */
                mode2_task,     /* mode 2 */
                0,
                0xFFFF,         /* mode 4/5 */
                0xFFFF,         /* mode 4/5 */
        };

        /*
         * Mode 0,1,2 and 4/5 have common field on page 0 for the first
         * 14 bytes.
         */
        for (i = 0; i < 3; i++) {
                moffs = i * LSEQ_MODE_SCRATCH_SIZE;
                asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR(lseq)+moffs,
                                   ret_addr[i]);
                asd_write_reg_word(asd_ha, LmSEQ_REG0_MODE(lseq)+moffs, 0);
                asd_write_reg_word(asd_ha, LmSEQ_MODE_FLAGS(lseq)+moffs, 0);
                asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR2(lseq)+moffs,0xFFFF);
                asd_write_reg_word(asd_ha, LmSEQ_RET_ADDR1(lseq)+moffs,0xFFFF);
                asd_write_reg_byte(asd_ha, LmSEQ_OPCODE_TO_CSEQ(lseq)+moffs,0);
                asd_write_reg_word(asd_ha, LmSEQ_DATA_TO_CSEQ(lseq)+moffs,0);
        }
        /*
         * Mode 5 page 0 overlaps the same scratch page with Mode 0 page 3.
         */
        asd_write_reg_word(asd_ha,
                           LmSEQ_RET_ADDR(lseq)+LSEQ_MODE5_PAGE0_OFFSET,
                           ret_addr[5]);
        asd_write_reg_word(asd_ha,
                           LmSEQ_REG0_MODE(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0);
        asd_write_reg_word(asd_ha,
                           LmSEQ_MODE_FLAGS(lseq)+LSEQ_MODE5_PAGE0_OFFSET, 0);
        asd_write_reg_word(asd_ha,
                           LmSEQ_RET_ADDR2(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0xFFFF);
        asd_write_reg_word(asd_ha,
                           LmSEQ_RET_ADDR1(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0xFFFF);
        asd_write_reg_byte(asd_ha,
                           LmSEQ_OPCODE_TO_CSEQ(lseq)+LSEQ_MODE5_PAGE0_OFFSET,0);
        asd_write_reg_word(asd_ha,
                           LmSEQ_DATA_TO_CSEQ(lseq)+LSEQ_MODE5_PAGE0_OFFSET, 0);
        /* LSEQ Mode dependent 0, page 0 setup. */
        asd_write_reg_word(asd_ha, LmSEQ_FIRST_INV_DDB_SITE(lseq),
                           (u16)asd_ha->hw_prof.max_ddbs);
        asd_write_reg_word(asd_ha, LmSEQ_EMPTY_TRANS_CTX(lseq), 0);
        asd_write_reg_word(asd_ha, LmSEQ_RESP_LEN(lseq), 0);
        asd_write_reg_word(asd_ha, LmSEQ_FIRST_INV_SCB_SITE(lseq),
                           (u16)last_scb_site_no+1);
        asd_write_reg_word(asd_ha, LmSEQ_INTEN_SAVE(lseq),
                           (u16) ((LmM0INTEN_MASK & 0xFFFF0000) >> 16));
        asd_write_reg_word(asd_ha, LmSEQ_INTEN_SAVE(lseq) + 2,
                           (u16) LmM0INTEN_MASK & 0xFFFF);
        asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_FRM_LEN(lseq), 0);
        asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_PROTOCOL(lseq), 0);
        asd_write_reg_byte(asd_ha, LmSEQ_RESP_STATUS(lseq), 0);
        asd_write_reg_byte(asd_ha, LmSEQ_LAST_LOADED_SGE(lseq), 0);
        asd_write_reg_word(asd_ha, LmSEQ_SAVE_SCBPTR(lseq), 0);

        /* LSEQ mode dependent, mode 1, page 0 setup. */
        asd_write_reg_word(asd_ha, LmSEQ_Q_XMIT_HEAD(lseq), 0xFFFF);
        asd_write_reg_word(asd_ha, LmSEQ_M1_EMPTY_TRANS_CTX(lseq), 0);
        asd_write_reg_word(asd_ha, LmSEQ_INI_CONN_TAG(lseq), 0);
        asd_write_reg_byte(asd_ha, LmSEQ_FAILED_OPEN_STATUS(lseq), 0);
        asd_write_reg_byte(asd_ha, LmSEQ_XMIT_REQUEST_TYPE(lseq), 0);
        asd_write_reg_byte(asd_ha, LmSEQ_M1_RESP_STATUS(lseq), 0);
        asd_write_reg_byte(asd_ha, LmSEQ_M1_LAST_LOADED_SGE(lseq), 0);
        asd_write_reg_word(asd_ha, LmSEQ_M1_SAVE_SCBPTR(lseq), 0);

        /* LSEQ Mode dependent mode 2, page 0 setup */
        asd_write_reg_word(asd_ha, LmSEQ_PORT_COUNTER(lseq), 0);
        asd_write_reg_word(asd_ha, LmSEQ_PM_TABLE_PTR(lseq), 0);
        asd_write_reg_word(asd_ha, LmSEQ_SATA_INTERLOCK_TMR_SAVE(lseq), 0);
        asd_write_reg_word(asd_ha, LmSEQ_IP_BITL(lseq), 0);
        asd_write_reg_word(asd_ha, LmSEQ_COPY_SMP_CONN_TAG(lseq), 0);
        asd_write_reg_byte(asd_ha, LmSEQ_P0M2_OFFS1AH(lseq), 0);

        /* LSEQ Mode dependent, mode 4/5, page 0 setup. */
        asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_STATUS(lseq), 0);
        asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_MODE(lseq), 0);
        asd_write_reg_word(asd_ha, LmSEQ_Q_LINK_HEAD(lseq), 0xFFFF);
        asd_write_reg_byte(asd_ha, LmSEQ_LINK_RST_ERR(lseq), 0);
        asd_write_reg_byte(asd_ha, LmSEQ_SAVED_OOB_SIGNALS(lseq), 0);
        asd_write_reg_byte(asd_ha, LmSEQ_SAS_RESET_MODE(lseq), 0);
        asd_write_reg_byte(asd_ha, LmSEQ_LINK_RESET_RETRY_COUNT(lseq), 0);
        asd_write_reg_byte(asd_ha, LmSEQ_NUM_LINK_RESET_RETRIES(lseq), 0);
        asd_write_reg_word(asd_ha, LmSEQ_OOB_INT_ENABLES(lseq), 0);
        /*
         * Set the desired interval between transmissions of the NOTIFY
         * (ENABLE SPINUP) primitive. Must be initialized to val - 1.
         */
        asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_TIMEOUT(lseq),
                           ASD_NOTIFY_TIMEOUT - 1);
        /* No delay for the first NOTIFY to be sent to the attached target. */
        asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_DOWN_COUNT(lseq),
                           ASD_NOTIFY_DOWN_COUNT);
        asd_write_reg_word(asd_ha, LmSEQ_NOTIFY_TIMER_INITIAL_COUNT(lseq),
                           ASD_NOTIFY_DOWN_COUNT);
        /* LSEQ Mode dependent, mode 0 and 1, page 1 setup. */
        for (i = 0; i < 2; i++) {
                int j;
                /* Start from Page 1 of Mode 0 and 1. */
                moffs = LSEQ_PAGE_SIZE + i*LSEQ_MODE_SCRATCH_SIZE;
                /* All the fields of page 1 can be initialized to 0. */
                for (j = 0; j < LSEQ_PAGE_SIZE; j += 4)
                        asd_write_reg_dword(asd_ha, LmSCRATCH(lseq)+moffs+j,0);
        }
        /* LSEQ Mode dependent, mode 2, page 1 setup. */
        asd_write_reg_dword(asd_ha, LmSEQ_INVALID_DWORD_COUNT(lseq), 0);
        asd_write_reg_dword(asd_ha, LmSEQ_DISPARITY_ERROR_COUNT(lseq), 0);
        asd_write_reg_dword(asd_ha, LmSEQ_LOSS_OF_SYNC_COUNT(lseq), 0);

        /* LSEQ Mode dependent, mode 4/5, page 1. */
        for (i = 0; i < LSEQ_PAGE_SIZE; i += 4)
                asd_write_reg_dword(asd_ha, LmSEQ_FRAME_TYPE_MASK(lseq)+i, 0);
        asd_write_reg_byte(asd_ha, LmSEQ_FRAME_TYPE_MASK(lseq), 0xFF);
        asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq), 0xFF);
        asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq)+1,0xFF);
        asd_write_reg_byte(asd_ha, LmSEQ_HASHED_DEST_ADDR_MASK(lseq)+2,0xFF);
        asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq), 0xFF);
        asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq)+1, 0xFF);
        asd_write_reg_byte(asd_ha, LmSEQ_HASHED_SRC_ADDR_MASK(lseq)+2, 0xFF);
        asd_write_reg_dword(asd_ha, LmSEQ_DATA_OFFSET(lseq), 0xFFFFFFFF);

        /* LSEQ Mode dependent, mode 0, page 2 setup. */
        asd_write_reg_dword(asd_ha, LmSEQ_SMP_RCV_TIMER_TERM_TS(lseq), 0);
        asd_write_reg_byte(asd_ha, LmSEQ_DEVICE_BITS(lseq), 0);
        asd_write_reg_word(asd_ha, LmSEQ_SDB_DDB(lseq), 0);
        asd_write_reg_byte(asd_ha, LmSEQ_SDB_NUM_TAGS(lseq), 0);
        asd_write_reg_byte(asd_ha, LmSEQ_SDB_CURR_TAG(lseq), 0);

        /* LSEQ Mode Dependent 1, page 2 setup. */
        asd_write_reg_dword(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(lseq), 0);
        asd_write_reg_dword(asd_ha, LmSEQ_TX_ID_ADDR_FRAME(lseq)+4, 0);
        asd_write_reg_dword(asd_ha, LmSEQ_OPEN_TIMER_TERM_TS(lseq), 0);
        asd_write_reg_dword(asd_ha, LmSEQ_SRST_AS_TIMER_TERM_TS(lseq), 0);
        asd_write_reg_dword(asd_ha, LmSEQ_LAST_LOADED_SG_EL(lseq), 0);

        /* LSEQ Mode Dependent 2, page 2 setup. */
        /* The LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS is IGNORED by the sequencer,
         * so it is simply set to 0. */
        asd_write_reg_dword(asd_ha, LmSEQ_STP_SHUTDOWN_TIMER_TERM_TS(lseq),0);
        asd_write_reg_dword(asd_ha, LmSEQ_CLOSE_TIMER_TERM_TS(lseq), 0);
        asd_write_reg_dword(asd_ha, LmSEQ_BREAK_TIMER_TERM_TS(lseq), 0);
        asd_write_reg_dword(asd_ha, LmSEQ_DWS_RESET_TIMER_TERM_TS(lseq), 0);
        asd_write_reg_dword(asd_ha,LmSEQ_SATA_INTERLOCK_TIMER_TERM_TS(lseq),0);
        asd_write_reg_dword(asd_ha, LmSEQ_MCTL_TIMER_TERM_TS(lseq), 0);

        /* LSEQ Mode Dependent 4/5, page 2 setup. */
        asd_write_reg_dword(asd_ha, LmSEQ_COMINIT_TIMER_TERM_TS(lseq), 0);
        asd_write_reg_dword(asd_ha, LmSEQ_RCV_ID_TIMER_TERM_TS(lseq), 0);
        asd_write_reg_dword(asd_ha, LmSEQ_RCV_FIS_TIMER_TERM_TS(lseq), 0);
        asd_write_reg_dword(asd_ha, LmSEQ_DEV_PRES_TIMER_TERM_TS(lseq), 0);
}
/**
 * asd_init_lseq_scratch -- setup and init link sequencers
 * @asd_ha: pointer to host adapter struct
 */
static void asd_init_lseq_scratch(struct asd_ha_struct *asd_ha)
{
        u8 lseq;
        u8 lseq_mask;

        lseq_mask = asd_ha->hw_prof.enabled_phys;
        for_each_sequencer(lseq_mask, lseq_mask, lseq) {
                asd_init_lseq_mip(asd_ha, lseq);
                asd_init_lseq_mdp(asd_ha, lseq);
        }
}
/**
 * asd_init_scb_sites -- initialize sequencer SCB sites (memory).
 * @asd_ha: pointer to host adapter structure
 *
 * This should be done before initializing common CSEQ and LSEQ
 * scratch since those areas depend on some computed values here,
 * last_scb_site_no, etc.
 */
static void asd_init_scb_sites(struct asd_ha_struct *asd_ha)
{
        u16 site_no;
        u16 max_scbs = 0;

        for (site_no = asd_ha->hw_prof.max_scbs-1;
             site_no != (u16) -1;
             site_no--) {
                u16 i;

                /* Initialize all fields in the SCB site to 0. */
                for (i = 0; i < ASD_SCB_SIZE; i += 4)
                        asd_scbsite_write_dword(asd_ha, site_no, i, 0);

                /* Initialize SCB Site Opcode field to invalid. */
                asd_scbsite_write_byte(asd_ha, site_no,
                                       offsetof(struct scb_header, opcode),
                                       0xFF);

                /* Initialize SCB Site Flags field to mean a response
                 * frame has been received.  This means inadvertent
                 * frames received to be dropped. */
                asd_scbsite_write_byte(asd_ha, site_no, 0x49, 0x01);

                /* Workaround needed by SEQ to fix a SATA issue is to exclude
                 * certain SCB sites from the free list. */
                if (!SCB_SITE_VALID(site_no))
                        continue;

                if (last_scb_site_no == 0)
                        last_scb_site_no = site_no;

                /* For every SCB site, we need to initialize the
                 * following fields: Q_NEXT, SCB_OPCODE, SCB_FLAGS,
                 * and SG Element Flag. */

                /* Q_NEXT field of the last SCB is invalidated. */
                asd_scbsite_write_word(asd_ha, site_no, 0, first_scb_site_no);
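                /* Each pass links this site's Q_NEXT to the previously
                 * processed (higher-numbered) site; the first valid site
                 * keeps the initial 0xFFFF, which terminates the built-in
                 * free SCB list. */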
                first_scb_site_no = site_no;
                max_scbs++;
        }
        asd_ha->hw_prof.max_scbs = max_scbs;
        ASD_DPRINTK("max_scbs:%d\n", asd_ha->hw_prof.max_scbs);
        ASD_DPRINTK("first_scb_site_no:0x%x\n", first_scb_site_no);
        ASD_DPRINTK("last_scb_site_no:0x%x\n", last_scb_site_no);
}
/**
 * asd_init_cseq_cio - initialize CSEQ CIO registers
 * @asd_ha: pointer to host adapter structure
 */
static void asd_init_cseq_cio(struct asd_ha_struct *asd_ha)
{
        int i;

        asd_write_reg_byte(asd_ha, CSEQCOMINTEN, 0);
        asd_write_reg_byte(asd_ha, CSEQDLCTL, ASD_DL_SIZE_BITS);
        asd_write_reg_byte(asd_ha, CSEQDLOFFS, 0);
        asd_write_reg_byte(asd_ha, CSEQDLOFFS+1, 0);
        asd_ha->seq.scbpro = 0;
        asd_write_reg_dword(asd_ha, SCBPRO, 0);
        asd_write_reg_dword(asd_ha, CSEQCON, 0);

        /* Initialize CSEQ Mode 11 Interrupt Vectors.
         * The addresses are 16 bit wide and in dword units.
         * The values of their macros are in byte units.
         * Thus we have to divide by 4. */
        asd_write_reg_word(asd_ha, CM11INTVEC0, cseq_vecs[0]);
        asd_write_reg_word(asd_ha, CM11INTVEC1, cseq_vecs[1]);
        asd_write_reg_word(asd_ha, CM11INTVEC2, cseq_vecs[2]);

        /* Enable ARP2HALTC (ARP2 Halted from Halt Code Write). */
        asd_write_reg_byte(asd_ha, CARP2INTEN, EN_ARP2HALTC);

        /* Initialize CSEQ Scratch Page to 0x04. */
        asd_write_reg_byte(asd_ha, CSCRATCHPAGE, 0x04);

        /* Initialize CSEQ Mode[0-8] Dependent registers. */
        /* Initialize Scratch Page to 0. */
        for (i = 0; i < 9; i++)
                asd_write_reg_byte(asd_ha, CMnSCRATCHPAGE(i), 0);

        /* Reset the ARP2 Program Count. */
        asd_write_reg_word(asd_ha, CPRGMCNT, cseq_idle_loop);

        for (i = 0; i < 8; i++) {
                /* Initialize Mode n Link m Interrupt Enable. */
                asd_write_reg_dword(asd_ha, CMnINTEN(i), EN_CMnRSPMBXF);
                /* Initialize Mode n Request Mailbox. */
                asd_write_reg_dword(asd_ha, CMnREQMBX(i), 0);
        }
}
/**
 * asd_init_lseq_cio -- initialize LmSEQ CIO registers
 * @asd_ha: pointer to host adapter structure
 */
static void asd_init_lseq_cio(struct asd_ha_struct *asd_ha, int lseq)
{
        u8 *sas_addr;
        int i;

        /* Enable ARP2HALTC (ARP2 Halted from Halt Code Write). */
        asd_write_reg_dword(asd_ha, LmARP2INTEN(lseq), EN_ARP2HALTC);

        asd_write_reg_byte(asd_ha, LmSCRATCHPAGE(lseq), 0);

        /* Initialize Mode 0,1, and 2 SCRATCHPAGE to 0. */
        for (i = 0; i < 3; i++)
                asd_write_reg_byte(asd_ha, LmMnSCRATCHPAGE(lseq, i), 0);

        /* Initialize Mode 5 SCRATCHPAGE to 0. */
        asd_write_reg_byte(asd_ha, LmMnSCRATCHPAGE(lseq, 5), 0);

        asd_write_reg_dword(asd_ha, LmRSPMBX(lseq), 0);
        /* Initialize Mode 0,1,2 and 5 Interrupt Enable and
         * Interrupt registers. */
        asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 0), LmM0INTEN_MASK);
        asd_write_reg_dword(asd_ha, LmMnINT(lseq, 0), 0xFFFFFFFF);
        asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 1), LmM1INTEN_MASK);
        asd_write_reg_dword(asd_ha, LmMnINT(lseq, 1), 0xFFFFFFFF);
        asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 2), LmM2INTEN_MASK);
        asd_write_reg_dword(asd_ha, LmMnINT(lseq, 2), 0xFFFFFFFF);
        asd_write_reg_dword(asd_ha, LmMnINTEN(lseq, 5), LmM5INTEN_MASK);
        asd_write_reg_dword(asd_ha, LmMnINT(lseq, 5), 0xFFFFFFFF);

        /* Enable HW Timer status. */
        asd_write_reg_byte(asd_ha, LmHWTSTATEN(lseq), LmHWTSTATEN_MASK);

        /* Enable Primitive Status 0 and 1. */
        asd_write_reg_dword(asd_ha, LmPRIMSTAT0EN(lseq), LmPRIMSTAT0EN_MASK);
        asd_write_reg_dword(asd_ha, LmPRIMSTAT1EN(lseq), LmPRIMSTAT1EN_MASK);

        /* Enable Frame Error. */
        asd_write_reg_dword(asd_ha, LmFRMERREN(lseq), LmFRMERREN_MASK);
        asd_write_reg_byte(asd_ha, LmMnHOLDLVL(lseq, 0), 0x50);

        /* Initialize Mode 0 Transfer Level to 512. */
        asd_write_reg_byte(asd_ha, LmMnXFRLVL(lseq, 0), LmMnXFRLVL_512);
        /* Initialize Mode 1 Transfer Level to 256. */
        asd_write_reg_byte(asd_ha, LmMnXFRLVL(lseq, 1), LmMnXFRLVL_256);

        /* Initialize Program Count. */
        asd_write_reg_word(asd_ha, LmPRGMCNT(lseq), lseq_idle_loop);

        /* Enable Blind SG Move. */
        asd_write_reg_dword(asd_ha, LmMODECTL(lseq), LmBLIND48);
        asd_write_reg_word(asd_ha, LmM3SATATIMER(lseq),
                           ASD_SATA_INTERLOCK_TIMEOUT);

        (void) asd_read_reg_dword(asd_ha, LmREQMBX(lseq));

        /* Clear Primitive Status 0 and 1. */
        asd_write_reg_dword(asd_ha, LmPRMSTAT0(lseq), 0xFFFFFFFF);
        asd_write_reg_dword(asd_ha, LmPRMSTAT1(lseq), 0xFFFFFFFF);

        /* Clear HW Timer status. */
        asd_write_reg_byte(asd_ha, LmHWTSTAT(lseq), 0xFF);

        /* Clear DMA Errors for Mode 0 and 1. */
        asd_write_reg_byte(asd_ha, LmMnDMAERRS(lseq, 0), 0xFF);
        asd_write_reg_byte(asd_ha, LmMnDMAERRS(lseq, 1), 0xFF);

        /* Clear SG DMA Errors for Mode 0 and 1. */
        asd_write_reg_byte(asd_ha, LmMnSGDMAERRS(lseq, 0), 0xFF);
        asd_write_reg_byte(asd_ha, LmMnSGDMAERRS(lseq, 1), 0xFF);

        /* Clear Mode 0 Buffer Parity Error. */
        asd_write_reg_byte(asd_ha, LmMnBUFSTAT(lseq, 0), LmMnBUFPERR);

        /* Clear Mode 0 Frame Error register. */
        asd_write_reg_dword(asd_ha, LmMnFRMERR(lseq, 0), 0xFFFFFFFF);

        /* Reset LSEQ external interrupt arbiter. */
        asd_write_reg_byte(asd_ha, LmARP2INTCTL(lseq), RSTINTCTL);

        /* Set the Phy SAS for the LmSEQ WWN. */
        sas_addr = asd_ha->phys[lseq].phy_desc->sas_addr;
        for (i = 0; i < SAS_ADDR_SIZE; i++)
                asd_write_reg_byte(asd_ha, LmWWN(lseq) + i, sas_addr[i]);

        /* Set the Transmit Size to 1024 bytes, 0 = 256 Dwords. */
        asd_write_reg_byte(asd_ha, LmMnXMTSIZE(lseq, 1), 0);

        /* Set the Bus Inactivity Time Limit Timer. */
        asd_write_reg_word(asd_ha, LmBITL_TIMER(lseq), 9);

        /* Enable SATA Port Multiplier. */
        asd_write_reg_byte(asd_ha, LmMnSATAFS(lseq, 1), 0x80);

        /* Initialize Interrupt Vector[0-10] address in Mode 3.
         * See the comment on CSEQ_INT_* */
        asd_write_reg_word(asd_ha, LmM3INTVEC0(lseq), lseq_vecs[0]);
        asd_write_reg_word(asd_ha, LmM3INTVEC1(lseq), lseq_vecs[1]);
        asd_write_reg_word(asd_ha, LmM3INTVEC2(lseq), lseq_vecs[2]);
        asd_write_reg_word(asd_ha, LmM3INTVEC3(lseq), lseq_vecs[3]);
        asd_write_reg_word(asd_ha, LmM3INTVEC4(lseq), lseq_vecs[4]);
        asd_write_reg_word(asd_ha, LmM3INTVEC5(lseq), lseq_vecs[5]);
        asd_write_reg_word(asd_ha, LmM3INTVEC6(lseq), lseq_vecs[6]);
        asd_write_reg_word(asd_ha, LmM3INTVEC7(lseq), lseq_vecs[7]);
        asd_write_reg_word(asd_ha, LmM3INTVEC8(lseq), lseq_vecs[8]);
        asd_write_reg_word(asd_ha, LmM3INTVEC9(lseq), lseq_vecs[9]);
        asd_write_reg_word(asd_ha, LmM3INTVEC10(lseq), lseq_vecs[10]);
        /*
         * Program the Link LED control, applicable only for
         * Chip Rev. B or later.
         */
        asd_write_reg_dword(asd_ha, LmCONTROL(lseq),
                            (LEDTIMER | LEDMODE_TXRX | LEDTIMERS_100ms));

        /* Set the Align Rate for SAS and STP mode. */
        asd_write_reg_byte(asd_ha, LmM1SASALIGN(lseq), SAS_ALIGN_DEFAULT);
        asd_write_reg_byte(asd_ha, LmM1STPALIGN(lseq), STP_ALIGN_DEFAULT);
}
/**
 * asd_post_init_cseq -- clear CSEQ Mode n Int. status and Response mailbox
 * @asd_ha: pointer to host adapter struct
 */
static void asd_post_init_cseq(struct asd_ha_struct *asd_ha)
{
        int i;

        for (i = 0; i < 8; i++)
                asd_write_reg_dword(asd_ha, CMnINT(i), 0xFFFFFFFF);
        for (i = 0; i < 8; i++)
                asd_read_reg_dword(asd_ha, CMnRSPMBX(i));
        /* Reset the external interrupt arbiter. */
        asd_write_reg_byte(asd_ha, CARP2INTCTL, RSTINTCTL);
}
/**
 * asd_init_ddb_0 -- initialize DDB 0
 * @asd_ha: pointer to host adapter structure
 *
 * Initialize DDB site 0 which is used internally by the sequencer.
 */
static void asd_init_ddb_0(struct asd_ha_struct *asd_ha)
{
        int i;

        /* Zero out the DDB explicitly */
        for (i = 0; i < sizeof(struct asd_ddb_seq_shared); i += 4)
                asd_ddbsite_write_dword(asd_ha, 0, i, 0);

        asd_ddbsite_write_word(asd_ha, 0,
                offsetof(struct asd_ddb_seq_shared, q_free_ddb_head), 0);
        asd_ddbsite_write_word(asd_ha, 0,
                offsetof(struct asd_ddb_seq_shared, q_free_ddb_tail),
                asd_ha->hw_prof.max_ddbs-1);
        asd_ddbsite_write_word(asd_ha, 0,
                offsetof(struct asd_ddb_seq_shared, q_free_ddb_cnt), 0);
        asd_ddbsite_write_word(asd_ha, 0,
                offsetof(struct asd_ddb_seq_shared, q_used_ddb_head), 0xFFFF);
        asd_ddbsite_write_word(asd_ha, 0,
                offsetof(struct asd_ddb_seq_shared, q_used_ddb_tail), 0xFFFF);
        asd_ddbsite_write_word(asd_ha, 0,
                offsetof(struct asd_ddb_seq_shared, shared_mem_lock), 0);
        asd_ddbsite_write_word(asd_ha, 0,
                offsetof(struct asd_ddb_seq_shared, smp_conn_tag), 0);
        asd_ddbsite_write_word(asd_ha, 0,
                offsetof(struct asd_ddb_seq_shared, est_nexus_buf_cnt), 0);
        asd_ddbsite_write_word(asd_ha, 0,
                offsetof(struct asd_ddb_seq_shared, est_nexus_buf_thresh),
                asd_ha->hw_prof.num_phys * 2);
        asd_ddbsite_write_byte(asd_ha, 0,
                offsetof(struct asd_ddb_seq_shared, settable_max_contexts),0);
        asd_ddbsite_write_byte(asd_ha, 0,
                offsetof(struct asd_ddb_seq_shared, conn_not_active), 0xFF);
        asd_ddbsite_write_byte(asd_ha, 0,
                offsetof(struct asd_ddb_seq_shared, phy_is_up), 0x00);
        /* DDB 0 is reserved */
        set_bit(0, asd_ha->hw_prof.ddb_bitmap);
}
static void asd_seq_init_ddb_sites(struct asd_ha_struct *asd_ha)
{
        unsigned int i;
        unsigned int ddb_site;

        for (ddb_site = 0 ; ddb_site < ASD_MAX_DDBS; ddb_site++)
                for (i = 0; i < sizeof(struct asd_ddb_ssp_smp_target_port); i += 4)
                        asd_ddbsite_write_dword(asd_ha, ddb_site, i, 0);
}
/**
 * asd_seq_setup_seqs -- setup and initialize central and link sequencers
 * @asd_ha: pointer to host adapter structure
 */
static void asd_seq_setup_seqs(struct asd_ha_struct *asd_ha)
{
        int lseq;
        u8  lseq_mask;

        /* Initialize DDB sites */
        asd_seq_init_ddb_sites(asd_ha);

        /* Initialize SCB sites. Done first to compute some values which
         * the rest of the init code depends on. */
        asd_init_scb_sites(asd_ha);

        /* Initialize CSEQ Scratch RAM registers. */
        asd_init_cseq_scratch(asd_ha);

        /* Initialize LmSEQ Scratch RAM registers. */
        asd_init_lseq_scratch(asd_ha);

        /* Initialize CSEQ CIO registers. */
        asd_init_cseq_cio(asd_ha);

        asd_init_ddb_0(asd_ha);

        /* Initialize LmSEQ CIO registers. */
        lseq_mask = asd_ha->hw_prof.enabled_phys;
        for_each_sequencer(lseq_mask, lseq_mask, lseq)
                asd_init_lseq_cio(asd_ha, lseq);
        asd_post_init_cseq(asd_ha);
}
/**
 * asd_seq_start_cseq -- start the central sequencer, CSEQ
 * @asd_ha: pointer to host adapter structure
 */
static int asd_seq_start_cseq(struct asd_ha_struct *asd_ha)
{
        /* Reset the ARP2 instruction to location zero. */
        asd_write_reg_word(asd_ha, CPRGMCNT, cseq_idle_loop);

        /* Unpause the CSEQ */
        return asd_unpause_cseq(asd_ha);
}
/**
 * asd_seq_start_lseq -- start a link sequencer
 * @asd_ha: pointer to host adapter structure
 * @lseq: the link sequencer of interest
 */
static int asd_seq_start_lseq(struct asd_ha_struct *asd_ha, int lseq)
{
        /* Reset the ARP2 instruction to location zero. */
        asd_write_reg_word(asd_ha, LmPRGMCNT(lseq), lseq_idle_loop);

        /* Unpause the LmSEQ */
        return asd_seq_unpause_lseq(asd_ha, lseq);
}
int asd_release_firmware(void)
{
        if (sequencer_fw)
                release_firmware(sequencer_fw);
        return 0;
}
static int asd_request_firmware(struct asd_ha_struct *asd_ha)
{
        int err, i;
        struct sequencer_file_header header, *hdr_ptr;
        u32 csum = 0;
        u16 *ptr_cseq_vecs, *ptr_lseq_vecs;

        if (sequencer_fw)
                /* already loaded */
                return 0;

        err = request_firmware(&sequencer_fw,
                               SAS_RAZOR_SEQUENCER_FW_FILE,
                               &asd_ha->pcidev->dev);
        if (err)
                return err;

        hdr_ptr = (struct sequencer_file_header *)sequencer_fw->data;

        header.csum = le32_to_cpu(hdr_ptr->csum);
        header.major = le32_to_cpu(hdr_ptr->major);
        header.minor = le32_to_cpu(hdr_ptr->minor);
        header.cseq_table_offset = le32_to_cpu(hdr_ptr->cseq_table_offset);
        header.cseq_table_size = le32_to_cpu(hdr_ptr->cseq_table_size);
        header.lseq_table_offset = le32_to_cpu(hdr_ptr->lseq_table_offset);
        header.lseq_table_size = le32_to_cpu(hdr_ptr->lseq_table_size);
        header.cseq_code_offset = le32_to_cpu(hdr_ptr->cseq_code_offset);
        header.cseq_code_size = le32_to_cpu(hdr_ptr->cseq_code_size);
        header.lseq_code_offset = le32_to_cpu(hdr_ptr->lseq_code_offset);
        header.lseq_code_size = le32_to_cpu(hdr_ptr->lseq_code_size);
        header.mode2_task = le16_to_cpu(hdr_ptr->mode2_task);
        header.cseq_idle_loop = le16_to_cpu(hdr_ptr->cseq_idle_loop);
        header.lseq_idle_loop = le16_to_cpu(hdr_ptr->lseq_idle_loop);

        for (i = sizeof(header.csum); i < sequencer_fw->size; i++)
                csum += sequencer_fw->data[i];
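
        /* The firmware checksum is a simple byte-wise sum over the whole
         * image, skipping only the leading csum field itself. */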
        if (csum != header.csum) {
                asd_printk("Firmware file checksum mismatch\n");
                return -EINVAL;
        }

        if (header.cseq_table_size != CSEQ_NUM_VECS ||
            header.lseq_table_size != LSEQ_NUM_VECS) {
                asd_printk("Firmware file table size mismatch\n");
                return -EINVAL;
        }

        asd_printk("Found sequencer Firmware version %d.%d (%s)\n",
                   header.major, header.minor, hdr_ptr->version);

        if (header.major != SAS_RAZOR_SEQUENCER_FW_MAJOR) {
                asd_printk("Firmware Major Version Mismatch;"
                           "driver requires version %d.X",
                           SAS_RAZOR_SEQUENCER_FW_MAJOR);
                return -EINVAL;
        }

        ptr_cseq_vecs = (u16 *)&sequencer_fw->data[header.cseq_table_offset];
        ptr_lseq_vecs = (u16 *)&sequencer_fw->data[header.lseq_table_offset];
        mode2_task = header.mode2_task;
        cseq_idle_loop = header.cseq_idle_loop;
        lseq_idle_loop = header.lseq_idle_loop;

        for (i = 0; i < CSEQ_NUM_VECS; i++)
                cseq_vecs[i] = le16_to_cpu(ptr_cseq_vecs[i]);

        for (i = 0; i < LSEQ_NUM_VECS; i++)
                lseq_vecs[i] = le16_to_cpu(ptr_lseq_vecs[i]);

        cseq_code = &sequencer_fw->data[header.cseq_code_offset];
        cseq_code_size = header.cseq_code_size;
        lseq_code = &sequencer_fw->data[header.lseq_code_offset];
        lseq_code_size = header.lseq_code_size;

        return 0;
}
int asd_init_seqs(struct asd_ha_struct *asd_ha)
{
        int err;

        err = asd_request_firmware(asd_ha);

        if (err) {
                asd_printk("Failed to load sequencer firmware file %s, error %d\n",
                           SAS_RAZOR_SEQUENCER_FW_FILE, err);
                return err;
        }

        err = asd_seq_download_seqs(asd_ha);
        if (err) {
                asd_printk("couldn't download sequencers for %s\n",
                           pci_name(asd_ha->pcidev));
                return err;
        }

        asd_seq_setup_seqs(asd_ha);

        return 0;
}
int asd_start_seqs(struct asd_ha_struct *asd_ha)
{
        int err;
        u8  lseq_mask;
        int lseq;

        err = asd_seq_start_cseq(asd_ha);
        if (err) {
                asd_printk("couldn't start CSEQ for %s\n",
                           pci_name(asd_ha->pcidev));
                return err;
        }

        lseq_mask = asd_ha->hw_prof.enabled_phys;
        for_each_sequencer(lseq_mask, lseq_mask, lseq) {
                err = asd_seq_start_lseq(asd_ha, lseq);
                if (err) {
                        asd_printk("couldn't start LSEQ %d for %s\n", lseq,
                                   pci_name(asd_ha->pcidev));
                        return err;
                }
        }

        return 0;
}
/**
 * asd_update_port_links -- update port_map_by_links and phy_is_up
 * @phy: pointer to the phy which has been added to a port
 *
 * 1) When a link reset has completed and we got BYTES DMAED with a
 * valid frame we call this function for that phy, to indicate that
 * the phy is up, i.e. we update the phy_is_up in DDB 0.  The
 * sequencer checks phy_is_up when pending SCBs are to be sent, and
 * when an open address frame has been received.
 *
 * 2) When we know of ports, we call this function to update the map
 * of phys participating in that port, i.e. we update the
 * port_map_by_links in DDB 0.  When a HARD_RESET primitive has been
 * received, the sequencer disables all phys in that port.
 * port_map_by_links is also used as the conn_mask byte in the
 * initiator/target port DDB.
 */
void asd_update_port_links(struct asd_ha_struct *asd_ha, struct asd_phy *phy)
{
        const u8 phy_mask = (u8) phy->asd_port->phy_mask;
        u8  phy_is_up;
        u8  mask;
        int i, err;
        unsigned long flags;

        spin_lock_irqsave(&asd_ha->hw_prof.ddb_lock, flags);
        for_each_phy(phy_mask, mask, i)
                asd_ddbsite_write_byte(asd_ha, 0,
                                       offsetof(struct asd_ddb_seq_shared,
                                                port_map_by_links)+i, phy_mask);
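
        /* Update phy_is_up with a read-modify-write, retrying a bounded
         * number of times since the DDB site update can fail (e.g. with
         * -EFAULT on a parity error, reported below). */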
        for (i = 0; i < 12; i++) {
                phy_is_up = asd_ddbsite_read_byte(asd_ha, 0,
                        offsetof(struct asd_ddb_seq_shared, phy_is_up));
                err = asd_ddbsite_update_byte(asd_ha, 0,
                        offsetof(struct asd_ddb_seq_shared, phy_is_up),
                        phy_is_up,
                        phy_is_up | phy_mask);
                if (!err)
                        break;
                else if (err == -EFAULT) {
                        asd_printk("phy_is_up: parity error in DDB 0\n");
                        break;
                }
        }
        spin_unlock_irqrestore(&asd_ha->hw_prof.ddb_lock, flags);

        if (err)
                asd_printk("couldn't update DDB 0:error:%d\n", err);
}
MODULE_FIRMWARE(SAS_RAZOR_SEQUENCER_FW_FILE);