/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
int lio_cn6xxx_soft_reset(struct octeon_device *oct)
{
	octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF);

	dev_dbg(&oct->pci_dev->dev, "BIST enabled for soft reset\n");

	lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_BIST);
	octeon_write_csr64(oct, CN6XXX_SLI_SCRATCH1, 0x1234ULL);

	lio_pci_readq(oct, CN6XXX_CIU_SOFT_RST);
	lio_pci_writeq(oct, 1, CN6XXX_CIU_SOFT_RST);

	/* make sure that the reset is written before starting timer */
	mmiowb();

	/* Wait for 100ms as Octeon resets. */
	mdelay(100);

	if (octeon_read_csr64(oct, CN6XXX_SLI_SCRATCH1) == 0x1234ULL) {
		dev_err(&oct->pci_dev->dev, "Soft reset failed\n");
		return 1;
	}

	dev_dbg(&oct->pci_dev->dev, "Reset completed\n");
	octeon_write_csr64(oct, CN6XXX_WIN_WR_MASK_REG, 0xFF);

	return 0;
}
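
/* Note on the reset check above (illustrative): SLI_SCRATCH1 is a
 * scratch CSR whose contents do not survive a chip reset. Writing the
 * arbitrary sentinel 0x1234 before asserting CIU_SOFT_RST and reading
 * it back afterwards distinguishes the two outcomes: a successful
 * reset clears the register (read != 0x1234), while a failed reset
 * leaves the sentinel intact (read == 0x1234).
 */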
void lio_cn6xxx_enable_error_reporting(struct octeon_device *oct)
{
	u32 val;

	pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);
	if (val & 0x000c0000) {
		dev_err(&oct->pci_dev->dev, "PCI-E Link error detected: 0x%08x\n",
			val);
	}

	val |= 0xf; /* Enable Link error reporting */

	dev_dbg(&oct->pci_dev->dev, "Enabling PCI-E error reporting..\n");
	pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
}
void lio_cn6xxx_setup_pcie_mps(struct octeon_device *oct,
			       enum octeon_pcie_mps mps)
{
	u32 val;
	u64 r64;

	/* Read config register for MPS */
	pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);

	if (mps == PCIE_MPS_DEFAULT) {
		mps = ((val & (0x7 << 5)) >> 5);
	} else {
		val &= ~(0x7 << 5); /* Turn off any MPS bits */
		val |= (mps << 5);  /* Set MPS */
		pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
	}

	/* Set MPS in DPI_SLI_PRT0_CFG to the same value. */
	r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
	r64 |= (mps << 4);
	lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
}
void lio_cn6xxx_setup_pcie_mrrs(struct octeon_device *oct,
				enum octeon_pcie_mrrs mrrs)
{
	u32 val;
	u64 r64;

	/* Read config register for MRRS */
	pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, &val);

	if (mrrs == PCIE_MRRS_DEFAULT) {
		mrrs = ((val & (0x7 << 12)) >> 12);
	} else {
		val &= ~(0x7 << 12); /* Turn off any MRRS bits */
		val |= (mrrs << 12); /* Set MRRS */
		pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_DEVCTL, val);
	}

	/* Set MRRS in SLI_S2M_PORT0_CTL to the same value. */
	r64 = octeon_read_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port));
	r64 |= mrrs;
	octeon_write_csr64(oct, CN6XXX_SLI_S2M_PORTX_CTL(oct->pcie_port), r64);

	/* Set MRRS in DPI_SLI_PRT0_CFG to the same value. */
	r64 = lio_pci_readq(oct, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
	r64 |= mrrs;
	lio_pci_writeq(oct, r64, CN6XXX_DPI_SLI_PRTX_CFG(oct->pcie_port));
}
u32 lio_cn6xxx_coprocessor_clock(struct octeon_device *oct)
{
	/* Bits 29:24 of MIO_RST_BOOT holds the ref. clock multiplier
	 * for the SLI block.
	 */
	return ((lio_pci_readq(oct, CN6XXX_MIO_RST_BOOT) >> 24) & 0x3f) * 50;
}
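
/* Worked example (illustrative values): with the 50 MHz reference
 * clock and a multiplier field of 12 read from MIO_RST_BOOT[29:24],
 * the function returns 12 * 50 = 600, i.e. a 600 MHz clock.
 */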
u32 lio_cn6xxx_get_oq_ticks(struct octeon_device *oct,
			    u32 time_intr_in_us)
{
	/* This gives the SLI clock per microsec */
	u32 oqticks_per_us = lio_cn6xxx_coprocessor_clock(oct);

	/* core clock per us / oq ticks will be fractional. To avoid that
	 * we use the method below.
	 */

	/* This gives the clock cycles per millisecond */
	oqticks_per_us *= 1000;

	/* This gives the oq ticks (1024 core clock cycles) per millisecond */
	oqticks_per_us /= 1024;

	/* time_intr is in microseconds. The next 2 steps gives the oq ticks
	 * corresponding to time_intr.
	 */
	oqticks_per_us *= time_intr_in_us;
	oqticks_per_us /= 1000;

	return oqticks_per_us;
}
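
/* Worked example (illustrative values): for a 600 MHz SLI clock and
 * time_intr_in_us = 100:
 *   600 * 1000   = 600000  clock cycles per millisecond
 *   600000 / 1024 = 585    oq ticks per millisecond
 *   585 * 100    = 58500
 *   58500 / 1000 = 58      oq ticks for a 100 us interrupt threshold
 */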
void lio_cn6xxx_setup_global_input_regs(struct octeon_device *oct)
{
	/* Select Round-Robin Arb, ES, RO, NS for Input Queues */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_INPUT_CONTROL,
			 CN6XXX_INPUT_CTL_MASK);

	/* Instruction Read Size - Max 4 instructions per PCIE Read */
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_INSTR_RD_SIZE,
			   0xFFFFFFFFFFFFFFFFULL);

	/* Select PCIE Port for all Input rings. */
	octeon_write_csr64(oct, CN6XXX_SLI_IN_PCIE_PORT,
			   (oct->pcie_port * 0x5555555555555555ULL));
}
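
/* Note on the 0x5555555555555555ULL multiplier (illustrative): the
 * per-ring port-selection registers hold one 2-bit field per ring,
 * and 0x5555... has 01 repeated in every 2-bit slot. Multiplying it
 * by the port number therefore replicates that port into every field
 * in one write: port 0 produces all zeros, port 1 produces
 * 0x5555...5555.
 */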
static void lio_cn66xx_setup_pkt_ctl_regs(struct octeon_device *oct)
{
	u64 pktctl;

	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

	pktctl = octeon_read_csr64(oct, CN6XXX_SLI_PKT_CTL);

	/* 66XX SPECIFIC */
	if (CFG_GET_OQ_MAX_Q(cn6xxx->conf) <= 4)
		/* Disable RING_EN if only up to 4 rings are used. */
		pktctl &= ~(1 << 4);
	else
		pktctl |= (1 << 4);

	if (CFG_GET_IS_SLI_BP_ON(cn6xxx->conf))
		pktctl |= 0xF;
	else
		/* Disable per-port backpressure. */
		pktctl &= ~0xF;
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_CTL, pktctl);
}
void lio_cn6xxx_setup_global_output_regs(struct octeon_device *oct)
{
	u32 time_threshold;
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

	/* / Select PCI-E Port for all Output queues */
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_PCIE_PORT64,
			   (oct->pcie_port * 0x5555555555555555ULL));

	if (CFG_GET_IS_SLI_BP_ON(cn6xxx->conf)) {
		octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 32);
	} else {
		/* / Set Output queue watermark to 0 to disable backpressure */
		octeon_write_csr64(oct, CN6XXX_SLI_OQ_WMARK, 0);
	}

	/* / Select Packet count instead of bytes for SLI_PKTi_CNTS[CNT] */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_BMODE, 0);

	/* / Select ES, RO, NS setting from register for Output Queue Packet
	 * Address
	 */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_DPADDR, 0xFFFFFFFF);

	/* / No Relaxed Ordering, No Snoop, 64-bit swap for Output
	 * Queue ScatterList
	 */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_SLIST_ROR, 0);
	octeon_write_csr(oct, CN6XXX_SLI_PKT_SLIST_NS, 0);

	/* / ENDIAN_SPECIFIC CHANGES - 0 works for LE. */
#ifdef __BIG_ENDIAN_BITFIELD
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_SLIST_ES64,
			   0x5555555555555555ULL);
#else
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_SLIST_ES64, 0ULL);
#endif

	/* / No Relaxed Ordering, No Snoop, 64-bit swap for Output Queue Data */
	octeon_write_csr(oct, CN6XXX_SLI_PKT_DATA_OUT_ROR, 0);
	octeon_write_csr(oct, CN6XXX_SLI_PKT_DATA_OUT_NS, 0);
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_DATA_OUT_ES64,
			   0x5555555555555555ULL);

	/* / Set up interrupt packet and time threshold */
	octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_PKTS,
			 (u32)CFG_GET_OQ_INTR_PKT(cn6xxx->conf));
	time_threshold =
		lio_cn6xxx_get_oq_ticks(oct, (u32)
					CFG_GET_OQ_INTR_TIME(cn6xxx->conf));

	octeon_write_csr(oct, CN6XXX_SLI_OQ_INT_LEVEL_TIME, time_threshold);
}
static int lio_cn6xxx_setup_device_regs(struct octeon_device *oct)
{
	lio_cn6xxx_setup_pcie_mps(oct, PCIE_MPS_DEFAULT);
	lio_cn6xxx_setup_pcie_mrrs(oct, PCIE_MRRS_512B);
	lio_cn6xxx_enable_error_reporting(oct);

	lio_cn6xxx_setup_global_input_regs(oct);
	lio_cn66xx_setup_pkt_ctl_regs(oct);
	lio_cn6xxx_setup_global_output_regs(oct);

	/* Default error timeout value should be 0x200000 to avoid host hang
	 * when a read accesses an invalid register
	 */
	octeon_write_csr64(oct, CN6XXX_SLI_WINDOW_CTL, 0x200000ULL);

	return 0;
}
void lio_cn6xxx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
{
	struct octeon_instr_queue *iq = oct->instr_queue[iq_no];

	octeon_write_csr64(oct, CN6XXX_SLI_IQ_PKT_INSTR_HDR64(iq_no), 0);

	/* Write the start of the input queue's ring and its size */
	octeon_write_csr64(oct, CN6XXX_SLI_IQ_BASE_ADDR64(iq_no),
			   iq->base_addr_dma);
	octeon_write_csr(oct, CN6XXX_SLI_IQ_SIZE(iq_no), iq->max_count);

	/* Remember the doorbell & instruction count register addr for this
	 * queue
	 */
	iq->doorbell_reg = oct->mmio[0].hw_addr + CN6XXX_SLI_IQ_DOORBELL(iq_no);
	iq->inst_cnt_reg = oct->mmio[0].hw_addr
			   + CN6XXX_SLI_IQ_INSTR_COUNT(iq_no);
	dev_dbg(&oct->pci_dev->dev, "InstQ[%d]:dbell reg @ 0x%p instcnt_reg @ 0x%p\n",
		iq_no, iq->doorbell_reg, iq->inst_cnt_reg);

	/* Store the current instruction counter
	 * (used in flush_iq calculation)
	 */
	iq->reset_instr_cnt = readl(iq->inst_cnt_reg);
}
static void lio_cn66xx_setup_iq_regs(struct octeon_device *oct, u32 iq_no)
{
	lio_cn6xxx_setup_iq_regs(oct, iq_no);

	/* Backpressure for this queue - WMARK set to all F's. This effectively
	 * disables the backpressure mechanism.
	 */
	octeon_write_csr64(oct, CN66XX_SLI_IQ_BP64(iq_no),
			   (0xFFFFFFFFULL << 32));
}
void lio_cn6xxx_setup_oq_regs(struct octeon_device *oct, u32 oq_no)
{
	u32 intr;
	struct octeon_droq *droq = oct->droq[oq_no];

	octeon_write_csr64(oct, CN6XXX_SLI_OQ_BASE_ADDR64(oq_no),
			   droq->desc_ring_dma);
	octeon_write_csr(oct, CN6XXX_SLI_OQ_SIZE(oq_no), droq->max_count);

	octeon_write_csr(oct, CN6XXX_SLI_OQ_BUFF_INFO_SIZE(oq_no),
			 droq->buffer_size);

	/* Get the mapped address of the pkt_sent and pkts_credit regs */
	droq->pkts_sent_reg =
		oct->mmio[0].hw_addr + CN6XXX_SLI_OQ_PKTS_SENT(oq_no);
	droq->pkts_credit_reg =
		oct->mmio[0].hw_addr + CN6XXX_SLI_OQ_PKTS_CREDIT(oq_no);

	/* Enable this output queue to generate Packet Timer Interrupt */
	intr = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
	intr |= (1 << oq_no);
	octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB, intr);

	/* Enable this output queue to generate Packet Count Interrupt */
	intr = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
	intr |= (1 << oq_no);
	octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB, intr);
}
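
/* Note (illustrative): each output queue interrupts on whichever
 * condition trips first - the packet-count threshold
 * (SLI_PKT_CNT_INT_ENB) or the packet-timer threshold
 * (SLI_PKT_TIME_INT_ENB). The thresholds themselves come from the
 * config and are programmed globally in
 * lio_cn6xxx_setup_global_output_regs() via
 * CN6XXX_SLI_OQ_INT_LEVEL_PKTS and CN6XXX_SLI_OQ_INT_LEVEL_TIME.
 */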
int lio_cn6xxx_enable_io_queues(struct octeon_device *oct)
{
	u32 mask;

	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_SIZE);
	mask |= oct->io_qmask.iq64B;
	octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_SIZE, mask);

	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB);
	mask |= oct->io_qmask.iq;
	octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask);

	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
	mask |= oct->io_qmask.oq;
	octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask);

	return 0;
}
void lio_cn6xxx_disable_io_queues(struct octeon_device *oct)
{
	int i;
	u32 mask, loop = HZ;
	u32 d32;

	/* Reset the Enable bits for Input Queues. */
	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB);
	mask ^= oct->io_qmask.iq;
	octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, mask);

	/* Wait until hardware indicates that the queues are out of reset. */
	mask = (u32)oct->io_qmask.iq;
	d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
	while (((d32 & mask) != mask) && loop--) {
		d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_IQ);
		schedule_timeout_uninterruptible(1);
	}

	/* Reset the doorbell register for each Input queue. */
	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		if (!(oct->io_qmask.iq & BIT_ULL(i)))
			continue;
		octeon_write_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i), 0xFFFFFFFF);
		d32 = octeon_read_csr(oct, CN6XXX_SLI_IQ_DOORBELL(i));
	}

	/* Reset the Enable bits for Output Queues. */
	mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_OUT_ENB);
	mask ^= oct->io_qmask.oq;
	octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, mask);

	/* Wait until hardware indicates that the queues are out of reset. */
	loop = HZ;
	mask = (u32)oct->io_qmask.oq;
	d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
	while (((d32 & mask) != mask) && loop--) {
		d32 = octeon_read_csr(oct, CN6XXX_SLI_PORT_IN_RST_OQ);
		schedule_timeout_uninterruptible(1);
	}

	/* Reset the doorbell register for each Output queue. */
	for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
		if (!(oct->io_qmask.oq & BIT_ULL(i)))
			continue;
		octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i), 0xFFFFFFFF);
		d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_CREDIT(i));

		d32 = octeon_read_csr(oct, CN6XXX_SLI_OQ_PKTS_SENT(i));
		octeon_write_csr(oct, CN6XXX_SLI_OQ_PKTS_SENT(i), d32);
	}

	d32 = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT);
	if (d32)
		octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT, d32);

	d32 = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT);
	if (d32)
		octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, d32);
}
void
lio_cn6xxx_bar1_idx_setup(struct octeon_device *oct,
			  u64 core_addr,
			  u32 idx,
			  int valid)
{
	u64 bar1;

	if (valid == 0) {
		bar1 = lio_pci_readq(oct,
				     CN6XXX_BAR1_REG(idx, oct->pcie_port));
		lio_pci_writeq(oct, (bar1 & 0xFFFFFFFEULL),
			       CN6XXX_BAR1_REG(idx, oct->pcie_port));
		bar1 = lio_pci_readq(oct,
				     CN6XXX_BAR1_REG(idx, oct->pcie_port));
		return;
	}

	/* Bits 17:4 of the PCI_BAR1_INDEXx stores bits 35:22 of
	 * the Core Addr
	 */
	lio_pci_writeq(oct, (((core_addr >> 22) << 4) | PCI_BAR1_MASK),
		       CN6XXX_BAR1_REG(idx, oct->pcie_port));

	bar1 = lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
}
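
/* Worked example (illustrative values): mapping a core address of
 * 0x10000000 takes bits 35:22 of the address (0x10000000 >> 22 =
 * 0x40), shifts them into bits 17:4 of the index register (0x40 << 4
 * = 0x400), and ORs in PCI_BAR1_MASK, so the register is written with
 * 0x400 | PCI_BAR1_MASK. The read-back simply flushes the posted
 * write before the mapping is used.
 */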
void lio_cn6xxx_bar1_idx_write(struct octeon_device *oct,
			       u32 idx,
			       u32 mask)
{
	lio_pci_writeq(oct, mask, CN6XXX_BAR1_REG(idx, oct->pcie_port));
}
u32 lio_cn6xxx_bar1_idx_read(struct octeon_device *oct, u32 idx)
{
	return (u32)lio_pci_readq(oct, CN6XXX_BAR1_REG(idx, oct->pcie_port));
}
u32
lio_cn6xxx_update_read_index(struct octeon_instr_queue *iq)
{
	u32 new_idx = readl(iq->inst_cnt_reg);

	/* The new instr cnt reg is a 32-bit counter that can roll over. We
	 * have noted the counter's initial value at init time into
	 * reset_instr_cnt
	 */
	if (iq->reset_instr_cnt < new_idx)
		new_idx -= iq->reset_instr_cnt;
	else
		new_idx += (0xffffffff - iq->reset_instr_cnt) + 1;

	/* Modulo of the new index with the IQ size will give us
	 * the new index.
	 */
	new_idx %= iq->max_count;

	return new_idx;
}
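
/* Worked rollover example (illustrative values): if reset_instr_cnt
 * was 0xfffffff0 at init and the counter has since wrapped around to
 * 0x10, the else branch computes 0x10 + (0xffffffff - 0xfffffff0) + 1
 * = 0x20, i.e. 32 instructions consumed across the wrap; the final
 * modulo with max_count folds that into a valid ring index.
 */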
void lio_cn6xxx_enable_interrupt(struct octeon_device *oct,
				 u8 unused __attribute__((unused)))
{
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
	u64 mask = cn6xxx->intr_mask64 | CN6XXX_INTR_DMA0_FORCE;

	/* Enable Interrupt */
	writeq(mask, cn6xxx->intr_enb_reg64);
}
void lio_cn6xxx_disable_interrupt(struct octeon_device *oct,
				  u8 unused __attribute__((unused)))
{
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

	/* Disable Interrupts */
	writeq(0, cn6xxx->intr_enb_reg64);

	/* make sure interrupts are really disabled */
	mmiowb();
}
static void lio_cn6xxx_get_pcie_qlmport(struct octeon_device *oct)
{
	/* CN63xx Pass2 and newer parts implement the SLI_MAC_NUMBER register
	 * to determine the PCIE port #
	 */
	oct->pcie_port = octeon_read_csr(oct, CN6XXX_SLI_MAC_NUMBER) & 0xff;

	dev_dbg(&oct->pci_dev->dev, "Using PCIE Port %d\n", oct->pcie_port);
}
static void
lio_cn6xxx_process_pcie_error_intr(struct octeon_device *oct, u64 intr64)
{
	dev_err(&oct->pci_dev->dev, "Error Intr: 0x%016llx\n",
		CVM_CAST64(intr64));
}
static int lio_cn6xxx_process_droq_intr_regs(struct octeon_device *oct)
{
	struct octeon_droq *droq;
	u32 oq_no;
	u32 pkt_count, droq_time_mask, droq_mask, droq_int_enb;
	u32 droq_cnt_enb, droq_cnt_mask;

	droq_cnt_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT_ENB);
	droq_cnt_mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_CNT_INT);
	droq_mask = droq_cnt_mask & droq_cnt_enb;

	droq_time_mask = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT);
	droq_int_enb = octeon_read_csr(oct, CN6XXX_SLI_PKT_TIME_INT_ENB);
	droq_mask |= (droq_time_mask & droq_int_enb);

	droq_mask &= oct->io_qmask.oq;

	oct->droq_intr = 0;

	for (oq_no = 0; oq_no < MAX_OCTEON_OUTPUT_QUEUES(oct); oq_no++) {
		if (!(droq_mask & BIT_ULL(oq_no)))
			continue;

		droq = oct->droq[oq_no];
		pkt_count = octeon_droq_check_hw_for_pkts(droq);
		if (pkt_count) {
			oct->droq_intr |= BIT_ULL(oq_no);
			if (droq->ops.poll_mode) {
				u32 value;
				u32 reg;

				struct octeon_cn6xxx *cn6xxx =
					(struct octeon_cn6xxx *)oct->chip;

				/* disable interrupts for this droq */
				spin_lock
					(&cn6xxx->lock_for_droq_int_enb_reg);
				reg = CN6XXX_SLI_PKT_TIME_INT_ENB;
				value = octeon_read_csr(oct, reg);
				value &= ~(1 << oq_no);
				octeon_write_csr(oct, reg, value);
				reg = CN6XXX_SLI_PKT_CNT_INT_ENB;
				value = octeon_read_csr(oct, reg);
				value &= ~(1 << oq_no);
				octeon_write_csr(oct, reg, value);

				/* Ensure that the enable register is written.
				 */
				mmiowb();

				spin_unlock(&cn6xxx->lock_for_droq_int_enb_reg);
			}
		}
	}

	droq_time_mask &= oct->io_qmask.oq;
	droq_cnt_mask &= oct->io_qmask.oq;

	/* Reset the PKT_CNT/TIME_INT registers. */
	if (droq_time_mask)
		octeon_write_csr(oct, CN6XXX_SLI_PKT_TIME_INT, droq_time_mask);

	if (droq_cnt_mask) /* reset PKT_CNT register:66xx */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_CNT_INT, droq_cnt_mask);

	return 0;
}
irqreturn_t lio_cn6xxx_process_interrupt_regs(void *dev)
{
	struct octeon_device *oct = (struct octeon_device *)dev;
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;
	u64 intr64;

	intr64 = readq(cn6xxx->intr_sum_reg64);

	/* If our device has interrupted, then proceed.
	 * Also check for all f's if interrupt was triggered on an error
	 * and the PCI read fails.
	 */
	if (!intr64 || (intr64 == 0xFFFFFFFFFFFFFFFFULL))
		return IRQ_NONE;

	oct->int_status = 0;

	if (intr64 & CN6XXX_INTR_ERR)
		lio_cn6xxx_process_pcie_error_intr(oct, intr64);

	if (intr64 & CN6XXX_INTR_PKT_DATA) {
		lio_cn6xxx_process_droq_intr_regs(oct);
		oct->int_status |= OCT_DEV_INTR_PKT_DATA;
	}

	if (intr64 & CN6XXX_INTR_DMA0_FORCE)
		oct->int_status |= OCT_DEV_INTR_DMA0_FORCE;

	if (intr64 & CN6XXX_INTR_DMA1_FORCE)
		oct->int_status |= OCT_DEV_INTR_DMA1_FORCE;

	/* Clear the current interrupts */
	writeq(intr64, cn6xxx->intr_sum_reg64);

	return IRQ_HANDLED;
}
void lio_cn6xxx_setup_reg_address(struct octeon_device *oct,
				  void *chip,
				  struct octeon_reg_list *reg_list)
{
	u8 __iomem *bar0_pciaddr = oct->mmio[0].hw_addr;
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)chip;

	reg_list->pci_win_wr_addr_hi =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR_HI);
	reg_list->pci_win_wr_addr_lo =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR_LO);
	reg_list->pci_win_wr_addr =
		(u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_ADDR64);

	reg_list->pci_win_rd_addr_hi =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR_HI);
	reg_list->pci_win_rd_addr_lo =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR_LO);
	reg_list->pci_win_rd_addr =
		(u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_ADDR64);

	reg_list->pci_win_wr_data_hi =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA_HI);
	reg_list->pci_win_wr_data_lo =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA_LO);
	reg_list->pci_win_wr_data =
		(u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_WR_DATA64);

	reg_list->pci_win_rd_data_hi =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA_HI);
	reg_list->pci_win_rd_data_lo =
		(u32 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA_LO);
	reg_list->pci_win_rd_data =
		(u64 __iomem *)(bar0_pciaddr + CN6XXX_WIN_RD_DATA64);

	lio_cn6xxx_get_pcie_qlmport(oct);

	cn6xxx->intr_sum_reg64 = bar0_pciaddr + CN6XXX_SLI_INT_SUM64;
	cn6xxx->intr_mask64 = CN6XXX_INTR_MASK;
	cn6xxx->intr_enb_reg64 =
		bar0_pciaddr + CN6XXX_SLI_INT_ENB64(oct->pcie_port);
}
int lio_setup_cn66xx_octeon_device(struct octeon_device *oct)
{
	struct octeon_cn6xxx *cn6xxx = (struct octeon_cn6xxx *)oct->chip;

	if (octeon_map_pci_barx(oct, 0, 0))
		return 1;

	if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
		dev_err(&oct->pci_dev->dev, "%s CN66XX BAR1 map failed\n",
			__func__);
		octeon_unmap_pci_barx(oct, 0);
		return 1;
	}

	spin_lock_init(&cn6xxx->lock_for_droq_int_enb_reg);

	oct->fn_list.setup_iq_regs = lio_cn66xx_setup_iq_regs;
	oct->fn_list.setup_oq_regs = lio_cn6xxx_setup_oq_regs;

	oct->fn_list.soft_reset = lio_cn6xxx_soft_reset;
	oct->fn_list.setup_device_regs = lio_cn6xxx_setup_device_regs;
	oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index;

	oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup;
	oct->fn_list.bar1_idx_write = lio_cn6xxx_bar1_idx_write;
	oct->fn_list.bar1_idx_read = lio_cn6xxx_bar1_idx_read;

	oct->fn_list.process_interrupt_regs = lio_cn6xxx_process_interrupt_regs;
	oct->fn_list.enable_interrupt = lio_cn6xxx_enable_interrupt;
	oct->fn_list.disable_interrupt = lio_cn6xxx_disable_interrupt;

	oct->fn_list.enable_io_queues = lio_cn6xxx_enable_io_queues;
	oct->fn_list.disable_io_queues = lio_cn6xxx_disable_io_queues;

	lio_cn6xxx_setup_reg_address(oct, oct->chip, &oct->reg_list);

	cn6xxx->conf = (struct octeon_config *)
		       oct_get_config_info(oct, LIO_210SV);
	if (!cn6xxx->conf) {
		dev_err(&oct->pci_dev->dev, "%s No Config found for CN66XX\n",
			__func__);
		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);
		return 1;
	}

	oct->coproc_clock_rate = 1000000ULL * lio_cn6xxx_coprocessor_clock(oct);

	return 0;
}
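
/* Typical usage (illustrative sketch, not a call site in this file):
 * the PCI probe path allocates the octeon_device for a CN66XX part
 * and calls this function to map BARs, populate oct->fn_list and
 * fetch the LIO_210SV configuration, then drives the chip through
 * the function-pointer table, e.g.:
 *
 *	if (lio_setup_cn66xx_octeon_device(oct))
 *		return -EINVAL;	 (BAR map or config lookup failed)
 *	oct->fn_list.setup_device_regs(oct);
 */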
int lio_validate_cn6xxx_config_info(struct octeon_device *oct,
				    struct octeon_config *conf6xxx)
{
	if (CFG_GET_IQ_MAX_Q(conf6xxx) > CN6XXX_MAX_INPUT_QUEUES) {
		dev_err(&oct->pci_dev->dev, "%s: Num IQ (%d) exceeds Max (%d)\n",
			__func__, CFG_GET_IQ_MAX_Q(conf6xxx),
			CN6XXX_MAX_INPUT_QUEUES);
		return 1;
	}

	if (CFG_GET_OQ_MAX_Q(conf6xxx) > CN6XXX_MAX_OUTPUT_QUEUES) {
		dev_err(&oct->pci_dev->dev, "%s: Num OQ (%d) exceeds Max (%d)\n",
			__func__, CFG_GET_OQ_MAX_Q(conf6xxx),
			CN6XXX_MAX_OUTPUT_QUEUES);
		return 1;
	}

	if (CFG_GET_IQ_INSTR_TYPE(conf6xxx) != OCTEON_32BYTE_INSTR &&
	    CFG_GET_IQ_INSTR_TYPE(conf6xxx) != OCTEON_64BYTE_INSTR) {
		dev_err(&oct->pci_dev->dev, "%s: Invalid instr type for IQ\n",
			__func__);
		return 1;
	}
	if (!CFG_GET_OQ_REFILL_THRESHOLD(conf6xxx)) {
		dev_err(&oct->pci_dev->dev, "%s: Invalid parameter for OQ\n",
			__func__);
		return 1;
	}

	if (!(CFG_GET_OQ_INTR_TIME(conf6xxx))) {
		dev_err(&oct->pci_dev->dev, "%s: No Time Interrupt for OQ\n",
			__func__);
		return 1;
	}

	return 0;
}