/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 */

#include <sys/types.h>
#include <sys/cmn_err.h>
#include <sys/vmsystm.h>
#include <sys/machsystm.h>	/* lddphys() */
#include <sys/iommutsb.h>
#include <sys/hotplug/pci/pcie_hp.h>
#include "oberon_regs.h"

/*
 * Registers that need to be saved and restored during suspend/resume.
 */

/*
 * Registers in the PEC Module.
 * LPU_RESET should be set to 0ull during resume.
 *
 * This array is in (reg, chip) form.  PX_CHIP_UNIDENTIFIED is for all
 * chips, PX_CHIP_FIRE for Fire only, and PX_CHIP_OBERON for Oberon only.
 */
static struct px_pec_regs {
	uint64_t reg;
	uint64_t chip;
} pec_config_state_regs[] = {
	{PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
	{ILU_ERROR_LOG_ENABLE, PX_CHIP_UNIDENTIFIED},
	{ILU_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
	{TLU_CONTROL, PX_CHIP_UNIDENTIFIED},
	{TLU_OTHER_EVENT_LOG_ENABLE, PX_CHIP_UNIDENTIFIED},
	{TLU_OTHER_EVENT_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
	{TLU_DEVICE_CONTROL, PX_CHIP_UNIDENTIFIED},
	{TLU_LINK_CONTROL, PX_CHIP_UNIDENTIFIED},
	{TLU_UNCORRECTABLE_ERROR_LOG_ENABLE, PX_CHIP_UNIDENTIFIED},
	{TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
	{TLU_CORRECTABLE_ERROR_LOG_ENABLE, PX_CHIP_UNIDENTIFIED},
	{TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
	{DLU_LINK_LAYER_CONFIG, PX_CHIP_OBERON},
	{DLU_FLOW_CONTROL_UPDATE_CONTROL, PX_CHIP_OBERON},
	{DLU_TXLINK_REPLAY_TIMER_THRESHOLD, PX_CHIP_OBERON},
	{LPU_LINK_LAYER_INTERRUPT_MASK, PX_CHIP_FIRE},
	{LPU_PHY_INTERRUPT_MASK, PX_CHIP_FIRE},
	{LPU_RECEIVE_PHY_INTERRUPT_MASK, PX_CHIP_FIRE},
	{LPU_TRANSMIT_PHY_INTERRUPT_MASK, PX_CHIP_FIRE},
	{LPU_GIGABLAZE_GLUE_INTERRUPT_MASK, PX_CHIP_FIRE},
	{LPU_LTSSM_INTERRUPT_MASK, PX_CHIP_FIRE},
	{LPU_RESET, PX_CHIP_FIRE},
	{LPU_DEBUG_CONFIG, PX_CHIP_FIRE},
	{LPU_INTERRUPT_MASK, PX_CHIP_FIRE},
	{LPU_LINK_LAYER_CONFIG, PX_CHIP_FIRE},
	{LPU_FLOW_CONTROL_UPDATE_CONTROL, PX_CHIP_FIRE},
	{LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD, PX_CHIP_FIRE},
	{LPU_TXLINK_REPLAY_TIMER_THRESHOLD, PX_CHIP_FIRE},
	{LPU_REPLAY_BUFFER_MAX_ADDRESS, PX_CHIP_FIRE},
	{LPU_TXLINK_RETRY_FIFO_POINTER, PX_CHIP_FIRE},
	{LPU_LTSSM_CONFIG2, PX_CHIP_FIRE},
	{LPU_LTSSM_CONFIG3, PX_CHIP_FIRE},
	{LPU_LTSSM_CONFIG4, PX_CHIP_FIRE},
	{LPU_LTSSM_CONFIG5, PX_CHIP_FIRE},
	{DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE, PX_CHIP_UNIDENTIFIED},
	{DMC_DEBUG_SELECT_FOR_PORT_A, PX_CHIP_UNIDENTIFIED},
	{DMC_DEBUG_SELECT_FOR_PORT_B, PX_CHIP_UNIDENTIFIED}
};

#define	PEC_KEYS	\
	((sizeof (pec_config_state_regs)) / sizeof (struct px_pec_regs))

#define	PEC_SIZE	(PEC_KEYS * sizeof (uint64_t))
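
/*
 * Illustrative sketch (not the actual save/restore code, which lives in
 * the suspend/resume routines): a save pass would walk
 * pec_config_state_regs[], skip entries whose chip field names a chip
 * other than PX_CHIP_TYPE(pxu_p) or PX_CHIP_UNIDENTIFIED, and CSR_XR()
 * each remaining reg into a PEC_SIZE byte buffer; the restore pass would
 * replay the same walk with CSR_XS(), forcing LPU_RESET back to 0ull as
 * noted above.
 */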

/*
 * Registers for the MMU module.
 * MMU_TTE_CACHE_INVALIDATE needs to be cleared. (-1ull)
 */
static uint64_t mmu_config_state_regs[] = {
	MMU_CONTROL_AND_STATUS,
	MMU_ERROR_LOG_ENABLE
};
#define	MMU_SIZE (sizeof (mmu_config_state_regs))
#define	MMU_KEYS (MMU_SIZE / sizeof (uint64_t))

/*
 * Registers for the IB Module
 */
static uint64_t ib_config_state_regs[] = {
	IMU_ERROR_LOG_ENABLE
};
#define	IB_SIZE (sizeof (ib_config_state_regs))
#define	IB_KEYS (IB_SIZE / sizeof (uint64_t))
#define	IB_MAP_SIZE (INTERRUPT_MAPPING_ENTRIES * sizeof (uint64_t))

/*
 * Registers for the JBC module.
 * JBC_ERROR_STATUS_CLEAR needs to be cleared. (-1ull)
 */
static uint64_t jbc_config_state_regs[] = {
	JBC_FATAL_RESET_ENABLE,
	JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE,
	JBC_ERROR_LOG_ENABLE
};
#define	JBC_SIZE (sizeof (jbc_config_state_regs))
#define	JBC_KEYS (JBC_SIZE / sizeof (uint64_t))

/*
 * Registers for the UBC module.
 * UBC_ERROR_STATUS_CLEAR needs to be cleared. (-1ull)
 */
static uint64_t ubc_config_state_regs[] = {
	UBC_ERROR_LOG_ENABLE
};
#define	UBC_SIZE (sizeof (ubc_config_state_regs))
#define	UBC_KEYS (UBC_SIZE / sizeof (uint64_t))

static uint64_t msiq_config_other_regs[] = {
	ERR_NONFATAL_MAPPING
};
#define	MSIQ_OTHER_SIZE	(sizeof (msiq_config_other_regs))
#define	MSIQ_OTHER_KEYS	(MSIQ_OTHER_SIZE / sizeof (uint64_t))

#define	MSIQ_STATE_SIZE		(EVENT_QUEUE_STATE_ENTRIES * sizeof (uint64_t))
#define	MSIQ_MAPPING_SIZE	(MSI_MAPPING_ENTRIES * sizeof (uint64_t))

/* OPL tuning variables for link unstable issue */
int wait_perst = 5000000;	/* step 9, default: 5s */
int wait_enable_port = 30000;	/* step 11, default: 30ms */
int link_retry_count = 2;	/* step 11, default: 2 */
int link_status_check = 400000;	/* step 11, default: 400ms */
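
/*
 * Judging from the defaults above (5000000 <-> 5s, 30000 <-> 30ms,
 * 400000 <-> 400ms), these delay tunables are expressed in microseconds.
 */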

static uint64_t msiq_suspend(devhandle_t dev_hdl, pxu_t *pxu_p);
static void msiq_resume(devhandle_t dev_hdl, pxu_t *pxu_p);
static void jbc_init(caddr_t xbc_csr_base, pxu_t *pxu_p);
static void ubc_init(caddr_t xbc_csr_base, pxu_t *pxu_p);

extern int px_acknak_timer_table[LINK_MAX_PKT_ARR_SIZE][LINK_WIDTH_ARR_SIZE];
extern int px_replay_timer_table[LINK_MAX_PKT_ARR_SIZE][LINK_WIDTH_ARR_SIZE];

/*
 * Initialize the bus, but do not enable interrupts.
 */
void
hvio_cb_init(caddr_t xbc_csr_base, pxu_t *pxu_p)
{
	switch (PX_CHIP_TYPE(pxu_p)) {
	case PX_CHIP_OBERON:
		ubc_init(xbc_csr_base, pxu_p);
		pxu_p->tsb_paddr = va_to_pa(pxu_p->tsb_vaddr);
		break;
	case PX_CHIP_FIRE:
		jbc_init(xbc_csr_base, pxu_p);
		break;
	default:
		DBG(DBG_CB, NULL, "hvio_cb_init - unknown chip type: 0x%x\n",
		    PX_CHIP_TYPE(pxu_p));
		break;
	}
}

/*
 * Initialize the JBC module, but do not enable interrupts.
 */
/* ARGSUSED */
static void
jbc_init(caddr_t xbc_csr_base, pxu_t *pxu_p)
{
	uint64_t val;

	/* Check if we need to enable inverted parity */
	val = (1ULL << JBUS_PARITY_CONTROL_P_EN);
	CSR_XS(xbc_csr_base, JBUS_PARITY_CONTROL, val);
	DBG(DBG_CB, NULL, "jbc_init, JBUS_PARITY_CONTROL: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBUS_PARITY_CONTROL));

	val = (1 << JBC_FATAL_RESET_ENABLE_SPARE_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_MB_PEA_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_CPE_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_APE_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_PIO_CPE_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_JTCEEW_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_JTCEEI_P_INT_EN) |
	    (1 << JBC_FATAL_RESET_ENABLE_JTCEER_P_INT_EN);
	CSR_XS(xbc_csr_base, JBC_FATAL_RESET_ENABLE, val);
	DBG(DBG_CB, NULL, "jbc_init, JBC_FATAL_RESET_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_FATAL_RESET_ENABLE));

	/*
	 * Enable merge, jbc and dmc interrupts.
	 */
	CSR_XS(xbc_csr_base, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE, -1ull);
	DBG(DBG_CB, NULL,
	    "jbc_init, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_CORE_AND_BLOCK_INTERRUPT_ENABLE));

	/*
	 * CSR_V JBC's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_CB, NULL, "jbc_init, JBC_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_ERROR_LOG_ENABLE));

	DBG(DBG_CB, NULL, "jbc_init, JBC_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_INTERRUPT_ENABLE));

	DBG(DBG_CB, NULL, "jbc_init, JBC_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_INTERRUPT_STATUS));

	DBG(DBG_CB, NULL, "jbc_init, JBC_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(xbc_csr_base, JBC_ERROR_STATUS_CLEAR));
}

/*
 * Initialize the UBC module, but do not enable interrupts.
 */
/* ARGSUSED */
static void
ubc_init(caddr_t xbc_csr_base, pxu_t *pxu_p)
{
	/*
	 * Enable Uranus bus error log bits.
	 */
	CSR_XS(xbc_csr_base, UBC_ERROR_LOG_ENABLE, -1ull);
	DBG(DBG_CB, NULL, "ubc_init, UBC_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, UBC_ERROR_LOG_ENABLE));

	/*
	 * Clear Uranus bus errors.
	 */
	CSR_XS(xbc_csr_base, UBC_ERROR_STATUS_CLEAR, -1ull);
	DBG(DBG_CB, NULL, "ubc_init, UBC_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(xbc_csr_base, UBC_ERROR_STATUS_CLEAR));

	/*
	 * CSR_V UBC's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_CB, NULL, "ubc_init, UBC_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, UBC_ERROR_LOG_ENABLE));

	DBG(DBG_CB, NULL, "ubc_init, UBC_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(xbc_csr_base, UBC_INTERRUPT_ENABLE));

	DBG(DBG_CB, NULL, "ubc_init, UBC_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(xbc_csr_base, UBC_INTERRUPT_STATUS));

	DBG(DBG_CB, NULL, "ubc_init, UBC_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(xbc_csr_base, UBC_ERROR_STATUS_CLEAR));
}

/*
 * Initialize the module, but do not enable interrupts.
 */
/* ARGSUSED */
void
hvio_ib_init(caddr_t csr_base, pxu_t *pxu_p)
{
	/*
	 * CSR_V IB's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, IMU_ERROR_LOG_ENABLE));

	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, IMU_INTERRUPT_ENABLE));

	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, IMU_INTERRUPT_STATUS));

	DBG(DBG_IB, NULL, "hvio_ib_init - IMU_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, IMU_ERROR_STATUS_CLEAR));
}

/*
 * Initialize the module, but do not enable interrupts.
 */
/* ARGSUSED */
static void
ilu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	/*
	 * CSR_V ILU's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_ILU, NULL, "ilu_init - ILU_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, ILU_ERROR_LOG_ENABLE));

	DBG(DBG_ILU, NULL, "ilu_init - ILU_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, ILU_INTERRUPT_ENABLE));

	DBG(DBG_ILU, NULL, "ilu_init - ILU_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, ILU_INTERRUPT_STATUS));

	DBG(DBG_ILU, NULL, "ilu_init - ILU_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, ILU_ERROR_STATUS_CLEAR));
}

/*
 * Initialize the module, but do not enable interrupts.
 */
static void
tlu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t val;

	/*
	 * CSR_V TLU_CONTROL Expect OBP ???
	 */

	/*
	 * L0s entry default timer value - 7.0 us
	 * Completion timeout select default value - 67.1 ms and
	 * OBP will set this value.
	 *
	 * Configuration - Bit 0 should always be 0 for upstream port.
	 * Bit 1 is clock - how is this related to the clock bit in TLU
	 * Link Control register?  Both are hardware dependent and likely
	 * set by OBP.
	 *
	 * NOTE: Do not set the NPWR_EN bit.  The desired value of this bit
	 * will be set by OBP.
	 */
	val = CSR_XR(csr_base, TLU_CONTROL);
	val |= (TLU_CONTROL_L0S_TIM_DEFAULT << TLU_CONTROL_L0S_TIM) |
	    TLU_CONTROL_CONFIG_DEFAULT;

	/*
	 * For Oberon, NPWR_EN is set to 0 to prevent PIO reads from blocking
	 * behind non-posted PIO writes.  This blocking could cause a master
	 * or slave timeout on the host bus if multiple serialized PIOs were
	 * to suffer Completion Timeouts, because the CTO delays for each PIO
	 * ahead of the read would accumulate.  Since the Olympus processor
	 * can have only 1 PIO outstanding, there is no possibility of PIO
	 * accesses from a given CPU to a given device being re-ordered by
	 * the PCIe fabric; therefore turning off serialization should be
	 * safe from a PCIe ordering perspective.
	 */
	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON)
		val &= ~(1ull << TLU_CONTROL_NPWR_EN);

	/*
	 * Set Detect.Quiet.  This will disable automatic link
	 * re-training if the link goes down, e.g. when power management
	 * turns off power to the downstream device.  This will enable
	 * Fire to go to the Drain state after link down.  The Drain state
	 * forces a reset of the FC state machine, which is required for
	 * proper link re-training.
	 */
	val |= (1ull << TLU_REMAIN_DETECT_QUIET);
	CSR_XS(csr_base, TLU_CONTROL, val);
	DBG(DBG_TLU, NULL, "tlu_init - TLU_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CONTROL));

	/*
	 * CSR_V TLU_STATUS Expect HW 0x4
	 */

	/*
	 * Only bits [7:0] are currently defined.  Bits [2:0]
	 * are the state, which should likely be in state active,
	 * 100b.  Bit three is 'recovery', which is not understood.
	 * All other bits are reserved.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_STATUS));

	/*
	 * CSR_V TLU_PME_TURN_OFF_GENERATE Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_PME_TURN_OFF_GENERATE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PME_TURN_OFF_GENERATE));

	/*
	 * CSR_V TLU_INGRESS_CREDITS_INITIAL Expect HW 0x10000200C0
	 */

	/*
	 * Ingress credits initial register.  Bits [39:32] should be
	 * 0x10, bits [19:12] should be 0x20, and bits [11:0] should
	 * be 0xC0.  These are the reset values, and should be set by
	 * HW.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_INGRESS_CREDITS_INITIAL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_INGRESS_CREDITS_INITIAL));

	/*
	 * CSR_V TLU_DIAGNOSTIC Expect HW 0x0
	 */

	/*
	 * Diagnostic register - always zero unless we are debugging.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DIAGNOSTIC: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DIAGNOSTIC));

	/*
	 * CSR_V TLU_EGRESS_CREDITS_CONSUMED Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_CREDITS_CONSUMED: 0x%llx\n",
	    CSR_XR(csr_base, TLU_EGRESS_CREDITS_CONSUMED));

	/*
	 * CSR_V TLU_EGRESS_CREDIT_LIMIT Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_CREDIT_LIMIT: 0x%llx\n",
	    CSR_XR(csr_base, TLU_EGRESS_CREDIT_LIMIT));

	/*
	 * CSR_V TLU_EGRESS_RETRY_BUFFER Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_EGRESS_RETRY_BUFFER: 0x%llx\n",
	    CSR_XR(csr_base, TLU_EGRESS_RETRY_BUFFER));

	/*
	 * CSR_V TLU_INGRESS_CREDITS_ALLOCATED Expected HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_INGRESS_CREDITS_ALLOCATED: 0x%llx\n",
	    CSR_XR(csr_base, TLU_INGRESS_CREDITS_ALLOCATED));

	/*
	 * CSR_V TLU_INGRESS_CREDITS_RECEIVED Expected HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_INGRESS_CREDITS_RECEIVED: 0x%llx\n",
	    CSR_XR(csr_base, TLU_INGRESS_CREDITS_RECEIVED));

	/*
	 * CSR_V TLU's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_LOG_ENABLE));

	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_INTERRUPT_ENABLE));

	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_INTERRUPT_STATUS));

	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_OTHER_EVENT_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, TLU_OTHER_EVENT_STATUS_CLEAR));

	/*
	 * CSR_V TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_OTHER_EVENT_HEADER1_LOG));

	/*
	 * CSR_V TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_OTHER_EVENT_HEADER2_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_OTHER_EVENT_HEADER1_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_OTHER_EVENT_HEADER2_LOG));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_SELECT Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_PERFORMANCE_COUNTER_SELECT: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_SELECT));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_ZERO Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_PERFORMANCE_COUNTER_ZERO: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_ZERO));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_ONE Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_PERFORMANCE_COUNTER_ONE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_ONE));

	/*
	 * CSR_V TLU_PERFORMANCE_COUNTER_TWO Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_PERFORMANCE_COUNTER_TWO: 0x%llx\n",
	    CSR_XR(csr_base, TLU_PERFORMANCE_COUNTER_TWO));

	/*
	 * CSR_V TLU_DEBUG_SELECT_A Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEBUG_SELECT_A: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEBUG_SELECT_A));

	/*
	 * CSR_V TLU_DEBUG_SELECT_B Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEBUG_SELECT_B: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEBUG_SELECT_B));

	/*
	 * CSR_V TLU_DEVICE_CAPABILITIES Expect HW 0xFC2
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_CAPABILITIES: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEVICE_CAPABILITIES));

	/*
	 * CSR_V TLU_DEVICE_CONTROL Expect HW 0x0
	 */

	/*
	 * Bits [14:12] are the Max Read Request Size, which is always 64
	 * bytes, which is 000b.  Bits [7:5] are Max Payload Size, which
	 * starts at 128 bytes, which is 000b.  This may be revisited if
	 * init_child finds greater values.
	 */
	val = 0x0ull;
	CSR_XS(csr_base, TLU_DEVICE_CONTROL, val);
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEVICE_CONTROL));

	/*
	 * CSR_V TLU_DEVICE_STATUS Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_DEVICE_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_DEVICE_STATUS));

	/*
	 * CSR_V TLU_LINK_CAPABILITIES Expect HW 0x15C81
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_CAPABILITIES: 0x%llx\n",
	    CSR_XR(csr_base, TLU_LINK_CAPABILITIES));

	/*
	 * CSR_V TLU_LINK_CONTROL Expect OBP 0x40
	 */

	/*
	 * The CLOCK bit should be set by OBP if the hardware dictates,
	 * and if it is set then ASPM should be used, since then L0s exit
	 * latency should be lower than L1 exit latency.
	 *
	 * Note that we will not enable power management during bringup
	 * since it has not been tested and is creating some problems in
	 * simulation.
	 */
	val = (1ull << TLU_LINK_CONTROL_CLOCK);

	CSR_XS(csr_base, TLU_LINK_CONTROL, val);
	DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, TLU_LINK_CONTROL));

	/*
	 * CSR_V TLU_LINK_STATUS Expect OBP 0x1011
	 */

	/*
	 * Not sure if HW or OBP will be setting this read only
	 * register.  Bit 12 is Clock, and it should always be 1,
	 * signifying that the component uses the same physical
	 * clock as the platform.  Bits [9:4] are for the width,
	 * with the expected value above signifying a x1 width.
	 * Bits [3:0] are the speed, with 1b signifying 2.5 Gb/s,
	 * the only speed as yet supported by the PCI-E spec.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_LINK_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_LINK_STATUS));

	/*
	 * CSR_V TLU_SLOT_CAPABILITIES Expect OBP ???
	 */

	/*
	 * Power Limits for the slots.  Will be platform
	 * dependent, and OBP will need to set after consulting
	 * with the HW guys.
	 *
	 * Bits [16:15] are power limit scale, which most likely
	 * will be 0b signifying 1x.  Bits [14:7] are the Set
	 * Power Limit Value, which is a number which is multiplied
	 * by the power limit scale to get the actual power limit.
	 */
	DBG(DBG_TLU, NULL, "tlu_init - TLU_SLOT_CAPABILITIES: 0x%llx\n",
	    CSR_XR(csr_base, TLU_SLOT_CAPABILITIES));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_LOG_ENABLE Expect Kernel 0x17F011
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE Expect
	 * Kernel 0x17F0110017F011
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_INTERRUPT_ENABLE));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_INTERRUPT_STATUS));

	/*
	 * CSR_V TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR));

	/*
	 * CSR_V TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER1_LOG));

	/*
	 * CSR_V TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG: 0x%llx\n",
	    CSR_XR(csr_base, TLU_RECEIVE_UNCORRECTABLE_ERROR_HEADER2_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG: "
	    "0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER1_LOG));

	/*
	 * CSR_V TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG: "
	    "0x%llx\n",
	    CSR_XR(csr_base, TLU_TRANSMIT_UNCORRECTABLE_ERROR_HEADER2_LOG));

	/*
	 * CSR_V TLU's CE interrupt regs (log, enable, status, clear)
	 */

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_LOG_ENABLE Expect Kernel 0x11C1
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE));

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE Kernel 0x11C1000011C1
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_INTERRUPT_ENABLE));

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_INTERRUPT_STATUS));

	/*
	 * CSR_V TLU_CORRECTABLE_ERROR_STATUS_CLEAR Expect HW 0x0
	 */
	DBG(DBG_TLU, NULL,
	    "tlu_init - TLU_CORRECTABLE_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_STATUS_CLEAR));
}

/* ARGSUSED */
static void
lpu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	/* Variables used to set the ACKNAK Latency Timer and Replay Timer */
	int link_width, max_payload;

	uint64_t val;

	/*
	 * Get the Link Width.  See the table above the LINK_WIDTH_ARR_SIZE
	 * #define.  Only Link Widths of x1, x4, and x8 are supported.
	 * If any width is reported other than x8, set default to x8.
	 */
	link_width = CSR_FR(csr_base, TLU_LINK_STATUS, WIDTH);
	DBG(DBG_LPU, NULL, "lpu_init - Link Width: x%d\n", link_width);

	/*
	 * Convert link_width to match timer array configuration.
	 */
	switch (link_width) {
	case 1:
		link_width = 0;
		break;
	case 4:
		link_width = 1;
		break;
	case 8:
		link_width = 2;
		break;
	case 16:
		link_width = 3;
		break;
	default:
		link_width = 2;
	}
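
	/*
	 * The converted value is used below as the second index into
	 * px_acknak_timer_table[][] and px_replay_timer_table[][], so it
	 * must lie in [0, LINK_WIDTH_ARR_SIZE); e.g. a x8 link selects
	 * column 2 of each table.
	 */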

	/*
	 * Get the Max Payload Size.
	 * See the table above the LINK_MAX_PKT_ARR_SIZE #define.
	 */
	max_payload = ((CSR_FR(csr_base, TLU_CONTROL, CONFIG) &
	    TLU_CONTROL_MPS_MASK) >> TLU_CONTROL_MPS_SHIFT);

	DBG(DBG_LPU, NULL, "lpu_init - Max Payload: %d\n",
	    (0x80 << max_payload));

	/* Make sure the packet size is not greater than 4096 */
	max_payload = (max_payload >= LINK_MAX_PKT_ARR_SIZE) ?
	    (LINK_MAX_PKT_ARR_SIZE - 1) : max_payload;
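
	/*
	 * (0x80 << max_payload) decodes the standard PCIe MPS encoding:
	 * 0 -> 128B, 1 -> 256B, 2 -> 512B, 3 -> 1024B, 4 -> 2048B and
	 * 5 -> 4096B; the clamp above keeps the table index in range.
	 */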

	/*
	 * CSR_V LPU_ID Expect HW 0x0
	 */

	/*
	 * This register has link id, phy id and gigablaze id.
	 * Should be set by HW.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_ID: 0x%llx\n",
	    CSR_XR(csr_base, LPU_ID));

	/*
	 * CSR_V LPU_RESET Expect Kernel 0x0
	 */

	/*
	 * No reason to have any reset bits high until an error is
	 * detected on the link.
	 */
	val = 0ull;
	CSR_XS(csr_base, LPU_RESET, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RESET: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RESET));

	/*
	 * CSR_V LPU_DEBUG_STATUS Expect HW 0x0
	 */

	/*
	 * Bits [15:8] are Debug B, and bits [7:0] are Debug A.
	 * They are read-only.  What do the 8 bits mean, and
	 * how do they get set if they are read only?
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_DEBUG_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_DEBUG_STATUS));

	/*
	 * CSR_V LPU_DEBUG_CONFIG Expect Kernel 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_DEBUG_CONFIG: 0x%llx\n",
	    CSR_XR(csr_base, LPU_DEBUG_CONFIG));

	/*
	 * CSR_V LPU_LTSSM_CONTROL Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONTROL));

	/*
	 * CSR_V LPU_LINK_STATUS Expect HW 0x101
	 */

	/*
	 * This register has bits [9:4] for link width, and the
	 * default 0x10 means a width of x16.  The problem is that
	 * this width is not supported according to the TLU
	 * link status register.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_STATUS));

	/*
	 * CSR_V LPU_INTERRUPT_STATUS Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_INTERRUPT_STATUS));

	/*
	 * CSR_V LPU_INTERRUPT_MASK Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_INTERRUPT_MASK));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER_SELECT Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER_SELECT: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER_SELECT));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER_CONTROL Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER_CONTROL));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER1 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER1));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER1_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER1_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER1_TEST));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER2));

	/*
	 * CSR_V LPU_LINK_PERFORMANCE_COUNTER2_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_PERFORMANCE_COUNTER2_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_PERFORMANCE_COUNTER2_TEST));

	/*
	 * CSR_V LPU_LINK_LAYER_CONFIG Expect HW 0x100
	 */

	/*
	 * This is another place where Max Payload can be set,
	 * this time for the link layer.  It will be set to
	 * 128B, which is the default, but this will need to
	 * be revisited.
	 */
	val = (1ull << LPU_LINK_LAYER_CONFIG_VC0_EN);
	CSR_XS(csr_base, LPU_LINK_LAYER_CONFIG, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_LAYER_CONFIG: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_CONFIG));

	/*
	 * CSR_V LPU_LINK_LAYER_STATUS Expect OBP 0x5
	 */

	/*
	 * Another R/W status register.  Bit 3, DL up Status, will
	 * be set high.  The link state machine status bits [2:0]
	 * are set to 0x1, but the status bits are not defined in the
	 * PRM.  What does 0x1 mean, what other values are possible,
	 * and what are their meanings?
	 *
	 * This register has been giving us problems in simulation.
	 * It has been mentioned that software should not program
	 * any registers with WE bits except during debug.  So
	 * this register will no longer be programmed.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LINK_LAYER_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_STATUS));

	/*
	 * CSR_V LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU Link Layer interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_LAYER_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LINK_LAYER_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LINK_LAYER_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_FLOW_CONTROL_UPDATE_CONTROL Expect OBP 0x7
	 */

	/*
	 * The PRM says that only the first two bits will be set
	 * high by default, which will enable flow control for
	 * posted and non-posted updates, but NOT completion
	 * updates.
	 */
	val = (1ull << LPU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_NP_EN) |
	    (1ull << LPU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_P_EN);
	CSR_XS(csr_base, LPU_FLOW_CONTROL_UPDATE_CONTROL, val);
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_FLOW_CONTROL_UPDATE_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_FLOW_CONTROL_UPDATE_CONTROL));

	/*
	 * CSR_V LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE
	 */

	/*
	 * This should be set by OBP.  We'll check to make sure.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_LINK_LAYER_FLOW_CONTROL_UPDATE_TIMEOUT_VALUE));

	/*
	 * CSR_V LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0 Expect OBP ???
	 */

	/*
	 * This register has Flow Control Update Timer values for
	 * non-posted and posted requests, bits [30:16] and bits
	 * [14:0], respectively.  These are read-only to SW so
	 * either HW or OBP needs to set them.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER0));

	/*
	 * CSR_V LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1 Expect OBP ???
	 */

	/*
	 * Same as the timer0 register above, except bits [14:0]
	 * have the timer values for completions.  Read-only to
	 * SW; OBP or HW need to set it.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_LINK_LAYER_VC0_FLOW_CONTROL_UPDATE_TIMER1));

	/*
	 * CSR_V LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD
	 */
	val = px_acknak_timer_table[max_payload][link_width];
	CSR_XS(csr_base, LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD, val);

	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_FREQUENT_NAK_LATENCY_TIMER_THRESHOLD));

	/*
	 * CSR_V LPU_TXLINK_ACKNAK_LATENCY_TIMER Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_ACKNAK_LATENCY_TIMER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_ACKNAK_LATENCY_TIMER));

	/*
	 * CSR_V LPU_TXLINK_REPLAY_TIMER_THRESHOLD
	 */
	val = px_replay_timer_table[max_payload][link_width];
	CSR_XS(csr_base, LPU_TXLINK_REPLAY_TIMER_THRESHOLD, val);

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_REPLAY_TIMER_THRESHOLD: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_REPLAY_TIMER_THRESHOLD));

	/*
	 * CSR_V LPU_TXLINK_REPLAY_TIMER Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_REPLAY_TIMER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_REPLAY_TIMER));

	/*
	 * CSR_V LPU_TXLINK_REPLAY_NUMBER_STATUS Expect OBP 0x3
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_REPLAY_NUMBER_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_REPLAY_NUMBER_STATUS));

	/*
	 * CSR_V LPU_REPLAY_BUFFER_MAX_ADDRESS Expect OBP 0xB3F
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_REPLAY_BUFFER_MAX_ADDRESS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_REPLAY_BUFFER_MAX_ADDRESS));

	/*
	 * CSR_V LPU_TXLINK_RETRY_FIFO_POINTER Expect OBP 0xFFFF0000
	 */
	val = ((LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_TLPTR_DEFAULT <<
	    LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_TLPTR) |
	    (LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_HDPTR_DEFAULT <<
	    LPU_TXLINK_RETRY_FIFO_POINTER_RTRY_FIFO_HDPTR));

	CSR_XS(csr_base, LPU_TXLINK_RETRY_FIFO_POINTER, val);
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_RETRY_FIFO_POINTER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_POINTER));

	/*
	 * CSR_V LPU_TXLINK_RETRY_FIFO_R_W_POINTER Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_RETRY_FIFO_R_W_POINTER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_R_W_POINTER));

	/*
	 * CSR_V LPU_TXLINK_RETRY_FIFO_CREDIT Expect HW 0x1580
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_RETRY_FIFO_CREDIT: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_RETRY_FIFO_CREDIT));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_COUNTER Expect OBP 0xFFF0000
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_SEQUENCE_COUNTER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNTER));

	/*
	 * CSR_V LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER Expect HW 0xFFF
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_ACK_SENT_SEQUENCE_NUMBER));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR Expect OBP 0x157
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_FIFO_MAX_ADDR));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS Expect HW 0xFFF0000
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_FIFO_POINTERS));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_COUNT_R_W_POINTERS));

	/*
	 * CSR_V LPU_TXLINK_TEST_CONTROL Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_TEST_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_TEST_CONTROL));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_ADDRESS_CONTROL Expect HW 0x0
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_ADDRESS_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_ADDRESS_CONTROL));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD0 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD0: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD0));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD1 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD1));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD2));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD3 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD3: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD3));

	/*
	 * CSR_V LPU_TXLINK_MEMORY_DATA_LOAD4 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_MEMORY_DATA_LOAD4: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_MEMORY_DATA_LOAD4));

	/*
	 * CSR_V LPU_TXLINK_RETRY_DATA_COUNT Expect HW 0x0
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TXLINK_RETRY_DATA_COUNT: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_RETRY_DATA_COUNT));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_BUFFER_COUNT Expect HW 0x0
	 */

	/*
	 * Test only register.  Will not be programmed.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_BUFFER_COUNT: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_BUFFER_COUNT));

	/*
	 * CSR_V LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA Expect HW 0x0
	 */

	/*
	 * Test only register.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TXLINK_SEQUENCE_BUFFER_BOTTOM_DATA));

	/*
	 * CSR_V LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RXLINK_NEXT_RECEIVE_SEQUENCE_1_COUNTER));

	/*
	 * CSR_V LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED Expect HW 0x0
	 */

	/*
	 * Test only register.
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RXLINK_UNSUPPORTED_DLLP_RECEIVED));

	/*
	 * CSR_V LPU_RXLINK_TEST_CONTROL Expect HW 0x0
	 */

	/*
	 * Test only register.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RXLINK_TEST_CONTROL: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RXLINK_TEST_CONTROL));

	/*
	 * CSR_V LPU_PHYSICAL_LAYER_CONFIGURATION Expect HW 0x10
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_PHYSICAL_LAYER_CONFIGURATION: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHYSICAL_LAYER_CONFIGURATION));

	/*
	 * CSR_V LPU_PHY_LAYER_STATUS Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_PHY_LAYER_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHY_LAYER_STATUS));

	/*
	 * CSR_V LPU_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHY_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU PHY LAYER interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_PHY_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHY_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_PHY_LAYER_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_PHY_LAYER_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_RECEIVE_PHY_CONFIG Expect HW 0x0
	 */

	/*
	 * This also needs some explanation.  What is the best value
	 * for the water mark?  Test mode enables which test mode?
	 * A programming model is needed for the Receiver Reset Lane N
	 * bits.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_CONFIG: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_CONFIG));

	/*
	 * CSR_V LPU_RECEIVE_PHY_STATUS1 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS1));

	/*
	 * CSR_V LPU_RECEIVE_PHY_STATUS2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS2));

	/*
	 * CSR_V LPU_RECEIVE_PHY_STATUS3 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_RECEIVE_PHY_STATUS3: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_STATUS3));

	/*
	 * CSR_V LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU RX LAYER interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_RECEIVE_PHY_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_TRANSMIT_PHY_CONFIG Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_CONFIG: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_CONFIG));

	/*
	 * CSR_V LPU_TRANSMIT_PHY_STATUS Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_STATUS));

	/*
	 * CSR_V LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU TX LAYER interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_TRANSMIT_PHY_STATUS_2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_TRANSMIT_PHY_STATUS_2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_TRANSMIT_PHY_STATUS_2));

	/*
	 * CSR_V LPU_LTSSM_CONFIG1 Expect OBP 0x205
	 */

	/*
	 * The new PRM has values for the LTSSM 8 ns timeout value and
	 * the LTSSM 20 ns timeout value.  But what do these values mean?
	 * Most of the other bits are questions as well.
	 *
	 * As such we will use the reset value.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG1));

	/*
	 * CSR_V LPU_LTSSM_CONFIG2 Expect OBP 0x2DC6C0
	 */

	/*
	 * Again, what does '12 ms timeout value' mean?
	 */
	val = (LPU_LTSSM_CONFIG2_LTSSM_12_TO_DEFAULT <<
	    LPU_LTSSM_CONFIG2_LTSSM_12_TO);
	CSR_XS(csr_base, LPU_LTSSM_CONFIG2, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG2));

	/*
	 * CSR_V LPU_LTSSM_CONFIG3 Expect OBP 0x7A120
	 */
	val = (LPU_LTSSM_CONFIG3_LTSSM_2_TO_DEFAULT <<
	    LPU_LTSSM_CONFIG3_LTSSM_2_TO);
	CSR_XS(csr_base, LPU_LTSSM_CONFIG3, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG3: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG3));

	/*
	 * CSR_V LPU_LTSSM_CONFIG4 Expect OBP 0x21300
	 */
	val = ((LPU_LTSSM_CONFIG4_DATA_RATE_DEFAULT <<
	    LPU_LTSSM_CONFIG4_DATA_RATE) |
	    (LPU_LTSSM_CONFIG4_N_FTS_DEFAULT <<
	    LPU_LTSSM_CONFIG4_N_FTS));

	CSR_XS(csr_base, LPU_LTSSM_CONFIG4, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG4: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG4));

	/*
	 * CSR_V LPU_LTSSM_CONFIG5 Expect OBP 0x0
	 */
	val = 0ull;
	CSR_XS(csr_base, LPU_LTSSM_CONFIG5, val);
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_CONFIG5: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_CONFIG5));

	/*
	 * CSR_V LPU_LTSSM_STATUS1 Expect OBP 0x0
	 */

	/*
	 * LTSSM Status registers are test only.
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_STATUS1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_STATUS1));

	/*
	 * CSR_V LPU_LTSSM_STATUS2 Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_STATUS2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_STATUS2));

	/*
	 * CSR_V LPU_LTSSM_INTERRUPT_AND_STATUS_TEST Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LTSSM_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU LTSSM LAYER interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_LTSSM_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LTSSM_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_LTSSM_STATUS_WRITE_ENABLE Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_LTSSM_STATUS_WRITE_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, LPU_LTSSM_STATUS_WRITE_ENABLE));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG1 Expect OBP 0x88407
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG1));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG2 Expect OBP 0x35
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG2));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG3 Expect OBP 0x4400FA
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG3: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG3));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG4 Expect OBP 0x1E848
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG4: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG4));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_STATUS Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_STATUS));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - "
	    "LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST: 0x%llx\n",
	    CSR_XR(csr_base,
	    LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS_TEST));

	/*
	 * CSR_V LPU GIGABLAZE LAYER interrupt regs (mask, status)
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_GIGABLAZE_GLUE_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_INTERRUPT_MASK));

	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_INTERRUPT_AND_STATUS));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_POWER_DOWN1 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_GIGABLAZE_GLUE_POWER_DOWN1: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_POWER_DOWN1));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_POWER_DOWN2 Expect HW 0x0
	 */
	DBG(DBG_LPU, NULL,
	    "lpu_init - LPU_GIGABLAZE_GLUE_POWER_DOWN2: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_POWER_DOWN2));

	/*
	 * CSR_V LPU_GIGABLAZE_GLUE_CONFIG5 Expect OBP 0x0
	 */
	DBG(DBG_LPU, NULL, "lpu_init - LPU_GIGABLAZE_GLUE_CONFIG5: 0x%llx\n",
	    CSR_XR(csr_base, LPU_GIGABLAZE_GLUE_CONFIG5));
}

/* ARGSUSED */
static void
dlu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t val;

	CSR_XS(csr_base, DLU_INTERRUPT_MASK, 0ull);
	DBG(DBG_TLU, NULL, "dlu_init - DLU_INTERRUPT_MASK: 0x%llx\n",
	    CSR_XR(csr_base, DLU_INTERRUPT_MASK));

	val = (1ull << DLU_LINK_LAYER_CONFIG_VC0_EN);
	CSR_XS(csr_base, DLU_LINK_LAYER_CONFIG, val);
	DBG(DBG_TLU, NULL, "dlu_init - DLU_LINK_LAYER_CONFIG: 0x%llx\n",
	    CSR_XR(csr_base, DLU_LINK_LAYER_CONFIG));

	val = (1ull << DLU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_NP_EN) |
	    (1ull << DLU_FLOW_CONTROL_UPDATE_CONTROL_FC0_U_P_EN);

	CSR_XS(csr_base, DLU_FLOW_CONTROL_UPDATE_CONTROL, val);
	DBG(DBG_TLU, NULL, "dlu_init - DLU_FLOW_CONTROL_UPDATE_CONTROL: "
	    "0x%llx\n", CSR_XR(csr_base, DLU_FLOW_CONTROL_UPDATE_CONTROL));

	val = (DLU_TXLINK_REPLAY_TIMER_THRESHOLD_DEFAULT <<
	    DLU_TXLINK_REPLAY_TIMER_THRESHOLD_RPLAY_TMR_THR);

	CSR_XS(csr_base, DLU_TXLINK_REPLAY_TIMER_THRESHOLD, val);

	DBG(DBG_TLU, NULL, "dlu_init - DLU_TXLINK_REPLAY_TIMER_THRESHOLD: "
	    "0x%llx\n", CSR_XR(csr_base, DLU_TXLINK_REPLAY_TIMER_THRESHOLD));
}

/* ARGSUSED */
static void
dmc_init(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t val;

	/*
	 * CSR_V DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE
	 * Expect OBP 0x8000000000000003
	 */
	val = -1ull;
	CSR_XS(csr_base, DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE, val);
	DBG(DBG_DMC, NULL,
	    "dmc_init - DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, DMC_CORE_AND_BLOCK_INTERRUPT_ENABLE));

	/*
	 * CSR_V DMC_CORE_AND_BLOCK_ERROR_STATUS Expect HW 0x0
	 */
	DBG(DBG_DMC, NULL,
	    "dmc_init - DMC_CORE_AND_BLOCK_ERROR_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, DMC_CORE_AND_BLOCK_ERROR_STATUS));

	/*
	 * CSR_V DMC_DEBUG_SELECT_FOR_PORT_A Expect HW 0x0
	 */
	val = 0x0ull;
	CSR_XS(csr_base, DMC_DEBUG_SELECT_FOR_PORT_A, val);
	DBG(DBG_DMC, NULL, "dmc_init - DMC_DEBUG_SELECT_FOR_PORT_A: 0x%llx\n",
	    CSR_XR(csr_base, DMC_DEBUG_SELECT_FOR_PORT_A));

	/*
	 * CSR_V DMC_DEBUG_SELECT_FOR_PORT_B Expect HW 0x0
	 */
	val = 0x0ull;
	CSR_XS(csr_base, DMC_DEBUG_SELECT_FOR_PORT_B, val);
	DBG(DBG_DMC, NULL, "dmc_init - DMC_DEBUG_SELECT_FOR_PORT_B: 0x%llx\n",
	    CSR_XR(csr_base, DMC_DEBUG_SELECT_FOR_PORT_B));
}

void
hvio_pec_init(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t val;

	ilu_init(csr_base, pxu_p);
	tlu_init(csr_base, pxu_p);

	switch (PX_CHIP_TYPE(pxu_p)) {
	case PX_CHIP_OBERON:
		dlu_init(csr_base, pxu_p);
		break;
	case PX_CHIP_FIRE:
		lpu_init(csr_base, pxu_p);
		break;
	default:
		DBG(DBG_PEC, NULL, "hvio_pec_init - unknown chip type: 0x%x\n",
		    PX_CHIP_TYPE(pxu_p));
		break;
	}

	dmc_init(csr_base, pxu_p);

	/*
	 * CSR_V PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE
	 * Expect Kernel 0x800000000000000F
	 */
	val = -1ull;
	CSR_XS(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE, val);
	DBG(DBG_PEC, NULL,
	    "hvio_pec_init - PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_ENABLE));

	/*
	 * CSR_V PEC_CORE_AND_BLOCK_INTERRUPT_STATUS Expect HW 0x0
	 */
	DBG(DBG_PEC, NULL,
	    "hvio_pec_init - PEC_CORE_AND_BLOCK_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, PEC_CORE_AND_BLOCK_INTERRUPT_STATUS));
}

/*
 * Convert a TTE to a physical address.
 */
static r_addr_t
mmu_tte_to_pa(uint64_t tte, pxu_t *pxu_p)
{
	uint64_t pa_mask;

	switch (PX_CHIP_TYPE(pxu_p)) {
	case PX_CHIP_OBERON:
		pa_mask = MMU_OBERON_PADDR_MASK;
		break;
	case PX_CHIP_FIRE:
		pa_mask = MMU_FIRE_PADDR_MASK;
		break;
	default:
		DBG(DBG_MMU, NULL, "mmu_tte_to_pa - unknown chip type: 0x%x\n",
		    PX_CHIP_TYPE(pxu_p));
		pa_mask = 0;
		break;
	}
	return ((tte & pa_mask) >> MMU_PAGE_SHIFT);
}

/*
 * Return the MMU bypass noncache bit for the chip.
 */
static r_addr_t
mmu_bypass_noncache(pxu_t *pxu_p)
{
	r_addr_t bypass_noncache_bit;

	switch (PX_CHIP_TYPE(pxu_p)) {
	case PX_CHIP_OBERON:
		bypass_noncache_bit = MMU_OBERON_BYPASS_NONCACHE;
		break;
	case PX_CHIP_FIRE:
		bypass_noncache_bit = MMU_FIRE_BYPASS_NONCACHE;
		break;
	default:
		DBG(DBG_MMU, NULL,
		    "mmu_bypass_noncache - unknown chip type: 0x%x\n",
		    PX_CHIP_TYPE(pxu_p));
		bypass_noncache_bit = 0;
		break;
	}

	return (bypass_noncache_bit);
}

/*
 * Calculate the number of TSB entries for the chip.
 */
/* ARGSUSED */
static uint_t
mmu_tsb_entries(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t tsb_ctrl;
	uint_t obp_tsb_entries, obp_tsb_size;

	tsb_ctrl = CSR_XR(csr_base, MMU_TSB_CONTROL);

	obp_tsb_size = tsb_ctrl & 0xF;

	obp_tsb_entries = MMU_TSBSIZE_TO_TSBENTRIES(obp_tsb_size);

	return (obp_tsb_entries);
}

/*
 * Initialize the module, but do not enable interrupts.
 */
void
hvio_mmu_init(caddr_t csr_base, pxu_t *pxu_p)
{
	uint64_t	val, i, obp_tsb_pa;
	uint_t		obp_tsb_entries;

	bzero(pxu_p->tsb_vaddr, pxu_p->tsb_size);

	/*
	 * Preserve OBP's TSB.
	 */
	obp_tsb_pa = CSR_XR(csr_base, MMU_TSB_CONTROL) & MMU_TSB_PA_MASK;

	obp_tsb_entries = mmu_tsb_entries(csr_base, pxu_p);

	/* save "shape" of OBP's TSB for use during Detach */
	pxu_p->obp_tsb_paddr = obp_tsb_pa;
	pxu_p->obp_tsb_entries = obp_tsb_entries;

	/* For each Valid TTE in OBP's TSB, save its value in px's IOTSB */
	hvio_obptsb_attach(pxu_p);

	/*
	 * Invalidate the TLB through the diagnostic register.
	 */
	CSR_XS(csr_base, MMU_TTE_CACHE_INVALIDATE, -1ull);

	/*
	 * Configure the Fire MMU TSB Control Register.  Determine
	 * the encoding for either 8KB pages (0) or 64KB pages (1).
	 *
	 * Write the most significant 30 bits of the TSB physical address
	 * and the encoded TSB table size.
	 */
	for (i = 8; i && (pxu_p->tsb_size < (0x2000 << i)); i--)
		;
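
	/*
	 * The loop above finds the largest i in [0, 8] for which
	 * tsb_size >= (0x2000 << i); e.g. a 256KB (0x40000-byte) TSB
	 * yields i = 5, since 0x2000 << 5 == 0x40000.  That i is the
	 * encoded table size ORed into the low bits below.
	 */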
	val = (((((va_to_pa(pxu_p->tsb_vaddr)) >> 13) << 13) |
	    ((MMU_PAGE_SHIFT == 13) ? 0 : 1) << 8) | i);

	CSR_XS(csr_base, MMU_TSB_CONTROL, val);

	/*
	 * Enable the MMU, set the "TSB Cache Snoop Enable",
	 * the "Cache Mode", the "Bypass Enable" and
	 * the "Translation Enable" bits.
	 */
	val = CSR_XR(csr_base, MMU_CONTROL_AND_STATUS);
	val |= ((1ull << MMU_CONTROL_AND_STATUS_SE)
	    | (MMU_CONTROL_AND_STATUS_ROE_BIT63_ENABLE <<
	    MMU_CONTROL_AND_STATUS_ROE)
	    | (MMU_CONTROL_AND_STATUS_CM_MASK << MMU_CONTROL_AND_STATUS_CM)
	    | (1ull << MMU_CONTROL_AND_STATUS_BE)
	    | (1ull << MMU_CONTROL_AND_STATUS_TE));

	CSR_XS(csr_base, MMU_CONTROL_AND_STATUS, val);

	/*
	 * Read the register here to ensure that the previous writes to
	 * the Fire MMU registers have been flushed.  (Technically, this
	 * is not entirely necessary here as we will likely do later reads
	 * during Fire initialization, but it is a small price to pay for
	 * more modular code.)
	 */
	(void) CSR_XR(csr_base, MMU_CONTROL_AND_STATUS);

	/*
	 * CSR_V MMU's interrupt regs (log, enable, status, clear)
	 */
	DBG(DBG_MMU, NULL, "mmu_init - MMU_ERROR_LOG_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, MMU_ERROR_LOG_ENABLE));

	DBG(DBG_MMU, NULL, "mmu_init - MMU_INTERRUPT_ENABLE: 0x%llx\n",
	    CSR_XR(csr_base, MMU_INTERRUPT_ENABLE));

	DBG(DBG_MMU, NULL, "mmu_init - MMU_INTERRUPT_STATUS: 0x%llx\n",
	    CSR_XR(csr_base, MMU_INTERRUPT_STATUS));

	DBG(DBG_MMU, NULL, "mmu_init - MMU_ERROR_STATUS_CLEAR: 0x%llx\n",
	    CSR_XR(csr_base, MMU_ERROR_STATUS_CLEAR));
}

/*
 * Generic IOMMU Services
 */

uint64_t
hvio_iommu_map(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid, pages_t pages,
    io_attributes_t io_attr, void *addr, size_t pfn_index, int flags)
{
	tsbindex_t	tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
	uint64_t	attr = MMU_TTE_V;
	int		i;

	if (io_attr & PCI_MAP_ATTR_WRITE)
		attr |= MMU_TTE_W;

	if ((PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) &&
	    (io_attr & PCI_MAP_ATTR_RO))
		attr |= MMU_TTE_RO;

	if (attr & MMU_TTE_RO) {
		DBG(DBG_MMU, NULL, "hvio_iommu_map: pfn_index=0x%x "
		    "pages=0x%x attr = 0x%lx\n", pfn_index, pages, attr);
	}

	if (flags & MMU_MAP_PFN) {
		ddi_dma_impl_t	*mp = (ddi_dma_impl_t *)addr;

		for (i = 0; i < pages; i++, pfn_index++, tsb_index++) {
			px_iopfn_t pfn = PX_GET_MP_PFN(mp, pfn_index);

			pxu_p->tsb_vaddr[tsb_index] = MMU_PTOB(pfn) | attr;
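
			/*
			 * Each TTE is a uint64_t (MMU_TTE_SIZE presumably
			 * being 8 bytes), so the ((tsb_index + 1) & 0x7)
			 * test below amounts to one flush per 8 TTEs,
			 * i.e. one per 64-byte cache line, plus a final
			 * flush for any partial line.
			 */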
			/*
			 * Oberon will need to flush the corresponding TTEs
			 * in the cache.  We only need to flush every cache
			 * line.  Extra PIOs are expensive.
			 */
			if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
				if ((i == (pages - 1)) ||
				    !((tsb_index + 1) & 0x7)) {
					CSR_XS((caddr_t)dev_hdl,
					    MMU_TTE_CACHE_FLUSH_ADDRESS,
					    (pxu_p->tsb_paddr +
					    (tsb_index * MMU_TTE_SIZE)));
				}
			}
		}
	} else {
		caddr_t	a = (caddr_t)addr;

		for (i = 0; i < pages; i++, a += MMU_PAGE_SIZE, tsb_index++) {
			px_iopfn_t pfn = hat_getpfnum(kas.a_hat, a);

			pxu_p->tsb_vaddr[tsb_index] = MMU_PTOB(pfn) | attr;

			/*
			 * Oberon will need to flush the corresponding TTEs
			 * in the cache.  We only need to flush every cache
			 * line.  Extra PIOs are expensive.
			 */
			if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
				if ((i == (pages - 1)) ||
				    !((tsb_index + 1) & 0x7)) {
					CSR_XS((caddr_t)dev_hdl,
					    MMU_TTE_CACHE_FLUSH_ADDRESS,
					    (pxu_p->tsb_paddr +
					    (tsb_index * MMU_TTE_SIZE)));
				}
			}
		}
	}

	return (H_EOK);
}

uint64_t
hvio_iommu_demap(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
    pages_t pages)
{
	tsbindex_t	tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
	int		i;

	for (i = 0; i < pages; i++, tsb_index++) {
		pxu_p->tsb_vaddr[tsb_index] = MMU_INVALID_TTE;

		/*
		 * Oberon will need to flush the corresponding TTEs in
		 * the cache.  We only need to flush every cache line.
		 * Extra PIOs are expensive.
		 */
		if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
			if ((i == (pages - 1)) ||
			    !((tsb_index + 1) & 0x7)) {
				CSR_XS((caddr_t)dev_hdl,
				    MMU_TTE_CACHE_FLUSH_ADDRESS,
				    (pxu_p->tsb_paddr +
				    (tsb_index * MMU_TTE_SIZE)));
			}
		}
	}

	return (H_EOK);
}

/* ARGSUSED */
uint64_t
hvio_iommu_getmap(devhandle_t dev_hdl, pxu_t *pxu_p, tsbid_t tsbid,
    io_attributes_t *attr_p, r_addr_t *r_addr_p)
{
	tsbindex_t	tsb_index = PCI_TSBID_TO_TSBINDEX(tsbid);
	uint64_t	*tte_addr;
	uint64_t	ret = H_EOK;

	tte_addr = (uint64_t *)(pxu_p->tsb_vaddr) + tsb_index;

	if (*tte_addr & MMU_TTE_V) {
		*r_addr_p = mmu_tte_to_pa(*tte_addr, pxu_p);
		*attr_p = (*tte_addr & MMU_TTE_W) ?
		    PCI_MAP_ATTR_WRITE : PCI_MAP_ATTR_READ;
	} else {
		*r_addr_p = 0;
		*attr_p = 0;
		ret = H_ENOMAP;
	}

	return (ret);
}

/*
 * Copy each Valid OBP TTE from OBP's IOTSB to px's IOTSB.
 */
void
hvio_obptsb_attach(pxu_t *pxu_p)
{
	uint64_t	obp_tsb_pa;
	uint64_t	*base_tte_addr;
	uint64_t	i;
	uint_t		obp_tsb_entries;

	obp_tsb_pa = pxu_p->obp_tsb_paddr;
	obp_tsb_entries = pxu_p->obp_tsb_entries;

	/*
	 * Compute the starting addr of the area reserved for
	 * OBP's TTEs; OBP's TTEs are stored at the highest addrs
	 * of px's IOTSB.
	 */
	base_tte_addr = pxu_p->tsb_vaddr +
	    ((pxu_p->tsb_size >> 3) - obp_tsb_entries);

	for (i = 0; i < obp_tsb_entries; i++) {
		uint64_t tte = lddphys(obp_tsb_pa + i * 8);

		if (!MMU_TTE_VALID(tte))
			continue;

		base_tte_addr[i] = tte;
	}
}

/*
 * For each Valid OBP TTE, deallocate space from the vmem Arena used
 * to manage the TTE's associated DVMA addr space.  (Allocation from
 * the DVMA Arena was done in px_mmu_attach).
 */
void
hvio_obptsb_detach(px_t *px_p)
{
	uint64_t	obp_tsb_pa;
	uint64_t	i;
	uint_t		obp_tsb_entries;
	uint_t		obp_tsb_bias;
	px_mmu_t	*mmu_p = px_p->px_mmu_p;
	vmem_t		*dvma_map;
	pxu_t		*pxu_p = (pxu_t *)px_p->px_plat_p;

	dvma_map = mmu_p->mmu_dvma_map;

	obp_tsb_pa = pxu_p->obp_tsb_paddr;
	obp_tsb_entries = pxu_p->obp_tsb_entries;

	/*
	 * OBP's TTEs are located at the high end of px's IOTSB.
	 * Equivalently, OBP's DVMA space is allocated at the high end
	 * of px's DVMA space.  Compute the bias that references
	 * OBP's first possible page of DVMA space.
	 */
	obp_tsb_bias = (pxu_p->tsb_size >> 3) - obp_tsb_entries;

	for (i = 0; i < obp_tsb_entries; i++) {
		caddr_t	va;
		uint64_t tte = lddphys(obp_tsb_pa + i * 8);

		if (!MMU_TTE_VALID(tte))
			continue;

		/* deallocate the TTE's associated page of DVMA space */
		va = (caddr_t)(MMU_PTOB(mmu_p->dvma_base_pg + obp_tsb_bias +
		    i));
		vmem_xfree(dvma_map, va, MMU_PAGE_SIZE);
	}
}

static uint64_t
hvio_get_bypass_base(pxu_t *pxu_p)
{
	uint64_t base;

	switch (PX_CHIP_TYPE(pxu_p)) {
	case PX_CHIP_OBERON:
		base = MMU_OBERON_BYPASS_BASE;
		break;
	case PX_CHIP_FIRE:
		base = MMU_FIRE_BYPASS_BASE;
		break;
	default:
		DBG(DBG_MMU, NULL,
		    "hvio_get_bypass_base - unknown chip type: 0x%x\n",
		    PX_CHIP_TYPE(pxu_p));
		base = 0;
		break;
	}
	return (base);
}

static uint64_t
hvio_get_bypass_end(pxu_t *pxu_p)
{
	uint64_t end;

	switch (PX_CHIP_TYPE(pxu_p)) {
	case PX_CHIP_OBERON:
		end = MMU_OBERON_BYPASS_END;
		break;
	case PX_CHIP_FIRE:
		end = MMU_FIRE_BYPASS_END;
		break;
	default:
		DBG(DBG_MMU, NULL,
		    "hvio_get_bypass_end - unknown chip type: 0x%x\n",
		    PX_CHIP_TYPE(pxu_p));
		end = 0;
		break;
	}
	return (end);
}

/* ARGSUSED */
uint64_t
hvio_iommu_getbypass(devhandle_t dev_hdl, pxu_t *pxu_p, r_addr_t ra,
    io_attributes_t attr, io_addr_t *io_addr_p)
{
	uint64_t	pfn = MMU_BTOP(ra);

	*io_addr_p = hvio_get_bypass_base(pxu_p) | ra |
	    (pf_is_memory(pfn) ? 0 : mmu_bypass_noncache(pxu_p));

	return (H_EOK);
}

/*
 * Generic IO Interrupt Services
 */

/*
 * Converts a device specific interrupt number given by the
 * arguments devhandle and devino into a system specific ino.
 */
/* ARGSUSED */
uint64_t
hvio_intr_devino_to_sysino(devhandle_t dev_hdl, pxu_t *pxu_p, devino_t devino,
    sysino_t *sysino)
{
	if (devino > INTERRUPT_MAPPING_ENTRIES) {
		DBG(DBG_IB, NULL, "ino %x is invalid\n", devino);
		return (H_ENOINTR);
	}

	*sysino = DEVINO_TO_SYSINO(pxu_p->portid, devino);
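
	/*
	 * DEVINO_TO_SYSINO() evidently composes the sysino from the
	 * port's interrupt group number (derived from portid) in the bits
	 * above the ino field and the devino in the low bits; compare the
	 * explicit ID_TO_IGN(...) << INO_BITS composition in
	 * hvio_msiq_init() below.
	 */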

	return (H_EOK);
}

/*
 * Returns state in intr_valid_state if the interrupt defined by sysino
 * is valid (enabled) or not-valid (disabled).
 */
uint64_t
hvio_intr_getvalid(devhandle_t dev_hdl, sysino_t sysino,
    intr_valid_state_t *intr_valid_state)
{
	if (CSRA_BR((caddr_t)dev_hdl, INTERRUPT_MAPPING,
	    SYSINO_TO_DEVINO(sysino), ENTRIES_V)) {
		*intr_valid_state = INTR_VALID;
	} else {
		*intr_valid_state = INTR_NOTVALID;
	}

	return (H_EOK);
}

/*
 * Sets the 'valid' state of the interrupt defined by
 * the argument sysino to the state defined by the
 * argument intr_valid_state.
 */
uint64_t
hvio_intr_setvalid(devhandle_t dev_hdl, sysino_t sysino,
    intr_valid_state_t intr_valid_state)
{
	switch (intr_valid_state) {
	case INTR_VALID:
		CSRA_BS((caddr_t)dev_hdl, INTERRUPT_MAPPING,
		    SYSINO_TO_DEVINO(sysino), ENTRIES_V);
		break;
	case INTR_NOTVALID:
		CSRA_BC((caddr_t)dev_hdl, INTERRUPT_MAPPING,
		    SYSINO_TO_DEVINO(sysino), ENTRIES_V);
		break;
	default:
		return (EINVAL);
	}

	return (H_EOK);
}

/*
 * Returns the current state of the interrupt given by the sysino
 * argument.
 */
uint64_t
hvio_intr_getstate(devhandle_t dev_hdl, sysino_t sysino,
    intr_state_t *intr_state)
{
	uint32_t	state;

	state = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_CLEAR,
	    SYSINO_TO_DEVINO(sysino), ENTRIES_INT_STATE);

	switch (state) {
	case INTERRUPT_IDLE_STATE:
		*intr_state = INTR_IDLE_STATE;
		break;
	case INTERRUPT_RECEIVED_STATE:
		*intr_state = INTR_RECEIVED_STATE;
		break;
	case INTERRUPT_PENDING_STATE:
		*intr_state = INTR_DELIVERED_STATE;
		break;
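
	/*
	 * Note that the hardware's PENDING state is reported as the
	 * generic INTR_DELIVERED_STATE; hvio_intr_setstate() below
	 * applies the same mapping in reverse.
	 */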
	default:
		return (EINVAL);
	}

	return (H_EOK);
}

/*
 * Sets the current state of the interrupt given by the sysino
 * argument to the value given in the argument intr_state.
 *
 * Note: Setting the state to INTR_IDLE clears any pending
 * interrupt for sysino.
 */
uint64_t
hvio_intr_setstate(devhandle_t dev_hdl, sysino_t sysino,
    intr_state_t intr_state)
{
	uint64_t	state;

	switch (intr_state) {
	case INTR_IDLE_STATE:
		state = INTERRUPT_IDLE_STATE;
		break;
	case INTR_DELIVERED_STATE:
		state = INTERRUPT_PENDING_STATE;
		break;
	default:
		return (EINVAL);
	}

	CSRA_FS((caddr_t)dev_hdl, INTERRUPT_CLEAR,
	    SYSINO_TO_DEVINO(sysino), ENTRIES_INT_STATE, state);

	return (H_EOK);
}
/*
 * Returns the cpuid that is the current target of the
 * interrupt given by the sysino argument.
 *
 * The cpuid value returned is undefined if the target
 * has not been set via intr_settarget.
 */
uint64_t
hvio_intr_gettarget(devhandle_t dev_hdl, pxu_t *pxu_p, sysino_t sysino,
    cpuid_t *cpuid)
{
	switch (PX_CHIP_TYPE(pxu_p)) {
	case PX_CHIP_OBERON:
		*cpuid = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_MAPPING,
		    SYSINO_TO_DEVINO(sysino), ENTRIES_T_DESTID);
		break;
	case PX_CHIP_FIRE:
		*cpuid = CSRA_FR((caddr_t)dev_hdl, INTERRUPT_MAPPING,
		    SYSINO_TO_DEVINO(sysino), ENTRIES_T_JPID);
		break;
	default:
		DBG(DBG_CB, NULL, "hvio_intr_gettarget - "
		    "unknown chip type: 0x%x\n", PX_CHIP_TYPE(pxu_p));
		return (EINVAL);
	}

	return (H_EOK);
}
/*
 * Set the target cpu for the interrupt defined by the argument
 * sysino to the target cpu value defined by the argument cpuid.
 */
uint64_t
hvio_intr_settarget(devhandle_t dev_hdl, pxu_t *pxu_p, sysino_t sysino,
    cpuid_t cpuid)
{
	uint64_t val, intr_controller;
	uint32_t ino = SYSINO_TO_DEVINO(sysino);

	/*
	 * For now, we assign interrupt controller in a round
	 * robin fashion. Later, we may need to come up with
	 * a more efficient assignment algorithm.
	 */
	intr_controller = 0x1ull << (cpuid % 4);

	switch (PX_CHIP_TYPE(pxu_p)) {
	case PX_CHIP_OBERON:
		val = (((cpuid &
		    INTERRUPT_MAPPING_ENTRIES_T_DESTID_MASK) <<
		    INTERRUPT_MAPPING_ENTRIES_T_DESTID) |
		    ((intr_controller &
		    INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM_MASK)
		    << INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM));
		break;
	case PX_CHIP_FIRE:
		val = (((cpuid & INTERRUPT_MAPPING_ENTRIES_T_JPID_MASK) <<
		    INTERRUPT_MAPPING_ENTRIES_T_JPID) |
		    ((intr_controller &
		    INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM_MASK)
		    << INTERRUPT_MAPPING_ENTRIES_INT_CNTRL_NUM));
		break;
	default:
		DBG(DBG_CB, NULL, "hvio_intr_settarget - "
		    "unknown chip type: 0x%x\n", PX_CHIP_TYPE(pxu_p));
		return (EINVAL);
	}

	/* For EQ interrupts, set DATA MONDO bit */
	if ((ino >= EQ_1ST_DEVINO) && (ino < (EQ_1ST_DEVINO + EQ_CNT)))
		val |= (0x1ull << INTERRUPT_MAPPING_ENTRIES_MDO_MODE);

	CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MAPPING, ino, val);

	return (H_EOK);
}
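/*
 * Illustration of the round robin assignment above: with four interrupt
 * controllers, cpuid % 4 picks one, and the INT_CNTRL_NUM field takes
 * the one-hot form 0x1ull << (cpuid % 4).  E.g. cpuid 5 selects
 * controller 1, so the field value written is 0x2.
 */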
uint64_t
hvio_msiq_init(devhandle_t dev_hdl, pxu_t *pxu_p)
{
	CSRA_XS((caddr_t)dev_hdl, EVENT_QUEUE_BASE_ADDRESS, 0,
	    (uint64_t)pxu_p->msiq_mapped_p);
	DBG(DBG_IB, NULL,
	    "hvio_msiq_init: EVENT_QUEUE_BASE_ADDRESS 0x%llx\n",
	    CSR_XR((caddr_t)dev_hdl, EVENT_QUEUE_BASE_ADDRESS));

	CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MONDO_DATA_0, 0,
	    (uint64_t)ID_TO_IGN(PX_CHIP_TYPE(pxu_p),
	    pxu_p->portid) << INO_BITS);
	DBG(DBG_IB, NULL, "hvio_msiq_init: "
	    "INTERRUPT_MONDO_DATA_0: 0x%llx\n",
	    CSR_XR((caddr_t)dev_hdl, INTERRUPT_MONDO_DATA_0));

	return (H_EOK);
}
uint64_t
hvio_msiq_getvalid(devhandle_t dev_hdl, msiqid_t msiq_id,
    pci_msiq_valid_state_t *msiq_valid_state)
{
	uint32_t eq_state;
	uint64_t ret = H_EOK;

	eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE,
	    msiq_id, ENTRIES_STATE);

	switch (eq_state) {
	case EQ_IDLE_STATE:
		*msiq_valid_state = PCI_MSIQ_INVALID;
		break;
	case EQ_ACTIVE_STATE:
	case EQ_ERROR_STATE:
		*msiq_valid_state = PCI_MSIQ_VALID;
		break;
	default:
		ret = H_EIO;
		break;
	}

	return (ret);
}
uint64_t
hvio_msiq_setvalid(devhandle_t dev_hdl, msiqid_t msiq_id,
    pci_msiq_valid_state_t msiq_valid_state)
{
	uint64_t ret = H_EOK;

	switch (msiq_valid_state) {
	case PCI_MSIQ_INVALID:
		CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_CLEAR,
		    msiq_id, ENTRIES_DIS);
		break;
	case PCI_MSIQ_VALID:
		CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET,
		    msiq_id, ENTRIES_EN);
		break;
	default:
		ret = H_EINVAL;
		break;
	}

	return (ret);
}
uint64_t
hvio_msiq_getstate(devhandle_t dev_hdl, msiqid_t msiq_id,
    pci_msiq_state_t *msiq_state)
{
	uint32_t eq_state;
	uint64_t ret = H_EOK;

	eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE,
	    msiq_id, ENTRIES_STATE);

	switch (eq_state) {
	case EQ_IDLE_STATE:
	case EQ_ACTIVE_STATE:
		*msiq_state = PCI_MSIQ_STATE_IDLE;
		break;
	case EQ_ERROR_STATE:
		*msiq_state = PCI_MSIQ_STATE_ERROR;
		break;
	default:
		ret = H_EIO;
		break;
	}

	return (ret);
}
uint64_t
hvio_msiq_setstate(devhandle_t dev_hdl, msiqid_t msiq_id,
    pci_msiq_state_t msiq_state)
{
	uint32_t eq_state;
	uint64_t ret = H_EOK;

	eq_state = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_STATE,
	    msiq_id, ENTRIES_STATE);

	switch (eq_state) {
	case EQ_IDLE_STATE:
		if (msiq_state == PCI_MSIQ_STATE_ERROR)
			ret = H_EIO;
		break;
	case EQ_ACTIVE_STATE:
		if (msiq_state == PCI_MSIQ_STATE_ERROR)
			CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET,
			    msiq_id, ENTRIES_ENOVERR);
		else
			ret = H_EIO;
		break;
	case EQ_ERROR_STATE:
		if (msiq_state == PCI_MSIQ_STATE_IDLE)
			CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_CLEAR,
			    msiq_id, ENTRIES_E2I);
		else
			ret = H_EIO;
		break;
	default:
		ret = H_EIO;
		break;
	}

	return (ret);
}
uint64_t
hvio_msiq_gethead(devhandle_t dev_hdl, msiqid_t msiq_id,
    msiqhead_t *msiq_head)
{
	*msiq_head = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_HEAD,
	    msiq_id, ENTRIES_HEAD);

	return (H_EOK);
}
uint64_t
hvio_msiq_sethead(devhandle_t dev_hdl, msiqid_t msiq_id,
    msiqhead_t msiq_head)
{
	CSRA_FS((caddr_t)dev_hdl, EVENT_QUEUE_HEAD, msiq_id,
	    ENTRIES_HEAD, msiq_head);

	return (H_EOK);
}
uint64_t
hvio_msiq_gettail(devhandle_t dev_hdl, msiqid_t msiq_id,
    msiqtail_t *msiq_tail)
{
	*msiq_tail = CSRA_FR((caddr_t)dev_hdl, EVENT_QUEUE_TAIL,
	    msiq_id, ENTRIES_TAIL);

	return (H_EOK);
}
uint64_t
hvio_msi_init(devhandle_t dev_hdl, uint64_t addr32, uint64_t addr64)
{
	/* PCI MEM 32 resources to perform 32 bit MSI transactions */
	CSRA_FS((caddr_t)dev_hdl, MSI_32_BIT_ADDRESS, 0,
	    ADDR, (uint64_t)addr32 >> MSI_32_BIT_ADDRESS_ADDR);
	DBG(DBG_IB, NULL, "hvio_msi_init: MSI_32_BIT_ADDRESS: 0x%llx\n",
	    CSR_XR((caddr_t)dev_hdl, MSI_32_BIT_ADDRESS));

	/* Reserve PCI MEM 64 resources to perform 64 bit MSI transactions */
	CSRA_FS((caddr_t)dev_hdl, MSI_64_BIT_ADDRESS, 0,
	    ADDR, (uint64_t)addr64 >> MSI_64_BIT_ADDRESS_ADDR);
	DBG(DBG_IB, NULL, "hvio_msi_init: MSI_64_BIT_ADDRESS: 0x%llx\n",
	    CSR_XR((caddr_t)dev_hdl, MSI_64_BIT_ADDRESS));

	return (H_EOK);
}
uint64_t
hvio_msi_getmsiq(devhandle_t dev_hdl, msinum_t msi_num,
    msiqid_t *msiq_id)
{
	*msiq_id = CSRA_FR((caddr_t)dev_hdl, MSI_MAPPING,
	    msi_num, ENTRIES_EQNUM);

	return (H_EOK);
}
uint64_t
hvio_msi_setmsiq(devhandle_t dev_hdl, msinum_t msi_num,
    msiqid_t msiq_id)
{
	CSRA_FS((caddr_t)dev_hdl, MSI_MAPPING, msi_num,
	    ENTRIES_EQNUM, msiq_id);

	return (H_EOK);
}
uint64_t
hvio_msi_getvalid(devhandle_t dev_hdl, msinum_t msi_num,
    pci_msi_valid_state_t *msi_valid_state)
{
	*msi_valid_state = CSRA_BR((caddr_t)dev_hdl, MSI_MAPPING,
	    msi_num, ENTRIES_V);

	return (H_EOK);
}
uint64_t
hvio_msi_setvalid(devhandle_t dev_hdl, msinum_t msi_num,
    pci_msi_valid_state_t msi_valid_state)
{
	uint64_t ret = H_EOK;

	switch (msi_valid_state) {
	case PCI_MSI_VALID:
		CSRA_BS((caddr_t)dev_hdl, MSI_MAPPING, msi_num,
		    ENTRIES_V);
		break;
	case PCI_MSI_INVALID:
		CSRA_BC((caddr_t)dev_hdl, MSI_MAPPING, msi_num,
		    ENTRIES_V);
		break;
	default:
		ret = H_EINVAL;
		break;
	}

	return (ret);
}
uint64_t
hvio_msi_getstate(devhandle_t dev_hdl, msinum_t msi_num,
    pci_msi_state_t *msi_state)
{
	*msi_state = CSRA_BR((caddr_t)dev_hdl, MSI_MAPPING,
	    msi_num, ENTRIES_EQWR_N);

	return (H_EOK);
}
uint64_t
hvio_msi_setstate(devhandle_t dev_hdl, msinum_t msi_num,
    pci_msi_state_t msi_state)
{
	uint64_t ret = H_EOK;

	switch (msi_state) {
	case PCI_MSI_STATE_IDLE:
		CSRA_BS((caddr_t)dev_hdl, MSI_CLEAR, msi_num,
		    ENTRIES_EQWR_N);
		break;
	case PCI_MSI_STATE_DELIVERED:
	default:
		ret = H_EINVAL;
		break;
	}

	return (ret);
}
uint64_t
hvio_msg_getmsiq(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
    msiqid_t *msiq_id)
{
	uint64_t ret = H_EOK;

	switch (msg_type) {
	case PCIE_PME_MSG:
		*msiq_id = CSR_FR((caddr_t)dev_hdl, PM_PME_MAPPING, EQNUM);
		break;
	case PCIE_PME_ACK_MSG:
		*msiq_id = CSR_FR((caddr_t)dev_hdl, PME_TO_ACK_MAPPING,
		    EQNUM);
		break;
	case PCIE_CORR_MSG:
		*msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_COR_MAPPING, EQNUM);
		break;
	case PCIE_NONFATAL_MSG:
		*msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING,
		    EQNUM);
		break;
	case PCIE_FATAL_MSG:
		*msiq_id = CSR_FR((caddr_t)dev_hdl, ERR_FATAL_MAPPING, EQNUM);
		break;
	default:
		ret = H_EINVAL;
		break;
	}

	return (ret);
}
uint64_t
hvio_msg_setmsiq(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
    msiqid_t msiq_id)
{
	uint64_t ret = H_EOK;

	switch (msg_type) {
	case PCIE_PME_MSG:
		CSR_FS((caddr_t)dev_hdl, PM_PME_MAPPING, EQNUM, msiq_id);
		break;
	case PCIE_PME_ACK_MSG:
		CSR_FS((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, EQNUM, msiq_id);
		break;
	case PCIE_CORR_MSG:
		CSR_FS((caddr_t)dev_hdl, ERR_COR_MAPPING, EQNUM, msiq_id);
		break;
	case PCIE_NONFATAL_MSG:
		CSR_FS((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, EQNUM, msiq_id);
		break;
	case PCIE_FATAL_MSG:
		CSR_FS((caddr_t)dev_hdl, ERR_FATAL_MAPPING, EQNUM, msiq_id);
		break;
	default:
		ret = H_EINVAL;
		break;
	}

	return (ret);
}
uint64_t
hvio_msg_getvalid(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
    pcie_msg_valid_state_t *msg_valid_state)
{
	uint64_t ret = H_EOK;

	switch (msg_type) {
	case PCIE_PME_MSG:
		*msg_valid_state = CSR_BR((caddr_t)dev_hdl, PM_PME_MAPPING, V);
		break;
	case PCIE_PME_ACK_MSG:
		*msg_valid_state = CSR_BR((caddr_t)dev_hdl,
		    PME_TO_ACK_MAPPING, V);
		break;
	case PCIE_CORR_MSG:
		*msg_valid_state = CSR_BR((caddr_t)dev_hdl, ERR_COR_MAPPING, V);
		break;
	case PCIE_NONFATAL_MSG:
		*msg_valid_state = CSR_BR((caddr_t)dev_hdl,
		    ERR_NONFATAL_MAPPING, V);
		break;
	case PCIE_FATAL_MSG:
		*msg_valid_state = CSR_BR((caddr_t)dev_hdl, ERR_FATAL_MAPPING,
		    V);
		break;
	default:
		ret = H_EINVAL;
		break;
	}

	return (ret);
}
uint64_t
hvio_msg_setvalid(devhandle_t dev_hdl, pcie_msg_type_t msg_type,
    pcie_msg_valid_state_t msg_valid_state)
{
	uint64_t ret = H_EOK;

	switch (msg_valid_state) {
	case PCIE_MSG_VALID:
		switch (msg_type) {
		case PCIE_PME_MSG:
			CSR_BS((caddr_t)dev_hdl, PM_PME_MAPPING, V);
			break;
		case PCIE_PME_ACK_MSG:
			CSR_BS((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, V);
			break;
		case PCIE_CORR_MSG:
			CSR_BS((caddr_t)dev_hdl, ERR_COR_MAPPING, V);
			break;
		case PCIE_NONFATAL_MSG:
			CSR_BS((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, V);
			break;
		case PCIE_FATAL_MSG:
			CSR_BS((caddr_t)dev_hdl, ERR_FATAL_MAPPING, V);
			break;
		default:
			ret = H_EINVAL;
			break;
		}
		break;
	case PCIE_MSG_INVALID:
		switch (msg_type) {
		case PCIE_PME_MSG:
			CSR_BC((caddr_t)dev_hdl, PM_PME_MAPPING, V);
			break;
		case PCIE_PME_ACK_MSG:
			CSR_BC((caddr_t)dev_hdl, PME_TO_ACK_MAPPING, V);
			break;
		case PCIE_CORR_MSG:
			CSR_BC((caddr_t)dev_hdl, ERR_COR_MAPPING, V);
			break;
		case PCIE_NONFATAL_MSG:
			CSR_BC((caddr_t)dev_hdl, ERR_NONFATAL_MAPPING, V);
			break;
		case PCIE_FATAL_MSG:
			CSR_BC((caddr_t)dev_hdl, ERR_FATAL_MAPPING, V);
			break;
		default:
			ret = H_EINVAL;
			break;
		}
		break;
	default:
		ret = H_EINVAL;
		break;
	}

	return (ret);
}
/*
 * Suspend/Resume Functions:
 *
 * Registers saved have all been touched in the XXX_init functions.
 */
uint64_t
hvio_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
{
	uint64_t *config_state;
	int total_size;
	int i;

	if (msiq_suspend(dev_hdl, pxu_p) != H_EOK)
		return (H_EIO);

	total_size = PEC_SIZE + MMU_SIZE + IB_SIZE + IB_MAP_SIZE;
	config_state = kmem_zalloc(total_size, KM_NOSLEEP);

	if (config_state == NULL) {
		return (H_EIO);
	}

	/*
	 * Soft state for suspend/resume from pxu_t
	 * uint64_t *pec_config_state;
	 * uint64_t *mmu_config_state;
	 * uint64_t *ib_intr_map;
	 * uint64_t *ib_config_state;
	 * uint64_t *xcb_config_state;
	 */

	/* Save the PEC configuration states */
	pxu_p->pec_config_state = config_state;
	for (i = 0; i < PEC_KEYS; i++) {
		if ((pec_config_state_regs[i].chip == PX_CHIP_TYPE(pxu_p)) ||
		    (pec_config_state_regs[i].chip == PX_CHIP_UNIDENTIFIED)) {
			pxu_p->pec_config_state[i] =
			    CSR_XR((caddr_t)dev_hdl,
			    pec_config_state_regs[i].reg);
		}
	}

	/* Save the MMU configuration states */
	pxu_p->mmu_config_state = pxu_p->pec_config_state + PEC_KEYS;
	for (i = 0; i < MMU_KEYS; i++) {
		pxu_p->mmu_config_state[i] =
		    CSR_XR((caddr_t)dev_hdl, mmu_config_state_regs[i]);
	}

	/* Save the interrupt mapping registers */
	pxu_p->ib_intr_map = pxu_p->mmu_config_state + MMU_KEYS;
	for (i = 0; i < INTERRUPT_MAPPING_ENTRIES; i++) {
		pxu_p->ib_intr_map[i] =
		    CSRA_XR((caddr_t)dev_hdl, INTERRUPT_MAPPING, i);
	}

	/* Save the IB configuration states */
	pxu_p->ib_config_state = pxu_p->ib_intr_map + INTERRUPT_MAPPING_ENTRIES;
	for (i = 0; i < IB_KEYS; i++) {
		pxu_p->ib_config_state[i] =
		    CSR_XR((caddr_t)dev_hdl, ib_config_state_regs[i]);
	}

	return (H_EOK);
}
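/*
 * Layout of the single config_state allocation carved up above
 * (sizes in uint64_t entries):
 *
 *	pec_config_state	[0 .. PEC_KEYS-1]
 *	mmu_config_state	[PEC_KEYS .. PEC_KEYS+MMU_KEYS-1]
 *	ib_intr_map		[.. +INTERRUPT_MAPPING_ENTRIES-1]
 *	ib_config_state		[.. +IB_KEYS-1]
 *
 * hvio_resume() below frees the whole buffer through pec_config_state,
 * so the sub-pointers must never be freed individually.
 */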
void
hvio_resume(devhandle_t dev_hdl, devino_t devino, pxu_t *pxu_p)
{
	int total_size;
	sysino_t sysino;
	int i;
	uint64_t ret;

	/* Make sure that suspend actually did occur */
	if (!pxu_p->pec_config_state) {
		return;
	}

	/* Restore IB configuration states */
	for (i = 0; i < IB_KEYS; i++) {
		CSR_XS((caddr_t)dev_hdl, ib_config_state_regs[i],
		    pxu_p->ib_config_state[i]);
	}

	/*
	 * Restore the interrupt mapping registers
	 * And make sure the intrs are idle.
	 */
	for (i = 0; i < INTERRUPT_MAPPING_ENTRIES; i++) {
		CSRA_FS((caddr_t)dev_hdl, INTERRUPT_CLEAR, i,
		    ENTRIES_INT_STATE, INTERRUPT_IDLE_STATE);
		CSRA_XS((caddr_t)dev_hdl, INTERRUPT_MAPPING, i,
		    pxu_p->ib_intr_map[i]);
	}

	/* Restore MMU configuration states */
	/* Clear the cache. */
	CSR_XS((caddr_t)dev_hdl, MMU_TTE_CACHE_INVALIDATE, -1ull);

	for (i = 0; i < MMU_KEYS; i++) {
		CSR_XS((caddr_t)dev_hdl, mmu_config_state_regs[i],
		    pxu_p->mmu_config_state[i]);
	}

	/* Restore PEC configuration states */
	/* Make sure all reset bits are low until error is detected */
	CSR_XS((caddr_t)dev_hdl, LPU_RESET, 0ull);

	for (i = 0; i < PEC_KEYS; i++) {
		if ((pec_config_state_regs[i].chip == PX_CHIP_TYPE(pxu_p)) ||
		    (pec_config_state_regs[i].chip == PX_CHIP_UNIDENTIFIED)) {
			CSR_XS((caddr_t)dev_hdl, pec_config_state_regs[i].reg,
			    pxu_p->pec_config_state[i]);
		}
	}

	/* Enable PCI-E interrupt */
	if ((ret = hvio_intr_devino_to_sysino(dev_hdl, pxu_p, devino,
	    &sysino)) != H_EOK) {
		cmn_err(CE_WARN,
		    "hvio_resume: hvio_intr_devino_to_sysino failed, "
		    "ret 0x%lx", ret);
	}

	if ((ret = hvio_intr_setstate(dev_hdl, sysino, INTR_IDLE_STATE))
	    != H_EOK) {
		cmn_err(CE_WARN,
		    "hvio_resume: hvio_intr_setstate failed, "
		    "ret 0x%lx", ret);
	}

	total_size = PEC_SIZE + MMU_SIZE + IB_SIZE + IB_MAP_SIZE;
	kmem_free(pxu_p->pec_config_state, total_size);

	pxu_p->pec_config_state = NULL;
	pxu_p->mmu_config_state = NULL;
	pxu_p->ib_config_state = NULL;
	pxu_p->ib_intr_map = NULL;

	msiq_resume(dev_hdl, pxu_p);
}
uint64_t
hvio_cb_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
{
	uint64_t *config_state, *cb_regs;
	int i, cb_size, cb_keys;

	switch (PX_CHIP_TYPE(pxu_p)) {
	case PX_CHIP_OBERON:
		cb_size = UBC_SIZE;
		cb_keys = UBC_KEYS;
		cb_regs = ubc_config_state_regs;
		break;
	case PX_CHIP_FIRE:
		cb_size = JBC_SIZE;
		cb_keys = JBC_KEYS;
		cb_regs = jbc_config_state_regs;
		break;
	default:
		DBG(DBG_CB, NULL, "hvio_cb_suspend - unknown chip type: 0x%x\n",
		    PX_CHIP_TYPE(pxu_p));
		break;
	}

	config_state = kmem_zalloc(cb_size, KM_NOSLEEP);

	if (config_state == NULL) {
		return (H_EIO);
	}

	/* Save the configuration states */
	pxu_p->xcb_config_state = config_state;
	for (i = 0; i < cb_keys; i++) {
		pxu_p->xcb_config_state[i] =
		    CSR_XR((caddr_t)dev_hdl, cb_regs[i]);
	}

	return (H_EOK);
}
void
hvio_cb_resume(devhandle_t pci_dev_hdl, devhandle_t xbus_dev_hdl,
    devino_t devino, pxu_t *pxu_p)
{
	sysino_t sysino;
	uint64_t *cb_regs;
	int i, cb_size, cb_keys;
	uint64_t ret;

	switch (PX_CHIP_TYPE(pxu_p)) {
	case PX_CHIP_OBERON:
		cb_size = UBC_SIZE;
		cb_keys = UBC_KEYS;
		cb_regs = ubc_config_state_regs;
		/*
		 * No reason to have any reset bits high until an error is
		 * detected on the link.
		 */
		CSR_XS((caddr_t)xbus_dev_hdl, UBC_ERROR_STATUS_CLEAR, -1ull);
		break;
	case PX_CHIP_FIRE:
		cb_size = JBC_SIZE;
		cb_keys = JBC_KEYS;
		cb_regs = jbc_config_state_regs;
		/*
		 * No reason to have any reset bits high until an error is
		 * detected on the link.
		 */
		CSR_XS((caddr_t)xbus_dev_hdl, JBC_ERROR_STATUS_CLEAR, -1ull);
		break;
	default:
		DBG(DBG_CB, NULL, "hvio_cb_resume - unknown chip type: 0x%x\n",
		    PX_CHIP_TYPE(pxu_p));
		break;
	}

	ASSERT(pxu_p->xcb_config_state);

	/* Restore the configuration states */
	for (i = 0; i < cb_keys; i++) {
		CSR_XS((caddr_t)xbus_dev_hdl, cb_regs[i],
		    pxu_p->xcb_config_state[i]);
	}

	/* Enable XBC interrupt */
	if ((ret = hvio_intr_devino_to_sysino(pci_dev_hdl, pxu_p, devino,
	    &sysino)) != H_EOK) {
		cmn_err(CE_WARN,
		    "hvio_cb_resume: hvio_intr_devino_to_sysino failed, "
		    "ret 0x%lx", ret);
	}

	if ((ret = hvio_intr_setstate(pci_dev_hdl, sysino, INTR_IDLE_STATE))
	    != H_EOK) {
		cmn_err(CE_WARN,
		    "hvio_cb_resume: hvio_intr_setstate failed, "
		    "ret 0x%lx", ret);
	}

	kmem_free(pxu_p->xcb_config_state, cb_size);

	pxu_p->xcb_config_state = NULL;
}
static uint64_t
msiq_suspend(devhandle_t dev_hdl, pxu_t *pxu_p)
{
	size_t bufsz;
	volatile uint64_t *cur_p;
	int i;

	bufsz = MSIQ_STATE_SIZE + MSIQ_MAPPING_SIZE + MSIQ_OTHER_SIZE;
	if ((pxu_p->msiq_config_state = kmem_zalloc(bufsz, KM_NOSLEEP)) ==
	    NULL)
		return (H_EIO);

	cur_p = pxu_p->msiq_config_state;

	/* Save each EQ state */
	for (i = 0; i < EVENT_QUEUE_STATE_ENTRIES; i++, cur_p++)
		*cur_p = CSRA_XR((caddr_t)dev_hdl, EVENT_QUEUE_STATE, i);

	/* Save MSI mapping registers */
	for (i = 0; i < MSI_MAPPING_ENTRIES; i++, cur_p++)
		*cur_p = CSRA_XR((caddr_t)dev_hdl, MSI_MAPPING, i);

	/* Save all other MSIQ registers */
	for (i = 0; i < MSIQ_OTHER_KEYS; i++, cur_p++)
		*cur_p = CSR_XR((caddr_t)dev_hdl, msiq_config_other_regs[i]);

	return (H_EOK);
}
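/*
 * msiq_config_state is consumed sequentially: EVENT_QUEUE_STATE_ENTRIES
 * EQ state words, then MSI_MAPPING_ENTRIES mapping words, then the
 * MSIQ_OTHER_KEYS miscellaneous registers.  msiq_resume() below walks
 * cur_p across the same three regions in the same order, so the two
 * functions must stay in lockstep.
 */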
static void
msiq_resume(devhandle_t dev_hdl, pxu_t *pxu_p)
{
	size_t bufsz;
	uint64_t *cur_p, state;
	int i;
	uint64_t ret;

	bufsz = MSIQ_STATE_SIZE + MSIQ_MAPPING_SIZE + MSIQ_OTHER_SIZE;
	cur_p = pxu_p->msiq_config_state;
	/*
	 * Initialize EQ base address register and
	 * Interrupt Mondo Data 0 register.
	 */
	if ((ret = hvio_msiq_init(dev_hdl, pxu_p)) != H_EOK) {
		cmn_err(CE_WARN,
		    "msiq_resume: hvio_msiq_init failed, "
		    "ret 0x%lx", ret);
	}

	/* Restore EQ states */
	for (i = 0; i < EVENT_QUEUE_STATE_ENTRIES; i++, cur_p++) {
		state = (*cur_p) & EVENT_QUEUE_STATE_ENTRIES_STATE_MASK;
		if ((state == EQ_ACTIVE_STATE) || (state == EQ_ERROR_STATE))
			CSRA_BS((caddr_t)dev_hdl, EVENT_QUEUE_CONTROL_SET,
			    i, ENTRIES_EN);
	}

	/* Restore MSI mapping */
	for (i = 0; i < MSI_MAPPING_ENTRIES; i++, cur_p++)
		CSRA_XS((caddr_t)dev_hdl, MSI_MAPPING, i, *cur_p);

	/*
	 * Restore all other registers. MSI 32 bit address and
	 * MSI 64 bit address are restored as part of this.
	 */
	for (i = 0; i < MSIQ_OTHER_KEYS; i++, cur_p++)
		CSR_XS((caddr_t)dev_hdl, msiq_config_other_regs[i], *cur_p);

	kmem_free(pxu_p->msiq_config_state, bufsz);
	pxu_p->msiq_config_state = NULL;
}
/*
 * Sends PME_Turn_Off message to put the link in L2/L3 ready state.
 * Called by px_goto_l23ready.
 * Returns DDI_SUCCESS or DDI_FAILURE.
 */
int
px_send_pme_turnoff(caddr_t csr_base)
{
	volatile uint64_t reg;

	reg = CSR_XR(csr_base, TLU_PME_TURN_OFF_GENERATE);
	/* If already pending, return failure */
	if (reg & (1ull << TLU_PME_TURN_OFF_GENERATE_PTO)) {
		DBG(DBG_PWR, NULL, "send_pme_turnoff: pending PTO bit "
		    "tlu_pme_turn_off_generate = %x\n", reg);
		return (DDI_FAILURE);
	}

	/* Write to PME_Turn_off reg to broadcast */
	reg |= (1ull << TLU_PME_TURN_OFF_GENERATE_PTO);
	CSR_XS(csr_base, TLU_PME_TURN_OFF_GENERATE, reg);

	return (DDI_SUCCESS);
}
/*
 * Checks for link being in L1idle state.
 * Returns:
 * DDI_SUCCESS - if the link is in L1idle
 * DDI_FAILURE - if the link is not in L1idle
 */
int
px_link_wait4l1idle(caddr_t csr_base)
{
	uint8_t ltssm_state;
	int ntries = px_max_l1_tries;

	while (ntries > 0) {
		ltssm_state = CSR_FR(csr_base, LPU_LTSSM_STATUS1, LTSSM_STATE);
		if (ltssm_state == LPU_LTSSM_L1_IDLE || (--ntries <= 0))
			break;
		delay(1);
	}
	DBG(DBG_PWR, NULL, "check_for_l1idle: ltssm_state %x\n", ltssm_state);
	return ((ltssm_state == LPU_LTSSM_L1_IDLE) ? DDI_SUCCESS : DDI_FAILURE);
}
/*
 * Transitions the link to L0, after it is down.
 */
int
px_link_retrain(caddr_t csr_base)
{
	volatile uint64_t reg;

	reg = CSR_XR(csr_base, TLU_CONTROL);
	if (!(reg & (1ull << TLU_REMAIN_DETECT_QUIET))) {
		DBG(DBG_PWR, NULL, "retrain_link: detect.quiet bit not set\n");
		return (DDI_FAILURE);
	}

	/* Clear link down bit in TLU Other Event Clear Status Register. */
	CSR_BS(csr_base, TLU_OTHER_EVENT_STATUS_CLEAR, LDN_P);

	/* Clear Drain bit in TLU Status Register */
	CSR_BS(csr_base, TLU_STATUS, DRAIN);

	/* Clear Remain in Detect.Quiet bit in TLU Control Register */
	reg = CSR_XR(csr_base, TLU_CONTROL);
	reg &= ~(1ull << TLU_REMAIN_DETECT_QUIET);
	CSR_XS(csr_base, TLU_CONTROL, reg);

	return (DDI_SUCCESS);
}
void
px_enable_detect_quiet(caddr_t csr_base)
{
	volatile uint64_t tlu_ctrl;

	tlu_ctrl = CSR_XR(csr_base, TLU_CONTROL);
	tlu_ctrl |= (1ull << TLU_REMAIN_DETECT_QUIET);
	CSR_XS(csr_base, TLU_CONTROL, tlu_ctrl);
}
static uint_t
oberon_hp_pwron(caddr_t csr_base)
{
	volatile uint64_t reg;
	boolean_t link_retry, link_up;
	int loop, i;

	DBG(DBG_HP, NULL, "oberon_hp_pwron the slot\n");

	/* Check Leaf Reset status */
	reg = CSR_XR(csr_base, ILU_ERROR_LOG_ENABLE);
	if (!(reg & (1ull << ILU_ERROR_LOG_ENABLE_SPARE3))) {
		DBG(DBG_HP, NULL, "oberon_hp_pwron fails: leaf not reset\n");
		goto fail;
	}

	/* Check HP Capable */
	if (!CSR_BR(csr_base, TLU_SLOT_CAPABILITIES, HP)) {
		DBG(DBG_HP, NULL, "oberon_hp_pwron fails: leaf not "
		    "hotplugable\n");
		goto fail;
	}

	/* Check Slot status */
	reg = CSR_XR(csr_base, TLU_SLOT_STATUS);
	if (!(reg & (1ull << TLU_SLOT_STATUS_PSD)) ||
	    (reg & (1ull << TLU_SLOT_STATUS_MRLS))) {
		DBG(DBG_HP, NULL, "oberon_hp_pwron fails: slot status %lx\n",
		    reg);
		goto fail;
	}

	/* Blink power LED, this is done from pciehpc already */

	/* Turn on slot power */
	CSR_BS(csr_base, HOTPLUG_CONTROL, PWREN);

	/* power fault detection */
	delay(drv_usectohz(25000));
	CSR_BS(csr_base, TLU_SLOT_STATUS, PWFD);
	CSR_BC(csr_base, HOTPLUG_CONTROL, PWREN);

	/* wait to check power state */
	delay(drv_usectohz(25000));

	if (!CSR_BR(csr_base, TLU_SLOT_STATUS, PWFD)) {
		DBG(DBG_HP, NULL, "oberon_hp_pwron fails: power fault\n");
		goto fail1;
	}

	CSR_BS(csr_base, HOTPLUG_CONTROL, PWREN);

	delay(drv_usectohz(25000));
	CSR_BS(csr_base, TLU_SLOT_STATUS, PWFD);
	CSR_BS(csr_base, TLU_SLOT_CONTROL, PWFDEN);

	/* Turn on slot clock */
	CSR_BS(csr_base, HOTPLUG_CONTROL, CLKEN);

	link_up = B_FALSE;
	link_retry = B_FALSE;

	for (loop = 0; (loop < link_retry_count) && (link_up == B_FALSE);
	    loop++) {
		if (link_retry == B_TRUE) {
			DBG(DBG_HP, NULL, "oberon_hp_pwron : retry link loop "
			    "%d\n", loop);
			CSR_BS(csr_base, TLU_CONTROL, DRN_TR_DIS);
			CSR_XS(csr_base, FLP_PORT_CONTROL, 0x1);
			delay(drv_usectohz(10000));
			CSR_BC(csr_base, TLU_CONTROL, DRN_TR_DIS);
			CSR_BS(csr_base, TLU_DIAGNOSTIC, IFC_DIS);
			CSR_BC(csr_base, HOTPLUG_CONTROL, N_PERST);
			delay(drv_usectohz(50000));
		}

		/* Release PCI-E Reset */
		delay(drv_usectohz(wait_perst));
		CSR_BS(csr_base, HOTPLUG_CONTROL, N_PERST);

		/*
		 * This should be done from pciehpc already
		 */

		/* Enable PCIE port */
		delay(drv_usectohz(wait_enable_port));
		CSR_BS(csr_base, TLU_CONTROL, DRN_TR_DIS);
		CSR_XS(csr_base, FLP_PORT_CONTROL, 0x20);

		/* wait for the link up */
		for (i = 0; (i < 2) && (link_up == B_FALSE); i++) {
			delay(drv_usectohz(link_status_check));
			reg = CSR_XR(csr_base, DLU_LINK_LAYER_STATUS);

			if ((((reg >> DLU_LINK_LAYER_STATUS_INIT_FC_SM_STS) &
			    DLU_LINK_LAYER_STATUS_INIT_FC_SM_STS_MASK) ==
			    DLU_LINK_LAYER_STATUS_INIT_FC_SM_STS_FC_INIT_DONE) &&
			    (reg & (1ull << DLU_LINK_LAYER_STATUS_DLUP_STS)) &&
			    ((reg &
			    DLU_LINK_LAYER_STATUS_LNK_STATE_MACH_STS_MASK) ==
			    DLU_LINK_LAYER_STATUS_LNK_STATE_MACH_STS_DL_ACTIVE)) {
				DBG(DBG_HP, NULL, "oberon_hp_pwron : "
				    "link is up\n");
				link_up = B_TRUE;
			} else
				link_retry = B_TRUE;
		}
	}

	if (link_up == B_FALSE) {
		DBG(DBG_HP, NULL, "oberon_hp_pwron fails to enable "
		    "PCI-E port\n");
		goto fail2;
	}

	/* link is up */
	CSR_BC(csr_base, TLU_DIAGNOSTIC, IFC_DIS);
	CSR_BS(csr_base, FLP_PORT_ACTIVE_STATUS, TRAIN_ERROR);
	CSR_BS(csr_base, TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR, TE_P);
	CSR_BS(csr_base, TLU_UNCORRECTABLE_ERROR_STATUS_CLEAR, TE_S);
	CSR_BC(csr_base, TLU_CONTROL, DRN_TR_DIS);

	/* Restore LUP/LDN */
	reg = CSR_XR(csr_base, TLU_OTHER_EVENT_LOG_ENABLE);
	if (px_tlu_oe_log_mask & (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_P))
		reg |= 1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_P;
	if (px_tlu_oe_log_mask & (1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_P))
		reg |= 1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_P;
	if (px_tlu_oe_log_mask & (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_S))
		reg |= 1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_S;
	if (px_tlu_oe_log_mask & (1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_S))
		reg |= 1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_S;
	CSR_XS(csr_base, TLU_OTHER_EVENT_LOG_ENABLE, reg);

	/*
	 * SPLS = 00b, SPLV = 11001b, i.e. 25W
	 */
	reg = CSR_XR(csr_base, TLU_SLOT_CAPABILITIES);
	reg &= ~(TLU_SLOT_CAPABILITIES_SPLS_MASK <<
	    TLU_SLOT_CAPABILITIES_SPLS);
	reg &= ~(TLU_SLOT_CAPABILITIES_SPLV_MASK <<
	    TLU_SLOT_CAPABILITIES_SPLV);
	reg |= (0x19 << TLU_SLOT_CAPABILITIES_SPLV);
	CSR_XS(csr_base, TLU_SLOT_CAPABILITIES, reg);

	/* Turn on Power LED */
	reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
	reg &= ~PCIE_SLOTCTL_PWR_INDICATOR_MASK;
	reg = pcie_slotctl_pwr_indicator_set(reg,
	    PCIE_SLOTCTL_INDICATOR_STATE_ON);
	CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);

	/* Notify to SCF */
	if (CSR_BR(csr_base, HOTPLUG_CONTROL, SLOTPON))
		CSR_BC(csr_base, HOTPLUG_CONTROL, SLOTPON);
	else
		CSR_BS(csr_base, HOTPLUG_CONTROL, SLOTPON);

	/* Wait for one second */
	delay(drv_usectohz(1000000));

	return (DDI_SUCCESS);

fail2:
	/* Link up is failed */
	CSR_BS(csr_base, FLP_PORT_CONTROL, PORT_DIS);
	CSR_BC(csr_base, HOTPLUG_CONTROL, N_PERST);
	delay(drv_usectohz(150));

	CSR_BC(csr_base, HOTPLUG_CONTROL, CLKEN);
	delay(drv_usectohz(100));

fail1:
	CSR_BC(csr_base, TLU_SLOT_CONTROL, PWFDEN);

	CSR_BC(csr_base, HOTPLUG_CONTROL, PWREN);

	reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
	reg &= ~PCIE_SLOTCTL_PWR_INDICATOR_MASK;
	reg = pcie_slotctl_pwr_indicator_set(reg,
	    PCIE_SLOTCTL_INDICATOR_STATE_OFF);
	CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);

	CSR_BC(csr_base, TLU_SLOT_STATUS, PWFD);

fail:
	return ((uint_t)DDI_FAILURE);
}
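/*
 * The slot power limit programmed above decodes per the PCIe spec:
 * SPLS = 00b selects a 1.0W scale, and SPLV = 0x19 (25 decimal)
 * therefore advertises 25 x 1.0W = 25W, matching the "25W" note in
 * the comment.
 */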
hrtime_t oberon_leaf_reset_timeout = 120ll * NANOSEC;	/* 120 seconds */
static uint_t
oberon_hp_pwroff(caddr_t csr_base)
{
	volatile uint64_t reg;
	volatile uint64_t reg_tluue, reg_tluce;
	hrtime_t start_time, end_time;

	DBG(DBG_HP, NULL, "oberon_hp_pwroff the slot\n");

	/* Blink power LED, this is done from pciehpc already */

	/* Clear Slot Event */
	CSR_BS(csr_base, TLU_SLOT_STATUS, PSDC);
	CSR_BS(csr_base, TLU_SLOT_STATUS, PWFD);

	/* DRN_TR_DIS on */
	CSR_BS(csr_base, TLU_CONTROL, DRN_TR_DIS);
	delay(drv_usectohz(10000));

	/* Disable LUP/LDN */
	reg = CSR_XR(csr_base, TLU_OTHER_EVENT_LOG_ENABLE);
	reg &= ~((1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_P) |
	    (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_P) |
	    (1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_S) |
	    (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_S));
	CSR_XS(csr_base, TLU_OTHER_EVENT_LOG_ENABLE, reg);

	/* Save the TLU registers */
	reg_tluue = CSR_XR(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE);
	reg_tluce = CSR_XR(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE);
	CSR_XS(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE, 0);
	CSR_XS(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE, 0);

	/* Disable PCIE port */
	CSR_BS(csr_base, FLP_PORT_CONTROL, PORT_DIS);

	/* PCIE reset */
	delay(drv_usectohz(10000));
	CSR_BC(csr_base, HOTPLUG_CONTROL, N_PERST);

	/* PCIE clock stop */
	delay(drv_usectohz(150));
	CSR_BC(csr_base, HOTPLUG_CONTROL, CLKEN);

	/* Turn off slot power */
	delay(drv_usectohz(100));
	CSR_BC(csr_base, TLU_SLOT_CONTROL, PWFDEN);
	CSR_BC(csr_base, HOTPLUG_CONTROL, PWREN);
	delay(drv_usectohz(25000));
	CSR_BS(csr_base, TLU_SLOT_STATUS, PWFD);

	/* write 0 to bit 7 of ILU Error Log Enable Register */
	CSR_BC(csr_base, ILU_ERROR_LOG_ENABLE, SPARE3);

	/* Set back TLU registers */
	CSR_XS(csr_base, TLU_UNCORRECTABLE_ERROR_LOG_ENABLE, reg_tluue);
	CSR_XS(csr_base, TLU_CORRECTABLE_ERROR_LOG_ENABLE, reg_tluce);

	/* Power LED off */
	reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
	reg &= ~PCIE_SLOTCTL_PWR_INDICATOR_MASK;
	reg = pcie_slotctl_pwr_indicator_set(reg,
	    PCIE_SLOTCTL_INDICATOR_STATE_OFF);
	CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);

	/* Indicator LED blink */
	reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
	reg &= ~PCIE_SLOTCTL_ATTN_INDICATOR_MASK;
	reg = pcie_slotctl_attn_indicator_set(reg,
	    PCIE_SLOTCTL_INDICATOR_STATE_BLINK);
	CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);

	/* Notify to SCF */
	if (CSR_BR(csr_base, HOTPLUG_CONTROL, SLOTPON))
		CSR_BC(csr_base, HOTPLUG_CONTROL, SLOTPON);
	else
		CSR_BS(csr_base, HOTPLUG_CONTROL, SLOTPON);

	start_time = gethrtime();
	/* Check Leaf Reset status */
	while (!(CSR_BR(csr_base, ILU_ERROR_LOG_ENABLE, SPARE3))) {
		if ((end_time = (gethrtime() - start_time)) >
		    oberon_leaf_reset_timeout) {
			cmn_err(CE_WARN, "Oberon leaf reset is not completed, "
			    "even after waiting %llx ticks", end_time);

			break;
		}

		/* Wait for one second */
		delay(drv_usectohz(1000000));
	}

	/* Indicator LED off */
	reg = CSR_XR(csr_base, TLU_SLOT_CONTROL);
	reg &= ~PCIE_SLOTCTL_ATTN_INDICATOR_MASK;
	reg = pcie_slotctl_attn_indicator_set(reg,
	    PCIE_SLOTCTL_INDICATOR_STATE_OFF);
	CSR_XS(csr_base, TLU_SLOT_CONTROL, reg);

	return (DDI_SUCCESS);
}
static uint_t
oberon_hpreg_get(void *cookie, off_t off)
{
	caddr_t csr_base = *(caddr_t *)cookie;
	volatile uint64_t val = -1ull;

	switch (off) {
	case PCIE_SLOTCAP:
		val = CSR_XR(csr_base, TLU_SLOT_CAPABILITIES);
		break;
	case PCIE_SLOTCTL:
		val = CSR_XR(csr_base, TLU_SLOT_CONTROL);

		/* Get the power state */
		val |= (CSR_XR(csr_base, HOTPLUG_CONTROL) &
		    (1ull << HOTPLUG_CONTROL_PWREN)) ?
		    0 : PCIE_SLOTCTL_PWR_CONTROL;
		break;
	case PCIE_SLOTSTS:
		val = CSR_XR(csr_base, TLU_SLOT_STATUS);
		break;
	case PCIE_LINKCAP:
		val = CSR_XR(csr_base, TLU_LINK_CAPABILITIES);
		break;
	case PCIE_LINKSTS:
		val = CSR_XR(csr_base, TLU_LINK_STATUS);
		break;
	default:
		DBG(DBG_HP, NULL, "oberon_hpreg_get(): "
		    "unsupported offset 0x%lx\n", off);
		break;
	}

	return ((uint_t)val);
}
static uint_t
oberon_hpreg_put(void *cookie, off_t off, uint_t val)
{
	caddr_t csr_base = *(caddr_t *)cookie;
	volatile uint64_t pwr_state_on, pwr_fault;
	uint_t pwr_off, ret = DDI_SUCCESS;

	DBG(DBG_HP, NULL, "oberon_hpreg_put 0x%lx: cur %x, new %x\n",
	    off, oberon_hpreg_get(cookie, off), val);

	switch (off) {
	case PCIE_SLOTCTL:
		/*
		 * Depending on the current state, insertion or removal
		 * will go through their respective sequences.
		 */
		pwr_state_on = CSR_BR(csr_base, HOTPLUG_CONTROL, PWREN);
		pwr_off = val & PCIE_SLOTCTL_PWR_CONTROL;

		if (!pwr_off && !pwr_state_on)
			ret = oberon_hp_pwron(csr_base);
		else if (pwr_off && pwr_state_on) {
			pwr_fault = CSR_XR(csr_base, TLU_SLOT_STATUS) &
			    (1ull << TLU_SLOT_STATUS_PWFD);

			if (pwr_fault) {
				DBG(DBG_HP, NULL, "oberon_hpreg_put: power "
				    "off because of power fault\n");
				CSR_BC(csr_base, HOTPLUG_CONTROL, PWREN);
			}
			else
				ret = oberon_hp_pwroff(csr_base);
		} else
			CSR_XS(csr_base, TLU_SLOT_CONTROL, val);
		break;
	case PCIE_SLOTSTS:
		CSR_XS(csr_base, TLU_SLOT_STATUS, val);
		break;
	default:
		DBG(DBG_HP, NULL, "oberon_hpreg_put(): "
		    "unsupported offset 0x%lx\n", off);
		ret = (uint_t)DDI_FAILURE;
		break;
	}

	return (ret);
}
int
hvio_hotplug_init(dev_info_t *dip, void *arg)
{
	pcie_hp_regops_t *regops = (pcie_hp_regops_t *)arg;
	px_t *px_p = DIP_TO_STATE(dip);
	pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;
	volatile uint64_t reg;

	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON) {
		if (!CSR_BR((caddr_t)pxu_p->px_address[PX_REG_CSR],
		    TLU_SLOT_CAPABILITIES, HP)) {
			DBG(DBG_HP, NULL, "%s%d: hotplug capable not set\n",
			    ddi_driver_name(dip), ddi_get_instance(dip));
			return (DDI_FAILURE);
		}

		/* For empty or disconnected slot, disable LUP/LDN */
		if (!CSR_BR((caddr_t)pxu_p->px_address[PX_REG_CSR],
		    TLU_SLOT_STATUS, PSD) ||
		    !CSR_BR((caddr_t)pxu_p->px_address[PX_REG_CSR],
		    HOTPLUG_CONTROL, PWREN)) {

			reg = CSR_XR((caddr_t)pxu_p->px_address[PX_REG_CSR],
			    TLU_OTHER_EVENT_LOG_ENABLE);
			reg &= ~((1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_P) |
			    (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_P) |
			    (1ull << TLU_OTHER_EVENT_STATUS_SET_LDN_S) |
			    (1ull << TLU_OTHER_EVENT_STATUS_SET_LUP_S));
			CSR_XS((caddr_t)pxu_p->px_address[PX_REG_CSR],
			    TLU_OTHER_EVENT_LOG_ENABLE, reg);
		}

		regops->get = oberon_hpreg_get;
		regops->put = oberon_hpreg_put;

		/* cookie is the csr_base */
		regops->cookie = (void *)&pxu_p->px_address[PX_REG_CSR];

		return (DDI_SUCCESS);
	}

	return (DDI_ENOTSUP);
}
int
hvio_hotplug_uninit(dev_info_t *dip)
{
	px_t *px_p = DIP_TO_STATE(dip);
	pxu_t *pxu_p = (pxu_t *)px_p->px_plat_p;

	if (PX_CHIP_TYPE(pxu_p) == PX_CHIP_OBERON)
		return (DDI_SUCCESS);

	return (DDI_FAILURE);
}