4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
25 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
26 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
27 * Copyright 2012 Garrett D'Amore <garrett@damore.org>. All rights reserved.
28 * Copyright 2017 Joyent, Inc.
32 * x86 root nexus driver
35 #include <sys/sysmacros.h>
37 #include <sys/autoconf.h>
38 #include <sys/sysmacros.h>
39 #include <sys/debug.h>
41 #include <sys/ddidmareq.h>
42 #include <sys/promif.h>
43 #include <sys/devops.h>
45 #include <sys/cmn_err.h>
47 #include <vm/seg_kmem.h>
48 #include <vm/seg_dev.h>
54 #include <sys/avintr.h>
55 #include <sys/errno.h>
56 #include <sys/modctl.h>
57 #include <sys/ddi_impldefs.h>
58 #include <sys/sunddi.h>
59 #include <sys/sunndi.h>
60 #include <sys/mach_intr.h>
62 #include <sys/ontrap.h>
63 #include <sys/atomic.h>
65 #include <sys/rootnex.h>
66 #include <vm/hat_i86.h>
67 #include <sys/ddifm.h>
68 #include <sys/ddi_isa.h>
72 #include <sys/bootinfo.h>
73 #include <sys/hypervisor.h>
74 #include <sys/bootconf.h>
75 #include <vm/kboot_mmu.h>
78 #if defined(__amd64) && !defined(__xpv)
84 * enable/disable extra checking of function parameters. Useful for debugging
88 int rootnex_alloc_check_parms
= 1;
89 int rootnex_bind_check_parms
= 1;
90 int rootnex_bind_check_inuse
= 1;
91 int rootnex_unbind_verify_buffer
= 0;
92 int rootnex_sync_check_parms
= 1;
94 int rootnex_alloc_check_parms
= 0;
95 int rootnex_bind_check_parms
= 0;
96 int rootnex_bind_check_inuse
= 0;
97 int rootnex_unbind_verify_buffer
= 0;
98 int rootnex_sync_check_parms
= 0;
101 boolean_t rootnex_dmar_not_setup
;
103 /* Master Abort and Target Abort panic flag */
104 int rootnex_fm_ma_ta_panic_flag
= 0;
106 /* Semi-temporary patchables to phase in bug fixes, test drivers, etc. */
107 int rootnex_bind_fail
= 1;
108 int rootnex_bind_warn
= 1;
109 uint8_t *rootnex_warn_list
;
110 /* bitmasks for rootnex_warn_list. Up to 8 different warnings with uint8_t */
111 #define ROOTNEX_BIND_WARNING (0x1 << 0)
114 * revert back to old broken behavior of always sync'ing entire copy buffer.
115 * This is useful if be have a buggy driver which doesn't correctly pass in
116 * the offset and size into ddi_dma_sync().
118 int rootnex_sync_ignore_params
= 0;
121 * For the 64-bit kernel, pre-alloc enough cookies for a 256K buffer plus 1
122 * page for alignment. For the 32-bit kernel, pre-alloc enough cookies for a
123 * 64K buffer plus 1 page for alignment (we have less kernel space in a 32-bit
124 * kernel). Allocate enough windows to handle a 256K buffer w/ at least 65
125 * sgllen DMA engine, and enough copybuf buffer state pages to handle 2 pages
126 * (< 8K). We will still need to allocate the copy buffer during bind though
127 * (if we need one). These can only be modified in /etc/system before rootnex
131 int rootnex_prealloc_cookies
= 65;
132 int rootnex_prealloc_windows
= 4;
133 int rootnex_prealloc_copybuf
= 2;
135 int rootnex_prealloc_cookies
= 33;
136 int rootnex_prealloc_windows
= 4;
137 int rootnex_prealloc_copybuf
= 2;
140 /* driver global state */
141 static rootnex_state_t
*rootnex_state
;
144 /* shortcut to rootnex counters */
145 static uint64_t *rootnex_cnt
;
149 * XXX - does x86 even need these or are they left over from the SPARC days?
151 /* statically defined integer/boolean properties for the root node */
152 static rootnex_intprop_t rootnex_intprp
[] = {
153 { "PAGESIZE", PAGESIZE
},
154 { "MMU_PAGESIZE", MMU_PAGESIZE
},
155 { "MMU_PAGEOFFSET", MMU_PAGEOFFSET
},
156 { DDI_RELATIVE_ADDRESSING
, 1 },
158 #define NROOT_INTPROPS (sizeof (rootnex_intprp) / sizeof (rootnex_intprop_t))
161 * If we're dom0, we're using a real device so we need to load
162 * the cookies with MFNs instead of PFNs.
165 typedef maddr_t rootnex_addr_t
;
166 #define ROOTNEX_PADDR_TO_RBASE(pa) \
167 (DOMAIN_IS_INITDOMAIN(xen_info) ? pa_to_ma(pa) : (pa))
169 typedef paddr_t rootnex_addr_t
;
170 #define ROOTNEX_PADDR_TO_RBASE(pa) (pa)
173 static struct cb_ops rootnex_cb_ops
= {
176 nodev
, /* strategy */
185 nochpoll
, /* chpoll */
186 ddi_prop_op
, /* cb_prop_op */
187 NULL
, /* struct streamtab */
188 D_NEW
| D_MP
| D_HOTPLUG
, /* compatibility flags */
190 nodev
, /* cb_aread */
191 nodev
/* cb_awrite */
194 static int rootnex_map(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_map_req_t
*mp
,
195 off_t offset
, off_t len
, caddr_t
*vaddrp
);
196 static int rootnex_map_fault(dev_info_t
*dip
, dev_info_t
*rdip
,
197 struct hat
*hat
, struct seg
*seg
, caddr_t addr
,
198 struct devpage
*dp
, pfn_t pfn
, uint_t prot
, uint_t lock
);
199 static int rootnex_dma_allochdl(dev_info_t
*dip
, dev_info_t
*rdip
,
200 ddi_dma_attr_t
*attr
, int (*waitfp
)(caddr_t
), caddr_t arg
,
201 ddi_dma_handle_t
*handlep
);
202 static int rootnex_dma_freehdl(dev_info_t
*dip
, dev_info_t
*rdip
,
203 ddi_dma_handle_t handle
);
204 static int rootnex_dma_bindhdl(dev_info_t
*dip
, dev_info_t
*rdip
,
205 ddi_dma_handle_t handle
, struct ddi_dma_req
*dmareq
,
206 ddi_dma_cookie_t
*cookiep
, uint_t
*ccountp
);
207 static int rootnex_dma_unbindhdl(dev_info_t
*dip
, dev_info_t
*rdip
,
208 ddi_dma_handle_t handle
);
209 static int rootnex_dma_sync(dev_info_t
*dip
, dev_info_t
*rdip
,
210 ddi_dma_handle_t handle
, off_t off
, size_t len
, uint_t cache_flags
);
211 static int rootnex_dma_win(dev_info_t
*dip
, dev_info_t
*rdip
,
212 ddi_dma_handle_t handle
, uint_t win
, off_t
*offp
, size_t *lenp
,
213 ddi_dma_cookie_t
*cookiep
, uint_t
*ccountp
);
214 static int rootnex_dma_mctl(dev_info_t
*dip
, dev_info_t
*rdip
,
215 ddi_dma_handle_t handle
, enum ddi_dma_ctlops request
,
216 off_t
*offp
, size_t *lenp
, caddr_t
*objp
, uint_t cache_flags
);
217 static int rootnex_ctlops(dev_info_t
*dip
, dev_info_t
*rdip
,
218 ddi_ctl_enum_t ctlop
, void *arg
, void *result
);
219 static int rootnex_fm_init(dev_info_t
*dip
, dev_info_t
*tdip
, int tcap
,
220 ddi_iblock_cookie_t
*ibc
);
221 static int rootnex_intr_ops(dev_info_t
*pdip
, dev_info_t
*rdip
,
222 ddi_intr_op_t intr_op
, ddi_intr_handle_impl_t
*hdlp
, void *result
);
223 static int rootnex_alloc_intr_fixed(dev_info_t
*, ddi_intr_handle_impl_t
*,
225 static int rootnex_free_intr_fixed(dev_info_t
*, ddi_intr_handle_impl_t
*);
227 static int rootnex_coredma_allochdl(dev_info_t
*dip
, dev_info_t
*rdip
,
228 ddi_dma_attr_t
*attr
, int (*waitfp
)(caddr_t
), caddr_t arg
,
229 ddi_dma_handle_t
*handlep
);
230 static int rootnex_coredma_freehdl(dev_info_t
*dip
, dev_info_t
*rdip
,
231 ddi_dma_handle_t handle
);
232 static int rootnex_coredma_bindhdl(dev_info_t
*dip
, dev_info_t
*rdip
,
233 ddi_dma_handle_t handle
, struct ddi_dma_req
*dmareq
,
234 ddi_dma_cookie_t
*cookiep
, uint_t
*ccountp
);
235 static int rootnex_coredma_unbindhdl(dev_info_t
*dip
, dev_info_t
*rdip
,
236 ddi_dma_handle_t handle
);
237 #if defined(__amd64) && !defined(__xpv)
238 static void rootnex_coredma_reset_cookies(dev_info_t
*dip
,
239 ddi_dma_handle_t handle
);
240 static int rootnex_coredma_get_cookies(dev_info_t
*dip
, ddi_dma_handle_t handle
,
241 ddi_dma_cookie_t
**cookiepp
, uint_t
*ccountp
);
242 static int rootnex_coredma_set_cookies(dev_info_t
*dip
, ddi_dma_handle_t handle
,
243 ddi_dma_cookie_t
*cookiep
, uint_t ccount
);
244 static int rootnex_coredma_clear_cookies(dev_info_t
*dip
,
245 ddi_dma_handle_t handle
);
246 static int rootnex_coredma_get_sleep_flags(ddi_dma_handle_t handle
);
248 static int rootnex_coredma_sync(dev_info_t
*dip
, dev_info_t
*rdip
,
249 ddi_dma_handle_t handle
, off_t off
, size_t len
, uint_t cache_flags
);
250 static int rootnex_coredma_win(dev_info_t
*dip
, dev_info_t
*rdip
,
251 ddi_dma_handle_t handle
, uint_t win
, off_t
*offp
, size_t *lenp
,
252 ddi_dma_cookie_t
*cookiep
, uint_t
*ccountp
);
254 #if defined(__amd64) && !defined(__xpv)
255 static int rootnex_coredma_hdl_setprivate(dev_info_t
*dip
, dev_info_t
*rdip
,
256 ddi_dma_handle_t handle
, void *v
);
257 static void *rootnex_coredma_hdl_getprivate(dev_info_t
*dip
, dev_info_t
*rdip
,
258 ddi_dma_handle_t handle
);
262 static struct bus_ops rootnex_bus_ops
= {
270 rootnex_dma_allochdl
,
273 rootnex_dma_unbindhdl
,
279 i_ddi_rootnex_get_eventcookie
,
280 i_ddi_rootnex_add_eventcall
,
281 i_ddi_rootnex_remove_eventcall
,
282 i_ddi_rootnex_post_event
,
283 0, /* bus_intr_ctl */
285 0, /* bus_unconfig */
286 rootnex_fm_init
, /* bus_fm_init */
287 NULL
, /* bus_fm_fini */
288 NULL
, /* bus_fm_access_enter */
289 NULL
, /* bus_fm_access_exit */
291 rootnex_intr_ops
/* bus_intr_op */
294 static int rootnex_attach(dev_info_t
*dip
, ddi_attach_cmd_t cmd
);
295 static int rootnex_detach(dev_info_t
*dip
, ddi_detach_cmd_t cmd
);
296 static int rootnex_quiesce(dev_info_t
*dip
);
298 static struct dev_ops rootnex_ops
= {
310 rootnex_quiesce
, /* quiesce */
313 static struct modldrv rootnex_modldrv
= {
319 static struct modlinkage rootnex_modlinkage
= {
321 (void *)&rootnex_modldrv
,
325 #if defined(__amd64) && !defined(__xpv)
326 static iommulib_nexops_t iommulib_nexops
= {
327 IOMMU_NEXOPS_VERSION
,
328 "Rootnex IOMMU ops Vers 1.1",
330 rootnex_coredma_allochdl
,
331 rootnex_coredma_freehdl
,
332 rootnex_coredma_bindhdl
,
333 rootnex_coredma_unbindhdl
,
334 rootnex_coredma_reset_cookies
,
335 rootnex_coredma_get_cookies
,
336 rootnex_coredma_set_cookies
,
337 rootnex_coredma_clear_cookies
,
338 rootnex_coredma_get_sleep_flags
,
339 rootnex_coredma_sync
,
341 rootnex_coredma_hdl_setprivate
,
342 rootnex_coredma_hdl_getprivate
349 extern struct seg_ops segdev_ops
;
350 extern int ignore_hardware_nodes
; /* force flag from ddi_impl.c */
352 extern int ddi_map_debug_flag
;
353 #define ddi_map_debug if (ddi_map_debug_flag) prom_printf
355 extern void i86_pp_map(page_t
*pp
, caddr_t kaddr
);
356 extern void i86_va_map(caddr_t vaddr
, struct as
*asp
, caddr_t kaddr
);
357 extern int (*psm_intr_ops
)(dev_info_t
*, ddi_intr_handle_impl_t
*,
358 psm_intr_op_t
, int *);
359 extern int impl_ddi_sunbus_initchild(dev_info_t
*dip
);
360 extern void impl_ddi_sunbus_removechild(dev_info_t
*dip
);
363 * Use device arena to use for device control register mappings.
364 * Various kernel memory walkers (debugger, dtrace) need to know
365 * to avoid this address range to prevent undesired device activity.
367 extern void *device_arena_alloc(size_t size
, int vm_flag
);
368 extern void device_arena_free(void * vaddr
, size_t size
);
374 static int rootnex_dma_init();
375 static void rootnex_add_props(dev_info_t
*);
376 static int rootnex_ctl_reportdev(dev_info_t
*dip
);
377 static struct intrspec
*rootnex_get_ispec(dev_info_t
*rdip
, int inum
);
378 static int rootnex_map_regspec(ddi_map_req_t
*mp
, caddr_t
*vaddrp
);
379 static int rootnex_unmap_regspec(ddi_map_req_t
*mp
, caddr_t
*vaddrp
);
380 static int rootnex_map_handle(ddi_map_req_t
*mp
);
381 static void rootnex_clean_dmahdl(ddi_dma_impl_t
*hp
);
382 static int rootnex_valid_alloc_parms(ddi_dma_attr_t
*attr
, uint_t maxsegsize
);
383 static int rootnex_valid_bind_parms(ddi_dma_req_t
*dmareq
,
384 ddi_dma_attr_t
*attr
);
385 static void rootnex_get_sgl(ddi_dma_obj_t
*dmar_object
, ddi_dma_cookie_t
*sgl
,
386 rootnex_sglinfo_t
*sglinfo
);
387 static void rootnex_dvma_get_sgl(ddi_dma_obj_t
*dmar_object
,
388 ddi_dma_cookie_t
*sgl
, rootnex_sglinfo_t
*sglinfo
);
389 static int rootnex_bind_slowpath(ddi_dma_impl_t
*hp
, struct ddi_dma_req
*dmareq
,
390 rootnex_dma_t
*dma
, ddi_dma_attr_t
*attr
, ddi_dma_obj_t
*dmao
, int kmflag
);
391 static int rootnex_setup_copybuf(ddi_dma_impl_t
*hp
, struct ddi_dma_req
*dmareq
,
392 rootnex_dma_t
*dma
, ddi_dma_attr_t
*attr
);
393 static void rootnex_teardown_copybuf(rootnex_dma_t
*dma
);
394 static int rootnex_setup_windows(ddi_dma_impl_t
*hp
, rootnex_dma_t
*dma
,
395 ddi_dma_attr_t
*attr
, ddi_dma_obj_t
*dmao
, int kmflag
);
396 static void rootnex_teardown_windows(rootnex_dma_t
*dma
);
397 static void rootnex_init_win(ddi_dma_impl_t
*hp
, rootnex_dma_t
*dma
,
398 rootnex_window_t
*window
, ddi_dma_cookie_t
*cookie
, off_t cur_offset
);
399 static void rootnex_setup_cookie(ddi_dma_obj_t
*dmar_object
,
400 rootnex_dma_t
*dma
, ddi_dma_cookie_t
*cookie
, off_t cur_offset
,
401 size_t *copybuf_used
, page_t
**cur_pp
);
402 static int rootnex_sgllen_window_boundary(ddi_dma_impl_t
*hp
,
403 rootnex_dma_t
*dma
, rootnex_window_t
**windowp
, ddi_dma_cookie_t
*cookie
,
404 ddi_dma_attr_t
*attr
, off_t cur_offset
);
405 static int rootnex_copybuf_window_boundary(ddi_dma_impl_t
*hp
,
406 rootnex_dma_t
*dma
, rootnex_window_t
**windowp
,
407 ddi_dma_cookie_t
*cookie
, off_t cur_offset
, size_t *copybuf_used
);
408 static int rootnex_maxxfer_window_boundary(ddi_dma_impl_t
*hp
,
409 rootnex_dma_t
*dma
, rootnex_window_t
**windowp
, ddi_dma_cookie_t
*cookie
);
410 static int rootnex_valid_sync_parms(ddi_dma_impl_t
*hp
, rootnex_window_t
*win
,
411 off_t offset
, size_t size
, uint_t cache_flags
);
412 static int rootnex_verify_buffer(rootnex_dma_t
*dma
);
413 static int rootnex_dma_check(dev_info_t
*dip
, const void *handle
,
414 const void *comp_addr
, const void *not_used
);
415 static boolean_t
rootnex_need_bounce_seg(ddi_dma_obj_t
*dmar_object
,
416 rootnex_sglinfo_t
*sglinfo
);
417 static struct as
*rootnex_get_as(ddi_dma_obj_t
*dmar_object
);
427 rootnex_state
= NULL
;
428 return (mod_install(&rootnex_modlinkage
));
437 _info(struct modinfo
*modinfop
)
439 return (mod_info(&rootnex_modlinkage
, modinfop
));
459 rootnex_attach(dev_info_t
*dip
, ddi_attach_cmd_t cmd
)
468 #if defined(__amd64) && !defined(__xpv)
469 return (immu_unquiesce());
471 return (DDI_SUCCESS
);
474 return (DDI_FAILURE
);
478 * We should only have one instance of rootnex. Save it away since we
479 * don't have an easy way to get it back later.
481 ASSERT(rootnex_state
== NULL
);
482 rootnex_state
= kmem_zalloc(sizeof (rootnex_state_t
), KM_SLEEP
);
484 rootnex_state
->r_dip
= dip
;
485 rootnex_state
->r_err_ibc
= (ddi_iblock_cookie_t
)ipltospl(15);
486 rootnex_state
->r_reserved_msg_printed
= B_FALSE
;
488 rootnex_cnt
= &rootnex_state
->r_counters
[0];
492 * Set minimum fm capability level for i86pc platforms and then
493 * initialize error handling. Since we're the rootnex, we don't
494 * care what's returned in the fmcap field.
496 ddi_system_fmcap
= DDI_FM_EREPORT_CAPABLE
| DDI_FM_ERRCB_CAPABLE
|
497 DDI_FM_ACCCHK_CAPABLE
| DDI_FM_DMACHK_CAPABLE
;
498 fmcap
= ddi_system_fmcap
;
499 ddi_fm_init(dip
, &fmcap
, &rootnex_state
->r_err_ibc
);
501 /* initialize DMA related state */
502 e
= rootnex_dma_init();
503 if (e
!= DDI_SUCCESS
) {
504 kmem_free(rootnex_state
, sizeof (rootnex_state_t
));
505 return (DDI_FAILURE
);
508 /* Add static root node properties */
509 rootnex_add_props(dip
);
511 /* since we can't call ddi_report_dev() */
512 cmn_err(CE_CONT
, "?root nexus = %s\n", ddi_get_name(dip
));
514 /* Initialize rootnex event handle */
515 i_ddi_rootnex_init_events(dip
);
517 #if defined(__amd64) && !defined(__xpv)
518 e
= iommulib_nexus_register(dip
, &iommulib_nexops
,
519 &rootnex_state
->r_iommulib_handle
);
521 ASSERT(e
== DDI_SUCCESS
);
524 return (DDI_SUCCESS
);
534 rootnex_detach(dev_info_t
*dip
, ddi_detach_cmd_t cmd
)
538 #if defined(__amd64) && !defined(__xpv)
539 return (immu_quiesce());
541 return (DDI_SUCCESS
);
544 return (DDI_FAILURE
);
563 * size of our cookie/window/copybuf state needed in dma bind that we
564 * pre-alloc in dma_alloc_handle
566 rootnex_state
->r_prealloc_cookies
= rootnex_prealloc_cookies
;
567 rootnex_state
->r_prealloc_size
=
568 (rootnex_state
->r_prealloc_cookies
* sizeof (ddi_dma_cookie_t
)) +
569 (rootnex_prealloc_windows
* sizeof (rootnex_window_t
)) +
570 (rootnex_prealloc_copybuf
* sizeof (rootnex_pgmap_t
));
573 * setup DDI DMA handle kmem cache, align each handle on 64 bytes,
574 * allocate 16 extra bytes for struct pointer alignment
575 * (p->dmai_private & dma->dp_prealloc_buffer)
577 bufsize
= sizeof (ddi_dma_impl_t
) + sizeof (rootnex_dma_t
) +
578 rootnex_state
->r_prealloc_size
+ 0x10;
579 rootnex_state
->r_dmahdl_cache
= kmem_cache_create("rootnex_dmahdl",
580 bufsize
, 64, NULL
, NULL
, NULL
, NULL
, NULL
, 0);
581 if (rootnex_state
->r_dmahdl_cache
== NULL
) {
582 return (DDI_FAILURE
);
586 * allocate array to track which major numbers we have printed warnings
589 rootnex_warn_list
= kmem_zalloc(devcnt
* sizeof (*rootnex_warn_list
),
592 return (DDI_SUCCESS
);
597 * rootnex_add_props()
601 rootnex_add_props(dev_info_t
*dip
)
603 rootnex_intprop_t
*rpp
;
606 /* Add static integer/boolean properties to the root node */
607 rpp
= rootnex_intprp
;
608 for (i
= 0; i
< NROOT_INTPROPS
; i
++) {
609 (void) e_ddi_prop_update_int(DDI_DEV_T_NONE
, dip
,
610 rpp
[i
].prop_name
, rpp
[i
].prop_value
);
617 * *************************
618 * ctlops related routines
619 * *************************
628 rootnex_ctlops(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_ctl_enum_t ctlop
,
629 void *arg
, void *result
)
632 struct ddi_parent_private_data
*pdp
;
635 case DDI_CTLOPS_DMAPMAPC
:
637 * Return 'partial' to indicate that dma mapping
638 * has to be done in the main MMU.
640 return (DDI_DMA_PARTIAL
);
642 case DDI_CTLOPS_BTOP
:
644 * Convert byte count input to physical page units.
645 * (byte counts that are not a page-size multiple
648 *(ulong_t
*)result
= btop(*(ulong_t
*)arg
);
649 return (DDI_SUCCESS
);
651 case DDI_CTLOPS_PTOB
:
653 * Convert size in physical pages to bytes
655 *(ulong_t
*)result
= ptob(*(ulong_t
*)arg
);
656 return (DDI_SUCCESS
);
658 case DDI_CTLOPS_BTOPR
:
660 * Convert byte count input to physical page units
661 * (byte counts that are not a page-size multiple
664 *(ulong_t
*)result
= btopr(*(ulong_t
*)arg
);
665 return (DDI_SUCCESS
);
667 case DDI_CTLOPS_INITCHILD
:
668 return (impl_ddi_sunbus_initchild(arg
));
670 case DDI_CTLOPS_UNINITCHILD
:
671 impl_ddi_sunbus_removechild(arg
);
672 return (DDI_SUCCESS
);
674 case DDI_CTLOPS_REPORTDEV
:
675 return (rootnex_ctl_reportdev(rdip
));
677 case DDI_CTLOPS_IOMIN
:
679 * Nothing to do here but reflect back..
681 return (DDI_SUCCESS
);
683 case DDI_CTLOPS_REGSIZE
:
684 case DDI_CTLOPS_NREGS
:
687 case DDI_CTLOPS_SIDDEV
:
688 if (ndi_dev_is_prom_node(rdip
))
689 return (DDI_SUCCESS
);
690 if (ndi_dev_is_persistent_node(rdip
))
691 return (DDI_SUCCESS
);
692 return (DDI_FAILURE
);
694 case DDI_CTLOPS_POWER
:
695 return ((*pm_platform_power
)((power_req_t
*)arg
));
697 case DDI_CTLOPS_RESERVED0
: /* Was DDI_CTLOPS_NINTRS, obsolete */
698 case DDI_CTLOPS_RESERVED1
: /* Was DDI_CTLOPS_POKE_INIT, obsolete */
699 case DDI_CTLOPS_RESERVED2
: /* Was DDI_CTLOPS_POKE_FLUSH, obsolete */
700 case DDI_CTLOPS_RESERVED3
: /* Was DDI_CTLOPS_POKE_FINI, obsolete */
701 case DDI_CTLOPS_RESERVED4
: /* Was DDI_CTLOPS_INTR_HILEVEL, obsolete */
702 case DDI_CTLOPS_RESERVED5
: /* Was DDI_CTLOPS_XLATE_INTRS, obsolete */
703 if (!rootnex_state
->r_reserved_msg_printed
) {
704 rootnex_state
->r_reserved_msg_printed
= B_TRUE
;
705 cmn_err(CE_WARN
, "Failing ddi_ctlops call(s) for "
706 "1 or more reserved/obsolete operations.");
708 return (DDI_FAILURE
);
711 return (DDI_FAILURE
);
714 * The rest are for "hardware" properties
716 if ((pdp
= ddi_get_parent_data(rdip
)) == NULL
)
717 return (DDI_FAILURE
);
719 if (ctlop
== DDI_CTLOPS_NREGS
) {
721 *ptr
= pdp
->par_nreg
;
723 off_t
*size
= (off_t
*)result
;
727 if (n
>= pdp
->par_nreg
) {
728 return (DDI_FAILURE
);
730 *size
= (off_t
)pdp
->par_reg
[n
].regspec_size
;
732 return (DDI_SUCCESS
);
737 * rootnex_ctl_reportdev()
741 rootnex_ctl_reportdev(dev_info_t
*dev
)
743 int i
, n
, len
, f_len
= 0;
746 buf
= kmem_alloc(REPORTDEV_BUFSIZE
, KM_SLEEP
);
747 f_len
+= snprintf(buf
, REPORTDEV_BUFSIZE
,
748 "%s%d at root", ddi_driver_name(dev
), ddi_get_instance(dev
));
751 for (i
= 0; i
< sparc_pd_getnreg(dev
); i
++) {
753 struct regspec
*rp
= sparc_pd_getreg(dev
, i
);
756 f_len
+= snprintf(buf
+ len
, REPORTDEV_BUFSIZE
- len
,
759 f_len
+= snprintf(buf
+ len
, REPORTDEV_BUFSIZE
- len
,
763 switch (rp
->regspec_bustype
) {
766 f_len
+= snprintf(buf
+ len
, REPORTDEV_BUFSIZE
- len
,
767 "%s 0x%x", DEVI_EISA_NEXNAME
, rp
->regspec_addr
);
771 f_len
+= snprintf(buf
+ len
, REPORTDEV_BUFSIZE
- len
,
772 "%s 0x%x", DEVI_ISA_NEXNAME
, rp
->regspec_addr
);
776 f_len
+= snprintf(buf
+ len
, REPORTDEV_BUFSIZE
- len
,
777 "space %x offset %x",
778 rp
->regspec_bustype
, rp
->regspec_addr
);
783 for (i
= 0, n
= sparc_pd_getnintr(dev
); i
< n
; i
++) {
787 f_len
+= snprintf(buf
+ len
, REPORTDEV_BUFSIZE
- len
,
791 pri
= INT_IPL(sparc_pd_getintr(dev
, i
)->intrspec_pri
);
792 f_len
+= snprintf(buf
+ len
, REPORTDEV_BUFSIZE
- len
,
793 " sparc ipl %d", pri
);
797 if (f_len
+ 1 >= REPORTDEV_BUFSIZE
) {
798 cmn_err(CE_NOTE
, "next message is truncated: "
799 "printed length 1024, real length %d", f_len
);
802 cmn_err(CE_CONT
, "?%s\n", buf
);
803 kmem_free(buf
, REPORTDEV_BUFSIZE
);
804 return (DDI_SUCCESS
);
819 rootnex_map(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_map_req_t
*mp
, off_t offset
,
820 off_t len
, caddr_t
*vaddrp
)
822 struct regspec
*orp
= NULL
;
823 struct regspec64 rp
= { 0 };
824 ddi_map_req_t mr
= *mp
; /* Get private copy of request */
828 switch (mp
->map_op
) {
829 case DDI_MO_MAP_LOCKED
:
831 case DDI_MO_MAP_HANDLE
:
835 cmn_err(CE_WARN
, "rootnex_map: unimplemented map op %d.",
837 #endif /* DDI_MAP_DEBUG */
838 return (DDI_ME_UNIMPLEMENTED
);
841 if (mp
->map_flags
& DDI_MF_USER_MAPPING
) {
843 cmn_err(CE_WARN
, "rootnex_map: unimplemented map type: user.");
844 #endif /* DDI_MAP_DEBUG */
845 return (DDI_ME_UNIMPLEMENTED
);
849 * First, we need to get the original regspec out before we convert it
850 * to the extended format. If we have a register number, then we need to
851 * convert that to a regspec.
853 if (mp
->map_type
== DDI_MT_RNUMBER
) {
855 int rnumber
= mp
->map_obj
.rnumber
;
857 static char *out_of_range
=
858 "rootnex_map: Out of range rnumber <%d>, device <%s>";
859 #endif /* DDI_MAP_DEBUG */
861 orp
= i_ddi_rnumber_to_regspec(rdip
, rnumber
);
864 cmn_err(CE_WARN
, out_of_range
, rnumber
,
866 #endif /* DDI_MAP_DEBUG */
867 return (DDI_ME_RNUMBER_RANGE
);
869 } else if (!(mp
->map_flags
& DDI_MF_EXT_REGSPEC
)) {
870 orp
= mp
->map_obj
.rp
;
874 * Ensure that we are always using a 64-bit extended regspec regardless
875 * of what was passed into us. If the child driver is using a 64-bit
876 * regspec, then we need to make sure that we copy this to the local
880 rp
.regspec_bustype
= orp
->regspec_bustype
;
881 rp
.regspec_addr
= orp
->regspec_addr
;
882 rp
.regspec_size
= orp
->regspec_size
;
884 struct regspec64
*rp64
;
885 rp64
= (struct regspec64
*)mp
->map_obj
.rp
;
889 mp
->map_type
= DDI_MT_REGSPEC
;
890 mp
->map_flags
|= DDI_MF_EXT_REGSPEC
;
891 mp
->map_obj
.rp
= (struct regspec
*)&rp
;
894 * Adjust offset and length correspnding to called values...
895 * XXX: A non-zero length means override the one in the regspec
896 * XXX: (regardless of what's in the parent's range?)
900 cmn_err(CE_CONT
, "rootnex: <%s,%s> <0x%x, 0x%x, 0x%d> offset %d len %d "
901 "handle 0x%x\n", ddi_get_name(dip
), ddi_get_name(rdip
),
902 rp
.regspec_bustype
, rp
.regspec_addr
, rp
.regspec_size
, offset
,
903 len
, mp
->map_handlep
);
904 #endif /* DDI_MAP_DEBUG */
907 * I/O or memory mapping:
909 * <bustype=0, addr=x, len=x>: memory
910 * <bustype=1, addr=x, len=x>: i/o
911 * <bustype>1, addr=0, len=x>: x86-compatibility i/o
914 if (rp
.regspec_bustype
> 1 && rp
.regspec_addr
!= 0) {
915 cmn_err(CE_WARN
, "<%s,%s> invalid register spec"
916 " <0x%" PRIx64
", 0x%" PRIx64
", 0x%" PRIx64
">",
917 ddi_get_name(dip
), ddi_get_name(rdip
), rp
.regspec_bustype
,
918 rp
.regspec_addr
, rp
.regspec_size
);
919 return (DDI_ME_INVAL
);
922 if (rp
.regspec_bustype
> 1 && rp
.regspec_addr
== 0) {
924 * compatibility i/o mapping
926 rp
.regspec_bustype
+= offset
;
929 * Normal memory or i/o mapping
931 rp
.regspec_addr
+= offset
;
935 rp
.regspec_size
= len
;
938 cmn_err(CE_CONT
, " <%s,%s> <0x%" PRIx64
", 0x%" PRIx64
939 ", 0x%" PRId64
"> offset %d len %d handle 0x%x\n",
940 ddi_get_name(dip
), ddi_get_name(rdip
), rp
.regspec_bustype
,
941 rp
.regspec_addr
, rp
.regspec_size
, offset
, len
, mp
->map_handlep
);
942 #endif /* DDI_MAP_DEBUG */
946 * The x86 root nexus does not have any notion of valid ranges of
947 * addresses. Its children have valid ranges, but because there are none
948 * for the nexus, we don't need to call i_ddi_apply_range(). Verify
951 ASSERT0(sparc_pd_getnrng(dip
));
953 switch (mp
->map_op
) {
954 case DDI_MO_MAP_LOCKED
:
957 * Set up the locked down kernel mapping to the regspec...
960 return (rootnex_map_regspec(mp
, vaddrp
));
968 return (rootnex_unmap_regspec(mp
, vaddrp
));
970 case DDI_MO_MAP_HANDLE
:
972 return (rootnex_map_handle(mp
));
975 return (DDI_ME_UNIMPLEMENTED
);
981 * rootnex_map_fault()
983 * fault in mappings for requestors
987 rootnex_map_fault(dev_info_t
*dip
, dev_info_t
*rdip
, struct hat
*hat
,
988 struct seg
*seg
, caddr_t addr
, struct devpage
*dp
, pfn_t pfn
, uint_t prot
,
993 ddi_map_debug("rootnex_map_fault: address <%x> pfn <%x>", addr
, pfn
);
994 ddi_map_debug(" Seg <%s>\n",
995 seg
->s_ops
== &segdev_ops
? "segdev" :
996 seg
== &kvseg
? "segkmem" : "NONE!");
997 #endif /* DDI_MAP_DEBUG */
1000 * This is all terribly broken, but it is a start
1002 * XXX Note that this test means that segdev_ops
1003 * must be exported from seg_dev.c.
1004 * XXX What about devices with their own segment drivers?
1006 if (seg
->s_ops
== &segdev_ops
) {
1007 struct segdev_data
*sdp
= (struct segdev_data
*)seg
->s_data
;
1011 * This is one plausible interpretation of
1012 * a null hat i.e. use the first hat on the
1013 * address space hat list which by convention is
1014 * the hat of the system MMU. At alternative
1015 * would be to panic .. this might well be better ..
1017 ASSERT(AS_READ_HELD(seg
->s_as
));
1018 hat
= seg
->s_as
->a_hat
;
1019 cmn_err(CE_NOTE
, "rootnex_map_fault: nil hat");
1021 hat_devload(hat
, addr
, MMU_PAGESIZE
, pfn
, prot
| sdp
->hat_attr
,
1022 (lock
? HAT_LOAD_LOCK
: HAT_LOAD
));
1023 } else if (seg
== &kvseg
&& dp
== NULL
) {
1024 hat_devload(kas
.a_hat
, addr
, MMU_PAGESIZE
, pfn
, prot
,
1027 return (DDI_FAILURE
);
1028 return (DDI_SUCCESS
);
1033 rootnex_map_regspec(ddi_map_req_t
*mp
, caddr_t
*vaddrp
)
1035 rootnex_addr_t rbase
;
1037 uint64_t npages
, pgoffset
;
1038 struct regspec64
*rp
;
1041 uint_t hat_acc_flags
;
1044 ASSERT(mp
->map_flags
& DDI_MF_EXT_REGSPEC
);
1045 rp
= (struct regspec64
*)mp
->map_obj
.rp
;
1046 hp
= mp
->map_handlep
;
1048 #ifdef DDI_MAP_DEBUG
1050 "rootnex_map_regspec: <0x%x 0x%x 0x%x> handle 0x%x\n",
1051 rp
->regspec_bustype
, rp
->regspec_addr
,
1052 rp
->regspec_size
, mp
->map_handlep
);
1053 #endif /* DDI_MAP_DEBUG */
1056 * I/O or memory mapping
1058 * <bustype=0, addr=x, len=x>: memory
1059 * <bustype=1, addr=x, len=x>: i/o
1060 * <bustype>1, addr=0, len=x>: x86-compatibility i/o
1063 if (rp
->regspec_bustype
> 1 && rp
->regspec_addr
!= 0) {
1064 cmn_err(CE_WARN
, "rootnex: invalid register spec"
1065 " <0x%" PRIx64
", 0x%" PRIx64
", 0x%" PRIx64
">",
1066 rp
->regspec_bustype
, rp
->regspec_addr
, rp
->regspec_size
);
1067 return (DDI_FAILURE
);
1070 if (rp
->regspec_bustype
!= 0) {
1072 * I/O space - needs a handle.
1075 return (DDI_FAILURE
);
1077 ap
= (ddi_acc_impl_t
*)hp
->ah_platform_private
;
1078 ap
->ahi_acc_attr
|= DDI_ACCATTR_IO_SPACE
;
1079 impl_acc_hdl_init(hp
);
1081 if (mp
->map_flags
& DDI_MF_DEVICE_MAPPING
) {
1082 #ifdef DDI_MAP_DEBUG
1083 ddi_map_debug("rootnex_map_regspec: mmap() "
1084 "to I/O space is not supported.\n");
1085 #endif /* DDI_MAP_DEBUG */
1086 return (DDI_ME_INVAL
);
1089 * 1275-compliant vs. compatibility i/o mapping
1092 (rp
->regspec_bustype
> 1 && rp
->regspec_addr
== 0) ?
1093 ((caddr_t
)(uintptr_t)rp
->regspec_bustype
) :
1094 ((caddr_t
)(uintptr_t)rp
->regspec_addr
);
1096 if (DOMAIN_IS_INITDOMAIN(xen_info
)) {
1097 hp
->ah_pfn
= xen_assign_pfn(
1098 mmu_btop((ulong_t
)rp
->regspec_addr
&
1101 hp
->ah_pfn
= mmu_btop(
1102 (ulong_t
)rp
->regspec_addr
& MMU_PAGEMASK
);
1105 hp
->ah_pfn
= mmu_btop((ulong_t
)rp
->regspec_addr
&
1108 hp
->ah_pnum
= mmu_btopr(rp
->regspec_size
+
1109 (ulong_t
)rp
->regspec_addr
& MMU_PAGEOFFSET
);
1112 #ifdef DDI_MAP_DEBUG
1114 "rootnex_map_regspec: \"Mapping\" %d bytes I/O space at 0x%x\n",
1115 rp
->regspec_size
, *vaddrp
);
1116 #endif /* DDI_MAP_DEBUG */
1117 return (DDI_SUCCESS
);
1127 * hp->ah_acc.devacc_attr_endian_flags.
1129 switch (hp
->ah_acc
.devacc_attr_dataorder
) {
1130 case DDI_STRICTORDER_ACC
:
1131 hat_acc_flags
= HAT_STRICTORDER
;
1133 case DDI_UNORDERED_OK_ACC
:
1134 hat_acc_flags
= HAT_UNORDERED_OK
;
1136 case DDI_MERGING_OK_ACC
:
1137 hat_acc_flags
= HAT_MERGING_OK
;
1139 case DDI_LOADCACHING_OK_ACC
:
1140 hat_acc_flags
= HAT_LOADCACHING_OK
;
1142 case DDI_STORECACHING_OK_ACC
:
1143 hat_acc_flags
= HAT_STORECACHING_OK
;
1146 ap
= (ddi_acc_impl_t
*)hp
->ah_platform_private
;
1147 ap
->ahi_acc_attr
|= DDI_ACCATTR_CPU_VADDR
;
1148 impl_acc_hdl_init(hp
);
1149 hp
->ah_hat_flags
= hat_acc_flags
;
1151 hat_acc_flags
= HAT_STRICTORDER
;
1154 rbase
= (rootnex_addr_t
)(rp
->regspec_addr
& MMU_PAGEMASK
);
1157 * If we're dom0, we're using a real device so we need to translate
1160 if (DOMAIN_IS_INITDOMAIN(xen_info
)) {
1161 pbase
= pfn_to_pa(xen_assign_pfn(mmu_btop(rbase
)));
1168 pgoffset
= (ulong_t
)rp
->regspec_addr
& MMU_PAGEOFFSET
;
1170 if (rp
->regspec_size
== 0) {
1171 #ifdef DDI_MAP_DEBUG
1172 ddi_map_debug("rootnex_map_regspec: zero regspec_size\n");
1173 #endif /* DDI_MAP_DEBUG */
1174 return (DDI_ME_INVAL
);
1177 if (mp
->map_flags
& DDI_MF_DEVICE_MAPPING
) {
1178 /* extra cast to make gcc happy */
1179 *vaddrp
= (caddr_t
)((uintptr_t)mmu_btop(pbase
));
1181 npages
= mmu_btopr(rp
->regspec_size
+ pgoffset
);
1183 #ifdef DDI_MAP_DEBUG
1184 ddi_map_debug("rootnex_map_regspec: Mapping %d pages "
1185 "physical %llx", npages
, pbase
);
1186 #endif /* DDI_MAP_DEBUG */
1188 cvaddr
= device_arena_alloc(ptob(npages
), VM_NOSLEEP
);
1190 return (DDI_ME_NORESOURCES
);
1193 * Now map in the pages we've allocated...
1195 hat_devload(kas
.a_hat
, cvaddr
, mmu_ptob(npages
),
1196 mmu_btop(pbase
), mp
->map_prot
| hat_acc_flags
,
1198 *vaddrp
= (caddr_t
)cvaddr
+ pgoffset
;
1200 /* save away pfn and npages for FMA */
1201 hp
= mp
->map_handlep
;
1203 hp
->ah_pfn
= mmu_btop(pbase
);
1204 hp
->ah_pnum
= npages
;
1208 #ifdef DDI_MAP_DEBUG
1209 ddi_map_debug("at virtual 0x%x\n", *vaddrp
);
1210 #endif /* DDI_MAP_DEBUG */
1211 return (DDI_SUCCESS
);
1216 rootnex_unmap_regspec(ddi_map_req_t
*mp
, caddr_t
*vaddrp
)
1218 caddr_t addr
= (caddr_t
)*vaddrp
;
1219 uint64_t npages
, pgoffset
;
1220 struct regspec64
*rp
;
1222 if (mp
->map_flags
& DDI_MF_DEVICE_MAPPING
)
1225 ASSERT(mp
->map_flags
& DDI_MF_EXT_REGSPEC
);
1226 rp
= (struct regspec64
*)mp
->map_obj
.rp
;
1228 if (rp
->regspec_size
== 0) {
1229 #ifdef DDI_MAP_DEBUG
1230 ddi_map_debug("rootnex_unmap_regspec: zero regspec_size\n");
1231 #endif /* DDI_MAP_DEBUG */
1232 return (DDI_ME_INVAL
);
1236 * I/O or memory mapping:
1238 * <bustype=0, addr=x, len=x>: memory
1239 * <bustype=1, addr=x, len=x>: i/o
1240 * <bustype>1, addr=0, len=x>: x86-compatibility i/o
1242 if (rp
->regspec_bustype
!= 0) {
1244 * This is I/O space, which requires no particular
1245 * processing on unmap since it isn't mapped in the
1248 return (DDI_SUCCESS
);
1254 pgoffset
= (uintptr_t)addr
& MMU_PAGEOFFSET
;
1255 npages
= mmu_btopr(rp
->regspec_size
+ pgoffset
);
1256 hat_unload(kas
.a_hat
, addr
- pgoffset
, ptob(npages
), HAT_UNLOAD_UNLOCK
);
1257 device_arena_free(addr
- pgoffset
, ptob(npages
));
1260 * Destroy the pointer - the mapping has logically gone
1264 return (DDI_SUCCESS
);
1268 rootnex_map_handle(ddi_map_req_t
*mp
)
1270 rootnex_addr_t rbase
;
1273 struct regspec64
*rp
;
1276 rp
= (struct regspec64
*)mp
->map_obj
.rp
;
1278 #ifdef DDI_MAP_DEBUG
1280 "rootnex_map_handle: <0x%x 0x%x 0x%x> handle 0x%x\n",
1281 rp
->regspec_bustype
, rp
->regspec_addr
,
1282 rp
->regspec_size
, mp
->map_handlep
);
1283 #endif /* DDI_MAP_DEBUG */
1286 * I/O or memory mapping:
1288 * <bustype=0, addr=x, len=x>: memory
1289 * <bustype=1, addr=x, len=x>: i/o
1290 * <bustype>1, addr=0, len=x>: x86-compatibility i/o
1292 if (rp
->regspec_bustype
!= 0) {
1294 * This refers to I/O space, and we don't support "mapping"
1295 * I/O space to a user.
1297 return (DDI_FAILURE
);
1301 * Set up the hat_flags for the mapping.
1303 hp
= mp
->map_handlep
;
1305 switch (hp
->ah_acc
.devacc_attr_endian_flags
) {
1306 case DDI_NEVERSWAP_ACC
:
1307 hp
->ah_hat_flags
= HAT_NEVERSWAP
| HAT_STRICTORDER
;
1309 case DDI_STRUCTURE_LE_ACC
:
1310 hp
->ah_hat_flags
= HAT_STRUCTURE_LE
;
1312 case DDI_STRUCTURE_BE_ACC
:
1313 return (DDI_FAILURE
);
1315 return (DDI_REGS_ACC_CONFLICT
);
1318 switch (hp
->ah_acc
.devacc_attr_dataorder
) {
1319 case DDI_STRICTORDER_ACC
:
1321 case DDI_UNORDERED_OK_ACC
:
1322 hp
->ah_hat_flags
|= HAT_UNORDERED_OK
;
1324 case DDI_MERGING_OK_ACC
:
1325 hp
->ah_hat_flags
|= HAT_MERGING_OK
;
1327 case DDI_LOADCACHING_OK_ACC
:
1328 hp
->ah_hat_flags
|= HAT_LOADCACHING_OK
;
1330 case DDI_STORECACHING_OK_ACC
:
1331 hp
->ah_hat_flags
|= HAT_STORECACHING_OK
;
1334 return (DDI_FAILURE
);
1337 rbase
= (rootnex_addr_t
)rp
->regspec_addr
&
1338 (~(rootnex_addr_t
)MMU_PAGEOFFSET
);
1339 pgoffset
= (ulong_t
)rp
->regspec_addr
& MMU_PAGEOFFSET
;
1341 if (rp
->regspec_size
== 0)
1342 return (DDI_ME_INVAL
);
1346 * If we're dom0, we're using a real device so we need to translate
1349 if (DOMAIN_IS_INITDOMAIN(xen_info
)) {
1350 pbase
= pfn_to_pa(xen_assign_pfn(mmu_btop(rbase
))) |
1351 (rbase
& MMU_PAGEOFFSET
);
1359 hp
->ah_pfn
= mmu_btop(pbase
);
1360 hp
->ah_pnum
= mmu_btopr(rp
->regspec_size
+ pgoffset
);
1362 return (DDI_SUCCESS
);
1368 * ************************
1369 * interrupt related code
1370 * ************************
1374 * rootnex_intr_ops()
1375 * bus_intr_op() function for interrupt support
1379 rootnex_intr_ops(dev_info_t
*pdip
, dev_info_t
*rdip
, ddi_intr_op_t intr_op
,
1380 ddi_intr_handle_impl_t
*hdlp
, void *result
)
1382 struct intrspec
*ispec
;
1384 DDI_INTR_NEXDBG((CE_CONT
,
1385 "rootnex_intr_ops: pdip = %p, rdip = %p, intr_op = %x, hdlp = %p\n",
1386 (void *)pdip
, (void *)rdip
, intr_op
, (void *)hdlp
));
1388 /* Process the interrupt operation */
1390 case DDI_INTROP_GETCAP
:
1391 /* First check with pcplusmp */
1392 if (psm_intr_ops
== NULL
)
1393 return (DDI_FAILURE
);
1395 if ((*psm_intr_ops
)(rdip
, hdlp
, PSM_INTR_OP_GET_CAP
, result
)) {
1397 return (DDI_FAILURE
);
1400 case DDI_INTROP_SETCAP
:
1401 if (psm_intr_ops
== NULL
)
1402 return (DDI_FAILURE
);
1404 if ((*psm_intr_ops
)(rdip
, hdlp
, PSM_INTR_OP_SET_CAP
, result
))
1405 return (DDI_FAILURE
);
1407 case DDI_INTROP_ALLOC
:
1408 ASSERT(hdlp
->ih_type
== DDI_INTR_TYPE_FIXED
);
1409 return (rootnex_alloc_intr_fixed(rdip
, hdlp
, result
));
1410 case DDI_INTROP_FREE
:
1411 ASSERT(hdlp
->ih_type
== DDI_INTR_TYPE_FIXED
);
1412 return (rootnex_free_intr_fixed(rdip
, hdlp
));
1413 case DDI_INTROP_GETPRI
:
1414 if ((ispec
= rootnex_get_ispec(rdip
, hdlp
->ih_inum
)) == NULL
)
1415 return (DDI_FAILURE
);
1416 *(int *)result
= ispec
->intrspec_pri
;
1418 case DDI_INTROP_SETPRI
:
1419 /* Validate the interrupt priority passed to us */
1420 if (*(int *)result
> LOCK_LEVEL
)
1421 return (DDI_FAILURE
);
1423 /* Ensure that PSM is all initialized and ispec is ok */
1424 if ((psm_intr_ops
== NULL
) ||
1425 ((ispec
= rootnex_get_ispec(rdip
, hdlp
->ih_inum
)) == NULL
))
1426 return (DDI_FAILURE
);
1428 /* Change the priority */
1429 if ((*psm_intr_ops
)(rdip
, hdlp
, PSM_INTR_OP_SET_PRI
, result
) ==
1431 return (DDI_FAILURE
);
1433 /* update the ispec with the new priority */
1434 ispec
->intrspec_pri
= *(int *)result
;
1436 case DDI_INTROP_ADDISR
:
1437 if ((ispec
= rootnex_get_ispec(rdip
, hdlp
->ih_inum
)) == NULL
)
1438 return (DDI_FAILURE
);
1439 ispec
->intrspec_func
= hdlp
->ih_cb_func
;
1441 case DDI_INTROP_REMISR
:
1442 if ((ispec
= rootnex_get_ispec(rdip
, hdlp
->ih_inum
)) == NULL
)
1443 return (DDI_FAILURE
);
1444 ispec
->intrspec_func
= (uint_t (*)()) 0;
1446 case DDI_INTROP_ENABLE
:
1447 if ((ispec
= rootnex_get_ispec(rdip
, hdlp
->ih_inum
)) == NULL
)
1448 return (DDI_FAILURE
);
1450 /* Call psmi to translate irq with the dip */
1451 if (psm_intr_ops
== NULL
)
1452 return (DDI_FAILURE
);
1454 ((ihdl_plat_t
*)hdlp
->ih_private
)->ip_ispecp
= ispec
;
1455 if ((*psm_intr_ops
)(rdip
, hdlp
, PSM_INTR_OP_XLATE_VECTOR
,
1456 (int *)&hdlp
->ih_vector
) == PSM_FAILURE
)
1457 return (DDI_FAILURE
);
1459 /* Add the interrupt handler */
1460 if (!add_avintr((void *)hdlp
, ispec
->intrspec_pri
,
1461 hdlp
->ih_cb_func
, DEVI(rdip
)->devi_name
, hdlp
->ih_vector
,
1462 hdlp
->ih_cb_arg1
, hdlp
->ih_cb_arg2
, NULL
, rdip
))
1463 return (DDI_FAILURE
);
1465 case DDI_INTROP_DISABLE
:
1466 if ((ispec
= rootnex_get_ispec(rdip
, hdlp
->ih_inum
)) == NULL
)
1467 return (DDI_FAILURE
);
1469 /* Call psm_ops() to translate irq with the dip */
1470 if (psm_intr_ops
== NULL
)
1471 return (DDI_FAILURE
);
1473 ((ihdl_plat_t
*)hdlp
->ih_private
)->ip_ispecp
= ispec
;
1474 (void) (*psm_intr_ops
)(rdip
, hdlp
,
1475 PSM_INTR_OP_XLATE_VECTOR
, (int *)&hdlp
->ih_vector
);
1477 /* Remove the interrupt handler */
1478 rem_avintr((void *)hdlp
, ispec
->intrspec_pri
,
1479 hdlp
->ih_cb_func
, hdlp
->ih_vector
);
1481 case DDI_INTROP_SETMASK
:
1482 if (psm_intr_ops
== NULL
)
1483 return (DDI_FAILURE
);
1485 if ((*psm_intr_ops
)(rdip
, hdlp
, PSM_INTR_OP_SET_MASK
, NULL
))
1486 return (DDI_FAILURE
);
1488 case DDI_INTROP_CLRMASK
:
1489 if (psm_intr_ops
== NULL
)
1490 return (DDI_FAILURE
);
1492 if ((*psm_intr_ops
)(rdip
, hdlp
, PSM_INTR_OP_CLEAR_MASK
, NULL
))
1493 return (DDI_FAILURE
);
1495 case DDI_INTROP_GETPENDING
:
1496 if (psm_intr_ops
== NULL
)
1497 return (DDI_FAILURE
);
1499 if ((*psm_intr_ops
)(rdip
, hdlp
, PSM_INTR_OP_GET_PENDING
,
1502 return (DDI_FAILURE
);
1505 case DDI_INTROP_NAVAIL
:
1506 case DDI_INTROP_NINTRS
:
1507 *(int *)result
= i_ddi_get_intx_nintrs(rdip
);
1508 if (*(int *)result
== 0) {
1510 * Special case for 'pcic' driver' only. This driver
1511 * driver is a child of 'isa' and 'rootnex' drivers.
1513 * See detailed comments on this in the function
1514 * rootnex_get_ispec().
1516 * Children of 'pcic' send 'NINITR' request all the
1517 * way to rootnex driver. But, the 'pdp->par_nintr'
1518 * field may not initialized. So, we fake it here
1519 * to return 1 (a la what PCMCIA nexus does).
1521 if (strcmp(ddi_get_name(rdip
), "pcic") == 0)
1524 return (DDI_FAILURE
);
1527 case DDI_INTROP_SUPPORTED_TYPES
:
1528 *(int *)result
= DDI_INTR_TYPE_FIXED
; /* Always ... */
1531 return (DDI_FAILURE
);
1534 return (DDI_SUCCESS
);
1539 * rootnex_get_ispec()
1540 * convert an interrupt number to an interrupt specification.
1541 * The interrupt number determines which interrupt spec will be
1542 * returned if more than one exists.
1544 * Look into the parent private data area of the 'rdip' to find out
1545 * the interrupt specification. First check to make sure there is
1546 * one that matchs "inumber" and then return a pointer to it.
1548 * Return NULL if one could not be found.
1550 * NOTE: This is needed for rootnex_intr_ops()
1552 static struct intrspec
*
1553 rootnex_get_ispec(dev_info_t
*rdip
, int inum
)
1555 struct ddi_parent_private_data
*pdp
= ddi_get_parent_data(rdip
);
1558 * Special case handling for drivers that provide their own
1559 * intrspec structures instead of relying on the DDI framework.
1561 * A broken hardware driver in ON could potentially provide its
1562 * own intrspec structure, instead of relying on the hardware.
1563 * If these drivers are children of 'rootnex' then we need to
1564 * continue to provide backward compatibility to them here.
1566 * Following check is a special case for 'pcic' driver which
1567 * was found to have broken hardwre andby provides its own intrspec.
1569 * Verbatim comments from this driver are shown here:
1570 * "Don't use the ddi_add_intr since we don't have a
1571 * default intrspec in all cases."
1573 * Since an 'ispec' may not be always created for it,
1574 * check for that and create one if so.
1576 * NOTE: Currently 'pcic' is the only driver found to do this.
1578 if (!pdp
->par_intr
&& strcmp(ddi_get_name(rdip
), "pcic") == 0) {
1580 pdp
->par_intr
= kmem_zalloc(sizeof (struct intrspec
) *
1581 pdp
->par_nintr
, KM_SLEEP
);
1584 /* Validate the interrupt number */
1585 if (inum
>= pdp
->par_nintr
)
1588 /* Get the interrupt structure pointer and return that */
1589 return ((struct intrspec
*)&pdp
->par_intr
[inum
]);
1593 * Allocate interrupt vector for FIXED (legacy) type.
1596 rootnex_alloc_intr_fixed(dev_info_t
*rdip
, ddi_intr_handle_impl_t
*hdlp
,
1599 struct intrspec
*ispec
;
1600 ddi_intr_handle_impl_t info_hdl
;
1603 apic_get_type_t type_info
;
1605 if (psm_intr_ops
== NULL
)
1606 return (DDI_FAILURE
);
1608 if ((ispec
= rootnex_get_ispec(rdip
, hdlp
->ih_inum
)) == NULL
)
1609 return (DDI_FAILURE
);
1612 * If the PSM module is "APIX" then pass the request for it
1613 * to allocate the vector now.
1615 bzero(&info_hdl
, sizeof (ddi_intr_handle_impl_t
));
1616 info_hdl
.ih_private
= &type_info
;
1617 if ((*psm_intr_ops
)(NULL
, &info_hdl
, PSM_INTR_OP_APIC_TYPE
, NULL
) ==
1618 PSM_SUCCESS
&& strcmp(type_info
.avgi_type
, APIC_APIX_NAME
) == 0) {
1619 if (hdlp
->ih_private
== NULL
) { /* allocate phdl structure */
1621 i_ddi_alloc_intr_phdl(hdlp
);
1623 ((ihdl_plat_t
*)hdlp
->ih_private
)->ip_ispecp
= ispec
;
1624 ret
= (*psm_intr_ops
)(rdip
, hdlp
,
1625 PSM_INTR_OP_ALLOC_VECTORS
, result
);
1626 if (free_phdl
) { /* free up the phdl structure */
1628 i_ddi_free_intr_phdl(hdlp
);
1629 hdlp
->ih_private
= NULL
;
1633 * No APIX module; fall back to the old scheme where the
1634 * interrupt vector is allocated during ddi_enable_intr() call.
1636 hdlp
->ih_pri
= ispec
->intrspec_pri
;
1637 *(int *)result
= hdlp
->ih_scratch1
;
1645 * Free up interrupt vector for FIXED (legacy) type.
1648 rootnex_free_intr_fixed(dev_info_t
*rdip
, ddi_intr_handle_impl_t
*hdlp
)
1650 struct intrspec
*ispec
;
1651 struct ddi_parent_private_data
*pdp
;
1652 ddi_intr_handle_impl_t info_hdl
;
1654 apic_get_type_t type_info
;
1656 if (psm_intr_ops
== NULL
)
1657 return (DDI_FAILURE
);
1660 * If the PSM module is "APIX" then pass the request for it
1661 * to free up the vector now.
1663 bzero(&info_hdl
, sizeof (ddi_intr_handle_impl_t
));
1664 info_hdl
.ih_private
= &type_info
;
1665 if ((*psm_intr_ops
)(NULL
, &info_hdl
, PSM_INTR_OP_APIC_TYPE
, NULL
) ==
1666 PSM_SUCCESS
&& strcmp(type_info
.avgi_type
, APIC_APIX_NAME
) == 0) {
1667 if ((ispec
= rootnex_get_ispec(rdip
, hdlp
->ih_inum
)) == NULL
)
1668 return (DDI_FAILURE
);
1669 ((ihdl_plat_t
*)hdlp
->ih_private
)->ip_ispecp
= ispec
;
1670 ret
= (*psm_intr_ops
)(rdip
, hdlp
,
1671 PSM_INTR_OP_FREE_VECTORS
, NULL
);
1674 * No APIX module; fall back to the old scheme where
1675 * the interrupt vector was already freed during
1676 * ddi_disable_intr() call.
1681 pdp
= ddi_get_parent_data(rdip
);
1684 * Special case for 'pcic' driver' only.
1685 * If an intrspec was created for it, clean it up here
1686 * See detailed comments on this in the function
1687 * rootnex_get_ispec().
1689 if (pdp
->par_intr
&& strcmp(ddi_get_name(rdip
), "pcic") == 0) {
1690 kmem_free(pdp
->par_intr
, sizeof (struct intrspec
) *
1693 * Set it to zero; so that
1694 * DDI framework doesn't free it again
1696 pdp
->par_intr
= NULL
;
1705 * ******************
1707 * ******************
1712 rootnex_coredma_allochdl(dev_info_t
*dip
, dev_info_t
*rdip
,
1713 ddi_dma_attr_t
*attr
, int (*waitfp
)(caddr_t
), caddr_t arg
,
1714 ddi_dma_handle_t
*handlep
)
1716 uint64_t maxsegmentsize_ll
;
1717 uint_t maxsegmentsize
;
1726 /* convert our sleep flags */
1727 if (waitfp
== DDI_DMA_SLEEP
) {
1730 kmflag
= KM_NOSLEEP
;
1734 * We try to do only one memory allocation here. We'll do a little
1735 * pointer manipulation later. If the bind ends up taking more than
1736 * our prealloc's space, we'll have to allocate more memory in the
1737 * bind operation. Not great, but much better than before and the
1738 * best we can do with the current bind interfaces.
1740 hp
= kmem_cache_alloc(rootnex_state
->r_dmahdl_cache
, kmflag
);
1742 return (DDI_DMA_NORESOURCES
);
1744 /* Do our pointer manipulation now, align the structures */
1745 hp
->dmai_private
= (void *)(((uintptr_t)hp
+
1746 (uintptr_t)sizeof (ddi_dma_impl_t
) + 0x7) & ~0x7);
1747 dma
= (rootnex_dma_t
*)hp
->dmai_private
;
1748 dma
->dp_prealloc_buffer
= (uchar_t
*)(((uintptr_t)dma
+
1749 sizeof (rootnex_dma_t
) + 0x7) & ~0x7);
1751 /* setup the handle */
1752 rootnex_clean_dmahdl(hp
);
1753 hp
->dmai_error
.err_fep
= NULL
;
1754 hp
->dmai_error
.err_cf
= NULL
;
1756 dma
->dp_sglinfo
.si_flags
= attr
->dma_attr_flags
;
1757 dma
->dp_sglinfo
.si_min_addr
= attr
->dma_attr_addr_lo
;
1760 * The BOUNCE_ON_SEG workaround is not needed when an IOMMU
1761 * is being used. Set the upper limit to the seg value.
1762 * There will be enough DVMA space to always get addresses
1763 * that will match the constraints.
1765 if (IOMMU_USED(rdip
) &&
1766 (attr
->dma_attr_flags
& _DDI_DMA_BOUNCE_ON_SEG
)) {
1767 dma
->dp_sglinfo
.si_max_addr
= attr
->dma_attr_seg
;
1768 dma
->dp_sglinfo
.si_flags
&= ~_DDI_DMA_BOUNCE_ON_SEG
;
1770 dma
->dp_sglinfo
.si_max_addr
= attr
->dma_attr_addr_hi
;
1772 hp
->dmai_minxfer
= attr
->dma_attr_minxfer
;
1773 hp
->dmai_burstsizes
= attr
->dma_attr_burstsizes
;
1774 hp
->dmai_rdip
= rdip
;
1775 hp
->dmai_attr
= *attr
;
1777 if (attr
->dma_attr_seg
>= dma
->dp_sglinfo
.si_max_addr
)
1778 dma
->dp_sglinfo
.si_cancross
= B_FALSE
;
1780 dma
->dp_sglinfo
.si_cancross
= B_TRUE
;
1782 /* we don't need to worry about the SPL since we do a tryenter */
1783 mutex_init(&dma
->dp_mutex
, NULL
, MUTEX_DRIVER
, NULL
);
1786 * Figure out our maximum segment size. If the segment size is greater
1787 * than 4G, we will limit it to (4G - 1) since the max size of a dma
1788 * object (ddi_dma_obj_t.dmao_size) is 32 bits. dma_attr_seg and
1789 * dma_attr_count_max are size-1 type values.
1791 * Maximum segment size is the largest physically contiguous chunk of
1792 * memory that we can return from a bind (i.e. the maximum size of a
1796 /* handle the rollover cases */
1797 seg
= attr
->dma_attr_seg
+ 1;
1798 if (seg
< attr
->dma_attr_seg
) {
1799 seg
= attr
->dma_attr_seg
;
1801 count_max
= attr
->dma_attr_count_max
+ 1;
1802 if (count_max
< attr
->dma_attr_count_max
) {
1803 count_max
= attr
->dma_attr_count_max
;
1807 * granularity may or may not be a power of two. If it isn't, we can't
1808 * use a simple mask.
1810 if (!ISP2(attr
->dma_attr_granular
)) {
1811 dma
->dp_granularity_power_2
= B_FALSE
;
1813 dma
->dp_granularity_power_2
= B_TRUE
;
1817 * maxxfer should be a whole multiple of granularity. If we're going to
1818 * break up a window because we're greater than maxxfer, we might as
1819 * well make sure it's maxxfer is a whole multiple so we don't have to
1820 * worry about triming the window later on for this case.
1822 if (attr
->dma_attr_granular
> 1) {
1823 if (dma
->dp_granularity_power_2
) {
1824 dma
->dp_maxxfer
= attr
->dma_attr_maxxfer
-
1825 (attr
->dma_attr_maxxfer
&
1826 (attr
->dma_attr_granular
- 1));
1828 dma
->dp_maxxfer
= attr
->dma_attr_maxxfer
-
1829 (attr
->dma_attr_maxxfer
% attr
->dma_attr_granular
);
1832 dma
->dp_maxxfer
= attr
->dma_attr_maxxfer
;
1835 maxsegmentsize_ll
= MIN(seg
, dma
->dp_maxxfer
);
1836 maxsegmentsize_ll
= MIN(maxsegmentsize_ll
, count_max
);
1837 if (maxsegmentsize_ll
== 0 || (maxsegmentsize_ll
> 0xFFFFFFFF)) {
1838 maxsegmentsize
= 0xFFFFFFFF;
1840 maxsegmentsize
= maxsegmentsize_ll
;
1842 dma
->dp_sglinfo
.si_max_cookie_size
= maxsegmentsize
;
1843 dma
->dp_sglinfo
.si_segmask
= attr
->dma_attr_seg
;
1845 /* check the ddi_dma_attr arg to make sure it makes a little sense */
1846 if (rootnex_alloc_check_parms
) {
1847 e
= rootnex_valid_alloc_parms(attr
, maxsegmentsize
);
1848 if (e
!= DDI_SUCCESS
) {
1849 ROOTNEX_DPROF_INC(&rootnex_cnt
[ROOTNEX_CNT_ALLOC_FAIL
]);
1850 (void) rootnex_dma_freehdl(dip
, rdip
,
1851 (ddi_dma_handle_t
)hp
);
1856 *handlep
= (ddi_dma_handle_t
)hp
;
1858 ROOTNEX_DPROF_INC(&rootnex_cnt
[ROOTNEX_CNT_ACTIVE_HDLS
]);
1859 ROOTNEX_DPROBE1(rootnex__alloc__handle
, uint64_t,
1860 rootnex_cnt
[ROOTNEX_CNT_ACTIVE_HDLS
]);
1862 return (DDI_SUCCESS
);
1867 * rootnex_dma_allochdl()
1868 * called from ddi_dma_alloc_handle().
1871 rootnex_dma_allochdl(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_dma_attr_t
*attr
,
1872 int (*waitfp
)(caddr_t
), caddr_t arg
, ddi_dma_handle_t
*handlep
)
1874 int retval
= DDI_SUCCESS
;
1875 #if defined(__amd64) && !defined(__xpv)
1877 if (IOMMU_UNITIALIZED(rdip
)) {
1878 retval
= iommulib_nex_open(dip
, rdip
);
1880 if (retval
!= DDI_SUCCESS
&& retval
!= DDI_ENOTSUP
)
1884 if (IOMMU_UNUSED(rdip
)) {
1885 retval
= rootnex_coredma_allochdl(dip
, rdip
, attr
, waitfp
, arg
,
1888 retval
= iommulib_nexdma_allochdl(dip
, rdip
, attr
,
1889 waitfp
, arg
, handlep
);
1892 retval
= rootnex_coredma_allochdl(dip
, rdip
, attr
, waitfp
, arg
,
1896 case DDI_DMA_NORESOURCES
:
1897 if (waitfp
!= DDI_DMA_DONTWAIT
) {
1898 ddi_set_callback(waitfp
, arg
,
1899 &rootnex_state
->r_dvma_call_list_id
);
1903 ndi_fmc_insert(rdip
, DMA_HANDLE
, *handlep
, NULL
);
1913 rootnex_coredma_freehdl(dev_info_t
*dip
, dev_info_t
*rdip
,
1914 ddi_dma_handle_t handle
)
1920 hp
= (ddi_dma_impl_t
*)handle
;
1921 dma
= (rootnex_dma_t
*)hp
->dmai_private
;
1923 /* unbind should have been called first */
1924 ASSERT(!dma
->dp_inuse
);
1926 mutex_destroy(&dma
->dp_mutex
);
1927 kmem_cache_free(rootnex_state
->r_dmahdl_cache
, hp
);
1929 ROOTNEX_DPROF_DEC(&rootnex_cnt
[ROOTNEX_CNT_ACTIVE_HDLS
]);
1930 ROOTNEX_DPROBE1(rootnex__free__handle
, uint64_t,
1931 rootnex_cnt
[ROOTNEX_CNT_ACTIVE_HDLS
]);
1933 return (DDI_SUCCESS
);
1937 * rootnex_dma_freehdl()
1938 * called from ddi_dma_free_handle().
1941 rootnex_dma_freehdl(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_dma_handle_t handle
)
1945 ndi_fmc_remove(rdip
, DMA_HANDLE
, handle
);
1946 #if defined(__amd64) && !defined(__xpv)
1947 if (IOMMU_USED(rdip
))
1948 ret
= iommulib_nexdma_freehdl(dip
, rdip
, handle
);
1951 ret
= rootnex_coredma_freehdl(dip
, rdip
, handle
);
1953 if (rootnex_state
->r_dvma_call_list_id
)
1954 ddi_run_callback(&rootnex_state
->r_dvma_call_list_id
);
1961 rootnex_coredma_bindhdl(dev_info_t
*dip
, dev_info_t
*rdip
,
1962 ddi_dma_handle_t handle
, struct ddi_dma_req
*dmareq
,
1963 ddi_dma_cookie_t
*cookiep
, uint_t
*ccountp
)
1965 rootnex_sglinfo_t
*sinfo
;
1966 ddi_dma_obj_t
*dmao
;
1967 #if defined(__amd64) && !defined(__xpv)
1968 struct dvmaseg
*dvs
;
1969 ddi_dma_cookie_t
*cookie
;
1971 ddi_dma_attr_t
*attr
;
1978 hp
= (ddi_dma_impl_t
*)handle
;
1979 dma
= (rootnex_dma_t
*)hp
->dmai_private
;
1980 dmao
= &dma
->dp_dma
;
1981 sinfo
= &dma
->dp_sglinfo
;
1982 attr
= &hp
->dmai_attr
;
1984 /* convert the sleep flags */
1985 if (dmareq
->dmar_fp
== DDI_DMA_SLEEP
) {
1986 dma
->dp_sleep_flags
= kmflag
= KM_SLEEP
;
1988 dma
->dp_sleep_flags
= kmflag
= KM_NOSLEEP
;
1991 hp
->dmai_rflags
= dmareq
->dmar_flags
& DMP_DDIFLAGS
;
1994 * This is useful for debugging a driver. Not as useful in a production
1995 * system. The only time this will fail is if you have a driver bug.
1997 if (rootnex_bind_check_inuse
) {
1999 * No one else should ever have this lock unless someone else
2000 * is trying to use this handle. So contention on the lock
2001 * is the same as inuse being set.
2003 e
= mutex_tryenter(&dma
->dp_mutex
);
2005 ROOTNEX_DPROF_INC(&rootnex_cnt
[ROOTNEX_CNT_BIND_FAIL
]);
2006 return (DDI_DMA_INUSE
);
2008 if (dma
->dp_inuse
) {
2009 mutex_exit(&dma
->dp_mutex
);
2010 ROOTNEX_DPROF_INC(&rootnex_cnt
[ROOTNEX_CNT_BIND_FAIL
]);
2011 return (DDI_DMA_INUSE
);
2013 dma
->dp_inuse
= B_TRUE
;
2014 mutex_exit(&dma
->dp_mutex
);
2017 /* check the ddi_dma_attr arg to make sure it makes a little sense */
2018 if (rootnex_bind_check_parms
) {
2019 e
= rootnex_valid_bind_parms(dmareq
, attr
);
2020 if (e
!= DDI_SUCCESS
) {
2021 ROOTNEX_DPROF_INC(&rootnex_cnt
[ROOTNEX_CNT_BIND_FAIL
]);
2022 rootnex_clean_dmahdl(hp
);
2027 /* save away the original bind info */
2028 dma
->dp_dma
= dmareq
->dmar_object
;
2030 #if defined(__amd64) && !defined(__xpv)
2031 if (IOMMU_USED(rdip
)) {
2032 dmao
= &dma
->dp_dvma
;
2033 e
= iommulib_nexdma_mapobject(dip
, rdip
, handle
, dmareq
, dmao
);
2036 if (sinfo
->si_cancross
||
2037 dmao
->dmao_obj
.dvma_obj
.dv_nseg
!= 1 ||
2038 dmao
->dmao_size
> sinfo
->si_max_cookie_size
) {
2039 dma
->dp_dvma_used
= B_TRUE
;
2042 sinfo
->si_sgl_size
= 1;
2043 hp
->dmai_rflags
|= DMP_NOSYNC
;
2045 dma
->dp_dvma_used
= B_TRUE
;
2046 dma
->dp_need_to_free_cookie
= B_FALSE
;
2048 dvs
= &dmao
->dmao_obj
.dvma_obj
.dv_seg
[0];
2049 cookie
= hp
->dmai_cookie
= dma
->dp_cookies
=
2050 (ddi_dma_cookie_t
*)dma
->dp_prealloc_buffer
;
2051 cookie
->dmac_laddress
= dvs
->dvs_start
+
2052 dmao
->dmao_obj
.dvma_obj
.dv_off
;
2053 cookie
->dmac_size
= dvs
->dvs_len
;
2054 cookie
->dmac_type
= 0;
2056 ROOTNEX_DPROBE1(rootnex__bind__dvmafast
, dev_info_t
*,
2062 rootnex_clean_dmahdl(hp
);
2069 * Figure out a rough estimate of what maximum number of pages
2070 * this buffer could use (a high estimate of course).
2072 sinfo
->si_max_pages
= mmu_btopr(dma
->dp_dma
.dmao_size
) + 1;
2074 if (dma
->dp_dvma_used
) {
2076 * The number of physical pages is the worst case.
2078 * For DVMA, the worst case is the length divided
2079 * by the maximum cookie length, plus 1. Add to that
2080 * the number of segment boundaries potentially crossed, and
2081 * the additional number of DVMA segments that was returned.
2083 * In the normal case, for modern devices, si_cancross will
2084 * be false, and dv_nseg will be 1, and the fast path will
2085 * have been taken above.
2087 ncookies
= (dma
->dp_dma
.dmao_size
/ sinfo
->si_max_cookie_size
)
2089 if (sinfo
->si_cancross
)
2091 (dma
->dp_dma
.dmao_size
/ attr
->dma_attr_seg
) + 1;
2092 ncookies
+= (dmao
->dmao_obj
.dvma_obj
.dv_nseg
- 1);
2094 sinfo
->si_max_pages
= MIN(sinfo
->si_max_pages
, ncookies
);
2098 * We'll use the pre-allocated cookies for any bind that will *always*
2099 * fit (more important to be consistent, we don't want to create
2100 * additional degenerate cases).
2102 if (sinfo
->si_max_pages
<= rootnex_state
->r_prealloc_cookies
) {
2103 dma
->dp_cookies
= (ddi_dma_cookie_t
*)dma
->dp_prealloc_buffer
;
2104 dma
->dp_need_to_free_cookie
= B_FALSE
;
2105 ROOTNEX_DPROBE2(rootnex__bind__prealloc
, dev_info_t
*, rdip
,
2106 uint_t
, sinfo
->si_max_pages
);
2109 * For anything larger than that, we'll go ahead and allocate the
2110 * maximum number of pages we expect to see. Hopefuly, we won't be
2111 * seeing this path in the fast path for high performance devices very
2114 * a ddi bind interface that allowed the driver to provide storage to
2115 * the bind interface would speed this case up.
2119 * Save away how much memory we allocated. If we're doing a
2120 * nosleep, the alloc could fail...
2122 dma
->dp_cookie_size
= sinfo
->si_max_pages
*
2123 sizeof (ddi_dma_cookie_t
);
2124 dma
->dp_cookies
= kmem_alloc(dma
->dp_cookie_size
, kmflag
);
2125 if (dma
->dp_cookies
== NULL
) {
2126 ROOTNEX_DPROF_INC(&rootnex_cnt
[ROOTNEX_CNT_BIND_FAIL
]);
2127 rootnex_clean_dmahdl(hp
);
2128 return (DDI_DMA_NORESOURCES
);
2130 dma
->dp_need_to_free_cookie
= B_TRUE
;
2131 ROOTNEX_DPROBE2(rootnex__bind__alloc
, dev_info_t
*, rdip
,
2132 uint_t
, sinfo
->si_max_pages
);
2134 hp
->dmai_cookie
= dma
->dp_cookies
;
2137 * Get the real sgl. rootnex_get_sgl will fill in cookie array while
2138 * looking at the constraints in the dma structure. It will then put
2139 * some additional state about the sgl in the dma struct (i.e. is
2140 * the sgl clean, or do we need to do some munging; how many pages
2141 * need to be copied, etc.)
2143 if (dma
->dp_dvma_used
)
2144 rootnex_dvma_get_sgl(dmao
, dma
->dp_cookies
, &dma
->dp_sglinfo
);
2146 rootnex_get_sgl(dmao
, dma
->dp_cookies
, &dma
->dp_sglinfo
);
2149 ASSERT(sinfo
->si_sgl_size
<= sinfo
->si_max_pages
);
	/* if we don't need a copy buffer, we don't need to sync */
	if (sinfo->si_copybuf_req == 0) {
		hp->dmai_rflags |= DMP_NOSYNC;
	}

	/*
	 * if we don't need the copybuf and we don't need to do a partial, we
	 * hit the fast path. All the high performance devices should be trying
	 * to hit this path. To hit this path, a device should be able to reach
	 * all of memory, shouldn't try to bind more than it can transfer, and
	 * the buffer shouldn't require more cookies than the driver/device can
	 * handle [sgllen].
	 *
	 * Note that negative values of dma_attr_sgllen are supposed
	 * to mean unlimited, but we just cast them to mean a
	 * "ridiculous large limit". This saves some extra checks on
	 * hot paths.
	 */
	if ((sinfo->si_copybuf_req == 0) &&
	    (sinfo->si_sgl_size <= (unsigned)attr->dma_attr_sgllen) &&
	    (dmao->dmao_size <= dma->dp_maxxfer)) {
fast:
		/*
		 * If the driver supports FMA, insert the handle in the FMA DMA
		 * handle cache.
		 */
		if (attr->dma_attr_flags & DDI_DMA_FLAGERR)
			hp->dmai_error.err_cf = rootnex_dma_check;

		/*
		 * copy out the first cookie and ccountp, set the cookie
		 * pointer to the second cookie. The first cookie is passed
		 * back on the stack. Additional cookies are accessed via
		 * ddi_dma_nextcookie()
		 */
		*cookiep = dma->dp_cookies[0];
		*ccountp = sinfo->si_sgl_size;
		hp->dmai_cookie++;
		hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
		ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
		ROOTNEX_DPROBE4(rootnex__bind__fast, dev_info_t *, rdip,
		    uint64_t, rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS],
		    uint_t, dmao->dmao_size, uint_t, *ccountp);

		return (DDI_DMA_MAPPED);
	}
	/*
	 * go to the slow path, we may need to alloc more memory, create
	 * multiple windows, and munge up a sgl to make the device happy.
	 */

	/*
	 * With the IOMMU mapobject method used, we should never hit
	 * the slow path. If we do, something is seriously wrong.
	 * Clean up and return an error.
	 */

#if defined(__amd64) && !defined(__xpv)
	if (dma->dp_dvma_used) {
		(void) iommulib_nexdma_unmapobject(dip, rdip, handle,
		    &dma->dp_dvma);
		e = DDI_DMA_NOMAPPING;
	} else {
#endif
		e = rootnex_bind_slowpath(hp, dmareq, dma, attr, &dma->dp_dma,
		    kmflag);
#if defined(__amd64) && !defined(__xpv)
	}
#endif
	if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) {
		if (dma->dp_need_to_free_cookie) {
			kmem_free(dma->dp_cookies, dma->dp_cookie_size);
		}
		ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
		rootnex_clean_dmahdl(hp); /* must be after free cookie */
		return (e);
	}

	/*
	 * If the driver supports FMA, insert the handle in the FMA DMA handle
	 * cache.
	 */
	if (attr->dma_attr_flags & DDI_DMA_FLAGERR)
		hp->dmai_error.err_cf = rootnex_dma_check;

	/* if the first window uses the copy buffer, sync it for the device */
	if ((dma->dp_window[dma->dp_current_win].wd_dosync) &&
	    (hp->dmai_rflags & DDI_DMA_WRITE)) {
		(void) rootnex_coredma_sync(dip, rdip, handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
	}

	/*
	 * copy out the first cookie and ccountp, set the cookie pointer to the
	 * second cookie. Make sure the partial flag is set/cleared correctly.
	 * If we have a partial map (i.e. multiple windows), the number of
	 * cookies we return is the number of cookies in the first window.
	 */
	if (e == DDI_DMA_MAPPED) {
		hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
		*ccountp = sinfo->si_sgl_size;
		hp->dmai_nwin = 1;
	} else {
		hp->dmai_rflags |= DDI_DMA_PARTIAL;
		*ccountp = dma->dp_window[dma->dp_current_win].wd_cookie_cnt;
		ASSERT(hp->dmai_nwin <= dma->dp_max_win);
	}
	*cookiep = dma->dp_cookies[0];
	hp->dmai_cookie++;

	ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
	ROOTNEX_DPROBE4(rootnex__bind__slow, dev_info_t *, rdip, uint64_t,
	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], uint_t,
	    dmao->dmao_size, uint_t, *ccountp);
	return (e);
}
/*
 * rootnex_dma_bindhdl()
 *    called from ddi_dma_addr_bind_handle() and ddi_dma_buf_bind_handle().
 */
static int
rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	int ret;
#if defined(__amd64) && !defined(__xpv)
	if (IOMMU_USED(rdip))
		ret = iommulib_nexdma_bindhdl(dip, rdip, handle, dmareq,
		    cookiep, ccountp);
	else
#endif
	ret = rootnex_coredma_bindhdl(dip, rdip, handle, dmareq,
	    cookiep, ccountp);

	if (ret == DDI_DMA_NORESOURCES && dmareq->dmar_fp != DDI_DMA_DONTWAIT) {
		ddi_set_callback(dmareq->dmar_fp, dmareq->dmar_arg,
		    &rootnex_state->r_dvma_call_list_id);
	}

	return (ret);
}
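
/*
 * Note: when a no-sleep bind fails with DDI_DMA_NORESOURCES and the caller
 * supplied a callback (dmar_fp), ddi_set_callback() above queues that
 * callback on the rootnex DVMA call list; the ddi_run_callback() in the
 * unbind path below retries those waiters once resources have been freed.
 */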
/*ARGSUSED*/
static int
rootnex_coredma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *hp;
	rootnex_dma_t *dma;
	int e;

	hp = (ddi_dma_impl_t *)handle;
	dma = (rootnex_dma_t *)hp->dmai_private;

	/* make sure the buffer wasn't free'd before calling unbind */
	if (rootnex_unbind_verify_buffer) {
		e = rootnex_verify_buffer(dma);
		if (e != DDI_SUCCESS) {
			ASSERT(0);
			return (DDI_FAILURE);
		}
	}

	/* sync the current window before unbinding the buffer */
	if (dma->dp_window && dma->dp_window[dma->dp_current_win].wd_dosync &&
	    (hp->dmai_rflags & DDI_DMA_READ)) {
		(void) rootnex_coredma_sync(dip, rdip, handle, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
	}

	/*
	 * cleanup and copy buffer or window state. if we didn't use the copy
	 * buffer or windows, there won't be much to do :-)
	 */
	rootnex_teardown_copybuf(dma);
	rootnex_teardown_windows(dma);

#if defined(__amd64) && !defined(__xpv)
	if (IOMMU_USED(rdip) && dma->dp_dvma_used)
		(void) iommulib_nexdma_unmapobject(dip, rdip, handle,
		    &dma->dp_dvma);
#endif

	/*
	 * If we had to allocate space for the worst case sgl (it didn't
	 * fit into our pre-allocated buffer), free that up now
	 */
	if (dma->dp_need_to_free_cookie) {
		kmem_free(dma->dp_cookies, dma->dp_cookie_size);
	}

	/*
	 * clean up the handle so it's ready for the next bind (i.e. if the
	 * handle is reused).
	 */
	rootnex_clean_dmahdl(hp);
	hp->dmai_error.err_cf = NULL;

	ROOTNEX_DPROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
	ROOTNEX_DPROBE1(rootnex__unbind, uint64_t,
	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);

	return (DDI_SUCCESS);
}
/*
 * rootnex_dma_unbindhdl()
 *    called from ddi_dma_unbind_handle()
 */
/*ARGSUSED*/
static int
rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	int ret;

#if defined(__amd64) && !defined(__xpv)
	if (IOMMU_USED(rdip))
		ret = iommulib_nexdma_unbindhdl(dip, rdip, handle);
	else
#endif
	ret = rootnex_coredma_unbindhdl(dip, rdip, handle);

	if (rootnex_state->r_dvma_call_list_id)
		ddi_run_callback(&rootnex_state->r_dvma_call_list_id);

	return (ret);
}
#if defined(__amd64) && !defined(__xpv)

static int
rootnex_coredma_get_sleep_flags(ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;

	if (dma->dp_sleep_flags != KM_SLEEP &&
	    dma->dp_sleep_flags != KM_NOSLEEP)
		cmn_err(CE_PANIC, "kmem sleep flags not set in DMA handle");
	return (dma->dp_sleep_flags);
}
/*ARGSUSED*/
static void
rootnex_coredma_reset_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
	rootnex_window_t *window;

	if (dma->dp_window) {
		window = &dma->dp_window[dma->dp_current_win];
		hp->dmai_cookie = window->wd_first_cookie;
	} else {
		hp->dmai_cookie = dma->dp_cookies;
	}
	hp->dmai_cookie++;
}
/*ARGSUSED*/
static int
rootnex_coredma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
    ddi_dma_cookie_t **cookiepp, uint_t *ccountp)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
	rootnex_window_t *window;
	ddi_dma_cookie_t *cp;
	ddi_dma_cookie_t *cookie;
	int km_flags;
	uint_t i;

	ASSERT(*cookiepp == NULL);
	ASSERT(*ccountp == 0);

	if (dma->dp_window) {
		window = &dma->dp_window[dma->dp_current_win];
		cp = window->wd_first_cookie;
		*ccountp = window->wd_cookie_cnt;
	} else {
		cp = dma->dp_cookies;
		*ccountp = dma->dp_sglinfo.si_sgl_size;
	}

	km_flags = rootnex_coredma_get_sleep_flags(handle);
	cookie = kmem_zalloc(sizeof (ddi_dma_cookie_t) * (*ccountp), km_flags);
	if (cookie == NULL) {
		return (DDI_DMA_NORESOURCES);
	}

	for (i = 0; i < *ccountp; i++) {
		cookie[i].dmac_notused = cp[i].dmac_notused;
		cookie[i].dmac_type = cp[i].dmac_type;
		cookie[i].dmac_address = cp[i].dmac_address;
		cookie[i].dmac_size = cp[i].dmac_size;
	}

	*cookiepp = cookie;

	return (DDI_SUCCESS);
}
/*ARGSUSED*/
static int
rootnex_coredma_set_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
    ddi_dma_cookie_t *cookiep, uint_t ccount)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
	rootnex_window_t *window;
	ddi_dma_cookie_t *cur_cookiep;

	ASSERT(cookiep);
	ASSERT(ccount != 0);
	ASSERT(dma->dp_need_to_switch_cookies == B_FALSE);

	if (dma->dp_window) {
		window = &dma->dp_window[dma->dp_current_win];
		dma->dp_saved_cookies = window->wd_first_cookie;
		window->wd_first_cookie = cookiep;
		ASSERT(ccount == window->wd_cookie_cnt);
		cur_cookiep = (hp->dmai_cookie - dma->dp_saved_cookies)
		    + window->wd_first_cookie;
	} else {
		dma->dp_saved_cookies = dma->dp_cookies;
		dma->dp_cookies = cookiep;
		ASSERT(ccount == dma->dp_sglinfo.si_sgl_size);
		cur_cookiep = (hp->dmai_cookie - dma->dp_saved_cookies)
		    + dma->dp_cookies;
	}

	dma->dp_need_to_switch_cookies = B_TRUE;
	hp->dmai_cookie = cur_cookiep;

	return (DDI_SUCCESS);
}
/*ARGSUSED*/
static int
rootnex_coredma_clear_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
	rootnex_window_t *window;
	ddi_dma_cookie_t *cur_cookiep;
	ddi_dma_cookie_t *cookie_array;
	uint_t ccount;

	/* check if cookies have not been switched */
	if (dma->dp_need_to_switch_cookies == B_FALSE)
		return (DDI_SUCCESS);

	ASSERT(dma->dp_saved_cookies);

	if (dma->dp_window) {
		window = &dma->dp_window[dma->dp_current_win];
		cookie_array = window->wd_first_cookie;
		window->wd_first_cookie = dma->dp_saved_cookies;
		dma->dp_saved_cookies = NULL;
		ccount = window->wd_cookie_cnt;
		cur_cookiep = (hp->dmai_cookie - cookie_array)
		    + window->wd_first_cookie;
	} else {
		cookie_array = dma->dp_cookies;
		dma->dp_cookies = dma->dp_saved_cookies;
		dma->dp_saved_cookies = NULL;
		ccount = dma->dp_sglinfo.si_sgl_size;
		cur_cookiep = (hp->dmai_cookie - cookie_array)
		    + dma->dp_cookies;
	}

	kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);

	hp->dmai_cookie = cur_cookiep;

	dma->dp_need_to_switch_cookies = B_FALSE;

	return (DDI_SUCCESS);
}
static struct as *
rootnex_get_as(ddi_dma_obj_t *dmao)
{
	struct as *asp;

	switch (dmao->dmao_type) {
	case DMA_OTYP_VADDR:
	case DMA_OTYP_BUFVADDR:
		asp = dmao->dmao_obj.virt_obj.v_as;
		if (asp == NULL)
			asp = &kas;
		break;
	default:
		asp = NULL;
		break;
	}
	return (asp);
}
/*
 * rootnex_verify_buffer()
 *   verify buffer wasn't free'd
 */
static int
rootnex_verify_buffer(rootnex_dma_t *dma)
{
	page_t **pplist;
	caddr_t vaddr;
	uint_t pcnt;
	uint_t poff;
	page_t *pp;
	char b;
	int i;

	/* Figure out how many pages this buffer occupies */
	if (dma->dp_dma.dmao_type == DMA_OTYP_PAGES) {
		poff = dma->dp_dma.dmao_obj.pp_obj.pp_offset & MMU_PAGEOFFSET;
	} else {
		vaddr = dma->dp_dma.dmao_obj.virt_obj.v_addr;
		poff = (uintptr_t)vaddr & MMU_PAGEOFFSET;
	}
	pcnt = mmu_btopr(dma->dp_dma.dmao_size + poff);

	switch (dma->dp_dma.dmao_type) {
	case DMA_OTYP_PAGES:
		/*
		 * for a linked list of pp's walk through them to make sure
		 * they're locked and not free.
		 */
		pp = dma->dp_dma.dmao_obj.pp_obj.pp_pp;
		for (i = 0; i < pcnt; i++) {
			if (PP_ISFREE(pp) || !PAGE_LOCKED(pp)) {
				return (DDI_FAILURE);
			}
			pp = pp->p_next;
		}
		break;

	case DMA_OTYP_VADDR:
	case DMA_OTYP_BUFVADDR:
		pplist = dma->dp_dma.dmao_obj.virt_obj.v_priv;
		/*
		 * for an array of pp's walk through them to make sure they're
		 * not free. It's possible that they may not be locked.
		 */
		if (pplist != NULL) {
			for (i = 0; i < pcnt; i++) {
				if (PP_ISFREE(pplist[i])) {
					return (DDI_FAILURE);
				}
			}

		/* For a virtual address, try to peek at each page */
		} else {
			if (rootnex_get_as(&dma->dp_dma) == &kas) {
				for (i = 0; i < pcnt; i++) {
					if (ddi_peek8(NULL, vaddr, &b) ==
					    DDI_FAILURE)
						return (DDI_FAILURE);
					vaddr += MMU_PAGESIZE;
				}
			}
		}
		break;

	default:
		cmn_err(CE_PANIC, "rootnex_verify_buffer: bad DMA object");
		break;
	}

	return (DDI_SUCCESS);
}
/*
 * rootnex_clean_dmahdl()
 *    Clean the dma handle. This should be called on a handle alloc and an
 *    unbind handle. Set the handle state to the default settings.
 */
static void
rootnex_clean_dmahdl(ddi_dma_impl_t *hp)
{
	rootnex_dma_t *dma;

	dma = (rootnex_dma_t *)hp->dmai_private;

	hp->dmai_nwin = 0;
	dma->dp_current_cookie = 0;
	dma->dp_copybuf_size = 0;
	dma->dp_window = NULL;
	dma->dp_cbaddr = NULL;
	dma->dp_inuse = B_FALSE;
	dma->dp_dvma_used = B_FALSE;
	dma->dp_need_to_free_cookie = B_FALSE;
	dma->dp_need_to_switch_cookies = B_FALSE;
	dma->dp_saved_cookies = NULL;
	dma->dp_sleep_flags = KM_PANIC;
	dma->dp_need_to_free_window = B_FALSE;
	dma->dp_partial_required = B_FALSE;
	dma->dp_trim_required = B_FALSE;
	dma->dp_sglinfo.si_copybuf_req = 0;
#if !defined(__amd64)
	dma->dp_cb_remaping = B_FALSE;
	dma->dp_kva = NULL;
#endif

	/* FMA related initialization */
	hp->dmai_fault = 0;
	hp->dmai_fault_check = NULL;
	hp->dmai_fault_notify = NULL;
	hp->dmai_error.err_ena = 0;
	hp->dmai_error.err_status = DDI_FM_OK;
	hp->dmai_error.err_expected = DDI_FM_ERR_UNEXPECTED;
	hp->dmai_error.err_ontrap = NULL;
}
/*
 * rootnex_valid_alloc_parms()
 *    Called in ddi_dma_alloc_handle path to validate its parameters.
 */
static int
rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegmentsize)
{
	if ((attr->dma_attr_seg < MMU_PAGEOFFSET) ||
	    (attr->dma_attr_count_max < MMU_PAGEOFFSET) ||
	    (attr->dma_attr_granular > MMU_PAGESIZE) ||
	    (attr->dma_attr_maxxfer < MMU_PAGESIZE)) {
		return (DDI_DMA_BADATTR);
	}

	if (attr->dma_attr_addr_hi <= attr->dma_attr_addr_lo) {
		return (DDI_DMA_BADATTR);
	}

	if ((attr->dma_attr_seg & MMU_PAGEOFFSET) != MMU_PAGEOFFSET ||
	    MMU_PAGESIZE & (attr->dma_attr_granular - 1) ||
	    attr->dma_attr_sgllen == 0) {
		return (DDI_DMA_BADATTR);
	}

	/* We should be able to DMA into every byte offset in a page */
	if (maxsegmentsize < MMU_PAGESIZE) {
		return (DDI_DMA_BADATTR);
	}

	/* if we're bouncing on seg, seg must be <= addr_hi */
	if ((attr->dma_attr_flags & _DDI_DMA_BOUNCE_ON_SEG) &&
	    (attr->dma_attr_seg > attr->dma_attr_addr_hi)) {
		return (DDI_DMA_BADATTR);
	}

	return (DDI_SUCCESS);
}
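
/*
 * As an illustrative example (hypothetical values, not from any particular
 * driver): an attr with dma_attr_seg = 0xFFFFFFFF (low bits all ones, so
 * segments span whole pages), dma_attr_count_max >= MMU_PAGEOFFSET,
 * dma_attr_granular = 1 (a power of two dividing MMU_PAGESIZE),
 * dma_attr_sgllen != 0, dma_attr_maxxfer >= MMU_PAGESIZE, and
 * dma_attr_addr_hi > dma_attr_addr_lo passes all of the checks above.
 */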
/*
 * rootnex_valid_bind_parms()
 *    Called in ddi_dma_*_bind_handle path to validate its parameters.
 */
/* ARGSUSED */
static int
rootnex_valid_bind_parms(ddi_dma_req_t *dmareq, ddi_dma_attr_t *attr)
{
#if !defined(__amd64)
	/*
	 * we only support up to a 2G-1 transfer size on 32-bit kernels so
	 * we can track the offset for the obsoleted interfaces.
	 */
	if (dmareq->dmar_object.dmao_size > 0x7FFFFFFF) {
		return (DDI_DMA_TOOBIG);
	}
#endif

	return (DDI_SUCCESS);
}
/*
 * rootnex_need_bounce_seg()
 *    check to see if the buffer lives on both sides of the seg.
 */
static boolean_t
rootnex_need_bounce_seg(ddi_dma_obj_t *dmar_object, rootnex_sglinfo_t *sglinfo)
{
	ddi_dma_atyp_t buftype;
	rootnex_addr_t raddr;
	boolean_t lower_addr;
	boolean_t upper_addr;
	uint64_t offset;
	page_t **pplist;
	uint64_t paddr;
	uint32_t psize;
	caddr_t vaddr;
	uint_t pcnt;
	page_t *pp;
	uint32_t size;

	/* shortcuts */
	pplist = dmar_object->dmao_obj.virt_obj.v_priv;
	vaddr = dmar_object->dmao_obj.virt_obj.v_addr;
	buftype = dmar_object->dmao_type;
	size = dmar_object->dmao_size;

	lower_addr = B_FALSE;
	upper_addr = B_FALSE;
	pcnt = 0;

	/*
	 * Process the first page to handle the initial offset of the buffer.
	 * We'll use the base address we get later when we loop through all
	 * the pages.
	 */
	if (buftype == DMA_OTYP_PAGES) {
		pp = dmar_object->dmao_obj.pp_obj.pp_pp;
		offset = dmar_object->dmao_obj.pp_obj.pp_offset &
		    MMU_PAGEOFFSET;
		paddr = pfn_to_pa(pp->p_pagenum) + offset;
		psize = MIN(size, (MMU_PAGESIZE - offset));
		pp = pp->p_next;
		sglinfo->si_asp = NULL;
	} else if (pplist != NULL) {
		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
		if (sglinfo->si_asp == NULL) {
			sglinfo->si_asp = &kas;
		}
		paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
		paddr += offset;
		psize = MIN(size, (MMU_PAGESIZE - offset));
		pcnt++;
	} else {
		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
		if (sglinfo->si_asp == NULL) {
			sglinfo->si_asp = &kas;
		}
		paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat, vaddr));
		paddr += offset;
		psize = MIN(size, (MMU_PAGESIZE - offset));
		vaddr += psize;
	}

	raddr = ROOTNEX_PADDR_TO_RBASE(paddr);

	if ((raddr + psize) > sglinfo->si_segmask) {
		upper_addr = B_TRUE;
	} else {
		lower_addr = B_TRUE;
	}
	size -= psize;

	/*
	 * Walk through the rest of the pages in the buffer. Track to see
	 * if we have pages on both sides of the segment boundary.
	 */
	while (size > 0) {
		/* partial or full page */
		psize = MIN(size, MMU_PAGESIZE);

		if (buftype == DMA_OTYP_PAGES) {
			/* get the paddr from the page_t */
			ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
			paddr = pfn_to_pa(pp->p_pagenum);
			pp = pp->p_next;
		} else if (pplist != NULL) {
			/* index into the array of page_t's to get the paddr */
			ASSERT(!PP_ISFREE(pplist[pcnt]));
			paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
			pcnt++;
		} else {
			/* call into the VM to get the paddr */
			paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat,
			    vaddr));
			vaddr += psize;
		}

		raddr = ROOTNEX_PADDR_TO_RBASE(paddr);

		if ((raddr + psize) > sglinfo->si_segmask) {
			upper_addr = B_TRUE;
		} else {
			lower_addr = B_TRUE;
		}

		/*
		 * if the buffer lives both above and below the segment
		 * boundary, or the current page is the page immediately
		 * after the segment, we will use a copy/bounce buffer for
		 * all pages > seg.
		 */
		if ((lower_addr && upper_addr) ||
		    (raddr == (sglinfo->si_segmask + 1))) {
			return (B_TRUE);
		}

		size -= psize;
	}

	return (B_FALSE);
}
/*
 * rootnex_get_sgl()
 *    Called in bind fastpath to get the sgl. Most of this will be replaced
 *    with a call to the vm layer when vm2.0 comes around...
 */
static void
rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
    rootnex_sglinfo_t *sglinfo)
{
	ddi_dma_atyp_t buftype;
	rootnex_addr_t raddr;
	uint64_t last_page;
	uint64_t offset;
	uint64_t addrhi;
	uint64_t addrlo;
	uint64_t maxseg;
	page_t **pplist;
	uint64_t paddr;
	uint32_t psize;
	uint32_t size;
	caddr_t vaddr;
	uint_t pcnt;
	page_t *pp;
	uint_t cnt;

	/* shortcuts */
	pplist = dmar_object->dmao_obj.virt_obj.v_priv;
	vaddr = dmar_object->dmao_obj.virt_obj.v_addr;
	maxseg = sglinfo->si_max_cookie_size;
	buftype = dmar_object->dmao_type;
	addrhi = sglinfo->si_max_addr;
	addrlo = sglinfo->si_min_addr;
	size = dmar_object->dmao_size;

	pcnt = 0;
	cnt = 0;

	/*
	 * check to see if we need to use the copy buffer for pages over
	 * the segment attr.
	 */
	sglinfo->si_bounce_on_seg = B_FALSE;
	if (sglinfo->si_flags & _DDI_DMA_BOUNCE_ON_SEG) {
		sglinfo->si_bounce_on_seg = rootnex_need_bounce_seg(
		    dmar_object, sglinfo);
	}

	/*
	 * if we were passed down a linked list of pages, i.e. pointer to
	 * page_t, use this to get our physical address and buf offset.
	 */
	if (buftype == DMA_OTYP_PAGES) {
		pp = dmar_object->dmao_obj.pp_obj.pp_pp;
		ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
		offset = dmar_object->dmao_obj.pp_obj.pp_offset &
		    MMU_PAGEOFFSET;
		paddr = pfn_to_pa(pp->p_pagenum) + offset;
		psize = MIN(size, (MMU_PAGESIZE - offset));
		pp = pp->p_next;
		sglinfo->si_asp = NULL;

	/*
	 * We weren't passed down a linked list of pages, but if we were passed
	 * down an array of pages, use this to get our physical address and buf
	 * offset.
	 */
	} else if (pplist != NULL) {
		ASSERT((buftype == DMA_OTYP_VADDR) ||
		    (buftype == DMA_OTYP_BUFVADDR));

		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
		if (sglinfo->si_asp == NULL) {
			sglinfo->si_asp = &kas;
		}

		ASSERT(!PP_ISFREE(pplist[pcnt]));
		paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
		paddr += offset;
		psize = MIN(size, (MMU_PAGESIZE - offset));
		pcnt++;

	/*
	 * All we have is a virtual address, we'll need to call into the VM
	 * to get the physical address.
	 */
	} else {
		ASSERT((buftype == DMA_OTYP_VADDR) ||
		    (buftype == DMA_OTYP_BUFVADDR));

		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
		if (sglinfo->si_asp == NULL) {
			sglinfo->si_asp = &kas;
		}

		paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat, vaddr));
		paddr += offset;
		psize = MIN(size, (MMU_PAGESIZE - offset));
		vaddr += psize;
	}

	raddr = ROOTNEX_PADDR_TO_RBASE(paddr);

	/*
	 * Setup the first cookie with the physical address of the page and the
	 * size of the page (which takes into account the initial offset into
	 * the page).
	 */
	sgl[cnt].dmac_laddress = raddr;
	sgl[cnt].dmac_size = psize;
	sgl[cnt].dmac_type = 0;

	/*
	 * Save away the buffer offset into the page. We'll need this later in
	 * the copy buffer code to help figure out the page index within the
	 * buffer and the offset into the current page.
	 */
	sglinfo->si_buf_offset = offset;

	/*
	 * If we are using the copy buffer for anything over the segment
	 * boundary, and this page is over the segment boundary.
	 *   OR
	 * if the DMA engine can't reach the physical address.
	 */
	if (((sglinfo->si_bounce_on_seg) &&
	    ((raddr + psize) > sglinfo->si_segmask)) ||
	    ((raddr < addrlo) || ((raddr + psize) > addrhi))) {
		/*
		 * Increase how much copy buffer we use. We always increase by
		 * pagesize so we don't have to worry about converting offsets.
		 * Set a flag in the cookies dmac_type to indicate that it uses
		 * the copy buffer. If this isn't the last cookie, go to the
		 * next cookie (since we separate each page which uses the copy
		 * buffer in case the copy buffer is not physically contiguous).
		 */
		sglinfo->si_copybuf_req += MMU_PAGESIZE;
		sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF;
		if ((cnt + 1) < sglinfo->si_max_pages) {
			cnt++;
			sgl[cnt].dmac_laddress = 0;
			sgl[cnt].dmac_size = 0;
			sgl[cnt].dmac_type = 0;
		}
	}

	/*
	 * save this page's physical address so we can figure out if the next
	 * page is physically contiguous. Keep decrementing size until we are
	 * done with the buffer.
	 */
	last_page = raddr & MMU_PAGEMASK;
	size -= psize;

	while (size > 0) {
		/* Get the size for this page (i.e. partial or full page) */
		psize = MIN(size, MMU_PAGESIZE);

		if (buftype == DMA_OTYP_PAGES) {
			/* get the paddr from the page_t */
			ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
			paddr = pfn_to_pa(pp->p_pagenum);
			pp = pp->p_next;
		} else if (pplist != NULL) {
			/* index into the array of page_t's to get the paddr */
			ASSERT(!PP_ISFREE(pplist[pcnt]));
			paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
			pcnt++;
		} else {
			/* call into the VM to get the paddr */
			paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat,
			    vaddr));
			vaddr += psize;
		}

		raddr = ROOTNEX_PADDR_TO_RBASE(paddr);

		/*
		 * If we are using the copy buffer for anything over the
		 * segment boundary, and this page is over the segment
		 * boundary.
		 *   OR
		 * if the DMA engine can't reach the physical address.
		 */
		if (((sglinfo->si_bounce_on_seg) &&
		    ((raddr + psize) > sglinfo->si_segmask)) ||
		    ((raddr < addrlo) || ((raddr + psize) > addrhi))) {

			sglinfo->si_copybuf_req += MMU_PAGESIZE;

			/*
			 * if there is something in the current cookie, go to
			 * the next one. We only want one page in a cookie which
			 * uses the copybuf since the copybuf doesn't have to
			 * be physically contiguous.
			 */
			if (sgl[cnt].dmac_size != 0) {
				cnt++;
			}
			sgl[cnt].dmac_laddress = raddr;
			sgl[cnt].dmac_size = psize;
#if defined(__amd64)
			sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF;
#else
			/*
			 * save the buf offset for 32-bit kernel. used in the
			 * obsoleted interfaces.
			 */
			sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF |
			    (dmar_object->dmao_size - size);
#endif
			/* if this isn't the last cookie, go to the next one */
			if ((cnt + 1) < sglinfo->si_max_pages) {
				cnt++;
				sgl[cnt].dmac_laddress = 0;
				sgl[cnt].dmac_size = 0;
				sgl[cnt].dmac_type = 0;
			}

		/*
		 * this page didn't need the copy buffer, if it's not
		 * physically contiguous, or it would put us over a segment
		 * boundary, or it puts us over the max cookie size, or the
		 * current sgl doesn't have anything in it.
		 */
		} else if (((last_page + MMU_PAGESIZE) != raddr) ||
		    !(raddr & sglinfo->si_segmask) ||
		    ((sgl[cnt].dmac_size + psize) > maxseg) ||
		    (sgl[cnt].dmac_size == 0)) {
			/*
			 * if we're not already in a new cookie, go to the next
			 * cookie.
			 */
			if (sgl[cnt].dmac_size != 0) {
				cnt++;
			}

			/* save the cookie information */
			sgl[cnt].dmac_laddress = raddr;
			sgl[cnt].dmac_size = psize;
#if defined(__amd64)
			sgl[cnt].dmac_type = 0;
#else
			/*
			 * save the buf offset for 32-bit kernel. used in the
			 * obsoleted interfaces.
			 */
			sgl[cnt].dmac_type = dmar_object->dmao_size - size;
#endif

		/*
		 * this page didn't need the copy buffer, it is physically
		 * contiguous with the last page, and it's <= the max cookie
		 * size.
		 */
		} else {
			sgl[cnt].dmac_size += psize;

			/*
			 * if this exactly == the maximum cookie size, and
			 * it isn't the last cookie, go to the next cookie.
			 */
			if (((sgl[cnt].dmac_size + psize) == maxseg) &&
			    ((cnt + 1) < sglinfo->si_max_pages)) {
				cnt++;
				sgl[cnt].dmac_laddress = 0;
				sgl[cnt].dmac_size = 0;
				sgl[cnt].dmac_type = 0;
			}
		}

		/*
		 * save this page's physical address so we can figure out if
		 * the next page is physically contiguous. Keep decrementing
		 * size until we are done with the buffer.
		 */
		last_page = raddr;
		size -= psize;
	}

	/* we're done, save away how many cookies the sgl has */
	if (sgl[cnt].dmac_size == 0) {
		ASSERT(cnt < sglinfo->si_max_pages);
		sglinfo->si_sgl_size = cnt;
	} else {
		sglinfo->si_sgl_size = cnt + 1;
	}
}
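
/*
 * Illustrative walk-through of rootnex_get_sgl() (hypothetical numbers): a
 * 12 KB kernel buffer starting at physical address 0x10000800 with 4 KB
 * pages yields a first cookie of 0x800 bytes (the remainder of the first
 * page). If the following pages are physically contiguous, reachable by the
 * DMA engine, and fit within si_max_cookie_size, they are merged into that
 * same cookie, giving a single 12 KB cookie; a page the engine could not
 * reach would instead be redirected through its own ROOTNEX_USES_COPYBUF
 * cookie.
 */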
static void
rootnex_dvma_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
    rootnex_sglinfo_t *sglinfo)
{
	uint64_t offset;
	uint64_t maxseg;
	uint64_t dvaddr;
	struct dvmaseg *dvs;
	uint64_t paddr;
	uint32_t psize, ssize;
	uint32_t size;
	uint_t cnt;
	int physcontig;

	ASSERT(dmar_object->dmao_type == DMA_OTYP_DVADDR);

	/* shortcuts */
	maxseg = sglinfo->si_max_cookie_size;
	size = dmar_object->dmao_size;

	cnt = 0;
	sglinfo->si_bounce_on_seg = B_FALSE;

	dvs = dmar_object->dmao_obj.dvma_obj.dv_seg;
	offset = dmar_object->dmao_obj.dvma_obj.dv_off;
	ssize = dvs->dvs_len;
	paddr = dvs->dvs_start;
	paddr += offset;
	psize = MIN(ssize, (maxseg - offset));
	dvaddr = paddr + psize;
	ssize -= psize;

	sgl[cnt].dmac_laddress = paddr;
	sgl[cnt].dmac_size = psize;
	sgl[cnt].dmac_type = 0;

	size -= psize;
	while (size > 0) {
		if (ssize == 0) {
			dvs++;
			ssize = dvs->dvs_len;
			dvaddr = dvs->dvs_start;
			physcontig = 0;
		} else {
			physcontig = 1;
		}

		paddr = dvaddr;
		psize = MIN(ssize, maxseg);
		dvaddr += psize;
		ssize -= psize;

		if (!physcontig || !(paddr & sglinfo->si_segmask) ||
		    ((sgl[cnt].dmac_size + psize) > maxseg) ||
		    (sgl[cnt].dmac_size == 0)) {
			/*
			 * if we're not already in a new cookie, go to the next
			 * cookie.
			 */
			if (sgl[cnt].dmac_size != 0) {
				cnt++;
			}

			/* save the cookie information */
			sgl[cnt].dmac_laddress = paddr;
			sgl[cnt].dmac_size = psize;
			sgl[cnt].dmac_type = 0;
		} else {
			sgl[cnt].dmac_size += psize;

			/*
			 * if this exactly == the maximum cookie size, and
			 * it isn't the last cookie, go to the next cookie.
			 */
			if (((sgl[cnt].dmac_size + psize) == maxseg) &&
			    ((cnt + 1) < sglinfo->si_max_pages)) {
				cnt++;
				sgl[cnt].dmac_laddress = 0;
				sgl[cnt].dmac_size = 0;
				sgl[cnt].dmac_type = 0;
			}
		}
		size -= psize;
	}

	/* we're done, save away how many cookies the sgl has */
	if (sgl[cnt].dmac_size == 0) {
		sglinfo->si_sgl_size = cnt;
	} else {
		sglinfo->si_sgl_size = cnt + 1;
	}
}
/*
 * rootnex_bind_slowpath()
 *    Call in the bind path if the calling driver can't use the sgl without
 *    modifying it. We either need to use the copy buffer and/or we will end up
 *    with a partial bind.
 */
static int
rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
    rootnex_dma_t *dma, ddi_dma_attr_t *attr, ddi_dma_obj_t *dmao, int kmflag)
{
	rootnex_sglinfo_t *sinfo;
	rootnex_window_t *window;
	ddi_dma_cookie_t *cookie;
	size_t copybuf_used;
	size_t dmac_size;
	boolean_t partial;
	off_t cur_offset;
	page_t *cur_pp;
	major_t mnum;
	int e;
	int i;

	sinfo = &dma->dp_sglinfo;
	copybuf_used = 0;
	partial = B_FALSE;

	/*
	 * If we're using the copybuf, set the copybuf state in dma struct.
	 * Needs to be first since it sets the copy buffer size.
	 */
	if (sinfo->si_copybuf_req != 0) {
		e = rootnex_setup_copybuf(hp, dmareq, dma, attr);
		if (e != DDI_SUCCESS) {
			return (e);
		}
	} else {
		dma->dp_copybuf_size = 0;
	}

	/*
	 * Figure out if we need to do a partial mapping. If so, figure out
	 * if we need to trim the buffers when we munge the sgl.
	 */
	if ((dma->dp_copybuf_size < sinfo->si_copybuf_req) ||
	    (dmao->dmao_size > dma->dp_maxxfer) ||
	    ((unsigned)attr->dma_attr_sgllen < sinfo->si_sgl_size)) {
		dma->dp_partial_required = B_TRUE;
		if (attr->dma_attr_granular != 1) {
			dma->dp_trim_required = B_TRUE;
		}
	} else {
		dma->dp_partial_required = B_FALSE;
		dma->dp_trim_required = B_FALSE;
	}

	/* If we need to do a partial bind, make sure the driver supports it */
	if (dma->dp_partial_required &&
	    !(dmareq->dmar_flags & DDI_DMA_PARTIAL)) {

		mnum = ddi_driver_major(dma->dp_dip);
		/*
		 * patchable which allows us to print one warning per major
		 * number.
		 */
		if ((rootnex_bind_warn) &&
		    ((rootnex_warn_list[mnum] & ROOTNEX_BIND_WARNING) == 0)) {
			rootnex_warn_list[mnum] |= ROOTNEX_BIND_WARNING;
			cmn_err(CE_WARN, "!%s: coding error detected, the "
			    "driver is using ddi_dma_attr(9S) incorrectly. "
			    "There is a small risk of data corruption in "
			    "particular with large I/Os. The driver should be "
			    "replaced with a corrected version for proper "
			    "system operation. To disable this warning, add "
			    "'set rootnex:rootnex_bind_warn=0' to "
			    "/etc/system(4).", ddi_driver_name(dma->dp_dip));
		}
		return (DDI_DMA_TOOBIG);
	}

	/*
	 * we might need multiple windows, setup state to handle them. In this
	 * code path, we will have at least one window.
	 */
	e = rootnex_setup_windows(hp, dma, attr, dmao, kmflag);
	if (e != DDI_SUCCESS) {
		rootnex_teardown_copybuf(dma);
		return (e);
	}

	window = &dma->dp_window[0];
	cookie = &dma->dp_cookies[0];
	cur_offset = 0;
	rootnex_init_win(hp, dma, window, cookie, cur_offset);
	if (dmao->dmao_type == DMA_OTYP_PAGES) {
		cur_pp = dmareq->dmar_object.dmao_obj.pp_obj.pp_pp;
	}

	/* loop through all the cookies we got back from get_sgl() */
	for (i = 0; i < sinfo->si_sgl_size; i++) {
		/*
		 * If we're using the copy buffer, check this cookie and setup
		 * its associated copy buffer state. If this cookie uses the
		 * copy buffer, make sure we sync this window during dma_sync.
		 */
		if (dma->dp_copybuf_size > 0) {
			rootnex_setup_cookie(dmao, dma, cookie,
			    cur_offset, &copybuf_used, &cur_pp);
			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
				window->wd_dosync = B_TRUE;
			}
		}

		/*
		 * save away the cookie size, since it could be modified in
		 * the windowing code.
		 */
		dmac_size = cookie->dmac_size;

		/* if we went over max copybuf size */
		if (dma->dp_copybuf_size &&
		    (copybuf_used > dma->dp_copybuf_size)) {
			partial = B_TRUE;
			e = rootnex_copybuf_window_boundary(hp, dma, &window,
			    cookie, cur_offset, &copybuf_used);
			if (e != DDI_SUCCESS) {
				rootnex_teardown_copybuf(dma);
				rootnex_teardown_windows(dma);
				return (e);
			}

			/*
			 * if the cookie uses the copy buffer, make sure the
			 * new window we just moved to is set to sync.
			 */
			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
				window->wd_dosync = B_TRUE;
			}
			ROOTNEX_DPROBE1(rootnex__copybuf__window, dev_info_t *,
			    dma->dp_dip);

		/* if the cookie cnt == max sgllen, move to the next window */
		} else if (window->wd_cookie_cnt >=
		    (unsigned)attr->dma_attr_sgllen) {
			partial = B_TRUE;
			ASSERT(window->wd_cookie_cnt == attr->dma_attr_sgllen);
			e = rootnex_sgllen_window_boundary(hp, dma, &window,
			    cookie, attr, cur_offset);
			if (e != DDI_SUCCESS) {
				rootnex_teardown_copybuf(dma);
				rootnex_teardown_windows(dma);
				return (e);
			}

			/*
			 * if the cookie uses the copy buffer, make sure the
			 * new window we just moved to is set to sync.
			 */
			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
				window->wd_dosync = B_TRUE;
			}
			ROOTNEX_DPROBE1(rootnex__sgllen__window, dev_info_t *,
			    dma->dp_dip);

		/* else if we will be over maxxfer */
		} else if ((window->wd_size + dmac_size) >
		    dma->dp_maxxfer) {
			partial = B_TRUE;
			e = rootnex_maxxfer_window_boundary(hp, dma, &window,
			    cookie);
			if (e != DDI_SUCCESS) {
				rootnex_teardown_copybuf(dma);
				rootnex_teardown_windows(dma);
				return (e);
			}

			/*
			 * if the cookie uses the copy buffer, make sure the
			 * new window we just moved to is set to sync.
			 */
			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
				window->wd_dosync = B_TRUE;
			}
			ROOTNEX_DPROBE1(rootnex__maxxfer__window, dev_info_t *,
			    dma->dp_dip);

		/* else this cookie fits in the current window */
		} else {
			window->wd_cookie_cnt++;
			window->wd_size += dmac_size;
		}

		/* track our offset into the buffer, go to the next cookie */
		ASSERT(dmac_size <= dmao->dmao_size);
		ASSERT(cookie->dmac_size <= dmac_size);
		cur_offset += dmac_size;
		cookie++;
	}

	/* if we ended up with a zero sized window in the end, clean it up */
	if (window->wd_size == 0) {
		hp->dmai_nwin--;
		window--;
	}

	ASSERT(window->wd_trim.tr_trim_last == B_FALSE);

	if (!partial) {
		return (DDI_DMA_MAPPED);
	}

	ASSERT(dma->dp_partial_required);
	return (DDI_DMA_PARTIAL_MAP);
}
/*
 * rootnex_setup_copybuf()
 *    Called in bind slowpath. Figures out if we're going to use the copy
 *    buffer, and if we do, sets up the basic state to handle it.
 */
static int
rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
    rootnex_dma_t *dma, ddi_dma_attr_t *attr)
{
	rootnex_sglinfo_t *sinfo;
	ddi_dma_attr_t lattr;
	size_t max_copybuf;
	int cansleep;
	int e;
#if !defined(__amd64)
	int vmflag;
#endif

	ASSERT(!dma->dp_dvma_used);

	sinfo = &dma->dp_sglinfo;

	/* read this first so it's consistent through the routine */
	max_copybuf = i_ddi_copybuf_size() & MMU_PAGEMASK;

	/* We need to call into the rootnex on ddi_dma_sync() */
	hp->dmai_rflags &= ~DMP_NOSYNC;

	/* make sure the copybuf size <= the max size */
	dma->dp_copybuf_size = MIN(sinfo->si_copybuf_req, max_copybuf);
	ASSERT((dma->dp_copybuf_size & MMU_PAGEOFFSET) == 0);

#if !defined(__amd64)
	/*
	 * if we don't have kva space to copy to/from, allocate the KVA space
	 * now. We only do this for the 32-bit kernel. We use seg kpm space for
	 * the 64-bit kernel.
	 */
	if ((dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) ||
	    (dmareq->dmar_object.dmao_obj.virt_obj.v_as != NULL)) {

		/* convert the sleep flags */
		if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
			vmflag = VM_SLEEP;
		} else {
			vmflag = VM_NOSLEEP;
		}

		/* allocate Kernel VA space that we can bcopy to/from */
		dma->dp_kva = vmem_alloc(heap_arena, dma->dp_copybuf_size,
		    vmflag);
		if (dma->dp_kva == NULL) {
			return (DDI_DMA_NORESOURCES);
		}
	}
#endif

	/* convert the sleep flags */
	if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
		cansleep = 1;
	} else {
		cansleep = 0;
	}

	/*
	 * Allocate the actual copy buffer. This needs to fit within the DMA
	 * engine limits, so we can't use kmem_alloc... We don't need
	 * contiguous memory (sgllen) since we will be forcing windows on
	 * sgllen anyway.
	 */
	lattr = *attr;
	lattr.dma_attr_align = MMU_PAGESIZE;
	lattr.dma_attr_sgllen = -1;	/* no limit */
	/*
	 * if we're using the copy buffer because of seg, use that for our
	 * upper address limit.
	 */
	if (sinfo->si_bounce_on_seg) {
		lattr.dma_attr_addr_hi = lattr.dma_attr_seg;
	}
	e = i_ddi_mem_alloc(dma->dp_dip, &lattr, dma->dp_copybuf_size, cansleep,
	    0, NULL, &dma->dp_cbaddr, &dma->dp_cbsize, NULL);
	if (e != DDI_SUCCESS) {
#if !defined(__amd64)
		if (dma->dp_kva != NULL) {
			vmem_free(heap_arena, dma->dp_kva,
			    dma->dp_copybuf_size);
			dma->dp_kva = NULL;
		}
#endif
		return (DDI_DMA_NORESOURCES);
	}

	ROOTNEX_DPROBE2(rootnex__alloc__copybuf, dev_info_t *, dma->dp_dip,
	    size_t, dma->dp_copybuf_size);

	return (DDI_SUCCESS);
}
/*
 * rootnex_setup_windows()
 *    Called in bind slowpath to setup the window state. We always have windows
 *    in the slowpath. Even if the window count = 1.
 */
static int
rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
    ddi_dma_attr_t *attr, ddi_dma_obj_t *dmao, int kmflag)
{
	rootnex_window_t *windowp;
	rootnex_sglinfo_t *sinfo;
	size_t copy_state_size;
	size_t win_state_size;
	size_t state_available;
	size_t space_needed;
	uint_t copybuf_win;
	uint_t maxxfer_win;
	size_t space_used;
	uint_t sglwin;

	sinfo = &dma->dp_sglinfo;

	dma->dp_current_win = 0;
	hp->dmai_nwin = 0;

	/* If we don't need to do a partial, we only have one window */
	if (!dma->dp_partial_required) {
		dma->dp_max_win = 1;

	/*
	 * we need multiple windows, need to figure out the worst case number
	 * of windows.
	 */
	} else {
		/*
		 * if we need windows because we need more copy buffer that
		 * we allow, the worst case number of windows we could need
		 * here would be (copybuf space required / copybuf space that
		 * we have) plus one for remainder, and plus 2 to handle the
		 * extra pages on the trim for the first and last pages of the
		 * buffer (a page is the minimum window size so under the right
		 * attr settings, you could have a window for each page).
		 * The last page will only be hit here if the size is not a
		 * multiple of the granularity (which theoretically shouldn't
		 * be the case but never has been enforced, so we could have
		 * broken things without it).
		 */
		if (sinfo->si_copybuf_req > dma->dp_copybuf_size) {
			ASSERT(dma->dp_copybuf_size > 0);
			copybuf_win = (sinfo->si_copybuf_req /
			    dma->dp_copybuf_size) + 1 + 2;
		} else {
			copybuf_win = 0;
		}

		/*
		 * if we need windows because we have more cookies than the H/W
		 * can handle, the number of windows we would need here would
		 * be (cookie count / cookies count H/W supports minus 1[for
		 * trim]) plus one for remainder.
		 */
		if ((unsigned)attr->dma_attr_sgllen < sinfo->si_sgl_size) {
			sglwin = (sinfo->si_sgl_size /
			    (attr->dma_attr_sgllen - 1)) + 1;
		} else {
			sglwin = 0;
		}

		/*
		 * if we need windows because we're binding more memory than
		 * the H/W can transfer at once, the number of windows we would
		 * need here would be (xfer count / max xfer H/W supports) plus
		 * one for remainder, and plus 2 to handle the extra pages on
		 * the trim (see above comment about trim)
		 */
		if (dmao->dmao_size > dma->dp_maxxfer) {
			maxxfer_win = (dmao->dmao_size /
			    dma->dp_maxxfer) + 1 + 2;
		} else {
			maxxfer_win = 0;
		}
		dma->dp_max_win = copybuf_win + sglwin + maxxfer_win;
		ASSERT(dma->dp_max_win > 0);
	}
	win_state_size = dma->dp_max_win * sizeof (rootnex_window_t);
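
	/*
	 * Worked example of the worst-case math above (illustrative numbers
	 * only): with si_copybuf_req = 48 KB and dp_copybuf_size = 16 KB,
	 * copybuf_win = (48 / 16) + 1 + 2 = 6; with an sgl of 100 cookies and
	 * dma_attr_sgllen = 17, sglwin = (100 / 16) + 1 = 7; with a 1 MB bind
	 * and dp_maxxfer = 256 KB, maxxfer_win = (1 MB / 256 KB) + 1 + 2 = 7,
	 * for a dp_max_win of 20.
	 */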
	/*
	 * Get space for window and potential copy buffer state. Before we
	 * go and allocate memory, see if we can get away with using what's
	 * left in the pre-allocated state or the dynamically allocated sgl.
	 */
	space_used = (uintptr_t)(sinfo->si_sgl_size *
	    sizeof (ddi_dma_cookie_t));

	/* if we dynamically allocated space for the cookies */
	if (dma->dp_need_to_free_cookie) {
		/* if we have more space in the pre-allocated buffer, use it */
		ASSERT(space_used <= dma->dp_cookie_size);
		if ((dma->dp_cookie_size - space_used) <=
		    rootnex_state->r_prealloc_size) {
			state_available = rootnex_state->r_prealloc_size;
			windowp = (rootnex_window_t *)dma->dp_prealloc_buffer;

		/*
		 * else, we have more free space in the dynamically allocated
		 * buffer, i.e. the buffer wasn't worst case fragmented so we
		 * didn't need a lot of cookies.
		 */
		} else {
			state_available = dma->dp_cookie_size - space_used;
			windowp = (rootnex_window_t *)
			    &dma->dp_cookies[sinfo->si_sgl_size];
		}

	/* we used the pre-alloced buffer */
	} else {
		ASSERT(space_used <= rootnex_state->r_prealloc_size);
		state_available = rootnex_state->r_prealloc_size - space_used;
		windowp = (rootnex_window_t *)
		    &dma->dp_cookies[sinfo->si_sgl_size];
	}

	/*
	 * figure out how much state we need to track the copy buffer. Add an
	 * additional 8 bytes for pointer alignment later.
	 */
	if (dma->dp_copybuf_size > 0) {
		copy_state_size = sinfo->si_max_pages *
		    sizeof (rootnex_pgmap_t);
	} else {
		copy_state_size = 0;
	}
	/* add an additional 8 bytes for pointer alignment */
	space_needed = win_state_size + copy_state_size + 0x8;

	/* if we have enough space already, use it */
	if (state_available >= space_needed) {
		dma->dp_window = windowp;
		dma->dp_need_to_free_window = B_FALSE;

	/* not enough space, need to allocate more. */
	} else {
		dma->dp_window = kmem_alloc(space_needed, kmflag);
		if (dma->dp_window == NULL) {
			return (DDI_DMA_NORESOURCES);
		}
		dma->dp_need_to_free_window = B_TRUE;
		dma->dp_window_size = space_needed;
		ROOTNEX_DPROBE2(rootnex__bind__sp__alloc, dev_info_t *,
		    dma->dp_dip, size_t, space_needed);
	}

	/*
	 * we allocate copy buffer state and window state at the same time.
	 * setup our copy buffer state pointers. Make sure it's aligned.
	 */
	if (dma->dp_copybuf_size > 0) {
		dma->dp_pgmap = (rootnex_pgmap_t *)(((uintptr_t)
		    &dma->dp_window[dma->dp_max_win] + 0x7) & ~0x7);

#if !defined(__amd64)
		/*
		 * make sure all pm_mapped, pm_vaddr, and pm_pp are set to
		 * false/NULL. Should be quicker to bzero vs loop and set.
		 */
		bzero(dma->dp_pgmap, copy_state_size);
#endif
	} else {
		dma->dp_pgmap = NULL;
	}

	return (DDI_SUCCESS);
}
/*
 * rootnex_teardown_copybuf()
 *    cleans up after rootnex_setup_copybuf()
 */
static void
rootnex_teardown_copybuf(rootnex_dma_t *dma)
{
#if !defined(__amd64)
	int i;

	/*
	 * if we allocated kernel heap VMEM space, go through all the pages and
	 * map out any of the ones that we mapped into the kernel heap VMEM
	 * arena. Then free the VMEM space.
	 */
	if (dma->dp_kva != NULL) {
		for (i = 0; i < dma->dp_sglinfo.si_max_pages; i++) {
			if (dma->dp_pgmap[i].pm_mapped) {
				hat_unload(kas.a_hat, dma->dp_pgmap[i].pm_kaddr,
				    MMU_PAGESIZE, HAT_UNLOAD);
				dma->dp_pgmap[i].pm_mapped = B_FALSE;
			}
		}

		vmem_free(heap_arena, dma->dp_kva, dma->dp_copybuf_size);
	}

#endif

	/* if we allocated a copy buffer, free it */
	if (dma->dp_cbaddr != NULL) {
		i_ddi_mem_free(dma->dp_cbaddr, NULL);
	}
}
/*
 * rootnex_teardown_windows()
 *    cleans up after rootnex_setup_windows()
 */
static void
rootnex_teardown_windows(rootnex_dma_t *dma)
{
	/*
	 * if we had to allocate window state on the last bind (because we
	 * didn't have enough pre-allocated space in the handle), free it.
	 */
	if (dma->dp_need_to_free_window) {
		kmem_free(dma->dp_window, dma->dp_window_size);
	}
}
/*
 * rootnex_init_win()
 *    Called in bind slow path during creation of a new window. Initializes
 *    window state to default values.
 */
/*ARGSUSED*/
static void
rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
    rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset)
{
	hp->dmai_nwin++;
	window->wd_dosync = B_FALSE;
	window->wd_offset = cur_offset;
	window->wd_size = 0;
	window->wd_first_cookie = cookie;
	window->wd_cookie_cnt = 0;
	window->wd_trim.tr_trim_first = B_FALSE;
	window->wd_trim.tr_trim_last = B_FALSE;
	window->wd_trim.tr_first_copybuf_win = B_FALSE;
	window->wd_trim.tr_last_copybuf_win = B_FALSE;
#if !defined(__amd64)
	window->wd_remap_copybuf = dma->dp_cb_remaping;
#endif
}
/*
 * rootnex_setup_cookie()
 *    Called in the bind slow path when the sgl uses the copy buffer. If any of
 *    the sgl uses the copy buffer, we need to go through each cookie, figure
 *    out if it uses the copy buffer, and if it does, save away everything
 *    we'll need during sync.
 */
static void
rootnex_setup_cookie(ddi_dma_obj_t *dmar_object, rootnex_dma_t *dma,
    ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used,
    page_t **cur_pp)
{
	boolean_t copybuf_sz_power_2;
	rootnex_sglinfo_t *sinfo;
	paddr_t paddr;
	uint_t pidx;
	uint_t pcnt;
	off_t poff;
#if defined(__amd64)
	pfn_t pfn;
#else
	page_t **pplist;
#endif

	ASSERT(dmar_object->dmao_type != DMA_OTYP_DVADDR);

	sinfo = &dma->dp_sglinfo;

	/*
	 * Calculate the page index relative to the start of the buffer. The
	 * index to the current page for our buffer is the offset into the
	 * first page of the buffer plus our current offset into the buffer
	 * itself, shifted of course...
	 */
	pidx = (sinfo->si_buf_offset + cur_offset) >> MMU_PAGESHIFT;
	ASSERT(pidx < sinfo->si_max_pages);

	/* if this cookie uses the copy buffer */
	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
		/*
		 * NOTE: we know that since this cookie uses the copy buffer, it
		 * is <= MMU_PAGESIZE.
		 */

		/*
		 * get the offset into the page. For the 64-bit kernel, get the
		 * pfn which we'll use with seg kpm.
		 */
		poff = cookie->dmac_laddress & MMU_PAGEOFFSET;
#if defined(__amd64)
		/* mfn_to_pfn() is a NOP on i86pc */
		pfn = mfn_to_pfn(cookie->dmac_laddress >> MMU_PAGESHIFT);
#endif /* __amd64 */

		/* figure out if the copybuf size is a power of 2 */
		if (!ISP2(dma->dp_copybuf_size)) {
			copybuf_sz_power_2 = B_FALSE;
		} else {
			copybuf_sz_power_2 = B_TRUE;
		}

		/* This page uses the copy buffer */
		dma->dp_pgmap[pidx].pm_uses_copybuf = B_TRUE;

		/*
		 * save the copy buffer KVA that we'll use with this page.
		 * if we still fit within the copybuf, it's a simple add.
		 * otherwise, we need to wrap over using & or % accordingly.
		 */
		if ((*copybuf_used + MMU_PAGESIZE) <= dma->dp_copybuf_size) {
			dma->dp_pgmap[pidx].pm_cbaddr = dma->dp_cbaddr +
			    *copybuf_used;
		} else {
			if (copybuf_sz_power_2) {
				dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)(
				    (uintptr_t)dma->dp_cbaddr +
				    (*copybuf_used &
				    (dma->dp_copybuf_size - 1)));
			} else {
				dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)(
				    (uintptr_t)dma->dp_cbaddr +
				    (*copybuf_used % dma->dp_copybuf_size));
			}
		}

		/*
		 * overwrite the cookie physical address with the physical
		 * address of the copy buffer page that we will DMA into.
		 */
		paddr = pfn_to_pa(hat_getpfnum(kas.a_hat,
		    dma->dp_pgmap[pidx].pm_cbaddr)) + poff;

		cookie->dmac_laddress = ROOTNEX_PADDR_TO_RBASE(paddr);

		/* if we have a kernel VA, it's easy, just save that address */
		if ((dmar_object->dmao_type != DMA_OTYP_PAGES) &&
		    (sinfo->si_asp == &kas)) {
			/*
			 * save away the page aligned virtual address of the
			 * driver buffer. Offsets are handled in the sync code.
			 */
			dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)(((uintptr_t)
			    dmar_object->dmao_obj.virt_obj.v_addr + cur_offset)
			    & MMU_PAGEMASK);
#if !defined(__amd64)
			/*
			 * we didn't need to, and will never need to, map this
			 * page.
			 */
			dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
#endif

		/* we don't have a kernel VA. We need one for the bcopy. */
		} else {
#if defined(__amd64)
			/*
			 * for the 64-bit kernel, it's easy. We use seg kpm to
			 * get a Kernel VA for the corresponding pfn.
			 */
			dma->dp_pgmap[pidx].pm_kaddr = hat_kpm_pfn2va(pfn);
#else
			/*
			 * for the 32-bit kernel, this is a pain. First we'll
			 * save away the page_t or user VA for this page. This
			 * is needed in rootnex_dma_win() when we switch to a
			 * new window which requires us to re-map the copy
			 * buffer.
			 */
			pplist = dmar_object->dmao_obj.virt_obj.v_priv;
			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
				dma->dp_pgmap[pidx].pm_pp = *cur_pp;
				dma->dp_pgmap[pidx].pm_vaddr = NULL;
			} else if (pplist != NULL) {
				dma->dp_pgmap[pidx].pm_pp = pplist[pidx];
				dma->dp_pgmap[pidx].pm_vaddr = NULL;
			} else {
				dma->dp_pgmap[pidx].pm_pp = NULL;
				dma->dp_pgmap[pidx].pm_vaddr = (caddr_t)
				    (((uintptr_t)
				    dmar_object->dmao_obj.virt_obj.v_addr +
				    cur_offset) & MMU_PAGEMASK);
			}

			/*
			 * save away the page aligned virtual address which was
			 * allocated from the kernel heap arena (taking into
			 * account if we need more copy buffer than we alloced
			 * and use multiple windows to handle this, i.e. &,%).
			 * NOTE: there isn't any physical memory backing up
			 * this virtual address space currently.
			 */
			if ((*copybuf_used + MMU_PAGESIZE) <=
			    dma->dp_copybuf_size) {
				dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
				    (((uintptr_t)dma->dp_kva + *copybuf_used) &
				    MMU_PAGEMASK);
			} else {
				if (copybuf_sz_power_2) {
					dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
					    (((uintptr_t)dma->dp_kva +
					    (*copybuf_used &
					    (dma->dp_copybuf_size - 1))) &
					    MMU_PAGEMASK);
				} else {
					dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
					    (((uintptr_t)dma->dp_kva +
					    (*copybuf_used %
					    dma->dp_copybuf_size)) &
					    MMU_PAGEMASK);
				}
			}

			/*
			 * if we haven't used up the available copy buffer yet,
			 * map the kva to the physical page.
			 */
			if (!dma->dp_cb_remaping && ((*copybuf_used +
			    MMU_PAGESIZE) <= dma->dp_copybuf_size)) {
				dma->dp_pgmap[pidx].pm_mapped = B_TRUE;
				if (dma->dp_pgmap[pidx].pm_pp != NULL) {
					i86_pp_map(dma->dp_pgmap[pidx].pm_pp,
					    dma->dp_pgmap[pidx].pm_kaddr);
				} else {
					i86_va_map(dma->dp_pgmap[pidx].pm_vaddr,
					    sinfo->si_asp,
					    dma->dp_pgmap[pidx].pm_kaddr);
				}

			/*
			 * we've used up the available copy buffer, this page
			 * will have to be mapped during rootnex_dma_win() when
			 * we switch to a new window which requires a re-map
			 * the copy buffer. (32-bit kernel only)
			 */
			} else {
				dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
			}
#endif
			/* go to the next page_t */
			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
				*cur_pp = (*cur_pp)->p_next;
			}
		}

		/* add to the copy buffer count */
		*copybuf_used += MMU_PAGESIZE;

	/*
	 * This cookie doesn't use the copy buffer. Walk through the pages this
	 * cookie occupies to reflect this.
	 */
	} else {
		/*
		 * figure out how many pages the cookie occupies. We need to
		 * use the original page offset of the buffer and the cookies
		 * offset in the buffer to do this.
		 */
		poff = (sinfo->si_buf_offset + cur_offset) & MMU_PAGEOFFSET;
		pcnt = mmu_btopr(cookie->dmac_size + poff);

		while (pcnt > 0) {
#if !defined(__amd64)
			/*
			 * the 32-bit kernel doesn't have seg kpm, so we need
			 * to map in the driver buffer (if it didn't come down
			 * with a kernel VA) on the fly. Since this page doesn't
			 * use the copy buffer, it's not, nor will it ever,
			 * have to be mapped in.
			 */
			dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
#endif
			dma->dp_pgmap[pidx].pm_uses_copybuf = B_FALSE;

			/*
			 * we need to update pidx and cur_pp or we'll lose
			 * track of where we are.
			 */
			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
				*cur_pp = (*cur_pp)->p_next;
			}
			pidx++;
			pcnt--;
		}
	}
}
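
/*
 * Note on the wrap arithmetic in rootnex_setup_cookie() above: when the copy
 * buffer size is a power of two, (*copybuf_used & (dp_copybuf_size - 1)) and
 * (*copybuf_used % dp_copybuf_size) are equivalent; e.g. with a 16 KB
 * copybuf, a running offset of 20 KB wraps to 4 KB either way. The & form is
 * simply the cheaper computation, which is why the power-of-2 case is
 * detected up front.
 */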
/*
 * rootnex_sgllen_window_boundary()
 *    Called in the bind slow path when the next cookie causes us to exceed (in
 *    this case == since we start at 0 and sgllen starts at 1) the maximum sgl
 *    length supported by the DMA H/W.
 */
static int
rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
    rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, ddi_dma_attr_t *attr,
    off_t cur_offset)
{
	off_t new_offset;
	size_t trim_sz;
	off_t coffset;

	/*
	 * if we know we'll never have to trim, it's pretty easy. Just move to
	 * the next window and init it. We're done.
	 */
	if (!dma->dp_trim_required) {
		(*windowp)++;
		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
		(*windowp)->wd_cookie_cnt++;
		(*windowp)->wd_size = cookie->dmac_size;
		return (DDI_SUCCESS);
	}

	/* figure out how much we need to trim from the window */
	ASSERT(attr->dma_attr_granular != 0);
	if (dma->dp_granularity_power_2) {
		trim_sz = (*windowp)->wd_size & (attr->dma_attr_granular - 1);
	} else {
		trim_sz = (*windowp)->wd_size % attr->dma_attr_granular;
	}

	/* The window's a whole multiple of granularity. We're done */
	if (trim_sz == 0) {
		(*windowp)++;
		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
		(*windowp)->wd_cookie_cnt++;
		(*windowp)->wd_size = cookie->dmac_size;
		return (DDI_SUCCESS);
	}

	/*
	 * The window's not a whole multiple of granularity, since we know this
	 * is due to the sgllen, we need to go back to the last cookie and trim
	 * that one, add the left over part of the old cookie into the new
	 * window, and then add in the new cookie into the new window.
	 */

	/*
	 * make sure the driver isn't making us do something bad... Trimming
	 * and sgllen == 1 don't go together.
	 */
	if (attr->dma_attr_sgllen == 1) {
		return (DDI_DMA_NOMAPPING);
	}

	/*
	 * first, setup the current window to account for the trim. Need to go
	 * back to the last cookie for this.
	 */
	cookie--;
	(*windowp)->wd_trim.tr_trim_last = B_TRUE;
	(*windowp)->wd_trim.tr_last_cookie = cookie;
	(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
	ASSERT(cookie->dmac_size > trim_sz);
	(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
	(*windowp)->wd_size -= trim_sz;

	/* save the buffer offsets for the next window */
	coffset = cookie->dmac_size - trim_sz;
	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;

	/*
	 * set this now in case this is the first window. all other cases are
	 * set in dma_win()
	 */
	cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;

	/*
	 * initialize the next window using what's left over in the previous
	 * cookie.
	 */
	(*windowp)++;
	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
	(*windowp)->wd_cookie_cnt++;
	(*windowp)->wd_trim.tr_trim_first = B_TRUE;
	(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + coffset;
	(*windowp)->wd_trim.tr_first_size = trim_sz;
	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
		(*windowp)->wd_dosync = B_TRUE;
	}

	/*
	 * now go back to the current cookie and add it to the new window. set
	 * the new window size to the what was left over from the previous
	 * cookie and what's in the current cookie.
	 */
	cookie++;
	(*windowp)->wd_cookie_cnt++;
	(*windowp)->wd_size = trim_sz + cookie->dmac_size;

	/*
	 * trim plus the next cookie could put us over maxxfer (a cookie can be
	 * a max size of maxxfer). Handle that case.
	 */
	if ((*windowp)->wd_size > dma->dp_maxxfer) {
		/*
		 * maxxfer is already a whole multiple of granularity, and this
		 * trim will be <= the previous trim (since a cookie can't be
		 * larger than maxxfer). Make things simple here.
		 */
		trim_sz = (*windowp)->wd_size - dma->dp_maxxfer;
		(*windowp)->wd_trim.tr_trim_last = B_TRUE;
		(*windowp)->wd_trim.tr_last_cookie = cookie;
		(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
		(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
		(*windowp)->wd_size -= trim_sz;
		ASSERT((*windowp)->wd_size == dma->dp_maxxfer);

		/* save the buffer offsets for the next window */
		coffset = cookie->dmac_size - trim_sz;
		new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;

		/* setup the next window */
		(*windowp)++;
		rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
		(*windowp)->wd_cookie_cnt++;
		(*windowp)->wd_trim.tr_trim_first = B_TRUE;
		(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress +
		    coffset;
		(*windowp)->wd_trim.tr_first_size = trim_sz;
	}

	return (DDI_SUCCESS);
}
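
/*
 * Trim example (illustrative numbers): with dma_attr_granular = 512 and a
 * window that has grown to 10,000 bytes, trim_sz = 10000 % 512 = 272, so the
 * window is trimmed back to 9,728 bytes (a whole multiple of the granularity)
 * and the trailing 272 bytes are carried over into the start of the next
 * window via the tr_trim_first state.
 */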
4203 * rootnex_copybuf_window_boundary()
4204 * Called in bind slowpath when we get to a window boundary because we used
4205 * up all the copy buffer that we have.
4208 rootnex_copybuf_window_boundary(ddi_dma_impl_t
*hp
, rootnex_dma_t
*dma
,
4209 rootnex_window_t
**windowp
, ddi_dma_cookie_t
*cookie
, off_t cur_offset
,
4210 size_t *copybuf_used
)
4212 rootnex_sglinfo_t
*sinfo
;
4221 sinfo
= &dma
->dp_sglinfo
;
4224 * the copy buffer should be a whole multiple of page size. We know that
4225 * this cookie is <= MMU_PAGESIZE.
4227 ASSERT(cookie
->dmac_size
<= MMU_PAGESIZE
);
4230 * from now on, all new windows in this bind need to be re-mapped during
4231 * ddi_dma_getwin() (32-bit kernel only). i.e. we ran out out copybuf
4234 #if !defined(__amd64)
4235 dma
->dp_cb_remaping
= B_TRUE
;
4238 /* reset copybuf used */
4242 * if we don't have to trim (since granularity is set to 1), go to the
4243 * next window and add the current cookie to it. We know the current
4244 * cookie uses the copy buffer since we're in this code path.
4246 if (!dma
->dp_trim_required
) {
4248 rootnex_init_win(hp
, dma
, *windowp
, cookie
, cur_offset
);
4250 /* Add this cookie to the new window */
4251 (*windowp
)->wd_cookie_cnt
++;
4252 (*windowp
)->wd_size
+= cookie
->dmac_size
;
4253 *copybuf_used
+= MMU_PAGESIZE
;
4254 return (DDI_SUCCESS
);
4258 * *** may need to trim, figure it out.
4261 /* figure out how much we need to trim from the window */
4262 if (dma
->dp_granularity_power_2
) {
4263 trim_sz
= (*windowp
)->wd_size
&
4264 (hp
->dmai_attr
.dma_attr_granular
- 1);
4266 trim_sz
= (*windowp
)->wd_size
% hp
->dmai_attr
.dma_attr_granular
;
4270 * if the window's a whole multiple of granularity, go to the next
4271 * window, init it, then add in the current cookie. We know the current
4272 * cookie uses the copy buffer since we're in this code path.
4276 rootnex_init_win(hp
, dma
, *windowp
, cookie
, cur_offset
);
4278 /* Add this cookie to the new window */
4279 (*windowp
)->wd_cookie_cnt
++;
4280 (*windowp
)->wd_size
+= cookie
->dmac_size
;
4281 *copybuf_used
+= MMU_PAGESIZE
;
4282 return (DDI_SUCCESS
);
	/*
	 * *** We figured it out, we definitely need to trim
	 */

	/*
	 * make sure the driver isn't making us do something bad...
	 * Trimming and sgllen == 1 don't go together.
	 */
	if (hp->dmai_attr.dma_attr_sgllen == 1) {
		return (DDI_DMA_NOMAPPING);
	}

	/*
	 * first, setup the current window to account for the trim. Need to go
	 * back to the last cookie for this. Some of the last cookie will be in
	 * the current window, and some of the last cookie will be in the new
	 * window. All of the current cookie will be in the new window.
	 */
	cookie--;
	(*windowp)->wd_trim.tr_trim_last = B_TRUE;
	(*windowp)->wd_trim.tr_last_cookie = cookie;
	(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
	ASSERT(cookie->dmac_size > trim_sz);
	(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
	(*windowp)->wd_size -= trim_sz;

	/*
	 * we're trimming the last cookie (not the current cookie). So that
	 * last cookie may or may not have been using the copy buffer (we
	 * know the cookie passed in uses the copy buffer since we're in
	 * this code path).
	 *
	 * If the last cookie doesn't use the copy buffer, there is nothing
	 * special to do. However, if it does use the copy buffer, it will be
	 * both the last page in the current window and the first page in the
	 * next window. Since we are reusing the copy buffer (and KVA space on
	 * the 32-bit kernel), this page will use the end of the copy buffer
	 * in the current window, and the start of the copy buffer in the next
	 * window. Track that info... The cookie physical address was already
	 * set to the copy buffer physical address in setup_cookie..
	 */
	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
		pidx = (sinfo->si_buf_offset + (*windowp)->wd_offset +
		    (*windowp)->wd_size) >> MMU_PAGESHIFT;
		(*windowp)->wd_trim.tr_last_copybuf_win = B_TRUE;
		(*windowp)->wd_trim.tr_last_pidx = pidx;
		(*windowp)->wd_trim.tr_last_cbaddr =
		    dma->dp_pgmap[pidx].pm_cbaddr;
#if !defined(__amd64)
		(*windowp)->wd_trim.tr_last_kaddr =
		    dma->dp_pgmap[pidx].pm_kaddr;
#endif
	}

	/* save the buffer offsets for the next window */
	coffset = cookie->dmac_size - trim_sz;
	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;
	/*
	 * set this now in case this is the first window. all other cases are
	 * set in dma_win()
	 */
	cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;

	/*
	 * initialize the next window using what's left over in the previous
	 * cookie.
	 */
	(*windowp)++;
	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
	(*windowp)->wd_cookie_cnt++;
	(*windowp)->wd_trim.tr_trim_first = B_TRUE;
	(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + coffset;
	(*windowp)->wd_trim.tr_first_size = trim_sz;
	/*
	 * again, we're tracking if the last cookie uses the copy buffer.
	 * read the comment above for more info on why we need to track
	 * additional state.
	 *
	 * For the first cookie in the new window, we need to reset the
	 * physical address we DMA into to the start of the copy buffer plus
	 * any initial page offset which may be present.
	 */
	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
		(*windowp)->wd_dosync = B_TRUE;
		(*windowp)->wd_trim.tr_first_copybuf_win = B_TRUE;
		(*windowp)->wd_trim.tr_first_pidx = pidx;
		(*windowp)->wd_trim.tr_first_cbaddr = dma->dp_cbaddr;
		poff = (*windowp)->wd_trim.tr_first_paddr & MMU_PAGEOFFSET;

		paddr = pfn_to_pa(hat_getpfnum(kas.a_hat, dma->dp_cbaddr)) +
		    poff;
		(*windowp)->wd_trim.tr_first_paddr =
		    ROOTNEX_PADDR_TO_RBASE(paddr);

#if !defined(__amd64)
		(*windowp)->wd_trim.tr_first_kaddr = dma->dp_kva;
#endif
		/* account for the cookie copybuf usage in the new window */
		*copybuf_used += MMU_PAGESIZE;

		/*
		 * every piece of code has to have a hack, and here is this
		 * code's hack!!!
		 *
		 * There is a complex interaction between setup_cookie and the
		 * copybuf window boundary. The complexity had to be in either
		 * the maxxfer window, or the copybuf window, and I chose the
		 * copybuf code.
		 *
		 * So in this code path, we have taken the last cookie,
		 * virtually broken it in half due to the trim, and it happens
		 * to use the copybuf which further complicates life. At the
		 * same time, we have already setup the current cookie, which
		 * is now wrong. More background info: the current cookie uses
		 * the copybuf, so it is only a page long max. So we need to
		 * fix the current cookie's copy buffer address, physical
		 * address, and kva for the 32-bit kernel. We do this by
		 * bumping them by page size (of course, we can't do this on
		 * the physical address since the copy buffer may not be
		 * physically contiguous).
		 */
		cookie++;
		dma->dp_pgmap[pidx + 1].pm_cbaddr += MMU_PAGESIZE;
		poff = cookie->dmac_laddress & MMU_PAGEOFFSET;

		paddr = pfn_to_pa(hat_getpfnum(kas.a_hat,
		    dma->dp_pgmap[pidx + 1].pm_cbaddr)) + poff;
		cookie->dmac_laddress = ROOTNEX_PADDR_TO_RBASE(paddr);

#if !defined(__amd64)
		ASSERT(dma->dp_pgmap[pidx + 1].pm_mapped == B_FALSE);
		dma->dp_pgmap[pidx + 1].pm_kaddr += MMU_PAGESIZE;
#endif
	} else {
		/* go back to the current cookie */
		cookie++;
	}
	/*
	 * add the current cookie to the new window. set the new window size
	 * to what was left over from the previous cookie and what's in the
	 * current cookie.
	 */
	(*windowp)->wd_cookie_cnt++;
	(*windowp)->wd_size = trim_sz + cookie->dmac_size;
	ASSERT((*windowp)->wd_size < dma->dp_maxxfer);

	/*
	 * we know that the cookie passed in always uses the copy buffer. We
	 * wouldn't be here if it didn't.
	 */
	*copybuf_used += MMU_PAGESIZE;

	return (DDI_SUCCESS);
}
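
/*
 * Illustration of the copybuf bookkeeping above (hypothetical page index):
 * if the trimmed last cookie lives on buffer page pidx and uses the copybuf,
 * that one page ends up split across two windows: its head uses the tail of
 * the copybuf in the current window, and its tail is redirected to the start
 * of the copybuf (dp_cbaddr) in the next window. The already-setup current
 * cookie on page pidx + 1 is then fixed up by bumping pm_cbaddr/pm_kaddr by
 * MMU_PAGESIZE and recomputing dmac_laddress with hat_getpfnum(), since the
 * copybuf is only virtually contiguous.
 */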
/*
 * rootnex_maxxfer_window_boundary()
 *    Called in bind slowpath when we get to a window boundary because we will
 *    go over maxxfer.
 */
static int
rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
    rootnex_window_t **windowp, ddi_dma_cookie_t *cookie)
{
	size_t dmac_size;
	off_t new_offset;
	size_t trim_sz;
	off_t coffset;


	/*
	 * calculate how much we have to trim off of the current cookie to
	 * equal maxxfer. We don't have to account for granularity here since
	 * our maxxfer already takes that into account.
	 */
	trim_sz = ((*windowp)->wd_size + cookie->dmac_size) - dma->dp_maxxfer;
	ASSERT(trim_sz <= cookie->dmac_size);
	ASSERT(trim_sz <= dma->dp_maxxfer);

	/* save cookie size since we need it later and we might change it */
	dmac_size = cookie->dmac_size;

	/*
	 * if we're not trimming the entire cookie, setup the current window
	 * to account for the trim.
	 */
	if (trim_sz < cookie->dmac_size) {
		(*windowp)->wd_cookie_cnt++;
		(*windowp)->wd_trim.tr_trim_last = B_TRUE;
		(*windowp)->wd_trim.tr_last_cookie = cookie;
		(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
		(*windowp)->wd_trim.tr_last_size = cookie->dmac_size -
		    trim_sz;
		(*windowp)->wd_size = dma->dp_maxxfer;

		/*
		 * set the adjusted cookie size now in case this is the first
		 * window. All other windows are taken care of in get win
		 */
		cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
	}

	/*
	 * coffset is the current offset within the cookie, new_offset is the
	 * current offset within the entire buffer.
	 */
	coffset = dmac_size - trim_sz;
	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;

	/* initialize the next window */
	(*windowp)++;
	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
	(*windowp)->wd_cookie_cnt++;
	(*windowp)->wd_size = trim_sz;
	if (trim_sz < dmac_size) {
		(*windowp)->wd_trim.tr_trim_first = B_TRUE;
		(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress +
		    coffset;
		(*windowp)->wd_trim.tr_first_size = trim_sz;
	}

	return (DDI_SUCCESS);
}
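
/*
 * Worked example (illustrative numbers only): with dp_maxxfer = 256K, a
 * window already holding wd_size = 252K, and an incoming 8K cookie:
 * trim_sz = (252K + 8K) - 256K = 4K. The current window keeps the first
 * 4K of the cookie (tr_last_size = 8K - 4K = 4K) and ends at exactly
 * maxxfer; the new window starts with the remaining 4K as a trimmed first
 * cookie at dmac_laddress + coffset, where coffset = 8K - 4K = 4K.
 */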
static int
rootnex_coredma_sync(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags)
{
	rootnex_sglinfo_t *sinfo;
	rootnex_pgmap_t *cbpage;
	rootnex_window_t *win;
	ddi_dma_impl_t *hp;
	rootnex_dma_t *dma;
	caddr_t fromaddr;
	caddr_t toaddr;
	uint_t psize;
	off_t offset;
	uint_t pidx;
	size_t size;
	off_t poff;
	int e;


	hp = (ddi_dma_impl_t *)handle;
	dma = (rootnex_dma_t *)hp->dmai_private;
	sinfo = &dma->dp_sglinfo;

	/*
	 * if we don't have any windows, we don't need to sync. A copybuf
	 * will cause us to have at least one window.
	 */
	if (dma->dp_window == NULL) {
		return (DDI_SUCCESS);
	}

	/* This window may not need to be sync'd */
	win = &dma->dp_window[dma->dp_current_win];
	if (!win->wd_dosync) {
		return (DDI_SUCCESS);
	}

	/* handle off and len special cases */
	if ((off == 0) || (rootnex_sync_ignore_params)) {
		offset = win->wd_offset;
	} else {
		offset = off;
	}
	if ((len == 0) || (rootnex_sync_ignore_params)) {
		size = win->wd_size;
	} else {
		size = len;
	}

	/* check the sync args to make sure they make a little sense */
	if (rootnex_sync_check_parms) {
		e = rootnex_valid_sync_parms(hp, win, offset, size,
		    cache_flags);
		if (e != DDI_SUCCESS) {
			ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_SYNC_FAIL]);
			return (DDI_FAILURE);
		}
	}

	/*
	 * special case the first page to handle the offset into the page. The
	 * offset to the current page for our buffer is the offset into the
	 * first page of the buffer plus our current offset into the buffer
	 * itself, masked of course.
	 */
	poff = (sinfo->si_buf_offset + offset) & MMU_PAGEOFFSET;
	psize = MIN((MMU_PAGESIZE - poff), size);
	/* go through all the pages that we want to sync */
	while (size > 0) {
		/*
		 * Calculate the page index relative to the start of the
		 * buffer. The index to the current page for our buffer is the
		 * offset into the first page of the buffer plus our current
		 * offset into the buffer itself, shifted of course...
		 */
		pidx = (sinfo->si_buf_offset + offset) >> MMU_PAGESHIFT;
		ASSERT(pidx < sinfo->si_max_pages);

		/*
		 * if this page uses the copy buffer, we need to sync it,
		 * otherwise, go on to the next page.
		 */
		cbpage = &dma->dp_pgmap[pidx];
		ASSERT((cbpage->pm_uses_copybuf == B_TRUE) ||
		    (cbpage->pm_uses_copybuf == B_FALSE));
		if (cbpage->pm_uses_copybuf) {
			/* cbaddr and kaddr should be page aligned */
			ASSERT(((uintptr_t)cbpage->pm_cbaddr &
			    MMU_PAGEOFFSET) == 0);
			ASSERT(((uintptr_t)cbpage->pm_kaddr &
			    MMU_PAGEOFFSET) == 0);

			/*
			 * if we're copying for the device, we are going to
			 * copy from the driver's buffer and to the rootnex
			 * allocated copy buffer.
			 */
			if (cache_flags == DDI_DMA_SYNC_FORDEV) {
				fromaddr = cbpage->pm_kaddr + poff;
				toaddr = cbpage->pm_cbaddr + poff;
				ROOTNEX_DPROBE2(rootnex__sync__dev,
				    dev_info_t *, dma->dp_dip, size_t, psize);

			/*
			 * if we're copying for the cpu/kernel, we are going
			 * to copy from the rootnex allocated copy buffer to
			 * the driver's buffer.
			 */
			} else {
				fromaddr = cbpage->pm_cbaddr + poff;
				toaddr = cbpage->pm_kaddr + poff;
				ROOTNEX_DPROBE2(rootnex__sync__cpu,
				    dev_info_t *, dma->dp_dip, size_t, psize);
			}

			bcopy(fromaddr, toaddr, psize);
		}

		/*
		 * decrement size until we're done, update our offset into the
		 * buffer, and get the next page size.
		 */
		size -= psize;
		offset += psize;
		psize = MIN(MMU_PAGESIZE, size);

		/* page offset is zero for the rest of this loop */
		poff = 0;
	}

	return (DDI_SUCCESS);
}
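
/*
 * Driver-side usage sketch (not part of this driver): a leaf driver brackets
 * its accesses with ddi_dma_sync(9F), which funnels into the code above when
 * a copybuf is in use. Offset/length of 0/0 sync the whole current window:
 *
 *	(void) ddi_dma_sync(handle, 0, 0, DDI_DMA_SYNC_FORKERNEL);
 *	// ... CPU reads the received data ...
 *	(void) ddi_dma_sync(handle, 0, 0, DDI_DMA_SYNC_FORDEV);
 *	// ... start the next device transfer ...
 *
 * When no copybuf is needed we set DMP_NOSYNC at bind time and
 * ddi_dma_sync() returns without calling down here at all.
 */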
/*
 * rootnex_dma_sync()
 *    called from ddi_dma_sync() if DMP_NOSYNC is not set in hp->dmai_rflags.
 *    We set DMP_NOSYNC if we're not using the copy buffer. If DMP_NOSYNC
 *    is set, ddi_dma_sync() returns immediately passing back success.
 */
static int
rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
    off_t off, size_t len, uint_t cache_flags)
{
#if defined(__amd64) && !defined(__xpv)
	if (IOMMU_USED(rdip)) {
		return (iommulib_nexdma_sync(dip, rdip, handle, off, len,
		    cache_flags));
	}
#endif
	return (rootnex_coredma_sync(dip, rdip, handle, off, len,
	    cache_flags));
}
/*
 * rootnex_valid_sync_parms()
 *    checks the parameters passed to sync to verify they are correct.
 */
static int
rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win,
    off_t offset, size_t size, uint_t cache_flags)
{
	off_t woffset;


	/*
	 * the first part of the test to make sure the offset passed in is
	 * within the window.
	 */
	if (offset < win->wd_offset) {
		return (DDI_FAILURE);
	}

	/*
	 * second and last part of the test to make sure the offset and length
	 * passed in are within the window.
	 */
	woffset = offset - win->wd_offset;
	if ((woffset + size) > win->wd_size) {
		return (DDI_FAILURE);
	}

	/*
	 * if we are sync'ing for the device, the DDI_DMA_WRITE flag should
	 * be set too.
	 */
	if ((cache_flags == DDI_DMA_SYNC_FORDEV) &&
	    (hp->dmai_rflags & DDI_DMA_WRITE)) {
		return (DDI_SUCCESS);
	}

	/*
	 * at this point, either DDI_DMA_SYNC_FORCPU or DDI_DMA_SYNC_FORKERNEL
	 * should be set. Also DDI_DMA_READ should be set in the flags.
	 */
	if (((cache_flags == DDI_DMA_SYNC_FORCPU) ||
	    (cache_flags == DDI_DMA_SYNC_FORKERNEL)) &&
	    (hp->dmai_rflags & DDI_DMA_READ)) {
		return (DDI_SUCCESS);
	}

	return (DDI_FAILURE);
}
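
/*
 * In table form, the combinations accepted above (anything else fails):
 *
 *	cache_flags			required hp->dmai_rflags
 *	-----------			------------------------
 *	DDI_DMA_SYNC_FORDEV		DDI_DMA_WRITE
 *	DDI_DMA_SYNC_FORCPU		DDI_DMA_READ
 *	DDI_DMA_SYNC_FORKERNEL		DDI_DMA_READ
 */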
static int
rootnex_coredma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	rootnex_window_t *window;
	rootnex_trim_t *trim;
	ddi_dma_impl_t *hp;
	rootnex_dma_t *dma;
	ddi_dma_obj_t *dmao;
#if !defined(__amd64)
	rootnex_sglinfo_t *sinfo;
	rootnex_pgmap_t *pmap;
	uint_t pidx;
	uint_t pcnt;
	off_t poff;
	int i;
#endif


	hp = (ddi_dma_impl_t *)handle;
	dma = (rootnex_dma_t *)hp->dmai_private;
#if !defined(__amd64)
	sinfo = &dma->dp_sglinfo;
#endif

	/* If we try and get a window which doesn't exist, return failure */
	if (win >= hp->dmai_nwin) {
		ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]);
		return (DDI_FAILURE);
	}

	dmao = dma->dp_dvma_used ? &dma->dp_dvma : &dma->dp_dma;

	/*
	 * if we don't have any windows, and they're asking for the first
	 * window, setup the cookie pointer to the first cookie in the bind.
	 * setup our return values, then increment the cookie since we return
	 * the first cookie on the stack.
	 */
	if (dma->dp_window == NULL) {
		if (win != 0) {
			ROOTNEX_DPROF_INC(
			    &rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]);
			return (DDI_FAILURE);
		}
		hp->dmai_cookie = dma->dp_cookies;
		*offp = 0;
		*lenp = dmao->dmao_size;
		*ccountp = dma->dp_sglinfo.si_sgl_size;
		*cookiep = hp->dmai_cookie[0];
		hp->dmai_cookie++;
		return (DDI_SUCCESS);
	}
	/* sync the old window before moving on to the new one */
	window = &dma->dp_window[dma->dp_current_win];
	if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_READ)) {
		(void) rootnex_coredma_sync(dip, rdip, handle, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
	}

#if !defined(__amd64)
	/*
	 * before we move to the next window, if we need to re-map, unmap all
	 * the pages in this window.
	 */
	if (dma->dp_cb_remaping) {
		/*
		 * If we switch to this window again, we'll need to map in
		 * on the fly next time.
		 */
		window->wd_remap_copybuf = B_TRUE;

		/*
		 * calculate the page index into the buffer where this window
		 * starts, and the number of pages this window takes up.
		 */
		pidx = (sinfo->si_buf_offset + window->wd_offset) >>
		    MMU_PAGESHIFT;
		poff = (sinfo->si_buf_offset + window->wd_offset) &
		    MMU_PAGEOFFSET;
		pcnt = mmu_btopr(window->wd_size + poff);
		ASSERT((pidx + pcnt) <= sinfo->si_max_pages);

		/* unmap pages which are currently mapped in this window */
		for (i = 0; i < pcnt; i++) {
			if (dma->dp_pgmap[pidx].pm_mapped) {
				hat_unload(kas.a_hat,
				    dma->dp_pgmap[pidx].pm_kaddr, MMU_PAGESIZE,
				    HAT_UNLOAD);
				dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
			}
			pidx++;
		}
	}
#endif
	/*
	 * Move to the new window.
	 * NOTE: current_win must be set for sync to work right
	 */
	dma->dp_current_win = win;
	window = &dma->dp_window[win];

	/* if needed, adjust the first and/or last cookies for trim */
	trim = &window->wd_trim;
	if (trim->tr_trim_first) {
		window->wd_first_cookie->dmac_laddress = trim->tr_first_paddr;
		window->wd_first_cookie->dmac_size = trim->tr_first_size;
#if !defined(__amd64)
		window->wd_first_cookie->dmac_type =
		    (window->wd_first_cookie->dmac_type &
		    ROOTNEX_USES_COPYBUF) + window->wd_offset;
#endif
		if (trim->tr_first_copybuf_win) {
			dma->dp_pgmap[trim->tr_first_pidx].pm_cbaddr =
			    trim->tr_first_cbaddr;
#if !defined(__amd64)
			dma->dp_pgmap[trim->tr_first_pidx].pm_kaddr =
			    trim->tr_first_kaddr;
#endif
		}
	}
	if (trim->tr_trim_last) {
		trim->tr_last_cookie->dmac_laddress = trim->tr_last_paddr;
		trim->tr_last_cookie->dmac_size = trim->tr_last_size;
		if (trim->tr_last_copybuf_win) {
			dma->dp_pgmap[trim->tr_last_pidx].pm_cbaddr =
			    trim->tr_last_cbaddr;
#if !defined(__amd64)
			dma->dp_pgmap[trim->tr_last_pidx].pm_kaddr =
			    trim->tr_last_kaddr;
#endif
		}
	}

	/*
	 * setup the cookie pointer to the first cookie in the window. setup
	 * our return values, then increment the cookie since we return the
	 * first cookie on the stack.
	 */
	hp->dmai_cookie = window->wd_first_cookie;
	*offp = window->wd_offset;
	*lenp = window->wd_size;
	*ccountp = window->wd_cookie_cnt;
	*cookiep = hp->dmai_cookie[0];
	hp->dmai_cookie++;
#if !defined(__amd64)
	/* re-map copybuf if required for this window */
	if (dma->dp_cb_remaping) {
		/*
		 * calculate the page index into the buffer where this
		 * window starts.
		 */
		pidx = (sinfo->si_buf_offset + window->wd_offset) >>
		    MMU_PAGESHIFT;
		ASSERT(pidx < sinfo->si_max_pages);

		/*
		 * the first page can get unmapped if it's shared with the
		 * previous window. Even if the rest of this window is already
		 * mapped in, we need to still check this one.
		 */
		pmap = &dma->dp_pgmap[pidx];
		if ((pmap->pm_uses_copybuf) && (pmap->pm_mapped == B_FALSE)) {
			if (pmap->pm_pp != NULL) {
				pmap->pm_mapped = B_TRUE;
				i86_pp_map(pmap->pm_pp, pmap->pm_kaddr);
			} else if (pmap->pm_vaddr != NULL) {
				pmap->pm_mapped = B_TRUE;
				i86_va_map(pmap->pm_vaddr, sinfo->si_asp,
				    pmap->pm_kaddr);
			}
		}
		pidx++;

		/* map in the rest of the pages if required */
		if (window->wd_remap_copybuf) {
			window->wd_remap_copybuf = B_FALSE;

			/* figure out how many pages this window takes up */
			poff = (sinfo->si_buf_offset + window->wd_offset) &
			    MMU_PAGEOFFSET;
			pcnt = mmu_btopr(window->wd_size + poff);
			ASSERT(((pidx - 1) + pcnt) <= sinfo->si_max_pages);

			/* map pages which require it */
			for (i = 1; i < pcnt; i++) {
				pmap = &dma->dp_pgmap[pidx];
				if (pmap->pm_uses_copybuf) {
					ASSERT(pmap->pm_mapped == B_FALSE);
					if (pmap->pm_pp != NULL) {
						pmap->pm_mapped = B_TRUE;
						i86_pp_map(pmap->pm_pp,
						    pmap->pm_kaddr);
					} else if (pmap->pm_vaddr != NULL) {
						pmap->pm_mapped = B_TRUE;
						i86_va_map(pmap->pm_vaddr,
						    sinfo->si_asp,
						    pmap->pm_kaddr);
					}
				}
				pidx++;
			}
		}
	}
#endif

	/* if the new window uses the copy buffer, sync it for the device */
	if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_WRITE)) {
		(void) rootnex_coredma_sync(dip, rdip, handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
	}

	return (DDI_SUCCESS);
}
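
/*
 * Driver-side usage sketch (not part of this driver): a driver that bound
 * with DDI_DMA_PARTIAL walks its windows roughly like this; ddi_dma_getwin(9F)
 * lands in rootnex_dma_win() below (or the IOMMU variant):
 *
 *	uint_t nwin, win, ccount;
 *	off_t off;
 *	size_t len;
 *	ddi_dma_cookie_t cookie;
 *
 *	(void) ddi_dma_numwin(handle, &nwin);
 *	for (win = 0; win < nwin; win++) {
 *		if (ddi_dma_getwin(handle, win, &off, &len, &cookie,
 *		    &ccount) != DDI_SUCCESS)
 *			break;
 *		// program the device with ccount cookies ...
 *	}
 *
 * Each call syncs the old window if needed, re-applies any trim, and
 * (32-bit kernel) re-maps copybuf pages for the new window.
 */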
/*
 * rootnex_dma_win()
 *    called from ddi_dma_getwin()
 */
static int
rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
    uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
    uint_t *ccountp)
{
#if defined(__amd64) && !defined(__xpv)
	if (IOMMU_USED(rdip)) {
		return (iommulib_nexdma_win(dip, rdip, handle, win, offp,
		    lenp, cookiep, ccountp));
	}
#endif
	return (rootnex_coredma_win(dip, rdip, handle, win, offp, lenp,
	    cookiep, ccountp));
}
#if defined(__amd64) && !defined(__xpv)
static int
rootnex_coredma_hdl_setprivate(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, void *v)
{
	ddi_dma_impl_t *hp;
	rootnex_dma_t *dma;

	hp = (ddi_dma_impl_t *)handle;
	dma = (rootnex_dma_t *)hp->dmai_private;
	dma->dp_iommu_private = v;

	return (DDI_SUCCESS);
}

static void *
rootnex_coredma_hdl_getprivate(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *hp;
	rootnex_dma_t *dma;

	hp = (ddi_dma_impl_t *)handle;
	dma = (rootnex_dma_t *)hp->dmai_private;

	return (dma->dp_iommu_private);
}
#endif /* defined(__amd64) && !defined(__xpv) */
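
/*
 * These two busops give an IOMMU implementation (via iommulib) one opaque
 * per-handle slot, dp_iommu_private. A hypothetical consumer might stash
 * per-bind state there:
 *
 *	my_iommu_state_t *sp = kmem_zalloc(sizeof (*sp), KM_SLEEP);
 *	(void) rootnex_coredma_hdl_setprivate(dip, rdip, handle, sp);
 *	...
 *	sp = rootnex_coredma_hdl_getprivate(dip, rdip, handle);
 *
 * my_iommu_state_t is illustrative only; the rootnex never interprets the
 * pointer.
 */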
/*
 * ************************
 *  obsoleted dma routines
 * ************************
 */

/*
 * rootnex_dma_mctl()
 *
 *    We don't support this legacy interface any more on x86.
 */
static int
rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
    enum ddi_dma_ctlops request, off_t *offp, size_t *lenp, caddr_t *objpp,
    uint_t cache_flags)
{
	/*
	 * The only thing dma_mctl is used for anymore is legacy SPARC
	 * dvma and sbus-specific routines.
	 */
	return (DDI_FAILURE);
}
/*
 * rootnex_fm_init()
 *    FMA init busop
 */
static int
rootnex_fm_init(dev_info_t *dip, dev_info_t *tdip, int tcap,
    ddi_iblock_cookie_t *ibc)
{
	*ibc = rootnex_state->r_err_ibc;

	return (ddi_system_fmcap);
}
/*
 * rootnex_dma_check()
 *    Function called after a dma fault occurred to find out whether the
 *    fault address is associated with a driver that is able to handle faults
 *    and recover from faults.
 */
static int
rootnex_dma_check(dev_info_t *dip, const void *handle, const void *addr,
    const void *not_used)
{
	rootnex_window_t *window;
	uint64_t start_addr;
	uint64_t fault_addr;
	ddi_dma_impl_t *hp;
	rootnex_dma_t *dma;
	uint64_t end_addr;
	size_t csize;
	int i;
	int j;


	/* The driver has to set DDI_DMA_FLAGERR to recover from dma faults */
	hp = (ddi_dma_impl_t *)handle;

	dma = (rootnex_dma_t *)hp->dmai_private;

	/* Get the address that we need to search for */
	fault_addr = *(uint64_t *)addr;

	/*
	 * if we don't have any windows, we can just walk through all the
	 * cookies.
	 */
	if (dma->dp_window == NULL) {
		/* for each cookie */
		for (i = 0; i < dma->dp_sglinfo.si_sgl_size; i++) {
			/*
			 * if the faulted address is within the physical
			 * address range of the cookie, return
			 * DDI_FM_NONFATAL.
			 */
			if ((fault_addr >= dma->dp_cookies[i].dmac_laddress) &&
			    (fault_addr <= (dma->dp_cookies[i].dmac_laddress +
			    dma->dp_cookies[i].dmac_size))) {
				return (DDI_FM_NONFATAL);
			}
		}

		/* fault_addr not within this DMA handle */
		return (DDI_FM_UNKNOWN);
	}
	/* we have multiple windows, walk through each window */
	for (i = 0; i < hp->dmai_nwin; i++) {
		window = &dma->dp_window[i];

		/* Go through all the cookies in the window */
		for (j = 0; j < window->wd_cookie_cnt; j++) {

			start_addr = window->wd_first_cookie[j].dmac_laddress;
			csize = window->wd_first_cookie[j].dmac_size;

			/*
			 * if we are trimming the first cookie in the window,
			 * and this is the first cookie, adjust the start
			 * address and size of the cookie to account for the
			 * trim.
			 */
			if (window->wd_trim.tr_trim_first && (j == 0)) {
				start_addr = window->wd_trim.tr_first_paddr;
				csize = window->wd_trim.tr_first_size;
			}

			/*
			 * if we are trimming the last cookie in the window,
			 * and this is the last cookie, adjust the start
			 * address and size of the cookie to account for the
			 * trim.
			 */
			if (window->wd_trim.tr_trim_last &&
			    (j == (window->wd_cookie_cnt - 1))) {
				start_addr = window->wd_trim.tr_last_paddr;
				csize = window->wd_trim.tr_last_size;
			}

			end_addr = start_addr + csize;

			/*
			 * if the faulted address is within the physical
			 * address of the cookie, return DDI_FM_NONFATAL.
			 */
			if ((fault_addr >= start_addr) &&
			    (fault_addr <= end_addr)) {
				return (DDI_FM_NONFATAL);
			}
		}
	}

	/* fault_addr not within this DMA handle */
	return (DDI_FM_UNKNOWN);
}
/*
 * rootnex_quiesce()
 *    quiesce(9E) entry point.
 */
static int
rootnex_quiesce(dev_info_t *dip)
{
#if defined(__amd64) && !defined(__xpv)
	return (immu_quiesce());
#else
	return (DDI_SUCCESS);
#endif
}

#if defined(__xpv)
void
immu_physmem_update(uint64_t addr, uint64_t size)
{
}
#endif