4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
25 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
26 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
27 * Copyright 2012 Garrett D'Amore <garrett@damore.org>. All rights reserved.
31 * x86 root nexus driver
34 #include <sys/sysmacros.h>
36 #include <sys/autoconf.h>
37 #include <sys/sysmacros.h>
38 #include <sys/debug.h>
40 #include <sys/ddidmareq.h>
41 #include <sys/promif.h>
42 #include <sys/devops.h>
44 #include <sys/cmn_err.h>
46 #include <vm/seg_kmem.h>
47 #include <vm/seg_dev.h>
53 #include <sys/avintr.h>
54 #include <sys/errno.h>
55 #include <sys/modctl.h>
56 #include <sys/ddi_impldefs.h>
57 #include <sys/sunddi.h>
58 #include <sys/sunndi.h>
59 #include <sys/mach_intr.h>
61 #include <sys/ontrap.h>
62 #include <sys/atomic.h>
64 #include <sys/rootnex.h>
65 #include <vm/hat_i86.h>
66 #include <sys/ddifm.h>
67 #include <sys/ddi_isa.h>
71 #if defined(__amd64) && !defined(__xpv)
77 * enable/disable extra checking of function parameters. Useful for debugging
81 int rootnex_alloc_check_parms
= 1;
82 int rootnex_bind_check_parms
= 1;
83 int rootnex_bind_check_inuse
= 1;
84 int rootnex_unbind_verify_buffer
= 0;
85 int rootnex_sync_check_parms
= 1;
87 int rootnex_alloc_check_parms
= 0;
88 int rootnex_bind_check_parms
= 0;
89 int rootnex_bind_check_inuse
= 0;
90 int rootnex_unbind_verify_buffer
= 0;
91 int rootnex_sync_check_parms
= 0;
94 boolean_t rootnex_dmar_not_setup
;
96 /* Master Abort and Target Abort panic flag */
97 int rootnex_fm_ma_ta_panic_flag
= 0;
99 /* Semi-temporary patchables to phase in bug fixes, test drivers, etc. */
100 int rootnex_bind_fail
= 1;
101 int rootnex_bind_warn
= 1;
102 uint8_t *rootnex_warn_list
;
103 /* bitmasks for rootnex_warn_list. Up to 8 different warnings with uint8_t */
104 #define ROOTNEX_BIND_WARNING (0x1 << 0)
107 * revert back to old broken behavior of always sync'ing entire copy buffer.
108 * This is useful if be have a buggy driver which doesn't correctly pass in
109 * the offset and size into ddi_dma_sync().
111 int rootnex_sync_ignore_params
= 0;
114 * For the 64-bit kernel, pre-alloc enough cookies for a 256K buffer plus 1
115 * page for alignment. For the 32-bit kernel, pre-alloc enough cookies for a
116 * 64K buffer plus 1 page for alignment (we have less kernel space in a 32-bit
117 * kernel). Allocate enough windows to handle a 256K buffer w/ at least 65
118 * sgllen DMA engine, and enough copybuf buffer state pages to handle 2 pages
119 * (< 8K). We will still need to allocate the copy buffer during bind though
120 * (if we need one). These can only be modified in /etc/system before rootnex
124 int rootnex_prealloc_cookies
= 65;
125 int rootnex_prealloc_windows
= 4;
126 int rootnex_prealloc_copybuf
= 2;
128 int rootnex_prealloc_cookies
= 33;
129 int rootnex_prealloc_windows
= 4;
130 int rootnex_prealloc_copybuf
= 2;
133 /* driver global state */
134 static rootnex_state_t
*rootnex_state
;
137 /* shortcut to rootnex counters */
138 static uint64_t *rootnex_cnt
;
142 * XXX - does x86 even need these or are they left over from the SPARC days?
144 /* statically defined integer/boolean properties for the root node */
145 static rootnex_intprop_t rootnex_intprp
[] = {
146 { "PAGESIZE", PAGESIZE
},
147 { "MMU_PAGESIZE", MMU_PAGESIZE
},
148 { "MMU_PAGEOFFSET", MMU_PAGEOFFSET
},
149 { DDI_RELATIVE_ADDRESSING
, 1 },
151 #define NROOT_INTPROPS (sizeof (rootnex_intprp) / sizeof (rootnex_intprop_t))
154 * If we're dom0, we're using a real device so we need to load
155 * the cookies with MFNs instead of PFNs.
157 typedef paddr_t rootnex_addr_t
;
158 #define ROOTNEX_PADDR_TO_RBASE(pa) (pa)
160 static struct cb_ops rootnex_cb_ops
= {
163 nodev
, /* strategy */
172 nochpoll
, /* chpoll */
173 ddi_prop_op
, /* cb_prop_op */
174 NULL
, /* struct streamtab */
175 D_NEW
| D_MP
| D_HOTPLUG
, /* compatibility flags */
177 nodev
, /* cb_aread */
178 nodev
/* cb_awrite */
181 static int rootnex_map(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_map_req_t
*mp
,
182 off_t offset
, off_t len
, caddr_t
*vaddrp
);
183 static int rootnex_map_fault(dev_info_t
*dip
, dev_info_t
*rdip
,
184 struct hat
*hat
, struct seg
*seg
, caddr_t addr
,
185 struct devpage
*dp
, pfn_t pfn
, uint_t prot
, uint_t lock
);
186 static int rootnex_dma_allochdl(dev_info_t
*dip
, dev_info_t
*rdip
,
187 ddi_dma_attr_t
*attr
, int (*waitfp
)(caddr_t
), caddr_t arg
,
188 ddi_dma_handle_t
*handlep
);
189 static int rootnex_dma_freehdl(dev_info_t
*dip
, dev_info_t
*rdip
,
190 ddi_dma_handle_t handle
);
191 static int rootnex_dma_bindhdl(dev_info_t
*dip
, dev_info_t
*rdip
,
192 ddi_dma_handle_t handle
, struct ddi_dma_req
*dmareq
,
193 ddi_dma_cookie_t
*cookiep
, uint_t
*ccountp
);
194 static int rootnex_dma_unbindhdl(dev_info_t
*dip
, dev_info_t
*rdip
,
195 ddi_dma_handle_t handle
);
196 static int rootnex_dma_sync(dev_info_t
*dip
, dev_info_t
*rdip
,
197 ddi_dma_handle_t handle
, off_t off
, size_t len
, uint_t cache_flags
);
198 static int rootnex_dma_win(dev_info_t
*dip
, dev_info_t
*rdip
,
199 ddi_dma_handle_t handle
, uint_t win
, off_t
*offp
, size_t *lenp
,
200 ddi_dma_cookie_t
*cookiep
, uint_t
*ccountp
);
201 static int rootnex_dma_mctl(dev_info_t
*dip
, dev_info_t
*rdip
,
202 ddi_dma_handle_t handle
, enum ddi_dma_ctlops request
,
203 off_t
*offp
, size_t *lenp
, caddr_t
*objp
, uint_t cache_flags
);
204 static int rootnex_ctlops(dev_info_t
*dip
, dev_info_t
*rdip
,
205 ddi_ctl_enum_t ctlop
, void *arg
, void *result
);
206 static int rootnex_fm_init(dev_info_t
*dip
, dev_info_t
*tdip
, int tcap
,
207 ddi_iblock_cookie_t
*ibc
);
208 static int rootnex_intr_ops(dev_info_t
*pdip
, dev_info_t
*rdip
,
209 ddi_intr_op_t intr_op
, ddi_intr_handle_impl_t
*hdlp
, void *result
);
210 static int rootnex_alloc_intr_fixed(dev_info_t
*, ddi_intr_handle_impl_t
*,
212 static int rootnex_free_intr_fixed(dev_info_t
*, ddi_intr_handle_impl_t
*);
214 static int rootnex_coredma_allochdl(dev_info_t
*dip
, dev_info_t
*rdip
,
215 ddi_dma_attr_t
*attr
, int (*waitfp
)(caddr_t
), caddr_t arg
,
216 ddi_dma_handle_t
*handlep
);
217 static int rootnex_coredma_freehdl(dev_info_t
*dip
, dev_info_t
*rdip
,
218 ddi_dma_handle_t handle
);
219 static int rootnex_coredma_bindhdl(dev_info_t
*dip
, dev_info_t
*rdip
,
220 ddi_dma_handle_t handle
, struct ddi_dma_req
*dmareq
,
221 ddi_dma_cookie_t
*cookiep
, uint_t
*ccountp
);
222 static int rootnex_coredma_unbindhdl(dev_info_t
*dip
, dev_info_t
*rdip
,
223 ddi_dma_handle_t handle
);
224 #if defined(__amd64) && !defined(__xpv)
225 static void rootnex_coredma_reset_cookies(dev_info_t
*dip
,
226 ddi_dma_handle_t handle
);
227 static int rootnex_coredma_get_cookies(dev_info_t
*dip
, ddi_dma_handle_t handle
,
228 ddi_dma_cookie_t
**cookiepp
, uint_t
*ccountp
);
229 static int rootnex_coredma_set_cookies(dev_info_t
*dip
, ddi_dma_handle_t handle
,
230 ddi_dma_cookie_t
*cookiep
, uint_t ccount
);
231 static int rootnex_coredma_clear_cookies(dev_info_t
*dip
,
232 ddi_dma_handle_t handle
);
233 static int rootnex_coredma_get_sleep_flags(ddi_dma_handle_t handle
);
235 static int rootnex_coredma_sync(dev_info_t
*dip
, dev_info_t
*rdip
,
236 ddi_dma_handle_t handle
, off_t off
, size_t len
, uint_t cache_flags
);
237 static int rootnex_coredma_win(dev_info_t
*dip
, dev_info_t
*rdip
,
238 ddi_dma_handle_t handle
, uint_t win
, off_t
*offp
, size_t *lenp
,
239 ddi_dma_cookie_t
*cookiep
, uint_t
*ccountp
);
241 #if defined(__amd64) && !defined(__xpv)
242 static int rootnex_coredma_hdl_setprivate(dev_info_t
*dip
, dev_info_t
*rdip
,
243 ddi_dma_handle_t handle
, void *v
);
244 static void *rootnex_coredma_hdl_getprivate(dev_info_t
*dip
, dev_info_t
*rdip
,
245 ddi_dma_handle_t handle
);
249 static struct bus_ops rootnex_bus_ops
= {
257 rootnex_dma_allochdl
,
260 rootnex_dma_unbindhdl
,
266 i_ddi_rootnex_get_eventcookie
,
267 i_ddi_rootnex_add_eventcall
,
268 i_ddi_rootnex_remove_eventcall
,
269 i_ddi_rootnex_post_event
,
270 0, /* bus_intr_ctl */
272 0, /* bus_unconfig */
273 rootnex_fm_init
, /* bus_fm_init */
274 NULL
, /* bus_fm_fini */
275 NULL
, /* bus_fm_access_enter */
276 NULL
, /* bus_fm_access_exit */
278 rootnex_intr_ops
/* bus_intr_op */
281 static int rootnex_attach(dev_info_t
*dip
, ddi_attach_cmd_t cmd
);
282 static int rootnex_detach(dev_info_t
*dip
, ddi_detach_cmd_t cmd
);
283 static int rootnex_quiesce(dev_info_t
*dip
);
285 static struct dev_ops rootnex_ops
= {
297 rootnex_quiesce
, /* quiesce */
300 static struct modldrv rootnex_modldrv
= {
306 static struct modlinkage rootnex_modlinkage
= {
308 (void *)&rootnex_modldrv
,
312 #if defined(__amd64) && !defined(__xpv)
313 static iommulib_nexops_t iommulib_nexops
= {
314 IOMMU_NEXOPS_VERSION
,
315 "Rootnex IOMMU ops Vers 1.1",
317 rootnex_coredma_allochdl
,
318 rootnex_coredma_freehdl
,
319 rootnex_coredma_bindhdl
,
320 rootnex_coredma_unbindhdl
,
321 rootnex_coredma_reset_cookies
,
322 rootnex_coredma_get_cookies
,
323 rootnex_coredma_set_cookies
,
324 rootnex_coredma_clear_cookies
,
325 rootnex_coredma_get_sleep_flags
,
326 rootnex_coredma_sync
,
328 rootnex_coredma_hdl_setprivate
,
329 rootnex_coredma_hdl_getprivate
336 extern const struct seg_ops segdev_ops
;
337 extern int ignore_hardware_nodes
; /* force flag from ddi_impl.c */
339 extern int ddi_map_debug_flag
;
340 #define ddi_map_debug if (ddi_map_debug_flag) prom_printf
342 extern void i86_pp_map(page_t
*pp
, caddr_t kaddr
);
343 extern void i86_va_map(caddr_t vaddr
, struct as
*asp
, caddr_t kaddr
);
344 extern int (*psm_intr_ops
)(dev_info_t
*, ddi_intr_handle_impl_t
*,
345 psm_intr_op_t
, int *);
346 extern int impl_ddi_sunbus_initchild(dev_info_t
*dip
);
347 extern void impl_ddi_sunbus_removechild(dev_info_t
*dip
);
350 * Use device arena to use for device control register mappings.
351 * Various kernel memory walkers (debugger, dtrace) need to know
352 * to avoid this address range to prevent undesired device activity.
354 extern void *device_arena_alloc(size_t size
, int vm_flag
);
355 extern void device_arena_free(void * vaddr
, size_t size
);
361 static int rootnex_dma_init();
362 static void rootnex_add_props(dev_info_t
*);
363 static int rootnex_ctl_reportdev(dev_info_t
*dip
);
364 static struct intrspec
*rootnex_get_ispec(dev_info_t
*rdip
, int inum
);
365 static int rootnex_map_regspec(ddi_map_req_t
*mp
, caddr_t
*vaddrp
);
366 static int rootnex_unmap_regspec(ddi_map_req_t
*mp
, caddr_t
*vaddrp
);
367 static int rootnex_map_handle(ddi_map_req_t
*mp
);
368 static void rootnex_clean_dmahdl(ddi_dma_impl_t
*hp
);
369 static int rootnex_valid_alloc_parms(ddi_dma_attr_t
*attr
, uint_t maxsegsize
);
370 static int rootnex_valid_bind_parms(ddi_dma_req_t
*dmareq
,
371 ddi_dma_attr_t
*attr
);
372 static void rootnex_get_sgl(ddi_dma_obj_t
*dmar_object
, ddi_dma_cookie_t
*sgl
,
373 rootnex_sglinfo_t
*sglinfo
);
374 static void rootnex_dvma_get_sgl(ddi_dma_obj_t
*dmar_object
,
375 ddi_dma_cookie_t
*sgl
, rootnex_sglinfo_t
*sglinfo
);
376 static int rootnex_bind_slowpath(ddi_dma_impl_t
*hp
, struct ddi_dma_req
*dmareq
,
377 rootnex_dma_t
*dma
, ddi_dma_attr_t
*attr
, ddi_dma_obj_t
*dmao
, int kmflag
);
378 static int rootnex_setup_copybuf(ddi_dma_impl_t
*hp
, struct ddi_dma_req
*dmareq
,
379 rootnex_dma_t
*dma
, ddi_dma_attr_t
*attr
);
380 static void rootnex_teardown_copybuf(rootnex_dma_t
*dma
);
381 static int rootnex_setup_windows(ddi_dma_impl_t
*hp
, rootnex_dma_t
*dma
,
382 ddi_dma_attr_t
*attr
, ddi_dma_obj_t
*dmao
, int kmflag
);
383 static void rootnex_teardown_windows(rootnex_dma_t
*dma
);
384 static void rootnex_init_win(ddi_dma_impl_t
*hp
, rootnex_dma_t
*dma
,
385 rootnex_window_t
*window
, ddi_dma_cookie_t
*cookie
, off_t cur_offset
);
386 static void rootnex_setup_cookie(ddi_dma_obj_t
*dmar_object
,
387 rootnex_dma_t
*dma
, ddi_dma_cookie_t
*cookie
, off_t cur_offset
,
388 size_t *copybuf_used
, page_t
**cur_pp
);
389 static int rootnex_sgllen_window_boundary(ddi_dma_impl_t
*hp
,
390 rootnex_dma_t
*dma
, rootnex_window_t
**windowp
, ddi_dma_cookie_t
*cookie
,
391 ddi_dma_attr_t
*attr
, off_t cur_offset
);
392 static int rootnex_copybuf_window_boundary(ddi_dma_impl_t
*hp
,
393 rootnex_dma_t
*dma
, rootnex_window_t
**windowp
,
394 ddi_dma_cookie_t
*cookie
, off_t cur_offset
, size_t *copybuf_used
);
395 static int rootnex_maxxfer_window_boundary(ddi_dma_impl_t
*hp
,
396 rootnex_dma_t
*dma
, rootnex_window_t
**windowp
, ddi_dma_cookie_t
*cookie
);
397 static int rootnex_valid_sync_parms(ddi_dma_impl_t
*hp
, rootnex_window_t
*win
,
398 off_t offset
, size_t size
, uint_t cache_flags
);
399 static int rootnex_verify_buffer(rootnex_dma_t
*dma
);
400 static int rootnex_dma_check(dev_info_t
*dip
, const void *handle
,
401 const void *comp_addr
, const void *not_used
);
402 static boolean_t
rootnex_need_bounce_seg(ddi_dma_obj_t
*dmar_object
,
403 rootnex_sglinfo_t
*sglinfo
);
404 static struct as
*rootnex_get_as(ddi_dma_obj_t
*dmar_object
);
414 rootnex_state
= NULL
;
415 return (mod_install(&rootnex_modlinkage
));
424 _info(struct modinfo
*modinfop
)
426 return (mod_info(&rootnex_modlinkage
, modinfop
));
446 rootnex_attach(dev_info_t
*dip
, ddi_attach_cmd_t cmd
)
455 #if defined(__amd64) && !defined(__xpv)
456 return (immu_unquiesce());
458 return (DDI_SUCCESS
);
461 return (DDI_FAILURE
);
465 * We should only have one instance of rootnex. Save it away since we
466 * don't have an easy way to get it back later.
468 ASSERT(rootnex_state
== NULL
);
469 rootnex_state
= kmem_zalloc(sizeof (rootnex_state_t
), KM_SLEEP
);
471 rootnex_state
->r_dip
= dip
;
472 rootnex_state
->r_err_ibc
= (ddi_iblock_cookie_t
)ipltospl(15);
473 rootnex_state
->r_reserved_msg_printed
= B_FALSE
;
475 rootnex_cnt
= &rootnex_state
->r_counters
[0];
479 * Set minimum fm capability level for i86pc platforms and then
480 * initialize error handling. Since we're the rootnex, we don't
481 * care what's returned in the fmcap field.
483 ddi_system_fmcap
= DDI_FM_EREPORT_CAPABLE
| DDI_FM_ERRCB_CAPABLE
|
484 DDI_FM_ACCCHK_CAPABLE
| DDI_FM_DMACHK_CAPABLE
;
485 fmcap
= ddi_system_fmcap
;
486 ddi_fm_init(dip
, &fmcap
, &rootnex_state
->r_err_ibc
);
488 /* initialize DMA related state */
489 e
= rootnex_dma_init();
490 if (e
!= DDI_SUCCESS
) {
491 kmem_free(rootnex_state
, sizeof (rootnex_state_t
));
492 return (DDI_FAILURE
);
495 /* Add static root node properties */
496 rootnex_add_props(dip
);
498 /* since we can't call ddi_report_dev() */
499 cmn_err(CE_CONT
, "?root nexus = %s\n", ddi_get_name(dip
));
501 /* Initialize rootnex event handle */
502 i_ddi_rootnex_init_events(dip
);
504 #if defined(__amd64) && !defined(__xpv)
505 e
= iommulib_nexus_register(dip
, &iommulib_nexops
,
506 &rootnex_state
->r_iommulib_handle
);
508 ASSERT(e
== DDI_SUCCESS
);
511 return (DDI_SUCCESS
);
521 rootnex_detach(dev_info_t
*dip
, ddi_detach_cmd_t cmd
)
525 #if defined(__amd64) && !defined(__xpv)
526 return (immu_quiesce());
528 return (DDI_SUCCESS
);
531 return (DDI_FAILURE
);
550 * size of our cookie/window/copybuf state needed in dma bind that we
551 * pre-alloc in dma_alloc_handle
553 rootnex_state
->r_prealloc_cookies
= rootnex_prealloc_cookies
;
554 rootnex_state
->r_prealloc_size
=
555 (rootnex_state
->r_prealloc_cookies
* sizeof (ddi_dma_cookie_t
)) +
556 (rootnex_prealloc_windows
* sizeof (rootnex_window_t
)) +
557 (rootnex_prealloc_copybuf
* sizeof (rootnex_pgmap_t
));
560 * setup DDI DMA handle kmem cache, align each handle on 64 bytes,
561 * allocate 16 extra bytes for struct pointer alignment
562 * (p->dmai_private & dma->dp_prealloc_buffer)
564 bufsize
= sizeof (ddi_dma_impl_t
) + sizeof (rootnex_dma_t
) +
565 rootnex_state
->r_prealloc_size
+ 0x10;
566 rootnex_state
->r_dmahdl_cache
= kmem_cache_create("rootnex_dmahdl",
567 bufsize
, 64, NULL
, NULL
, NULL
, NULL
, NULL
, 0);
568 if (rootnex_state
->r_dmahdl_cache
== NULL
) {
569 return (DDI_FAILURE
);
573 * allocate array to track which major numbers we have printed warnings
576 rootnex_warn_list
= kmem_zalloc(devcnt
* sizeof (*rootnex_warn_list
),
579 return (DDI_SUCCESS
);
584 * rootnex_add_props()
588 rootnex_add_props(dev_info_t
*dip
)
590 rootnex_intprop_t
*rpp
;
593 /* Add static integer/boolean properties to the root node */
594 rpp
= rootnex_intprp
;
595 for (i
= 0; i
< NROOT_INTPROPS
; i
++) {
596 (void) e_ddi_prop_update_int(DDI_DEV_T_NONE
, dip
,
597 rpp
[i
].prop_name
, rpp
[i
].prop_value
);
604 * *************************
605 * ctlops related routines
606 * *************************
615 rootnex_ctlops(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_ctl_enum_t ctlop
,
616 void *arg
, void *result
)
619 struct ddi_parent_private_data
*pdp
;
622 case DDI_CTLOPS_DMAPMAPC
:
624 * Return 'partial' to indicate that dma mapping
625 * has to be done in the main MMU.
627 return (DDI_DMA_PARTIAL
);
629 case DDI_CTLOPS_BTOP
:
631 * Convert byte count input to physical page units.
632 * (byte counts that are not a page-size multiple
635 *(ulong_t
*)result
= btop(*(ulong_t
*)arg
);
636 return (DDI_SUCCESS
);
638 case DDI_CTLOPS_PTOB
:
640 * Convert size in physical pages to bytes
642 *(ulong_t
*)result
= ptob(*(ulong_t
*)arg
);
643 return (DDI_SUCCESS
);
645 case DDI_CTLOPS_BTOPR
:
647 * Convert byte count input to physical page units
648 * (byte counts that are not a page-size multiple
651 *(ulong_t
*)result
= btopr(*(ulong_t
*)arg
);
652 return (DDI_SUCCESS
);
654 case DDI_CTLOPS_INITCHILD
:
655 return (impl_ddi_sunbus_initchild(arg
));
657 case DDI_CTLOPS_UNINITCHILD
:
658 impl_ddi_sunbus_removechild(arg
);
659 return (DDI_SUCCESS
);
661 case DDI_CTLOPS_REPORTDEV
:
662 return (rootnex_ctl_reportdev(rdip
));
664 case DDI_CTLOPS_IOMIN
:
666 * Nothing to do here but reflect back..
668 return (DDI_SUCCESS
);
670 case DDI_CTLOPS_REGSIZE
:
671 case DDI_CTLOPS_NREGS
:
674 case DDI_CTLOPS_SIDDEV
:
675 if (ndi_dev_is_prom_node(rdip
))
676 return (DDI_SUCCESS
);
677 if (ndi_dev_is_persistent_node(rdip
))
678 return (DDI_SUCCESS
);
679 return (DDI_FAILURE
);
681 case DDI_CTLOPS_POWER
:
682 return ((*pm_platform_power
)((power_req_t
*)arg
));
684 case DDI_CTLOPS_RESERVED0
: /* Was DDI_CTLOPS_NINTRS, obsolete */
685 case DDI_CTLOPS_RESERVED1
: /* Was DDI_CTLOPS_POKE_INIT, obsolete */
686 case DDI_CTLOPS_RESERVED2
: /* Was DDI_CTLOPS_POKE_FLUSH, obsolete */
687 case DDI_CTLOPS_RESERVED3
: /* Was DDI_CTLOPS_POKE_FINI, obsolete */
688 case DDI_CTLOPS_RESERVED4
: /* Was DDI_CTLOPS_INTR_HILEVEL, obsolete */
689 case DDI_CTLOPS_RESERVED5
: /* Was DDI_CTLOPS_XLATE_INTRS, obsolete */
690 if (!rootnex_state
->r_reserved_msg_printed
) {
691 rootnex_state
->r_reserved_msg_printed
= B_TRUE
;
692 cmn_err(CE_WARN
, "Failing ddi_ctlops call(s) for "
693 "1 or more reserved/obsolete operations.");
695 return (DDI_FAILURE
);
698 return (DDI_FAILURE
);
701 * The rest are for "hardware" properties
703 if ((pdp
= ddi_get_parent_data(rdip
)) == NULL
)
704 return (DDI_FAILURE
);
706 if (ctlop
== DDI_CTLOPS_NREGS
) {
708 *ptr
= pdp
->par_nreg
;
710 off_t
*size
= (off_t
*)result
;
714 if (n
>= pdp
->par_nreg
) {
715 return (DDI_FAILURE
);
717 *size
= (off_t
)pdp
->par_reg
[n
].regspec_size
;
719 return (DDI_SUCCESS
);
724 * rootnex_ctl_reportdev()
728 rootnex_ctl_reportdev(dev_info_t
*dev
)
730 int i
, n
, len
, f_len
= 0;
733 buf
= kmem_alloc(REPORTDEV_BUFSIZE
, KM_SLEEP
);
734 f_len
+= snprintf(buf
, REPORTDEV_BUFSIZE
,
735 "%s%d at root", ddi_driver_name(dev
), ddi_get_instance(dev
));
738 for (i
= 0; i
< sparc_pd_getnreg(dev
); i
++) {
740 struct regspec
*rp
= sparc_pd_getreg(dev
, i
);
743 f_len
+= snprintf(buf
+ len
, REPORTDEV_BUFSIZE
- len
,
746 f_len
+= snprintf(buf
+ len
, REPORTDEV_BUFSIZE
- len
,
750 switch (rp
->regspec_bustype
) {
753 f_len
+= snprintf(buf
+ len
, REPORTDEV_BUFSIZE
- len
,
754 "%s 0x%x", DEVI_EISA_NEXNAME
, rp
->regspec_addr
);
758 f_len
+= snprintf(buf
+ len
, REPORTDEV_BUFSIZE
- len
,
759 "%s 0x%x", DEVI_ISA_NEXNAME
, rp
->regspec_addr
);
763 f_len
+= snprintf(buf
+ len
, REPORTDEV_BUFSIZE
- len
,
764 "space %x offset %x",
765 rp
->regspec_bustype
, rp
->regspec_addr
);
770 for (i
= 0, n
= sparc_pd_getnintr(dev
); i
< n
; i
++) {
774 f_len
+= snprintf(buf
+ len
, REPORTDEV_BUFSIZE
- len
,
778 pri
= INT_IPL(sparc_pd_getintr(dev
, i
)->intrspec_pri
);
779 f_len
+= snprintf(buf
+ len
, REPORTDEV_BUFSIZE
- len
,
780 " sparc ipl %d", pri
);
784 if (f_len
+ 1 >= REPORTDEV_BUFSIZE
) {
785 cmn_err(CE_NOTE
, "next message is truncated: "
786 "printed length 1024, real length %d", f_len
);
789 cmn_err(CE_CONT
, "?%s\n", buf
);
790 kmem_free(buf
, REPORTDEV_BUFSIZE
);
791 return (DDI_SUCCESS
);
806 rootnex_map(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_map_req_t
*mp
, off_t offset
,
807 off_t len
, caddr_t
*vaddrp
)
809 struct regspec
*rp
, tmp_reg
;
810 ddi_map_req_t mr
= *mp
; /* Get private copy of request */
815 switch (mp
->map_op
) {
816 case DDI_MO_MAP_LOCKED
:
818 case DDI_MO_MAP_HANDLE
:
822 cmn_err(CE_WARN
, "rootnex_map: unimplemented map op %d.",
824 #endif /* DDI_MAP_DEBUG */
825 return (DDI_ME_UNIMPLEMENTED
);
828 if (mp
->map_flags
& DDI_MF_USER_MAPPING
) {
830 cmn_err(CE_WARN
, "rootnex_map: unimplemented map type: user.");
831 #endif /* DDI_MAP_DEBUG */
832 return (DDI_ME_UNIMPLEMENTED
);
836 * First, if given an rnumber, convert it to a regspec...
837 * (Presumably, this is on behalf of a child of the root node?)
840 if (mp
->map_type
== DDI_MT_RNUMBER
) {
842 int rnumber
= mp
->map_obj
.rnumber
;
844 static char *out_of_range
=
845 "rootnex_map: Out of range rnumber <%d>, device <%s>";
846 #endif /* DDI_MAP_DEBUG */
848 rp
= i_ddi_rnumber_to_regspec(rdip
, rnumber
);
851 cmn_err(CE_WARN
, out_of_range
, rnumber
,
853 #endif /* DDI_MAP_DEBUG */
854 return (DDI_ME_RNUMBER_RANGE
);
858 * Convert the given ddi_map_req_t from rnumber to regspec...
861 mp
->map_type
= DDI_MT_REGSPEC
;
866 * Adjust offset and length correspnding to called values...
867 * XXX: A non-zero length means override the one in the regspec
868 * XXX: (regardless of what's in the parent's range?)
871 tmp_reg
= *(mp
->map_obj
.rp
); /* Preserve underlying data */
872 rp
= mp
->map_obj
.rp
= &tmp_reg
; /* Use tmp_reg in request */
875 cmn_err(CE_CONT
, "rootnex: <%s,%s> <0x%x, 0x%x, 0x%d> offset %d len %d "
876 "handle 0x%x\n", ddi_get_name(dip
), ddi_get_name(rdip
),
877 rp
->regspec_bustype
, rp
->regspec_addr
, rp
->regspec_size
, offset
,
878 len
, mp
->map_handlep
);
879 #endif /* DDI_MAP_DEBUG */
882 * I/O or memory mapping:
884 * <bustype=0, addr=x, len=x>: memory
885 * <bustype=1, addr=x, len=x>: i/o
886 * <bustype>1, addr=0, len=x>: x86-compatibility i/o
889 if (rp
->regspec_bustype
> 1 && rp
->regspec_addr
!= 0) {
890 cmn_err(CE_WARN
, "<%s,%s> invalid register spec"
891 " <0x%x, 0x%x, 0x%x>", ddi_get_name(dip
),
892 ddi_get_name(rdip
), rp
->regspec_bustype
,
893 rp
->regspec_addr
, rp
->regspec_size
);
894 return (DDI_ME_INVAL
);
897 if (rp
->regspec_bustype
> 1 && rp
->regspec_addr
== 0) {
899 * compatibility i/o mapping
901 rp
->regspec_bustype
+= (uint_t
)offset
;
904 * Normal memory or i/o mapping
906 rp
->regspec_addr
+= (uint_t
)offset
;
910 rp
->regspec_size
= (uint_t
)len
;
913 cmn_err(CE_CONT
, " <%s,%s> <0x%x, 0x%x, 0x%d> offset %d "
914 "len %d handle 0x%x\n", ddi_get_name(dip
), ddi_get_name(rdip
),
915 rp
->regspec_bustype
, rp
->regspec_addr
, rp
->regspec_size
,
916 offset
, len
, mp
->map_handlep
);
917 #endif /* DDI_MAP_DEBUG */
920 * Apply any parent ranges at this level, if applicable.
921 * (This is where nexus specific regspec translation takes place.
922 * Use of this function is implicit agreement that translation is
923 * provided via ddi_apply_range.)
927 ddi_map_debug("applying range of parent <%s> to child <%s>...\n",
928 ddi_get_name(dip
), ddi_get_name(rdip
));
929 #endif /* DDI_MAP_DEBUG */
931 if ((error
= i_ddi_apply_range(dip
, rdip
, mp
->map_obj
.rp
)) != 0)
934 switch (mp
->map_op
) {
935 case DDI_MO_MAP_LOCKED
:
938 * Set up the locked down kernel mapping to the regspec...
941 return (rootnex_map_regspec(mp
, vaddrp
));
949 return (rootnex_unmap_regspec(mp
, vaddrp
));
951 case DDI_MO_MAP_HANDLE
:
953 return (rootnex_map_handle(mp
));
956 return (DDI_ME_UNIMPLEMENTED
);
962 * rootnex_map_fault()
964 * fault in mappings for requestors
968 rootnex_map_fault(dev_info_t
*dip
, dev_info_t
*rdip
, struct hat
*hat
,
969 struct seg
*seg
, caddr_t addr
, struct devpage
*dp
, pfn_t pfn
, uint_t prot
,
974 ddi_map_debug("rootnex_map_fault: address <%x> pfn <%x>", addr
, pfn
);
975 ddi_map_debug(" Seg <%s>\n",
976 seg
->s_ops
== &segdev_ops
? "segdev" :
977 seg
== &kvseg
? "segkmem" : "NONE!");
978 #endif /* DDI_MAP_DEBUG */
981 * This is all terribly broken, but it is a start
983 * XXX Note that this test means that segdev_ops
984 * must be exported from seg_dev.c.
985 * XXX What about devices with their own segment drivers?
987 if (seg
->s_ops
== &segdev_ops
) {
988 struct segdev_data
*sdp
= (struct segdev_data
*)seg
->s_data
;
992 * This is one plausible interpretation of
993 * a null hat i.e. use the first hat on the
994 * address space hat list which by convention is
995 * the hat of the system MMU. At alternative
996 * would be to panic .. this might well be better ..
998 ASSERT(AS_READ_HELD(seg
->s_as
));
999 hat
= seg
->s_as
->a_hat
;
1000 cmn_err(CE_NOTE
, "rootnex_map_fault: nil hat");
1002 hat_devload(hat
, addr
, MMU_PAGESIZE
, pfn
, prot
| sdp
->hat_attr
,
1003 (lock
? HAT_LOAD_LOCK
: HAT_LOAD
));
1004 } else if (seg
== &kvseg
&& dp
== NULL
) {
1005 hat_devload(kas
.a_hat
, addr
, MMU_PAGESIZE
, pfn
, prot
,
1008 return (DDI_FAILURE
);
1009 return (DDI_SUCCESS
);
1014 * rootnex_map_regspec()
1015 * we don't support mapping of I/O cards above 4Gb
1018 rootnex_map_regspec(ddi_map_req_t
*mp
, caddr_t
*vaddrp
)
1020 rootnex_addr_t rbase
;
1022 uint_t npages
, pgoffset
;
1026 uint_t hat_acc_flags
;
1029 rp
= mp
->map_obj
.rp
;
1030 hp
= mp
->map_handlep
;
1032 #ifdef DDI_MAP_DEBUG
1034 "rootnex_map_regspec: <0x%x 0x%x 0x%x> handle 0x%x\n",
1035 rp
->regspec_bustype
, rp
->regspec_addr
,
1036 rp
->regspec_size
, mp
->map_handlep
);
1037 #endif /* DDI_MAP_DEBUG */
1040 * I/O or memory mapping
1042 * <bustype=0, addr=x, len=x>: memory
1043 * <bustype=1, addr=x, len=x>: i/o
1044 * <bustype>1, addr=0, len=x>: x86-compatibility i/o
1047 if (rp
->regspec_bustype
> 1 && rp
->regspec_addr
!= 0) {
1048 cmn_err(CE_WARN
, "rootnex: invalid register spec"
1049 " <0x%x, 0x%x, 0x%x>", rp
->regspec_bustype
,
1050 rp
->regspec_addr
, rp
->regspec_size
);
1051 return (DDI_FAILURE
);
1054 if (rp
->regspec_bustype
!= 0) {
1056 * I/O space - needs a handle.
1059 return (DDI_FAILURE
);
1061 ap
= (ddi_acc_impl_t
*)hp
->ah_platform_private
;
1062 ap
->ahi_acc_attr
|= DDI_ACCATTR_IO_SPACE
;
1063 impl_acc_hdl_init(hp
);
1065 if (mp
->map_flags
& DDI_MF_DEVICE_MAPPING
) {
1066 #ifdef DDI_MAP_DEBUG
1067 ddi_map_debug("rootnex_map_regspec: mmap() "
1068 "to I/O space is not supported.\n");
1069 #endif /* DDI_MAP_DEBUG */
1070 return (DDI_ME_INVAL
);
1073 * 1275-compliant vs. compatibility i/o mapping
1076 (rp
->regspec_bustype
> 1 && rp
->regspec_addr
== 0) ?
1077 ((caddr_t
)(uintptr_t)rp
->regspec_bustype
) :
1078 ((caddr_t
)(uintptr_t)rp
->regspec_addr
);
1079 hp
->ah_pfn
= mmu_btop((ulong_t
)rp
->regspec_addr
&
1081 hp
->ah_pnum
= mmu_btopr(rp
->regspec_size
+
1082 (ulong_t
)rp
->regspec_addr
& MMU_PAGEOFFSET
);
1085 #ifdef DDI_MAP_DEBUG
1087 "rootnex_map_regspec: \"Mapping\" %d bytes I/O space at 0x%x\n",
1088 rp
->regspec_size
, *vaddrp
);
1089 #endif /* DDI_MAP_DEBUG */
1090 return (DDI_SUCCESS
);
1100 * hp->ah_acc.devacc_attr_endian_flags.
1102 switch (hp
->ah_acc
.devacc_attr_dataorder
) {
1103 case DDI_STRICTORDER_ACC
:
1104 hat_acc_flags
= HAT_STRICTORDER
;
1106 case DDI_UNORDERED_OK_ACC
:
1107 hat_acc_flags
= HAT_UNORDERED_OK
;
1109 case DDI_MERGING_OK_ACC
:
1110 hat_acc_flags
= HAT_MERGING_OK
;
1112 case DDI_LOADCACHING_OK_ACC
:
1113 hat_acc_flags
= HAT_LOADCACHING_OK
;
1115 case DDI_STORECACHING_OK_ACC
:
1116 hat_acc_flags
= HAT_STORECACHING_OK
;
1119 ap
= (ddi_acc_impl_t
*)hp
->ah_platform_private
;
1120 ap
->ahi_acc_attr
|= DDI_ACCATTR_CPU_VADDR
;
1121 impl_acc_hdl_init(hp
);
1122 hp
->ah_hat_flags
= hat_acc_flags
;
1124 hat_acc_flags
= HAT_STRICTORDER
;
1127 rbase
= (rootnex_addr_t
)(rp
->regspec_addr
& MMU_PAGEMASK
);
1129 pgoffset
= (ulong_t
)rp
->regspec_addr
& MMU_PAGEOFFSET
;
1131 if (rp
->regspec_size
== 0) {
1132 #ifdef DDI_MAP_DEBUG
1133 ddi_map_debug("rootnex_map_regspec: zero regspec_size\n");
1134 #endif /* DDI_MAP_DEBUG */
1135 return (DDI_ME_INVAL
);
1138 if (mp
->map_flags
& DDI_MF_DEVICE_MAPPING
) {
1139 /* extra cast to make gcc happy */
1140 *vaddrp
= (caddr_t
)((uintptr_t)mmu_btop(pbase
));
1142 npages
= mmu_btopr(rp
->regspec_size
+ pgoffset
);
1144 #ifdef DDI_MAP_DEBUG
1145 ddi_map_debug("rootnex_map_regspec: Mapping %d pages "
1146 "physical %llx", npages
, pbase
);
1147 #endif /* DDI_MAP_DEBUG */
1149 cvaddr
= device_arena_alloc(ptob(npages
), VM_NOSLEEP
);
1151 return (DDI_ME_NORESOURCES
);
1154 * Now map in the pages we've allocated...
1156 hat_devload(kas
.a_hat
, cvaddr
, mmu_ptob(npages
),
1157 mmu_btop(pbase
), mp
->map_prot
| hat_acc_flags
,
1159 *vaddrp
= (caddr_t
)cvaddr
+ pgoffset
;
1161 /* save away pfn and npages for FMA */
1162 hp
= mp
->map_handlep
;
1164 hp
->ah_pfn
= mmu_btop(pbase
);
1165 hp
->ah_pnum
= npages
;
1169 #ifdef DDI_MAP_DEBUG
1170 ddi_map_debug("at virtual 0x%x\n", *vaddrp
);
1171 #endif /* DDI_MAP_DEBUG */
1172 return (DDI_SUCCESS
);
1177 * rootnex_unmap_regspec()
1181 rootnex_unmap_regspec(ddi_map_req_t
*mp
, caddr_t
*vaddrp
)
1183 caddr_t addr
= (caddr_t
)*vaddrp
;
1184 uint_t npages
, pgoffset
;
1187 if (mp
->map_flags
& DDI_MF_DEVICE_MAPPING
)
1190 rp
= mp
->map_obj
.rp
;
1192 if (rp
->regspec_size
== 0) {
1193 #ifdef DDI_MAP_DEBUG
1194 ddi_map_debug("rootnex_unmap_regspec: zero regspec_size\n");
1195 #endif /* DDI_MAP_DEBUG */
1196 return (DDI_ME_INVAL
);
1200 * I/O or memory mapping:
1202 * <bustype=0, addr=x, len=x>: memory
1203 * <bustype=1, addr=x, len=x>: i/o
1204 * <bustype>1, addr=0, len=x>: x86-compatibility i/o
1206 if (rp
->regspec_bustype
!= 0) {
1208 * This is I/O space, which requires no particular
1209 * processing on unmap since it isn't mapped in the
1212 return (DDI_SUCCESS
);
1218 pgoffset
= (uintptr_t)addr
& MMU_PAGEOFFSET
;
1219 npages
= mmu_btopr(rp
->regspec_size
+ pgoffset
);
1220 hat_unload(kas
.a_hat
, addr
- pgoffset
, ptob(npages
), HAT_UNLOAD_UNLOCK
);
1221 device_arena_free(addr
- pgoffset
, ptob(npages
));
1224 * Destroy the pointer - the mapping has logically gone
1228 return (DDI_SUCCESS
);
1233 * rootnex_map_handle()
1237 rootnex_map_handle(ddi_map_req_t
*mp
)
1239 rootnex_addr_t rbase
;
1245 rp
= mp
->map_obj
.rp
;
1247 #ifdef DDI_MAP_DEBUG
1249 "rootnex_map_handle: <0x%x 0x%x 0x%x> handle 0x%x\n",
1250 rp
->regspec_bustype
, rp
->regspec_addr
,
1251 rp
->regspec_size
, mp
->map_handlep
);
1252 #endif /* DDI_MAP_DEBUG */
1255 * I/O or memory mapping:
1257 * <bustype=0, addr=x, len=x>: memory
1258 * <bustype=1, addr=x, len=x>: i/o
1259 * <bustype>1, addr=0, len=x>: x86-compatibility i/o
1261 if (rp
->regspec_bustype
!= 0) {
1263 * This refers to I/O space, and we don't support "mapping"
1264 * I/O space to a user.
1266 return (DDI_FAILURE
);
1270 * Set up the hat_flags for the mapping.
1272 hp
= mp
->map_handlep
;
1274 switch (hp
->ah_acc
.devacc_attr_endian_flags
) {
1275 case DDI_NEVERSWAP_ACC
:
1276 hp
->ah_hat_flags
= HAT_NEVERSWAP
| HAT_STRICTORDER
;
1278 case DDI_STRUCTURE_LE_ACC
:
1279 hp
->ah_hat_flags
= HAT_STRUCTURE_LE
;
1281 case DDI_STRUCTURE_BE_ACC
:
1282 return (DDI_FAILURE
);
1284 return (DDI_REGS_ACC_CONFLICT
);
1287 switch (hp
->ah_acc
.devacc_attr_dataorder
) {
1288 case DDI_STRICTORDER_ACC
:
1290 case DDI_UNORDERED_OK_ACC
:
1291 hp
->ah_hat_flags
|= HAT_UNORDERED_OK
;
1293 case DDI_MERGING_OK_ACC
:
1294 hp
->ah_hat_flags
|= HAT_MERGING_OK
;
1296 case DDI_LOADCACHING_OK_ACC
:
1297 hp
->ah_hat_flags
|= HAT_LOADCACHING_OK
;
1299 case DDI_STORECACHING_OK_ACC
:
1300 hp
->ah_hat_flags
|= HAT_STORECACHING_OK
;
1303 return (DDI_FAILURE
);
1306 rbase
= (rootnex_addr_t
)rp
->regspec_addr
&
1307 (~(rootnex_addr_t
)MMU_PAGEOFFSET
);
1308 pgoffset
= (ulong_t
)rp
->regspec_addr
& MMU_PAGEOFFSET
;
1310 if (rp
->regspec_size
== 0)
1311 return (DDI_ME_INVAL
);
1315 hp
->ah_pfn
= mmu_btop(pbase
);
1316 hp
->ah_pnum
= mmu_btopr(rp
->regspec_size
+ pgoffset
);
1318 return (DDI_SUCCESS
);
1324 * ************************
1325 * interrupt related code
1326 * ************************
1330 * rootnex_intr_ops()
1331 * bus_intr_op() function for interrupt support
1335 rootnex_intr_ops(dev_info_t
*pdip
, dev_info_t
*rdip
, ddi_intr_op_t intr_op
,
1336 ddi_intr_handle_impl_t
*hdlp
, void *result
)
1338 struct intrspec
*ispec
;
1340 DDI_INTR_NEXDBG((CE_CONT
,
1341 "rootnex_intr_ops: pdip = %p, rdip = %p, intr_op = %x, hdlp = %p\n",
1342 (void *)pdip
, (void *)rdip
, intr_op
, (void *)hdlp
));
1344 /* Process the interrupt operation */
1346 case DDI_INTROP_GETCAP
:
1347 /* First check with pcplusmp */
1348 if (psm_intr_ops
== NULL
)
1349 return (DDI_FAILURE
);
1351 if ((*psm_intr_ops
)(rdip
, hdlp
, PSM_INTR_OP_GET_CAP
, result
)) {
1353 return (DDI_FAILURE
);
1356 case DDI_INTROP_SETCAP
:
1357 if (psm_intr_ops
== NULL
)
1358 return (DDI_FAILURE
);
1360 if ((*psm_intr_ops
)(rdip
, hdlp
, PSM_INTR_OP_SET_CAP
, result
))
1361 return (DDI_FAILURE
);
1363 case DDI_INTROP_ALLOC
:
1364 ASSERT(hdlp
->ih_type
== DDI_INTR_TYPE_FIXED
);
1365 return (rootnex_alloc_intr_fixed(rdip
, hdlp
, result
));
1366 case DDI_INTROP_FREE
:
1367 ASSERT(hdlp
->ih_type
== DDI_INTR_TYPE_FIXED
);
1368 return (rootnex_free_intr_fixed(rdip
, hdlp
));
1369 case DDI_INTROP_GETPRI
:
1370 if ((ispec
= rootnex_get_ispec(rdip
, hdlp
->ih_inum
)) == NULL
)
1371 return (DDI_FAILURE
);
1372 *(int *)result
= ispec
->intrspec_pri
;
1374 case DDI_INTROP_SETPRI
:
1375 /* Validate the interrupt priority passed to us */
1376 if (*(int *)result
> LOCK_LEVEL
)
1377 return (DDI_FAILURE
);
1379 /* Ensure that PSM is all initialized and ispec is ok */
1380 if ((psm_intr_ops
== NULL
) ||
1381 ((ispec
= rootnex_get_ispec(rdip
, hdlp
->ih_inum
)) == NULL
))
1382 return (DDI_FAILURE
);
1384 /* Change the priority */
1385 if ((*psm_intr_ops
)(rdip
, hdlp
, PSM_INTR_OP_SET_PRI
, result
) ==
1387 return (DDI_FAILURE
);
1389 /* update the ispec with the new priority */
1390 ispec
->intrspec_pri
= *(int *)result
;
1392 case DDI_INTROP_ADDISR
:
1393 if ((ispec
= rootnex_get_ispec(rdip
, hdlp
->ih_inum
)) == NULL
)
1394 return (DDI_FAILURE
);
1395 ispec
->intrspec_func
= hdlp
->ih_cb_func
;
1397 case DDI_INTROP_REMISR
:
1398 if ((ispec
= rootnex_get_ispec(rdip
, hdlp
->ih_inum
)) == NULL
)
1399 return (DDI_FAILURE
);
1400 ispec
->intrspec_func
= (uint_t (*)()) 0;
1402 case DDI_INTROP_ENABLE
:
1403 if ((ispec
= rootnex_get_ispec(rdip
, hdlp
->ih_inum
)) == NULL
)
1404 return (DDI_FAILURE
);
1406 /* Call psmi to translate irq with the dip */
1407 if (psm_intr_ops
== NULL
)
1408 return (DDI_FAILURE
);
1410 ((ihdl_plat_t
*)hdlp
->ih_private
)->ip_ispecp
= ispec
;
1411 if ((*psm_intr_ops
)(rdip
, hdlp
, PSM_INTR_OP_XLATE_VECTOR
,
1412 (int *)&hdlp
->ih_vector
) == PSM_FAILURE
)
1413 return (DDI_FAILURE
);
1415 /* Add the interrupt handler */
1416 if (!add_avintr((void *)hdlp
, ispec
->intrspec_pri
,
1417 hdlp
->ih_cb_func
, DEVI(rdip
)->devi_name
, hdlp
->ih_vector
,
1418 hdlp
->ih_cb_arg1
, hdlp
->ih_cb_arg2
, NULL
, rdip
))
1419 return (DDI_FAILURE
);
1421 case DDI_INTROP_DISABLE
:
1422 if ((ispec
= rootnex_get_ispec(rdip
, hdlp
->ih_inum
)) == NULL
)
1423 return (DDI_FAILURE
);
1425 /* Call psm_ops() to translate irq with the dip */
1426 if (psm_intr_ops
== NULL
)
1427 return (DDI_FAILURE
);
1429 ((ihdl_plat_t
*)hdlp
->ih_private
)->ip_ispecp
= ispec
;
1430 (void) (*psm_intr_ops
)(rdip
, hdlp
,
1431 PSM_INTR_OP_XLATE_VECTOR
, (int *)&hdlp
->ih_vector
);
1433 /* Remove the interrupt handler */
1434 rem_avintr((void *)hdlp
, ispec
->intrspec_pri
,
1435 hdlp
->ih_cb_func
, hdlp
->ih_vector
);
1437 case DDI_INTROP_SETMASK
:
1438 if (psm_intr_ops
== NULL
)
1439 return (DDI_FAILURE
);
1441 if ((*psm_intr_ops
)(rdip
, hdlp
, PSM_INTR_OP_SET_MASK
, NULL
))
1442 return (DDI_FAILURE
);
1444 case DDI_INTROP_CLRMASK
:
1445 if (psm_intr_ops
== NULL
)
1446 return (DDI_FAILURE
);
1448 if ((*psm_intr_ops
)(rdip
, hdlp
, PSM_INTR_OP_CLEAR_MASK
, NULL
))
1449 return (DDI_FAILURE
);
1451 case DDI_INTROP_GETPENDING
:
1452 if (psm_intr_ops
== NULL
)
1453 return (DDI_FAILURE
);
1455 if ((*psm_intr_ops
)(rdip
, hdlp
, PSM_INTR_OP_GET_PENDING
,
1458 return (DDI_FAILURE
);
1461 case DDI_INTROP_NAVAIL
:
1462 case DDI_INTROP_NINTRS
:
1463 *(int *)result
= i_ddi_get_intx_nintrs(rdip
);
1464 if (*(int *)result
== 0) {
1466 * Special case for 'pcic' driver' only. This driver
1467 * driver is a child of 'isa' and 'rootnex' drivers.
1469 * See detailed comments on this in the function
1470 * rootnex_get_ispec().
1472 * Children of 'pcic' send 'NINITR' request all the
1473 * way to rootnex driver. But, the 'pdp->par_nintr'
1474 * field may not initialized. So, we fake it here
1475 * to return 1 (a la what PCMCIA nexus does).
1477 if (strcmp(ddi_get_name(rdip
), "pcic") == 0)
1480 return (DDI_FAILURE
);
1483 case DDI_INTROP_SUPPORTED_TYPES
:
1484 *(int *)result
= DDI_INTR_TYPE_FIXED
; /* Always ... */
1487 return (DDI_FAILURE
);
1490 return (DDI_SUCCESS
);
1495 * rootnex_get_ispec()
1496 * convert an interrupt number to an interrupt specification.
1497 * The interrupt number determines which interrupt spec will be
1498 * returned if more than one exists.
1500 * Look into the parent private data area of the 'rdip' to find out
1501 * the interrupt specification. First check to make sure there is
1502 * one that matchs "inumber" and then return a pointer to it.
1504 * Return NULL if one could not be found.
1506 * NOTE: This is needed for rootnex_intr_ops()
1508 static struct intrspec
*
1509 rootnex_get_ispec(dev_info_t
*rdip
, int inum
)
1511 struct ddi_parent_private_data
*pdp
= ddi_get_parent_data(rdip
);
1514 * Special case handling for drivers that provide their own
1515 * intrspec structures instead of relying on the DDI framework.
1517 * A broken hardware driver in ON could potentially provide its
1518 * own intrspec structure, instead of relying on the hardware.
1519 * If these drivers are children of 'rootnex' then we need to
1520 * continue to provide backward compatibility to them here.
1522 * Following check is a special case for 'pcic' driver which
1523 * was found to have broken hardwre andby provides its own intrspec.
1525 * Verbatim comments from this driver are shown here:
1526 * "Don't use the ddi_add_intr since we don't have a
1527 * default intrspec in all cases."
1529 * Since an 'ispec' may not be always created for it,
1530 * check for that and create one if so.
1532 * NOTE: Currently 'pcic' is the only driver found to do this.
1534 if (!pdp
->par_intr
&& strcmp(ddi_get_name(rdip
), "pcic") == 0) {
1536 pdp
->par_intr
= kmem_zalloc(sizeof (struct intrspec
) *
1537 pdp
->par_nintr
, KM_SLEEP
);
1540 /* Validate the interrupt number */
1541 if (inum
>= pdp
->par_nintr
)
1544 /* Get the interrupt structure pointer and return that */
1545 return ((struct intrspec
*)&pdp
->par_intr
[inum
]);
1549 * Allocate interrupt vector for FIXED (legacy) type.
1552 rootnex_alloc_intr_fixed(dev_info_t
*rdip
, ddi_intr_handle_impl_t
*hdlp
,
1555 struct intrspec
*ispec
;
1556 ddi_intr_handle_impl_t info_hdl
;
1559 apic_get_type_t type_info
;
1561 if (psm_intr_ops
== NULL
)
1562 return (DDI_FAILURE
);
1564 if ((ispec
= rootnex_get_ispec(rdip
, hdlp
->ih_inum
)) == NULL
)
1565 return (DDI_FAILURE
);
1568 * If the PSM module is "APIX" then pass the request for it
1569 * to allocate the vector now.
1571 bzero(&info_hdl
, sizeof (ddi_intr_handle_impl_t
));
1572 info_hdl
.ih_private
= &type_info
;
1573 if ((*psm_intr_ops
)(NULL
, &info_hdl
, PSM_INTR_OP_APIC_TYPE
, NULL
) ==
1574 PSM_SUCCESS
&& strcmp(type_info
.avgi_type
, APIC_APIX_NAME
) == 0) {
1575 if (hdlp
->ih_private
== NULL
) { /* allocate phdl structure */
1577 i_ddi_alloc_intr_phdl(hdlp
);
1579 ((ihdl_plat_t
*)hdlp
->ih_private
)->ip_ispecp
= ispec
;
1580 ret
= (*psm_intr_ops
)(rdip
, hdlp
,
1581 PSM_INTR_OP_ALLOC_VECTORS
, result
);
1582 if (free_phdl
) { /* free up the phdl structure */
1584 i_ddi_free_intr_phdl(hdlp
);
1585 hdlp
->ih_private
= NULL
;
1589 * No APIX module; fall back to the old scheme where the
1590 * interrupt vector is allocated during ddi_enable_intr() call.
1592 hdlp
->ih_pri
= ispec
->intrspec_pri
;
1593 *(int *)result
= hdlp
->ih_scratch1
;
1601 * Free up interrupt vector for FIXED (legacy) type.
1604 rootnex_free_intr_fixed(dev_info_t
*rdip
, ddi_intr_handle_impl_t
*hdlp
)
1606 struct intrspec
*ispec
;
1607 struct ddi_parent_private_data
*pdp
;
1608 ddi_intr_handle_impl_t info_hdl
;
1610 apic_get_type_t type_info
;
1612 if (psm_intr_ops
== NULL
)
1613 return (DDI_FAILURE
);
1616 * If the PSM module is "APIX" then pass the request for it
1617 * to free up the vector now.
1619 bzero(&info_hdl
, sizeof (ddi_intr_handle_impl_t
));
1620 info_hdl
.ih_private
= &type_info
;
1621 if ((*psm_intr_ops
)(NULL
, &info_hdl
, PSM_INTR_OP_APIC_TYPE
, NULL
) ==
1622 PSM_SUCCESS
&& strcmp(type_info
.avgi_type
, APIC_APIX_NAME
) == 0) {
1623 if ((ispec
= rootnex_get_ispec(rdip
, hdlp
->ih_inum
)) == NULL
)
1624 return (DDI_FAILURE
);
1625 ((ihdl_plat_t
*)hdlp
->ih_private
)->ip_ispecp
= ispec
;
1626 ret
= (*psm_intr_ops
)(rdip
, hdlp
,
1627 PSM_INTR_OP_FREE_VECTORS
, NULL
);
1630 * No APIX module; fall back to the old scheme where
1631 * the interrupt vector was already freed during
1632 * ddi_disable_intr() call.
1637 pdp
= ddi_get_parent_data(rdip
);
1640 * Special case for 'pcic' driver' only.
1641 * If an intrspec was created for it, clean it up here
1642 * See detailed comments on this in the function
1643 * rootnex_get_ispec().
1645 if (pdp
->par_intr
&& strcmp(ddi_get_name(rdip
), "pcic") == 0) {
1646 kmem_free(pdp
->par_intr
, sizeof (struct intrspec
) *
1649 * Set it to zero; so that
1650 * DDI framework doesn't free it again
1652 pdp
->par_intr
= NULL
;
1661 * ******************
1663 * ******************
1668 rootnex_coredma_allochdl(dev_info_t
*dip
, dev_info_t
*rdip
,
1669 ddi_dma_attr_t
*attr
, int (*waitfp
)(caddr_t
), caddr_t arg
,
1670 ddi_dma_handle_t
*handlep
)
1672 uint64_t maxsegmentsize_ll
;
1673 uint_t maxsegmentsize
;
1682 /* convert our sleep flags */
1683 if (waitfp
== DDI_DMA_SLEEP
) {
1686 kmflag
= KM_NOSLEEP
;
1690 * We try to do only one memory allocation here. We'll do a little
1691 * pointer manipulation later. If the bind ends up taking more than
1692 * our prealloc's space, we'll have to allocate more memory in the
1693 * bind operation. Not great, but much better than before and the
1694 * best we can do with the current bind interfaces.
1696 hp
= kmem_cache_alloc(rootnex_state
->r_dmahdl_cache
, kmflag
);
1698 return (DDI_DMA_NORESOURCES
);
1700 /* Do our pointer manipulation now, align the structures */
1701 hp
->dmai_private
= (void *)(((uintptr_t)hp
+
1702 (uintptr_t)sizeof (ddi_dma_impl_t
) + 0x7) & ~0x7);
1703 dma
= (rootnex_dma_t
*)hp
->dmai_private
;
1704 dma
->dp_prealloc_buffer
= (uchar_t
*)(((uintptr_t)dma
+
1705 sizeof (rootnex_dma_t
) + 0x7) & ~0x7);
1707 /* setup the handle */
1708 rootnex_clean_dmahdl(hp
);
1709 hp
->dmai_error
.err_fep
= NULL
;
1710 hp
->dmai_error
.err_cf
= NULL
;
1712 dma
->dp_sglinfo
.si_flags
= attr
->dma_attr_flags
;
1713 dma
->dp_sglinfo
.si_min_addr
= attr
->dma_attr_addr_lo
;
1716 * The BOUNCE_ON_SEG workaround is not needed when an IOMMU
1717 * is being used. Set the upper limit to the seg value.
1718 * There will be enough DVMA space to always get addresses
1719 * that will match the constraints.
1721 if (IOMMU_USED(rdip
) &&
1722 (attr
->dma_attr_flags
& _DDI_DMA_BOUNCE_ON_SEG
)) {
1723 dma
->dp_sglinfo
.si_max_addr
= attr
->dma_attr_seg
;
1724 dma
->dp_sglinfo
.si_flags
&= ~_DDI_DMA_BOUNCE_ON_SEG
;
1726 dma
->dp_sglinfo
.si_max_addr
= attr
->dma_attr_addr_hi
;
1728 hp
->dmai_minxfer
= attr
->dma_attr_minxfer
;
1729 hp
->dmai_burstsizes
= attr
->dma_attr_burstsizes
;
1730 hp
->dmai_rdip
= rdip
;
1731 hp
->dmai_attr
= *attr
;
1733 if (attr
->dma_attr_seg
>= dma
->dp_sglinfo
.si_max_addr
)
1734 dma
->dp_sglinfo
.si_cancross
= B_FALSE
;
1736 dma
->dp_sglinfo
.si_cancross
= B_TRUE
;
1738 /* we don't need to worry about the SPL since we do a tryenter */
1739 mutex_init(&dma
->dp_mutex
, NULL
, MUTEX_DRIVER
, NULL
);
1742 * Figure out our maximum segment size. If the segment size is greater
1743 * than 4G, we will limit it to (4G - 1) since the max size of a dma
1744 * object (ddi_dma_obj_t.dmao_size) is 32 bits. dma_attr_seg and
1745 * dma_attr_count_max are size-1 type values.
1747 * Maximum segment size is the largest physically contiguous chunk of
1748 * memory that we can return from a bind (i.e. the maximum size of a
1752 /* handle the rollover cases */
1753 seg
= attr
->dma_attr_seg
+ 1;
1754 if (seg
< attr
->dma_attr_seg
) {
1755 seg
= attr
->dma_attr_seg
;
1757 count_max
= attr
->dma_attr_count_max
+ 1;
1758 if (count_max
< attr
->dma_attr_count_max
) {
1759 count_max
= attr
->dma_attr_count_max
;
1763 * granularity may or may not be a power of two. If it isn't, we can't
1764 * use a simple mask.
1766 if (!ISP2(attr
->dma_attr_granular
)) {
1767 dma
->dp_granularity_power_2
= B_FALSE
;
1769 dma
->dp_granularity_power_2
= B_TRUE
;
1773 * maxxfer should be a whole multiple of granularity. If we're going to
1774 * break up a window because we're greater than maxxfer, we might as
1775 * well make sure it's maxxfer is a whole multiple so we don't have to
1776 * worry about triming the window later on for this case.
1778 if (attr
->dma_attr_granular
> 1) {
1779 if (dma
->dp_granularity_power_2
) {
1780 dma
->dp_maxxfer
= attr
->dma_attr_maxxfer
-
1781 (attr
->dma_attr_maxxfer
&
1782 (attr
->dma_attr_granular
- 1));
1784 dma
->dp_maxxfer
= attr
->dma_attr_maxxfer
-
1785 (attr
->dma_attr_maxxfer
% attr
->dma_attr_granular
);
1788 dma
->dp_maxxfer
= attr
->dma_attr_maxxfer
;
1791 maxsegmentsize_ll
= MIN(seg
, dma
->dp_maxxfer
);
1792 maxsegmentsize_ll
= MIN(maxsegmentsize_ll
, count_max
);
1793 if (maxsegmentsize_ll
== 0 || (maxsegmentsize_ll
> 0xFFFFFFFF)) {
1794 maxsegmentsize
= 0xFFFFFFFF;
1796 maxsegmentsize
= maxsegmentsize_ll
;
1798 dma
->dp_sglinfo
.si_max_cookie_size
= maxsegmentsize
;
1799 dma
->dp_sglinfo
.si_segmask
= attr
->dma_attr_seg
;
1801 /* check the ddi_dma_attr arg to make sure it makes a little sense */
1802 if (rootnex_alloc_check_parms
) {
1803 e
= rootnex_valid_alloc_parms(attr
, maxsegmentsize
);
1804 if (e
!= DDI_SUCCESS
) {
1805 ROOTNEX_DPROF_INC(&rootnex_cnt
[ROOTNEX_CNT_ALLOC_FAIL
]);
1806 (void) rootnex_dma_freehdl(dip
, rdip
,
1807 (ddi_dma_handle_t
)hp
);
1812 *handlep
= (ddi_dma_handle_t
)hp
;
1814 ROOTNEX_DPROF_INC(&rootnex_cnt
[ROOTNEX_CNT_ACTIVE_HDLS
]);
1815 ROOTNEX_DPROBE1(rootnex__alloc__handle
, uint64_t,
1816 rootnex_cnt
[ROOTNEX_CNT_ACTIVE_HDLS
]);
1818 return (DDI_SUCCESS
);
1823 * rootnex_dma_allochdl()
1824 * called from ddi_dma_alloc_handle().
1827 rootnex_dma_allochdl(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_dma_attr_t
*attr
,
1828 int (*waitfp
)(caddr_t
), caddr_t arg
, ddi_dma_handle_t
*handlep
)
1830 int retval
= DDI_SUCCESS
;
1831 #if defined(__amd64) && !defined(__xpv)
1833 if (IOMMU_UNITIALIZED(rdip
)) {
1834 retval
= iommulib_nex_open(dip
, rdip
);
1836 if (retval
!= DDI_SUCCESS
&& retval
!= DDI_ENOTSUP
)
1840 if (IOMMU_UNUSED(rdip
)) {
1841 retval
= rootnex_coredma_allochdl(dip
, rdip
, attr
, waitfp
, arg
,
1844 retval
= iommulib_nexdma_allochdl(dip
, rdip
, attr
,
1845 waitfp
, arg
, handlep
);
1848 retval
= rootnex_coredma_allochdl(dip
, rdip
, attr
, waitfp
, arg
,
1852 case DDI_DMA_NORESOURCES
:
1853 if (waitfp
!= DDI_DMA_DONTWAIT
) {
1854 ddi_set_callback(waitfp
, arg
,
1855 &rootnex_state
->r_dvma_call_list_id
);
1859 ndi_fmc_insert(rdip
, DMA_HANDLE
, *handlep
, NULL
);
1869 rootnex_coredma_freehdl(dev_info_t
*dip
, dev_info_t
*rdip
,
1870 ddi_dma_handle_t handle
)
1876 hp
= (ddi_dma_impl_t
*)handle
;
1877 dma
= (rootnex_dma_t
*)hp
->dmai_private
;
1879 /* unbind should have been called first */
1880 ASSERT(!dma
->dp_inuse
);
1882 mutex_destroy(&dma
->dp_mutex
);
1883 kmem_cache_free(rootnex_state
->r_dmahdl_cache
, hp
);
1885 ROOTNEX_DPROF_DEC(&rootnex_cnt
[ROOTNEX_CNT_ACTIVE_HDLS
]);
1886 ROOTNEX_DPROBE1(rootnex__free__handle
, uint64_t,
1887 rootnex_cnt
[ROOTNEX_CNT_ACTIVE_HDLS
]);
1889 return (DDI_SUCCESS
);
1893 * rootnex_dma_freehdl()
1894 * called from ddi_dma_free_handle().
1897 rootnex_dma_freehdl(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_dma_handle_t handle
)
1901 ndi_fmc_remove(rdip
, DMA_HANDLE
, handle
);
1902 #if defined(__amd64) && !defined(__xpv)
1903 if (IOMMU_USED(rdip
))
1904 ret
= iommulib_nexdma_freehdl(dip
, rdip
, handle
);
1907 ret
= rootnex_coredma_freehdl(dip
, rdip
, handle
);
1909 if (rootnex_state
->r_dvma_call_list_id
)
1910 ddi_run_callback(&rootnex_state
->r_dvma_call_list_id
);
1917 rootnex_coredma_bindhdl(dev_info_t
*dip
, dev_info_t
*rdip
,
1918 ddi_dma_handle_t handle
, struct ddi_dma_req
*dmareq
,
1919 ddi_dma_cookie_t
*cookiep
, uint_t
*ccountp
)
1921 rootnex_sglinfo_t
*sinfo
;
1922 ddi_dma_obj_t
*dmao
;
1923 #if defined(__amd64) && !defined(__xpv)
1924 struct dvmaseg
*dvs
;
1925 ddi_dma_cookie_t
*cookie
;
1927 ddi_dma_attr_t
*attr
;
1934 hp
= (ddi_dma_impl_t
*)handle
;
1935 dma
= (rootnex_dma_t
*)hp
->dmai_private
;
1936 dmao
= &dma
->dp_dma
;
1937 sinfo
= &dma
->dp_sglinfo
;
1938 attr
= &hp
->dmai_attr
;
1940 /* convert the sleep flags */
1941 if (dmareq
->dmar_fp
== DDI_DMA_SLEEP
) {
1942 dma
->dp_sleep_flags
= kmflag
= KM_SLEEP
;
1944 dma
->dp_sleep_flags
= kmflag
= KM_NOSLEEP
;
1947 hp
->dmai_rflags
= dmareq
->dmar_flags
& DMP_DDIFLAGS
;
1950 * This is useful for debugging a driver. Not as useful in a production
1951 * system. The only time this will fail is if you have a driver bug.
1953 if (rootnex_bind_check_inuse
) {
1955 * No one else should ever have this lock unless someone else
1956 * is trying to use this handle. So contention on the lock
1957 * is the same as inuse being set.
1959 e
= mutex_tryenter(&dma
->dp_mutex
);
1961 ROOTNEX_DPROF_INC(&rootnex_cnt
[ROOTNEX_CNT_BIND_FAIL
]);
1962 return (DDI_DMA_INUSE
);
1964 if (dma
->dp_inuse
) {
1965 mutex_exit(&dma
->dp_mutex
);
1966 ROOTNEX_DPROF_INC(&rootnex_cnt
[ROOTNEX_CNT_BIND_FAIL
]);
1967 return (DDI_DMA_INUSE
);
1969 dma
->dp_inuse
= B_TRUE
;
1970 mutex_exit(&dma
->dp_mutex
);
1973 /* check the ddi_dma_attr arg to make sure it makes a little sense */
1974 if (rootnex_bind_check_parms
) {
1975 e
= rootnex_valid_bind_parms(dmareq
, attr
);
1976 if (e
!= DDI_SUCCESS
) {
1977 ROOTNEX_DPROF_INC(&rootnex_cnt
[ROOTNEX_CNT_BIND_FAIL
]);
1978 rootnex_clean_dmahdl(hp
);
1983 /* save away the original bind info */
1984 dma
->dp_dma
= dmareq
->dmar_object
;
1986 #if defined(__amd64) && !defined(__xpv)
1987 if (IOMMU_USED(rdip
)) {
1988 dmao
= &dma
->dp_dvma
;
1989 e
= iommulib_nexdma_mapobject(dip
, rdip
, handle
, dmareq
, dmao
);
1992 if (sinfo
->si_cancross
||
1993 dmao
->dmao_obj
.dvma_obj
.dv_nseg
!= 1 ||
1994 dmao
->dmao_size
> sinfo
->si_max_cookie_size
) {
1995 dma
->dp_dvma_used
= B_TRUE
;
1998 sinfo
->si_sgl_size
= 1;
1999 hp
->dmai_rflags
|= DMP_NOSYNC
;
2001 dma
->dp_dvma_used
= B_TRUE
;
2002 dma
->dp_need_to_free_cookie
= B_FALSE
;
2004 dvs
= &dmao
->dmao_obj
.dvma_obj
.dv_seg
[0];
2005 cookie
= hp
->dmai_cookie
= dma
->dp_cookies
=
2006 (ddi_dma_cookie_t
*)dma
->dp_prealloc_buffer
;
2007 cookie
->dmac_laddress
= dvs
->dvs_start
+
2008 dmao
->dmao_obj
.dvma_obj
.dv_off
;
2009 cookie
->dmac_size
= dvs
->dvs_len
;
2010 cookie
->dmac_type
= 0;
2012 ROOTNEX_DPROBE1(rootnex__bind__dvmafast
, dev_info_t
*,
2018 rootnex_clean_dmahdl(hp
);
2025 * Figure out a rough estimate of what maximum number of pages
2026 * this buffer could use (a high estimate of course).
2028 sinfo
->si_max_pages
= mmu_btopr(dma
->dp_dma
.dmao_size
) + 1;
2030 if (dma
->dp_dvma_used
) {
2032 * The number of physical pages is the worst case.
2034 * For DVMA, the worst case is the length divided
2035 * by the maximum cookie length, plus 1. Add to that
2036 * the number of segment boundaries potentially crossed, and
2037 * the additional number of DVMA segments that was returned.
2039 * In the normal case, for modern devices, si_cancross will
2040 * be false, and dv_nseg will be 1, and the fast path will
2041 * have been taken above.
2043 ncookies
= (dma
->dp_dma
.dmao_size
/ sinfo
->si_max_cookie_size
)
2045 if (sinfo
->si_cancross
)
2047 (dma
->dp_dma
.dmao_size
/ attr
	    attr->dma_attr_seg) + 1;
		ncookies += (dmao->dmao_obj.dvma_obj.dv_nseg - 1);
		sinfo->si_max_pages = MIN(sinfo->si_max_pages, ncookies);
	}

	/*
	 * We'll use the pre-allocated cookies for any bind that will *always*
	 * fit (more important to be consistent, we don't want to create
	 * additional degenerate cases).
	 */
	if (sinfo->si_max_pages <= rootnex_state->r_prealloc_cookies) {
		dma->dp_cookies = (ddi_dma_cookie_t *)dma->dp_prealloc_buffer;
		dma->dp_need_to_free_cookie = B_FALSE;
		ROOTNEX_DPROBE2(rootnex__bind__prealloc, dev_info_t *, rdip,
		    uint_t, sinfo->si_max_pages);

	/*
	 * For anything larger than that, we'll go ahead and allocate the
	 * maximum number of pages we expect to see. Hopefully, we won't be
	 * seeing this path in the fast path for high performance devices very
	 * frequently.
	 *
	 * a ddi bind interface that allowed the driver to provide storage to
	 * the bind interface would speed this case up.
	 */
	} else {
		/*
		 * Save away how much memory we allocated. If we're doing a
		 * nosleep, the alloc could fail...
		 */
		dma->dp_cookie_size = sinfo->si_max_pages *
		    sizeof (ddi_dma_cookie_t);
		dma->dp_cookies = kmem_alloc(dma->dp_cookie_size, kmflag);
		if (dma->dp_cookies == NULL) {
			ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
			rootnex_clean_dmahdl(hp);
			return (DDI_DMA_NORESOURCES);
		}
		dma->dp_need_to_free_cookie = B_TRUE;
		ROOTNEX_DPROBE2(rootnex__bind__alloc, dev_info_t *, rdip,
		    uint_t, sinfo->si_max_pages);
	}
	hp->dmai_cookie = dma->dp_cookies;

	/*
	 * Get the real sgl. rootnex_get_sgl will fill in cookie array while
	 * looking at the constraints in the dma structure. It will then put
	 * some additional state about the sgl in the dma struct (i.e. is
	 * the sgl clean, or do we need to do some munging; how many pages
	 * need to be copied, etc.)
	 */
	if (dma->dp_dvma_used)
		rootnex_dvma_get_sgl(dmao, dma->dp_cookies, &dma->dp_sglinfo);
	else
		rootnex_get_sgl(dmao, dma->dp_cookies, &dma->dp_sglinfo);

	ASSERT(sinfo->si_sgl_size <= sinfo->si_max_pages);
	/* if we don't need a copy buffer, we don't need to sync */
	if (sinfo->si_copybuf_req == 0) {
		hp->dmai_rflags |= DMP_NOSYNC;
	}

	/*
	 * if we don't need the copybuf and we don't need to do a partial, we
	 * hit the fast path. All the high performance devices should be trying
	 * to hit this path. To hit this path, a device should be able to reach
	 * all of memory, shouldn't try to bind more than it can transfer, and
	 * the buffer shouldn't require more cookies than the driver/device can
	 * handle.
	 *
	 * Note that negative values of dma_attr_sgllen are supposed
	 * to mean unlimited, but we just cast them to mean a
	 * "ridiculously large limit". This saves some extra checks on
	 * the fast path.
	 */
	if ((sinfo->si_copybuf_req == 0) &&
	    (sinfo->si_sgl_size <= (unsigned)attr->dma_attr_sgllen) &&
	    (dmao->dmao_size < dma->dp_maxxfer)) {
		/*
		 * If the driver supports FMA, insert the handle in the FMA DMA
		 * handle cache.
		 */
		if (attr->dma_attr_flags & DDI_DMA_FLAGERR)
			hp->dmai_error.err_cf = rootnex_dma_check;

		/*
		 * copy out the first cookie and ccountp, set the cookie
		 * pointer to the second cookie. The first cookie is passed
		 * back on the stack. Additional cookies are accessed via
		 * ddi_dma_nextcookie()
		 */
		*cookiep = dma->dp_cookies[0];
		*ccountp = sinfo->si_sgl_size;
		hp->dmai_cookie++;
		hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
		ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
		ROOTNEX_DPROBE4(rootnex__bind__fast, dev_info_t *, rdip,
		    uint64_t, rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS],
		    uint_t, dmao->dmao_size, uint_t, *ccountp);
		return (DDI_DMA_MAPPED);
	}
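	/*
	 * Illustration (not from the original source): a device whose
	 * attributes let it reach all of memory, whose dma_attr_sgllen is at
	 * least the number of cookies in the sgl, and whose bind size stays
	 * below dma_attr_maxxfer takes the fast path above and never touches
	 * window or copy buffer state.
	 */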
	/*
	 * go to the slow path, we may need to alloc more memory, create
	 * multiple windows, and munge up a sgl to make the device happy.
	 */

	/*
	 * With the IOMMU mapobject method used, we should never hit
	 * the slow path. If we do, something is seriously wrong.
	 * Clean up and return an error.
	 */

#if defined(__amd64) && !defined(__xpv)
	if (dma->dp_dvma_used) {
		(void) iommulib_nexdma_unmapobject(dip, rdip, handle,
		    &dma->dp_dvma);
		e = DDI_DMA_NOMAPPING;
	} else {
#endif
		e = rootnex_bind_slowpath(hp, dmareq, dma, attr, &dma->dp_dma,
		    kmflag);
#if defined(__amd64) && !defined(__xpv)
	}
#endif
	if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) {
		if (dma->dp_need_to_free_cookie) {
			kmem_free(dma->dp_cookies, dma->dp_cookie_size);
		}
		ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
		rootnex_clean_dmahdl(hp); /* must be after free cookie */
		return (e);
	}

	/*
	 * If the driver supports FMA, insert the handle in the FMA DMA handle
	 * cache.
	 */
	if (attr->dma_attr_flags & DDI_DMA_FLAGERR)
		hp->dmai_error.err_cf = rootnex_dma_check;

	/* if the first window uses the copy buffer, sync it for the device */
	if ((dma->dp_window[dma->dp_current_win].wd_dosync) &&
	    (hp->dmai_rflags & DDI_DMA_WRITE)) {
		(void) rootnex_coredma_sync(dip, rdip, handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
	}

	/*
	 * copy out the first cookie and ccountp, set the cookie pointer to the
	 * second cookie. Make sure the partial flag is set/cleared correctly.
	 * If we have a partial map (i.e. multiple windows), the number of
	 * cookies we return is the number of cookies in the first window.
	 */
	if (e == DDI_DMA_MAPPED) {
		hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
		*ccountp = sinfo->si_sgl_size;
		hp->dmai_nwin = 1;
	} else {
		hp->dmai_rflags |= DDI_DMA_PARTIAL;
		*ccountp = dma->dp_window[dma->dp_current_win].wd_cookie_cnt;
		ASSERT(hp->dmai_nwin <= dma->dp_max_win);
	}
	*cookiep = dma->dp_cookies[0];
	hp->dmai_cookie++;

	ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
	ROOTNEX_DPROBE4(rootnex__bind__slow, dev_info_t *, rdip, uint64_t,
	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], uint_t,
	    dmao->dmao_size, uint_t, *ccountp);
	return (e);
}
/*
 * rootnex_dma_bindhdl()
 *    called from ddi_dma_addr_bind_handle() and ddi_dma_buf_bind_handle().
 */
static int
rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	int ret;

#if defined(__amd64) && !defined(__xpv)
	if (IOMMU_USED(rdip))
		ret = iommulib_nexdma_bindhdl(dip, rdip, handle, dmareq,
		    cookiep, ccountp);
	else
#endif
		ret = rootnex_coredma_bindhdl(dip, rdip, handle, dmareq,
		    cookiep, ccountp);

	if (ret == DDI_DMA_NORESOURCES && dmareq->dmar_fp != DDI_DMA_DONTWAIT) {
		ddi_set_callback(dmareq->dmar_fp, dmareq->dmar_arg,
		    &rootnex_state->r_dvma_call_list_id);
	}

	return (ret);
}
/*ARGSUSED*/
static int
rootnex_coredma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *hp;
	rootnex_dma_t *dma;
	int e;

	hp = (ddi_dma_impl_t *)handle;
	dma = (rootnex_dma_t *)hp->dmai_private;

	/* make sure the buffer wasn't free'd before calling unbind */
	if (rootnex_unbind_verify_buffer) {
		e = rootnex_verify_buffer(dma);
		if (e != DDI_SUCCESS) {
			ASSERT(0);
			return (DDI_FAILURE);
		}
	}

	/* sync the current window before unbinding the buffer */
	if (dma->dp_window && dma->dp_window[dma->dp_current_win].wd_dosync &&
	    (hp->dmai_rflags & DDI_DMA_READ)) {
		(void) rootnex_coredma_sync(dip, rdip, handle, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
	}

	/*
	 * cleanup and copy buffer or window state. if we didn't use the copy
	 * buffer or windows, there won't be much to do :-)
	 */
	rootnex_teardown_copybuf(dma);
	rootnex_teardown_windows(dma);

#if defined(__amd64) && !defined(__xpv)
	if (IOMMU_USED(rdip))
		(void) iommulib_nexdma_unmapobject(dip, rdip, handle,
		    &dma->dp_dvma);
#endif

	/*
	 * If we had to allocate space for the worst case sgl (it didn't
	 * fit into our pre-allocated buffer), free that up now
	 */
	if (dma->dp_need_to_free_cookie) {
		kmem_free(dma->dp_cookies, dma->dp_cookie_size);
	}

	/*
	 * clean up the handle so it's ready for the next bind (i.e. if the
	 * handle is reused).
	 */
	rootnex_clean_dmahdl(hp);
	hp->dmai_error.err_cf = NULL;

	ROOTNEX_DPROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
	ROOTNEX_DPROBE1(rootnex__unbind, uint64_t,
	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);

	return (DDI_SUCCESS);
}
/*
 * rootnex_dma_unbindhdl()
 *    called from ddi_dma_unbind_handle()
 */
/*ARGSUSED*/
static int
rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	int ret;

#if defined(__amd64) && !defined(__xpv)
	if (IOMMU_USED(rdip))
		ret = iommulib_nexdma_unbindhdl(dip, rdip, handle);
	else
#endif
		ret = rootnex_coredma_unbindhdl(dip, rdip, handle);

	if (rootnex_state->r_dvma_call_list_id)
		ddi_run_callback(&rootnex_state->r_dvma_call_list_id);

	return (ret);
}
#if defined(__amd64) && !defined(__xpv)

static int
rootnex_coredma_get_sleep_flags(ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;

	if (dma->dp_sleep_flags != KM_SLEEP &&
	    dma->dp_sleep_flags != KM_NOSLEEP)
		cmn_err(CE_PANIC, "kmem sleep flags not set in DMA handle");
	return (dma->dp_sleep_flags);
}

/*ARGSUSED*/
static void
rootnex_coredma_reset_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
	rootnex_window_t *window;

	if (dma->dp_window) {
		window = &dma->dp_window[dma->dp_current_win];
		hp->dmai_cookie = window->wd_first_cookie;
	} else {
		hp->dmai_cookie = dma->dp_cookies;
	}
	hp->dmai_cookie++;
}
/*ARGSUSED*/
static int
rootnex_coredma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
    ddi_dma_cookie_t **cookiepp, uint_t *ccountp)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
	rootnex_window_t *window;
	ddi_dma_cookie_t *cp;
	ddi_dma_cookie_t *cookie;
	int km_flags;
	uint_t i;

	ASSERT(*cookiepp == NULL);
	ASSERT(*ccountp == 0);

	if (dma->dp_window) {
		window = &dma->dp_window[dma->dp_current_win];
		cp = window->wd_first_cookie;
		*ccountp = window->wd_cookie_cnt;
	} else {
		cp = dma->dp_cookies;
		*ccountp = dma->dp_sglinfo.si_sgl_size;
	}

	km_flags = rootnex_coredma_get_sleep_flags(handle);
	cookie = kmem_zalloc(sizeof (ddi_dma_cookie_t) * (*ccountp), km_flags);
	if (cookie == NULL) {
		return (DDI_DMA_NORESOURCES);
	}

	for (i = 0; i < *ccountp; i++) {
		cookie[i].dmac_notused = cp[i].dmac_notused;
		cookie[i].dmac_type = cp[i].dmac_type;
		cookie[i].dmac_address = cp[i].dmac_address;
		cookie[i].dmac_size = cp[i].dmac_size;
	}

	*cookiepp = cookie;

	return (DDI_SUCCESS);
}
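/*
 * Note on ownership (descriptive comment, not in the original source): the
 * cookie array returned above is a kmem_zalloc'd copy sized by *ccountp. It
 * is typically installed back into the handle by the IOMMU code through
 * rootnex_coredma_set_cookies() and is finally freed in
 * rootnex_coredma_clear_cookies() below.
 */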
/*ARGSUSED*/
static int
rootnex_coredma_set_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
    ddi_dma_cookie_t *cookiep, uint_t ccount)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
	rootnex_window_t *window;
	ddi_dma_cookie_t *cur_cookiep;

	ASSERT(cookiep);
	ASSERT(ccount != 0);
	ASSERT(dma->dp_need_to_switch_cookies == B_FALSE);

	if (dma->dp_window) {
		window = &dma->dp_window[dma->dp_current_win];
		dma->dp_saved_cookies = window->wd_first_cookie;
		window->wd_first_cookie = cookiep;
		ASSERT(ccount == window->wd_cookie_cnt);
		cur_cookiep = (hp->dmai_cookie - dma->dp_saved_cookies)
		    + window->wd_first_cookie;
	} else {
		dma->dp_saved_cookies = dma->dp_cookies;
		dma->dp_cookies = cookiep;
		ASSERT(ccount == dma->dp_sglinfo.si_sgl_size);
		cur_cookiep = (hp->dmai_cookie - dma->dp_saved_cookies)
		    + dma->dp_cookies;
	}

	dma->dp_need_to_switch_cookies = B_TRUE;
	hp->dmai_cookie = cur_cookiep;

	return (DDI_SUCCESS);
}
/*ARGSUSED*/
static int
rootnex_coredma_clear_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
	rootnex_window_t *window;
	ddi_dma_cookie_t *cur_cookiep;
	ddi_dma_cookie_t *cookie_array;
	uint_t ccount;

	/* check if cookies have not been switched */
	if (dma->dp_need_to_switch_cookies == B_FALSE)
		return (DDI_SUCCESS);

	ASSERT(dma->dp_saved_cookies);

	if (dma->dp_window) {
		window = &dma->dp_window[dma->dp_current_win];
		cookie_array = window->wd_first_cookie;
		window->wd_first_cookie = dma->dp_saved_cookies;
		dma->dp_saved_cookies = NULL;
		ccount = window->wd_cookie_cnt;
		cur_cookiep = (hp->dmai_cookie - cookie_array)
		    + window->wd_first_cookie;
	} else {
		cookie_array = dma->dp_cookies;
		dma->dp_cookies = dma->dp_saved_cookies;
		dma->dp_saved_cookies = NULL;
		ccount = dma->dp_sglinfo.si_sgl_size;
		cur_cookiep = (hp->dmai_cookie - cookie_array)
		    + dma->dp_cookies;
	}

	kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);

	hp->dmai_cookie = cur_cookiep;

	dma->dp_need_to_switch_cookies = B_FALSE;

	return (DDI_SUCCESS);
}

#endif
static struct as *
rootnex_get_as(ddi_dma_obj_t *dmao)
{
	struct as *asp;

	switch (dmao->dmao_type) {
	case DMA_OTYP_VADDR:
	case DMA_OTYP_BUFVADDR:
		asp = dmao->dmao_obj.virt_obj.v_as;
		if (asp == NULL)
			asp = &kas;
		break;
	default:
		asp = NULL;
		break;
	}
	return (asp);
}
/*
 * rootnex_verify_buffer()
 *   verify buffer wasn't free'd
 */
static int
rootnex_verify_buffer(rootnex_dma_t *dma)
{
	page_t **pplist;
	caddr_t vaddr;
	uint_t pcnt;
	uint_t poff;
	page_t *pp;
	char b;
	int i;

	/* Figure out how many pages this buffer occupies */
	if (dma->dp_dma.dmao_type == DMA_OTYP_PAGES) {
		poff = dma->dp_dma.dmao_obj.pp_obj.pp_offset & MMU_PAGEOFFSET;
	} else {
		vaddr = dma->dp_dma.dmao_obj.virt_obj.v_addr;
		poff = (uintptr_t)vaddr & MMU_PAGEOFFSET;
	}
	pcnt = mmu_btopr(dma->dp_dma.dmao_size + poff);

	switch (dma->dp_dma.dmao_type) {
	case DMA_OTYP_PAGES:
		/*
		 * for a linked list of pp's walk through them to make sure
		 * they're locked and not free.
		 */
		pp = dma->dp_dma.dmao_obj.pp_obj.pp_pp;
		for (i = 0; i < pcnt; i++) {
			if (PP_ISFREE(pp) || !PAGE_LOCKED(pp)) {
				return (DDI_FAILURE);
			}
			pp = pp->p_next;
		}
		break;

	case DMA_OTYP_VADDR:
	case DMA_OTYP_BUFVADDR:
		pplist = dma->dp_dma.dmao_obj.virt_obj.v_priv;
		/*
		 * for an array of pp's walk through them to make sure they're
		 * not free. It's possible that they may not be locked.
		 */
		if (pplist != NULL) {
			for (i = 0; i < pcnt; i++) {
				if (PP_ISFREE(pplist[i])) {
					return (DDI_FAILURE);
				}
			}

		/* For a virtual address, try to peek at each page */
		} else {
			if (rootnex_get_as(&dma->dp_dma) == &kas) {
				for (i = 0; i < pcnt; i++) {
					if (ddi_peek8(NULL, vaddr, &b) ==
					    DDI_FAILURE)
						return (DDI_FAILURE);
					vaddr += MMU_PAGESIZE;
				}
			}
		}
		break;

	default:
		cmn_err(CE_PANIC, "rootnex_verify_buffer: bad DMA object");
		break;
	}

	return (DDI_SUCCESS);
}
/*
 * rootnex_clean_dmahdl()
 *    Clean the dma handle. This should be called on a handle alloc and an
 *    unbind handle. Set the handle state to the default settings.
 */
static void
rootnex_clean_dmahdl(ddi_dma_impl_t *hp)
{
	rootnex_dma_t *dma;

	dma = (rootnex_dma_t *)hp->dmai_private;

	hp->dmai_nwin = 0;
	dma->dp_current_cookie = 0;
	dma->dp_copybuf_size = 0;
	dma->dp_window = NULL;
	dma->dp_cbaddr = NULL;
	dma->dp_inuse = B_FALSE;
	dma->dp_dvma_used = B_FALSE;
	dma->dp_need_to_free_cookie = B_FALSE;
	dma->dp_need_to_switch_cookies = B_FALSE;
	dma->dp_saved_cookies = NULL;
	dma->dp_sleep_flags = KM_PANIC;
	dma->dp_need_to_free_window = B_FALSE;
	dma->dp_partial_required = B_FALSE;
	dma->dp_trim_required = B_FALSE;
	dma->dp_sglinfo.si_copybuf_req = 0;
#if !defined(__amd64)
	dma->dp_cb_remaping = B_FALSE;
	dma->dp_kva = NULL;
#endif

	/* FMA related initialization */
	hp->dmai_fault = 0;
	hp->dmai_fault_check = NULL;
	hp->dmai_fault_notify = NULL;
	hp->dmai_error.err_ena = 0;
	hp->dmai_error.err_status = DDI_FM_OK;
	hp->dmai_error.err_expected = DDI_FM_ERR_UNEXPECTED;
	hp->dmai_error.err_ontrap = NULL;
}
/*
 * rootnex_valid_alloc_parms()
 *    Called in ddi_dma_alloc_handle path to validate its parameters.
 */
static int
rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegmentsize)
{
	if ((attr->dma_attr_seg < MMU_PAGEOFFSET) ||
	    (attr->dma_attr_count_max < MMU_PAGEOFFSET) ||
	    (attr->dma_attr_granular > MMU_PAGESIZE) ||
	    (attr->dma_attr_maxxfer < MMU_PAGESIZE)) {
		return (DDI_DMA_BADATTR);
	}

	if (attr->dma_attr_addr_hi <= attr->dma_attr_addr_lo) {
		return (DDI_DMA_BADATTR);
	}

	if ((attr->dma_attr_seg & MMU_PAGEOFFSET) != MMU_PAGEOFFSET ||
	    MMU_PAGESIZE & (attr->dma_attr_granular - 1) ||
	    attr->dma_attr_sgllen == 0) {
		return (DDI_DMA_BADATTR);
	}

	/* We should be able to DMA into every byte offset in a page */
	if (maxsegmentsize < MMU_PAGESIZE) {
		return (DDI_DMA_BADATTR);
	}

	/* if we're bouncing on seg, seg must be <= addr_hi */
	if ((attr->dma_attr_flags & _DDI_DMA_BOUNCE_ON_SEG) &&
	    (attr->dma_attr_seg > attr->dma_attr_addr_hi)) {
		return (DDI_DMA_BADATTR);
	}
	return (DDI_SUCCESS);
}
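/*
 * Example of the checks above (illustrative, not in the original source): an
 * attribute set with dma_attr_sgllen == 0, or with dma_attr_addr_hi <=
 * dma_attr_addr_lo, is rejected with DDI_DMA_BADATTR before a handle is
 * ever allocated.
 */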
/*
 * rootnex_valid_bind_parms()
 *    Called in ddi_dma_*_bind_handle path to validate its parameters.
 */
/* ARGSUSED */
static int
rootnex_valid_bind_parms(ddi_dma_req_t *dmareq, ddi_dma_attr_t *attr)
{
#if !defined(__amd64)
	/*
	 * we only support up to a 2G-1 transfer size on 32-bit kernels so
	 * we can track the offset for the obsoleted interfaces.
	 */
	if (dmareq->dmar_object.dmao_size > 0x7FFFFFFF) {
		return (DDI_DMA_TOOBIG);
	}
#endif

	return (DDI_SUCCESS);
}
/*
 * rootnex_need_bounce_seg()
 *    check to see if the buffer lives on both sides of the seg.
 */
static boolean_t
rootnex_need_bounce_seg(ddi_dma_obj_t *dmar_object, rootnex_sglinfo_t *sglinfo)
{
	ddi_dma_atyp_t buftype;
	rootnex_addr_t raddr;
	boolean_t lower_addr;
	boolean_t upper_addr;
	uint64_t offset;
	page_t **pplist;
	uint64_t paddr;
	uint32_t psize;
	uint32_t size;
	caddr_t vaddr;
	uint_t pcnt;
	page_t *pp;

	/* shortcuts */
	pplist = dmar_object->dmao_obj.virt_obj.v_priv;
	vaddr = dmar_object->dmao_obj.virt_obj.v_addr;
	buftype = dmar_object->dmao_type;
	size = dmar_object->dmao_size;

	lower_addr = B_FALSE;
	upper_addr = B_FALSE;
	pcnt = 0;

	/*
	 * Process the first page to handle the initial offset of the buffer.
	 * We'll use the base address we get later when we loop through all
	 * the pages.
	 */
	if (buftype == DMA_OTYP_PAGES) {
		pp = dmar_object->dmao_obj.pp_obj.pp_pp;
		offset = dmar_object->dmao_obj.pp_obj.pp_offset &
		    MMU_PAGEOFFSET;
		paddr = pfn_to_pa(pp->p_pagenum) + offset;
		psize = MIN(size, (MMU_PAGESIZE - offset));
		pp = pp->p_next;
		sglinfo->si_asp = NULL;
	} else if (pplist != NULL) {
		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
		if (sglinfo->si_asp == NULL) {
			sglinfo->si_asp = &kas;
		}
		paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
		paddr += offset;
		psize = MIN(size, (MMU_PAGESIZE - offset));
		pcnt++;
	} else {
		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
		if (sglinfo->si_asp == NULL) {
			sglinfo->si_asp = &kas;
		}
		paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat, vaddr));
		paddr += offset;
		psize = MIN(size, (MMU_PAGESIZE - offset));
		vaddr += psize;
	}

	raddr = ROOTNEX_PADDR_TO_RBASE(paddr);

	if ((raddr + psize) > sglinfo->si_segmask) {
		upper_addr = B_TRUE;
	} else {
		lower_addr = B_TRUE;
	}
	size -= psize;

	/*
	 * Walk through the rest of the pages in the buffer. Track to see
	 * if we have pages on both sides of the segment boundary.
	 */
	while (size > 0) {
		/* partial or full page */
		psize = MIN(size, MMU_PAGESIZE);

		if (buftype == DMA_OTYP_PAGES) {
			/* get the paddr from the page_t */
			ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
			paddr = pfn_to_pa(pp->p_pagenum);
			pp = pp->p_next;
		} else if (pplist != NULL) {
			/* index into the array of page_t's to get the paddr */
			ASSERT(!PP_ISFREE(pplist[pcnt]));
			paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
			pcnt++;
		} else {
			/* call into the VM to get the paddr */
			paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat,
			    vaddr));
			vaddr += psize;
		}

		raddr = ROOTNEX_PADDR_TO_RBASE(paddr);

		if ((raddr + psize) > sglinfo->si_segmask) {
			upper_addr = B_TRUE;
		} else {
			lower_addr = B_TRUE;
		}

		/*
		 * if the buffer lives both above and below the segment
		 * boundary, or the current page is the page immediately
		 * after the segment, we will use a copy/bounce buffer for
		 * all pages in the buffer.
		 */
		if ((lower_addr && upper_addr) ||
		    (raddr == (sglinfo->si_segmask + 1))) {
			return (B_TRUE);
		}

		size -= psize;
	}

	return (B_FALSE);
}
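/*
 * Descriptive note (not in the original source): the result of this routine
 * feeds rootnex_get_sgl() below. Once si_bounce_on_seg is set, any page that
 * ends above si_segmask is redirected through the copy buffer, so the device
 * only ever sees addresses on one side of the segment boundary.
 */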
/*
 * rootnex_get_sgl()
 *    Called in bind fastpath to get the sgl. Most of this will be replaced
 *    with a call to the vm layer when vm2.0 comes around...
 */
static void
rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
    rootnex_sglinfo_t *sglinfo)
{
	ddi_dma_atyp_t buftype;
	rootnex_addr_t raddr;
	uint64_t last_page;
	uint64_t offset;
	uint64_t addrhi;
	uint64_t addrlo;
	uint64_t maxseg;
	page_t **pplist;
	uint64_t paddr;
	uint32_t psize;
	uint32_t size;
	caddr_t vaddr;
	uint_t pcnt;
	page_t *pp;
	uint_t cnt;

	/* shortcuts */
	pplist = dmar_object->dmao_obj.virt_obj.v_priv;
	vaddr = dmar_object->dmao_obj.virt_obj.v_addr;
	maxseg = sglinfo->si_max_cookie_size;
	buftype = dmar_object->dmao_type;
	addrhi = sglinfo->si_max_addr;
	addrlo = sglinfo->si_min_addr;
	size = dmar_object->dmao_size;

	pcnt = 0;
	cnt = 0;

	/*
	 * check to see if we need to use the copy buffer for pages over
	 * the segment attr.
	 */
	sglinfo->si_bounce_on_seg = B_FALSE;
	if (sglinfo->si_flags & _DDI_DMA_BOUNCE_ON_SEG) {
		sglinfo->si_bounce_on_seg = rootnex_need_bounce_seg(
		    dmar_object, sglinfo);
	}

	/*
	 * if we were passed down a linked list of pages, i.e. pointer to
	 * page_t, use this to get our physical address and buf offset.
	 */
	if (buftype == DMA_OTYP_PAGES) {
		pp = dmar_object->dmao_obj.pp_obj.pp_pp;
		ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
		offset = dmar_object->dmao_obj.pp_obj.pp_offset &
		    MMU_PAGEOFFSET;
		paddr = pfn_to_pa(pp->p_pagenum) + offset;
		psize = MIN(size, (MMU_PAGESIZE - offset));
		pp = pp->p_next;
		sglinfo->si_asp = NULL;

	/*
	 * We weren't passed down a linked list of pages, but if we were passed
	 * down an array of pages, use this to get our physical address and buf
	 * offset.
	 */
	} else if (pplist != NULL) {
		ASSERT((buftype == DMA_OTYP_VADDR) ||
		    (buftype == DMA_OTYP_BUFVADDR));

		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
		if (sglinfo->si_asp == NULL) {
			sglinfo->si_asp = &kas;
		}

		ASSERT(!PP_ISFREE(pplist[pcnt]));
		paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
		paddr += offset;
		psize = MIN(size, (MMU_PAGESIZE - offset));
		pcnt++;

	/*
	 * All we have is a virtual address, we'll need to call into the VM
	 * to get the physical address.
	 */
	} else {
		ASSERT((buftype == DMA_OTYP_VADDR) ||
		    (buftype == DMA_OTYP_BUFVADDR));

		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
		if (sglinfo->si_asp == NULL) {
			sglinfo->si_asp = &kas;
		}

		paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat, vaddr));
		paddr += offset;
		psize = MIN(size, (MMU_PAGESIZE - offset));
		vaddr += psize;
	}

	raddr = ROOTNEX_PADDR_TO_RBASE(paddr);

	/*
	 * Setup the first cookie with the physical address of the page and the
	 * size of the page (which takes into account the initial offset into
	 * the page).
	 */
	sgl[cnt].dmac_laddress = raddr;
	sgl[cnt].dmac_size = psize;
	sgl[cnt].dmac_type = 0;

	/*
	 * Save away the buffer offset into the page. We'll need this later in
	 * the copy buffer code to help figure out the page index within the
	 * buffer and the offset into the current page.
	 */
	sglinfo->si_buf_offset = offset;

	/*
	 * If we are using the copy buffer for anything over the segment
	 * boundary, and this page is over the segment boundary.
	 *   OR
	 * if the DMA engine can't reach the physical address.
	 */
	if (((sglinfo->si_bounce_on_seg) &&
	    ((raddr + psize) > sglinfo->si_segmask)) ||
	    ((raddr < addrlo) || ((raddr + psize) > addrhi))) {
		/*
		 * Increase how much copy buffer we use. We always increase by
		 * pagesize so we don't have to worry about converting offsets.
		 * Set a flag in the cookies dmac_type to indicate that it uses
		 * the copy buffer. If this isn't the last cookie, go to the
		 * next cookie (since we separate each page which uses the copy
		 * buffer in case the copy buffer is not physically contiguous).
		 */
		sglinfo->si_copybuf_req += MMU_PAGESIZE;
		sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF;
		if ((cnt + 1) < sglinfo->si_max_pages) {
			cnt++;
			sgl[cnt].dmac_laddress = 0;
			sgl[cnt].dmac_size = 0;
			sgl[cnt].dmac_type = 0;
		}
	}

	/*
	 * save this page's physical address so we can figure out if the next
	 * page is physically contiguous. Keep decrementing size until we are
	 * done with the buffer.
	 */
	last_page = raddr & MMU_PAGEMASK;
	size -= psize;

	while (size > 0) {
		/* Get the size for this page (i.e. partial or full page) */
		psize = MIN(size, MMU_PAGESIZE);

		if (buftype == DMA_OTYP_PAGES) {
			/* get the paddr from the page_t */
			ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
			paddr = pfn_to_pa(pp->p_pagenum);
			pp = pp->p_next;
		} else if (pplist != NULL) {
			/* index into the array of page_t's to get the paddr */
			ASSERT(!PP_ISFREE(pplist[pcnt]));
			paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
			pcnt++;
		} else {
			/* call into the VM to get the paddr */
			paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat,
			    vaddr));
			vaddr += psize;
		}

		raddr = ROOTNEX_PADDR_TO_RBASE(paddr);

		/*
		 * If we are using the copy buffer for anything over the
		 * segment boundary, and this page is over the segment
		 * boundary.
		 *   OR
		 * if the DMA engine can't reach the physical address.
		 */
		if (((sglinfo->si_bounce_on_seg) &&
		    ((raddr + psize) > sglinfo->si_segmask)) ||
		    ((raddr < addrlo) || ((raddr + psize) > addrhi))) {

			sglinfo->si_copybuf_req += MMU_PAGESIZE;

			/*
			 * if there is something in the current cookie, go to
			 * the next one. We only want one page in a cookie which
			 * uses the copybuf since the copybuf doesn't have to
			 * be physically contiguous.
			 */
			if (sgl[cnt].dmac_size != 0) {
				cnt++;
			}
			sgl[cnt].dmac_laddress = raddr;
			sgl[cnt].dmac_size = psize;
#if defined(__amd64)
			sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF;
#else
			/*
			 * save the buf offset for 32-bit kernel. used in the
			 * obsoleted interfaces.
			 */
			sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF |
			    (dmar_object->dmao_size - size);
#endif
			/* if this isn't the last cookie, go to the next one */
			if ((cnt + 1) < sglinfo->si_max_pages) {
				cnt++;
				sgl[cnt].dmac_laddress = 0;
				sgl[cnt].dmac_size = 0;
				sgl[cnt].dmac_type = 0;
			}

		/*
		 * this page didn't need the copy buffer, if it's not physically
		 * contiguous, or it would put us over a segment boundary, or it
		 * puts us over the max cookie size, or the current sgl doesn't
		 * have anything in it.
		 */
		} else if (((last_page + MMU_PAGESIZE) != raddr) ||
		    !(raddr & sglinfo->si_segmask) ||
		    ((sgl[cnt].dmac_size + psize) > maxseg) ||
		    (sgl[cnt].dmac_size == 0)) {
			/*
			 * if we're not already in a new cookie, go to the next
			 * cookie.
			 */
			if (sgl[cnt].dmac_size != 0) {
				cnt++;
			}

			/* save the cookie information */
			sgl[cnt].dmac_laddress = raddr;
			sgl[cnt].dmac_size = psize;
#if defined(__amd64)
			sgl[cnt].dmac_type = 0;
#else
			/*
			 * save the buf offset for 32-bit kernel. used in the
			 * obsoleted interfaces.
			 */
			sgl[cnt].dmac_type = dmar_object->dmao_size - size;
#endif

		/*
		 * this page didn't need the copy buffer, it is physically
		 * contiguous with the last page, and it's <= the max cookie
		 * size.
		 */
		} else {
			sgl[cnt].dmac_size += psize;

			/*
			 * if this exactly == the maximum cookie size, and
			 * it isn't the last cookie, go to the next cookie.
			 */
			if (((sgl[cnt].dmac_size + psize) == maxseg) &&
			    ((cnt + 1) < sglinfo->si_max_pages)) {
				cnt++;
				sgl[cnt].dmac_laddress = 0;
				sgl[cnt].dmac_size = 0;
				sgl[cnt].dmac_type = 0;
			}
		}

		/*
		 * save this page's physical address so we can figure out if the
		 * next page is physically contiguous. Keep decrementing size
		 * until we are done with the buffer.
		 */
		last_page = raddr;
		size -= psize;
	}

	/* we're done, save away how many cookies the sgl has */
	if (sgl[cnt].dmac_size == 0) {
		ASSERT(cnt < sglinfo->si_max_pages);
		sglinfo->si_sgl_size = cnt;
	} else {
		sglinfo->si_sgl_size = cnt + 1;
	}
}
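/*
 * Summary of the loop above (descriptive comment, not in the original
 * source): a new cookie is started whenever a page uses the copy buffer, is
 * not physically contiguous with the previous page, crosses the segment
 * mask, or would push the current cookie past si_max_cookie_size; otherwise
 * the page is merged into the current cookie.
 */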
static void
rootnex_dvma_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
    rootnex_sglinfo_t *sglinfo)
{
	uint64_t offset;
	uint64_t maxseg;
	uint64_t dvaddr;
	struct dvmaseg *dvs;
	uint64_t paddr;
	uint32_t psize, ssize;
	uint32_t size;
	uint_t cnt;
	int physcontig;

	ASSERT(dmar_object->dmao_type == DMA_OTYP_DVADDR);

	/* shortcuts */
	maxseg = sglinfo->si_max_cookie_size;
	size = dmar_object->dmao_size;

	cnt = 0;
	sglinfo->si_bounce_on_seg = B_FALSE;

	dvs = dmar_object->dmao_obj.dvma_obj.dv_seg;
	offset = dmar_object->dmao_obj.dvma_obj.dv_off;
	ssize = dvs->dvs_len;
	paddr = dvs->dvs_start;
	paddr += offset;
	psize = MIN(ssize, (maxseg - offset));
	dvaddr = paddr + psize;
	ssize -= psize;

	sgl[cnt].dmac_laddress = paddr;
	sgl[cnt].dmac_size = psize;
	sgl[cnt].dmac_type = 0;

	size -= psize;
	while (size > 0) {
		if (ssize == 0) {
			dvs++;
			ssize = dvs->dvs_len;
			dvaddr = dvs->dvs_start;
			physcontig = 0;
		} else {
			physcontig = 1;
		}

		psize = MIN(ssize, maxseg);
		paddr = dvaddr;
		dvaddr += psize;
		ssize -= psize;

		if (!physcontig || !(paddr & sglinfo->si_segmask) ||
		    ((sgl[cnt].dmac_size + psize) > maxseg) ||
		    (sgl[cnt].dmac_size == 0)) {
			/*
			 * if we're not already in a new cookie, go to the next
			 * cookie.
			 */
			if (sgl[cnt].dmac_size != 0) {
				cnt++;
			}

			/* save the cookie information */
			sgl[cnt].dmac_laddress = paddr;
			sgl[cnt].dmac_size = psize;
			sgl[cnt].dmac_type = 0;
		} else {
			sgl[cnt].dmac_size += psize;

			/*
			 * if this exactly == the maximum cookie size, and
			 * it isn't the last cookie, go to the next cookie.
			 */
			if (((sgl[cnt].dmac_size + psize) == maxseg) &&
			    ((cnt + 1) < sglinfo->si_max_pages)) {
				cnt++;
				sgl[cnt].dmac_laddress = 0;
				sgl[cnt].dmac_size = 0;
				sgl[cnt].dmac_type = 0;
			}
		}
		size -= psize;
	}

	/* we're done, save away how many cookies the sgl has */
	if (sgl[cnt].dmac_size == 0) {
		sglinfo->si_sgl_size = cnt;
	} else {
		sglinfo->si_sgl_size = cnt + 1;
	}
}
/*
 * rootnex_bind_slowpath()
 *    Call in the bind path if the calling driver can't use the sgl without
 *    modifying it. We either need to use the copy buffer and/or we will end up
 *    with a partial bind.
 */
static int
rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
    rootnex_dma_t *dma, ddi_dma_attr_t *attr, ddi_dma_obj_t *dmao, int kmflag)
{
	rootnex_sglinfo_t *sinfo;
	rootnex_window_t *window;
	ddi_dma_cookie_t *cookie;
	size_t copybuf_used;
	size_t dmac_size;
	boolean_t partial;
	off_t cur_offset;
	page_t *cur_pp;
	major_t mnum;
	int e;
	int i;

	sinfo = &dma->dp_sglinfo;
	copybuf_used = 0;
	partial = B_FALSE;

	/*
	 * If we're using the copybuf, set the copybuf state in dma struct.
	 * Needs to be first since it sets the copy buffer size.
	 */
	if (sinfo->si_copybuf_req != 0) {
		e = rootnex_setup_copybuf(hp, dmareq, dma, attr);
		if (e != DDI_SUCCESS) {
			return (e);
		}
	} else {
		dma->dp_copybuf_size = 0;
	}

	/*
	 * Figure out if we need to do a partial mapping. If so, figure out
	 * if we need to trim the buffers when we munge the sgl.
	 */
	if ((dma->dp_copybuf_size < sinfo->si_copybuf_req) ||
	    (dmao->dmao_size > dma->dp_maxxfer) ||
	    ((unsigned)attr->dma_attr_sgllen < sinfo->si_sgl_size)) {
		dma->dp_partial_required = B_TRUE;
		if (attr->dma_attr_granular != 1) {
			dma->dp_trim_required = B_TRUE;
		}
	} else {
		dma->dp_partial_required = B_FALSE;
		dma->dp_trim_required = B_FALSE;
	}

	/* If we need to do a partial bind, make sure the driver supports it */
	if (dma->dp_partial_required &&
	    !(dmareq->dmar_flags & DDI_DMA_PARTIAL)) {

		mnum = ddi_driver_major(dma->dp_dip);
		/*
		 * patchable which allows us to print one warning per major
		 * number.
		 */
		if ((rootnex_bind_warn) &&
		    ((rootnex_warn_list[mnum] & ROOTNEX_BIND_WARNING) == 0)) {
			rootnex_warn_list[mnum] |= ROOTNEX_BIND_WARNING;
			cmn_err(CE_WARN, "!%s: coding error detected, the "
			    "driver is using ddi_dma_attr(9S) incorrectly. "
			    "There is a small risk of data corruption in "
			    "particular with large I/Os. The driver should be "
			    "replaced with a corrected version for proper "
			    "system operation. To disable this warning, add "
			    "'set rootnex:rootnex_bind_warn=0' to "
			    "/etc/system(4).", ddi_driver_name(dma->dp_dip));
		}
		return (DDI_DMA_TOOBIG);
	}

	/*
	 * we might need multiple windows, setup state to handle them. In this
	 * code path, we will have at least one window.
	 */
	e = rootnex_setup_windows(hp, dma, attr, dmao, kmflag);
	if (e != DDI_SUCCESS) {
		rootnex_teardown_copybuf(dma);
		return (e);
	}

	window = &dma->dp_window[0];
	cookie = &dma->dp_cookies[0];
	cur_offset = 0;
	rootnex_init_win(hp, dma, window, cookie, cur_offset);
	if (dmao->dmao_type == DMA_OTYP_PAGES) {
		cur_pp = dmareq->dmar_object.dmao_obj.pp_obj.pp_pp;
	}

	/* loop though all the cookies we got back from get_sgl() */
	for (i = 0; i < sinfo->si_sgl_size; i++) {
		/*
		 * If we're using the copy buffer, check this cookie and setup
		 * its associated copy buffer state. If this cookie uses the
		 * copy buffer, make sure we sync this window during dma_sync.
		 */
		if (dma->dp_copybuf_size > 0) {
			rootnex_setup_cookie(dmao, dma, cookie,
			    cur_offset, &copybuf_used, &cur_pp);
			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
				window->wd_dosync = B_TRUE;
			}
		}

		/*
		 * save away the cookie size, since it could be modified in
		 * the windowing code.
		 */
		dmac_size = cookie->dmac_size;

		/* if we went over max copybuf size */
		if (dma->dp_copybuf_size &&
		    (copybuf_used > dma->dp_copybuf_size)) {
			partial = B_TRUE;
			e = rootnex_copybuf_window_boundary(hp, dma, &window,
			    cookie, cur_offset, &copybuf_used);
			if (e != DDI_SUCCESS) {
				rootnex_teardown_copybuf(dma);
				rootnex_teardown_windows(dma);
				return (e);
			}

			/*
			 * if the cookie uses the copy buffer, make sure the
			 * new window we just moved to is set to sync.
			 */
			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
				window->wd_dosync = B_TRUE;
			}
			ROOTNEX_DPROBE1(rootnex__copybuf__window, dev_info_t *,
			    dma->dp_dip);

		/* if the cookie cnt == max sgllen, move to the next window */
		} else if (window->wd_cookie_cnt >=
		    (unsigned)attr->dma_attr_sgllen) {
			partial = B_TRUE;
			ASSERT(window->wd_cookie_cnt == attr->dma_attr_sgllen);
			e = rootnex_sgllen_window_boundary(hp, dma, &window,
			    cookie, attr, cur_offset);
			if (e != DDI_SUCCESS) {
				rootnex_teardown_copybuf(dma);
				rootnex_teardown_windows(dma);
				return (e);
			}

			/*
			 * if the cookie uses the copy buffer, make sure the
			 * new window we just moved to is set to sync.
			 */
			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
				window->wd_dosync = B_TRUE;
			}
			ROOTNEX_DPROBE1(rootnex__sgllen__window, dev_info_t *,
			    dma->dp_dip);

		/* else if we will be over maxxfer */
		} else if ((window->wd_size + dmac_size) >
		    dma->dp_maxxfer) {
			partial = B_TRUE;
			e = rootnex_maxxfer_window_boundary(hp, dma, &window,
			    cookie);
			if (e != DDI_SUCCESS) {
				rootnex_teardown_copybuf(dma);
				rootnex_teardown_windows(dma);
				return (e);
			}

			/*
			 * if the cookie uses the copy buffer, make sure the
			 * new window we just moved to is set to sync.
			 */
			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
				window->wd_dosync = B_TRUE;
			}
			ROOTNEX_DPROBE1(rootnex__maxxfer__window, dev_info_t *,
			    dma->dp_dip);

		/* else this cookie fits in the current window */
		} else {
			window->wd_cookie_cnt++;
			window->wd_size += dmac_size;
		}

		/* track our offset into the buffer, go to the next cookie */
		ASSERT(dmac_size <= dmao->dmao_size);
		ASSERT(cookie->dmac_size <= dmac_size);
		cur_offset += dmac_size;
		cookie++;
	}

	/* if we ended up with a zero sized window in the end, clean it up */
	if (window->wd_size == 0) {
		hp->dmai_nwin--;
		window--;
	}

	ASSERT(window->wd_trim.tr_trim_last == B_FALSE);

	if (!partial) {
		return (DDI_DMA_MAPPED);
	}

	ASSERT(dma->dp_partial_required);
	return (DDI_DMA_PARTIAL_MAP);
}
/*
 * rootnex_setup_copybuf()
 *    Called in bind slowpath. Figures out if we're going to use the copy
 *    buffer, and if we do, sets up the basic state to handle it.
 */
static int
rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
    rootnex_dma_t *dma, ddi_dma_attr_t *attr)
{
	rootnex_sglinfo_t *sinfo;
	ddi_dma_attr_t lattr;
	size_t max_copybuf;
	int cansleep;
	int e;
#if !defined(__amd64)
	int vmflag;
#endif

	ASSERT(!dma->dp_dvma_used);

	sinfo = &dma->dp_sglinfo;

	/* read this first so it's consistent through the routine */
	max_copybuf = i_ddi_copybuf_size() & MMU_PAGEMASK;

	/* We need to call into the rootnex on ddi_dma_sync() */
	hp->dmai_rflags &= ~DMP_NOSYNC;

	/* make sure the copybuf size <= the max size */
	dma->dp_copybuf_size = MIN(sinfo->si_copybuf_req, max_copybuf);
	ASSERT((dma->dp_copybuf_size & MMU_PAGEOFFSET) == 0);

#if !defined(__amd64)
	/*
	 * if we don't have kva space to copy to/from, allocate the KVA space
	 * now. We only do this for the 32-bit kernel. We use seg kpm space for
	 * the 64-bit kernel.
	 */
	if ((dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) ||
	    (dmareq->dmar_object.dmao_obj.virt_obj.v_as != NULL)) {

		/* convert the sleep flags */
		if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
			vmflag = VM_SLEEP;
		} else {
			vmflag = VM_NOSLEEP;
		}

		/* allocate Kernel VA space that we can bcopy to/from */
		dma->dp_kva = vmem_alloc(heap_arena, dma->dp_copybuf_size,
		    vmflag);
		if (dma->dp_kva == NULL) {
			return (DDI_DMA_NORESOURCES);
		}
	}
#endif

	/* convert the sleep flags */
	if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
		cansleep = 1;
	} else {
		cansleep = 0;
	}

	/*
	 * Allocate the actual copy buffer. This needs to fit within the DMA
	 * engine limits, so we can't use kmem_alloc... We don't need
	 * contiguous memory (sgllen) since we will be forcing windows on
	 * sgllen anyway.
	 */
	lattr = *attr;
	lattr.dma_attr_align = MMU_PAGESIZE;
	lattr.dma_attr_sgllen = -1;	/* no limit */
	/*
	 * if we're using the copy buffer because of seg, use that for our
	 * upper address limit.
	 */
	if (sinfo->si_bounce_on_seg) {
		lattr.dma_attr_addr_hi = lattr.dma_attr_seg;
	}
	e = i_ddi_mem_alloc(dma->dp_dip, &lattr, dma->dp_copybuf_size, cansleep,
	    0, NULL, &dma->dp_cbaddr, &dma->dp_cbsize, NULL);
	if (e != DDI_SUCCESS) {
#if !defined(__amd64)
		if (dma->dp_kva != NULL) {
			vmem_free(heap_arena, dma->dp_kva,
			    dma->dp_copybuf_size);
		}
#endif
		return (DDI_DMA_NORESOURCES);
	}

	ROOTNEX_DPROBE2(rootnex__alloc__copybuf, dev_info_t *, dma->dp_dip,
	    size_t, dma->dp_copybuf_size);

	return (DDI_SUCCESS);
}
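/*
 * Example (illustrative only, not in the original source): for a device
 * limited to low addresses, lattr inherits dma_attr_addr_hi from the
 * driver's attributes, so i_ddi_mem_alloc() hands back a copy buffer the
 * device can actually reach; this is why kmem_alloc() cannot be used here.
 */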
/*
 * rootnex_setup_windows()
 *    Called in bind slowpath to setup the window state. We always have windows
 *    in the slowpath. Even if the window count = 1.
 */
static int
rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
    ddi_dma_attr_t *attr, ddi_dma_obj_t *dmao, int kmflag)
{
	rootnex_window_t *windowp;
	rootnex_sglinfo_t *sinfo;
	size_t copy_state_size;
	size_t win_state_size;
	size_t state_available;
	size_t space_needed;
	uint_t copybuf_win;
	uint_t maxxfer_win;
	size_t space_used;
	uint_t sglwin;

	sinfo = &dma->dp_sglinfo;

	dma->dp_current_win = 0;
	hp->dmai_nwin = 0;

	/* If we don't need to do a partial, we only have one window */
	if (!dma->dp_partial_required) {
		dma->dp_max_win = 1;

	/*
	 * we need multiple windows, need to figure out the worst case number
	 * of windows.
	 */
	} else {
		/*
		 * if we need windows because we need more copy buffer than
		 * we allow, the worst case number of windows we could need
		 * here would be (copybuf space required / copybuf space that
		 * we have) plus one for remainder, and plus 2 to handle the
		 * extra pages on the trim for the first and last pages of the
		 * buffer (a page is the minimum window size so under the right
		 * attr settings, you could have a window for each page).
		 * The last page will only be hit here if the size is not a
		 * multiple of the granularity (which theoretically shouldn't
		 * be the case but never has been enforced, so we could have
		 * broken things without it).
		 */
		if (sinfo->si_copybuf_req > dma->dp_copybuf_size) {
			ASSERT(dma->dp_copybuf_size > 0);
			copybuf_win = (sinfo->si_copybuf_req /
			    dma->dp_copybuf_size) + 1 + 2;
		} else {
			copybuf_win = 0;
		}

		/*
		 * if we need windows because we have more cookies than the H/W
		 * can handle, the number of windows we would need here would
		 * be (cookie count / cookies count H/W supports minus 1[for
		 * trim]) plus one for remainder.
		 */
		if ((unsigned)attr->dma_attr_sgllen < sinfo->si_sgl_size) {
			sglwin = (sinfo->si_sgl_size /
			    (attr->dma_attr_sgllen - 1)) + 1;
		} else {
			sglwin = 0;
		}

		/*
		 * if we need windows because we're binding more memory than the
		 * H/W can transfer at once, the number of windows we would need
		 * here would be (xfer count / max xfer H/W supports) plus one
		 * for remainder, and plus 2 to handle the extra pages on the
		 * trim (see above comment about trim)
		 */
		if (dmao->dmao_size > dma->dp_maxxfer) {
			maxxfer_win = (dmao->dmao_size /
			    dma->dp_maxxfer) + 1 + 2;
		} else {
			maxxfer_win = 0;
		}
		dma->dp_max_win = copybuf_win + sglwin + maxxfer_win;
		ASSERT(dma->dp_max_win > 0);
	}
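	/*
	 * Worked example with illustrative numbers (not from the original
	 * source): with si_copybuf_req of 1 MB, a 256 KB copy buffer, an sgl
	 * that fits the device's sgllen, and a transfer below dp_maxxfer,
	 * copybuf_win = (1 MB / 256 KB) + 1 + 2 = 7 while sglwin and
	 * maxxfer_win stay 0, so dp_max_win is estimated at 7 windows.
	 */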
	win_state_size = dma->dp_max_win * sizeof (rootnex_window_t);

	/*
	 * Get space for window and potential copy buffer state. Before we
	 * go and allocate memory, see if we can get away with using what's
	 * left in the pre-allocated state or the dynamically allocated sgl.
	 */
	space_used = (uintptr_t)(sinfo->si_sgl_size *
	    sizeof (ddi_dma_cookie_t));

	/* if we dynamically allocated space for the cookies */
	if (dma->dp_need_to_free_cookie) {
		/* if we have more space in the pre-allocated buffer, use it */
		ASSERT(space_used <= dma->dp_cookie_size);
		if ((dma->dp_cookie_size - space_used) <=
		    rootnex_state->r_prealloc_size) {
			state_available = rootnex_state->r_prealloc_size;
			windowp = (rootnex_window_t *)dma->dp_prealloc_buffer;

		/*
		 * else, we have more free space in the dynamically allocated
		 * buffer, i.e. the buffer wasn't worst case fragmented so we
		 * didn't need a lot of cookies.
		 */
		} else {
			state_available = dma->dp_cookie_size - space_used;
			windowp = (rootnex_window_t *)
			    &dma->dp_cookies[sinfo->si_sgl_size];
		}

	/* we used the pre-allocated buffer */
	} else {
		ASSERT(space_used <= rootnex_state->r_prealloc_size);
		state_available = rootnex_state->r_prealloc_size - space_used;
		windowp = (rootnex_window_t *)
		    &dma->dp_cookies[sinfo->si_sgl_size];
	}

	/*
	 * figure out how much state we need to track the copy buffer. Add an
	 * additional 8 bytes for pointer alignment later.
	 */
	if (dma->dp_copybuf_size > 0) {
		copy_state_size = sinfo->si_max_pages *
		    sizeof (rootnex_pgmap_t);
	} else {
		copy_state_size = 0;
	}
	/* add an additional 8 bytes for pointer alignment */
	space_needed = win_state_size + copy_state_size + 0x8;

	/* if we have enough space already, use it */
	if (state_available >= space_needed) {
		dma->dp_window = windowp;
		dma->dp_need_to_free_window = B_FALSE;

	/* not enough space, need to allocate more. */
	} else {
		dma->dp_window = kmem_alloc(space_needed, kmflag);
		if (dma->dp_window == NULL) {
			return (DDI_DMA_NORESOURCES);
		}
		dma->dp_need_to_free_window = B_TRUE;
		dma->dp_window_size = space_needed;
		ROOTNEX_DPROBE2(rootnex__bind__sp__alloc, dev_info_t *,
		    dma->dp_dip, size_t, space_needed);
	}

	/*
	 * we allocate copy buffer state and window state at the same time.
	 * setup our copy buffer state pointers. Make sure it's aligned.
	 */
	if (dma->dp_copybuf_size > 0) {
		dma->dp_pgmap = (rootnex_pgmap_t *)(((uintptr_t)
		    &dma->dp_window[dma->dp_max_win] + 0x7) & ~0x7);

#if !defined(__amd64)
		/*
		 * make sure all pm_mapped, pm_vaddr, and pm_pp are set to
		 * false/NULL. Should be quicker to bzero vs loop and set.
		 */
		bzero(dma->dp_pgmap, copy_state_size);
#endif
	} else {
		dma->dp_pgmap = NULL;
	}

	return (DDI_SUCCESS);
}
/*
 * rootnex_teardown_copybuf()
 *    cleans up after rootnex_setup_copybuf()
 */
static void
rootnex_teardown_copybuf(rootnex_dma_t *dma)
{
#if !defined(__amd64)
	int i;

	/*
	 * if we allocated kernel heap VMEM space, go through all the pages and
	 * unmap any of the ones that are mapped into the kernel heap VMEM
	 * arena. Then free the VMEM space.
	 */
	if (dma->dp_kva != NULL) {
		for (i = 0; i < dma->dp_sglinfo.si_max_pages; i++) {
			if (dma->dp_pgmap[i].pm_mapped) {
				hat_unload(kas.a_hat, dma->dp_pgmap[i].pm_kaddr,
				    MMU_PAGESIZE, HAT_UNLOAD);
				dma->dp_pgmap[i].pm_mapped = B_FALSE;
			}
		}

		vmem_free(heap_arena, dma->dp_kva, dma->dp_copybuf_size);
	}
#endif

	/* if we allocated a copy buffer, free it */
	if (dma->dp_cbaddr != NULL) {
		i_ddi_mem_free(dma->dp_cbaddr, NULL);
	}
}
/*
 * rootnex_teardown_windows()
 *    cleans up after rootnex_setup_windows()
 */
static void
rootnex_teardown_windows(rootnex_dma_t *dma)
{
	/*
	 * if we had to allocate window state on the last bind (because we
	 * didn't have enough pre-allocated space in the handle), free it.
	 */
	if (dma->dp_need_to_free_window) {
		kmem_free(dma->dp_window, dma->dp_window_size);
	}
}
/*
 * rootnex_init_win()
 *    Called in bind slow path during creation of a new window. Initializes
 *    window state to default values.
 */
/*ARGSUSED*/
static void
rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
    rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset)
{
	hp->dmai_nwin++;
	window->wd_dosync = B_FALSE;
	window->wd_offset = cur_offset;
	window->wd_size = 0;
	window->wd_first_cookie = cookie;
	window->wd_cookie_cnt = 0;
	window->wd_trim.tr_trim_first = B_FALSE;
	window->wd_trim.tr_trim_last = B_FALSE;
	window->wd_trim.tr_first_copybuf_win = B_FALSE;
	window->wd_trim.tr_last_copybuf_win = B_FALSE;
#if !defined(__amd64)
	window->wd_remap_copybuf = dma->dp_cb_remaping;
#endif
}
/*
 * rootnex_setup_cookie()
 *    Called in the bind slow path when the sgl uses the copy buffer. If any of
 *    the sgl uses the copy buffer, we need to go through each cookie, figure
 *    out if it uses the copy buffer, and if it does, save away everything we'll
 *    need during sync.
 */
static void
rootnex_setup_cookie(ddi_dma_obj_t *dmar_object, rootnex_dma_t *dma,
    ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used,
    page_t **cur_pp)
{
	boolean_t copybuf_sz_power_2;
	rootnex_sglinfo_t *sinfo;
	uint64_t paddr;
	uint_t pidx;
	uint_t pcnt;
	off_t poff;
#if defined(__amd64)
	pfn_t pfn;
#else
	page_t **pplist;
#endif

	ASSERT(dmar_object->dmao_type != DMA_OTYP_DVADDR);

	sinfo = &dma->dp_sglinfo;

	/*
	 * Calculate the page index relative to the start of the buffer. The
	 * index to the current page for our buffer is the offset into the
	 * first page of the buffer plus our current offset into the buffer
	 * itself, shifted of course...
	 */
	pidx = (sinfo->si_buf_offset + cur_offset) >> MMU_PAGESHIFT;
	ASSERT(pidx < sinfo->si_max_pages);

	/* if this cookie uses the copy buffer */
	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
		/*
		 * NOTE: we know that since this cookie uses the copy buffer, it
		 * is <= MMU_PAGESIZE.
		 */

		/*
		 * get the offset into the page. For the 64-bit kernel, get the
		 * pfn which we'll use with seg kpm.
		 */
		poff = cookie->dmac_laddress & MMU_PAGEOFFSET;
#if defined(__amd64)
		/* mfn_to_pfn() is a NOP on i86pc */
		pfn = mfn_to_pfn(cookie->dmac_laddress >> MMU_PAGESHIFT);
#endif /* __amd64 */

		/* figure out if the copybuf size is a power of 2 */
		if (!ISP2(dma->dp_copybuf_size)) {
			copybuf_sz_power_2 = B_FALSE;
		} else {
			copybuf_sz_power_2 = B_TRUE;
		}

		/* This page uses the copy buffer */
		dma->dp_pgmap[pidx].pm_uses_copybuf = B_TRUE;

		/*
		 * save the copy buffer KVA that we'll use with this page.
		 * if we still fit within the copybuf, it's a simple add.
		 * otherwise, we need to wrap over using & or % accordingly.
		 */
		if ((*copybuf_used + MMU_PAGESIZE) <= dma->dp_copybuf_size) {
			dma->dp_pgmap[pidx].pm_cbaddr = dma->dp_cbaddr +
			    *copybuf_used;
		} else {
			if (copybuf_sz_power_2) {
				dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)(
				    (uintptr_t)dma->dp_cbaddr +
				    (*copybuf_used &
				    (dma->dp_copybuf_size - 1)));
			} else {
				dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)(
				    (uintptr_t)dma->dp_cbaddr +
				    (*copybuf_used % dma->dp_copybuf_size));
			}
		}

		/*
		 * overwrite the cookie physical address with the physical
		 * address of the copy buffer page that we will DMA into/out
		 * of.
		 */
		paddr = pfn_to_pa(hat_getpfnum(kas.a_hat,
		    dma->dp_pgmap[pidx].pm_cbaddr)) + poff;

		cookie->dmac_laddress = ROOTNEX_PADDR_TO_RBASE(paddr);

		/* if we have a kernel VA, it's easy, just save that address */
		if ((dmar_object->dmao_type != DMA_OTYP_PAGES) &&
		    (sinfo->si_asp == &kas)) {
			/*
			 * save away the page aligned virtual address of the
			 * driver buffer. Offsets are handled in the sync code.
			 */
			dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)(((uintptr_t)
			    dmar_object->dmao_obj.virt_obj.v_addr + cur_offset)
			    & MMU_PAGEMASK);
#if !defined(__amd64)
			/*
			 * we didn't need to, and will never need to map this
			 * page.
			 */
			dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
#endif

		/* we don't have a kernel VA. We need one for the bcopy. */
		} else {
#if defined(__amd64)
			/*
			 * for the 64-bit kernel, it's easy. We use seg kpm to
			 * get a Kernel VA for the corresponding pfn.
			 */
			dma->dp_pgmap[pidx].pm_kaddr = hat_kpm_pfn2va(pfn);
#else
			/*
			 * for the 32-bit kernel, this is a pain. First we'll
			 * save away the page_t or user VA for this page. This
			 * is needed in rootnex_dma_win() when we switch to a
			 * new window which requires us to re-map the copy
			 * buffer.
			 */
			pplist = dmar_object->dmao_obj.virt_obj.v_priv;
			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
				dma->dp_pgmap[pidx].pm_pp = *cur_pp;
				dma->dp_pgmap[pidx].pm_vaddr = NULL;
			} else if (pplist != NULL) {
				dma->dp_pgmap[pidx].pm_pp = pplist[pidx];
				dma->dp_pgmap[pidx].pm_vaddr = NULL;
			} else {
				dma->dp_pgmap[pidx].pm_pp = NULL;
				dma->dp_pgmap[pidx].pm_vaddr = (caddr_t)
				    (((uintptr_t)
				    dmar_object->dmao_obj.virt_obj.v_addr +
				    cur_offset) & MMU_PAGEMASK);
			}

			/*
			 * save away the page aligned virtual address which was
			 * allocated from the kernel heap arena (taking into
			 * account if we need more copy buffer than we alloced
			 * and use multiple windows to handle this, i.e. &,%).
			 * NOTE: there isn't any physical memory backing up this
			 * virtual address space currently.
			 */
			if ((*copybuf_used + MMU_PAGESIZE) <=
			    dma->dp_copybuf_size) {
				dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
				    (((uintptr_t)dma->dp_kva + *copybuf_used) &
				    MMU_PAGEMASK);
			} else {
				if (copybuf_sz_power_2) {
					dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
					    (((uintptr_t)dma->dp_kva +
					    (*copybuf_used &
					    (dma->dp_copybuf_size - 1))) &
					    MMU_PAGEMASK);
				} else {
					dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
					    (((uintptr_t)dma->dp_kva +
					    (*copybuf_used %
					    dma->dp_copybuf_size)) &
					    MMU_PAGEMASK);
				}
			}

			/*
			 * if we haven't used up the available copy buffer yet,
			 * map the kva to the physical page.
			 */
			if (!dma->dp_cb_remaping && ((*copybuf_used +
			    MMU_PAGESIZE) <= dma->dp_copybuf_size)) {
				dma->dp_pgmap[pidx].pm_mapped = B_TRUE;
				if (dma->dp_pgmap[pidx].pm_pp != NULL) {
					i86_pp_map(dma->dp_pgmap[pidx].pm_pp,
					    dma->dp_pgmap[pidx].pm_kaddr);
				} else {
					i86_va_map(dma->dp_pgmap[pidx].pm_vaddr,
					    sinfo->si_asp,
					    dma->dp_pgmap[pidx].pm_kaddr);
				}

			/*
			 * we've used up the available copy buffer, this page
			 * will have to be mapped during rootnex_dma_win() when
			 * we switch to a new window which requires a re-map of
			 * the copy buffer. (32-bit kernel only)
			 */
			} else {
				dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
			}
#endif
			/* go to the next page_t */
			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
				*cur_pp = (*cur_pp)->p_next;
			}
		}

		/* add to the copy buffer count */
		*copybuf_used += MMU_PAGESIZE;

	/*
	 * This cookie doesn't use the copy buffer. Walk through the pages this
	 * cookie occupies to reflect this.
	 */
	} else {
		/*
		 * figure out how many pages the cookie occupies. We need to
		 * use the original page offset of the buffer and the cookies
		 * offset in the buffer to do this.
		 */
		poff = (sinfo->si_buf_offset + cur_offset) & MMU_PAGEOFFSET;
		pcnt = mmu_btopr(cookie->dmac_size + poff);

		while (pcnt > 0) {
#if !defined(__amd64)
			/*
			 * the 32-bit kernel doesn't have seg kpm, so we need
			 * to map in the driver buffer (if it didn't come down
			 * with a kernel VA) on the fly. Since this page doesn't
			 * use the copy buffer, it's not, nor will it ever, be
			 * mapped in.
			 */
			dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
#endif
			dma->dp_pgmap[pidx].pm_uses_copybuf = B_FALSE;

			/*
			 * we need to update pidx and cur_pp or we'll lose
			 * track of where we are.
			 */
			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
				*cur_pp = (*cur_pp)->p_next;
			}
			pidx++;
			pcnt--;
		}
	}
}
/*
 * rootnex_sgllen_window_boundary()
 *    Called in the bind slow path when the next cookie causes us to exceed (in
 *    this case == since we start at 0 and sgllen starts at 1) the maximum sgl
 *    length supported by the DMA H/W.
 */
static int
rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
    rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, ddi_dma_attr_t *attr,
    off_t cur_offset)
{
	off_t new_offset;
	size_t trim_sz;
	off_t coffset;

	/*
	 * if we know we'll never have to trim, it's pretty easy. Just move to
	 * the next window and init it. We're done.
	 */
	if (!dma->dp_trim_required) {
		(*windowp)++;
		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
		(*windowp)->wd_cookie_cnt++;
		(*windowp)->wd_size = cookie->dmac_size;
		return (DDI_SUCCESS);
	}

	/* figure out how much we need to trim from the window */
	ASSERT(attr->dma_attr_granular != 0);
	if (dma->dp_granularity_power_2) {
		trim_sz = (*windowp)->wd_size & (attr->dma_attr_granular - 1);
	} else {
		trim_sz = (*windowp)->wd_size % attr->dma_attr_granular;
	}

	/* The window's a whole multiple of granularity. We're done */
	if (trim_sz == 0) {
		(*windowp)++;
		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
		(*windowp)->wd_cookie_cnt++;
		(*windowp)->wd_size = cookie->dmac_size;
		return (DDI_SUCCESS);
	}

	/*
	 * The window's not a whole multiple of granularity, since we know this
	 * is due to the sgllen, we need to go back to the last cookie and trim
	 * that one, add the left over part of the old cookie into the new
	 * window, and then add in the new cookie into the new window.
	 */

	/*
	 * make sure the driver isn't making us do something bad... Trimming and
	 * sgllen == 1 don't go together.
	 */
	if (attr->dma_attr_sgllen == 1) {
		return (DDI_DMA_NOMAPPING);
	}

	/*
	 * first, setup the current window to account for the trim. Need to go
	 * back to the last cookie for this.
	 */
	cookie--;
	(*windowp)->wd_trim.tr_trim_last = B_TRUE;
	(*windowp)->wd_trim.tr_last_cookie = cookie;
	(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
	ASSERT(cookie->dmac_size > trim_sz);
	(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
	(*windowp)->wd_size -= trim_sz;

	/* save the buffer offsets for the next window */
	coffset = cookie->dmac_size - trim_sz;
	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;

	/*
	 * set this now in case this is the first window. all other cases are
	 * set in dma_win()
	 */
	cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;

	/*
	 * initialize the next window using what's left over in the previous
	 * cookie.
	 */
	(*windowp)++;
	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
	(*windowp)->wd_cookie_cnt++;
	(*windowp)->wd_trim.tr_trim_first = B_TRUE;
	(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + coffset;
	(*windowp)->wd_trim.tr_first_size = trim_sz;
	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
		(*windowp)->wd_dosync = B_TRUE;
	}

	/*
	 * now go back to the current cookie and add it to the new window. set
	 * the new window size to what was left over from the previous cookie
	 * and what's in the current cookie.
	 */
	cookie++;
	(*windowp)->wd_cookie_cnt++;
	(*windowp)->wd_size = trim_sz + cookie->dmac_size;

	/*
	 * trim plus the next cookie could put us over maxxfer (a cookie can be
	 * a max size of maxxfer). Handle that case.
	 */
	if ((*windowp)->wd_size > dma->dp_maxxfer) {

		/*
		 * maxxfer is already a whole multiple of granularity, and this
		 * trim will be <= the previous trim (since a cookie can't be
		 * larger than maxxfer). Make things simple here.
		 */
		trim_sz = (*windowp)->wd_size - dma->dp_maxxfer;
		(*windowp)->wd_trim.tr_trim_last = B_TRUE;
		(*windowp)->wd_trim.tr_last_cookie = cookie;
		(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
		(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
		(*windowp)->wd_size -= trim_sz;
		ASSERT((*windowp)->wd_size == dma->dp_maxxfer);

		/* save the buffer offsets for the next window */
		coffset = cookie->dmac_size - trim_sz;
		new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;

		/* setup the next window */
		(*windowp)++;
		rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
		(*windowp)->wd_cookie_cnt++;
		(*windowp)->wd_trim.tr_trim_first = B_TRUE;
		(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress +
		    coffset;
		(*windowp)->wd_trim.tr_first_size = trim_sz;
	}

	return (DDI_SUCCESS);
}
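/*
 * Worked example with illustrative numbers (not from the original source):
 * with dma_attr_granular = 512 and a window size of 5000 bytes, trim_sz =
 * 5000 % 512 = 392, so the last cookie is shortened by 392 bytes and those
 * 392 bytes become the trimmed first portion of the next window.
 */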
/*
 * rootnex_copybuf_window_boundary()
 *    Called in bind slowpath when we get to a window boundary because we used
 *    up all the copy buffer that we have.
 */
static int
rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
    rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, off_t cur_offset,
    size_t *copybuf_used)
{
	rootnex_sglinfo_t *sinfo;
	off_t new_offset;
	size_t trim_sz;
	uint64_t paddr;
	off_t coffset;
	uint_t pidx;
	off_t poff;

	pidx = 0;
	sinfo = &dma->dp_sglinfo;

	/*
	 * the copy buffer should be a whole multiple of page size. We know that
	 * this cookie is <= MMU_PAGESIZE.
	 */
	ASSERT(cookie->dmac_size <= MMU_PAGESIZE);

	/*
	 * from now on, all new windows in this bind need to be re-mapped during
	 * ddi_dma_getwin() (32-bit kernel only). i.e. we ran out of copybuf
	 * space...
	 */
#if !defined(__amd64)
	dma->dp_cb_remaping = B_TRUE;
#endif

	/* reset copybuf used */
	*copybuf_used = 0;

	/*
	 * if we don't have to trim (since granularity is set to 1), go to the
	 * next window and add the current cookie to it. We know the current
	 * cookie uses the copy buffer since we're in this code path.
	 */
	if (!dma->dp_trim_required) {
		(*windowp)++;
		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);

		/* Add this cookie to the new window */
		(*windowp)->wd_cookie_cnt++;
		(*windowp)->wd_size += cookie->dmac_size;
		*copybuf_used += MMU_PAGESIZE;
		return (DDI_SUCCESS);
	}

	/*
	 * *** may need to trim, figure it out.
	 */

	/* figure out how much we need to trim from the window */
	if (dma->dp_granularity_power_2) {
		trim_sz = (*windowp)->wd_size &
		    (hp->dmai_attr.dma_attr_granular - 1);
	} else {
		trim_sz = (*windowp)->wd_size % hp->dmai_attr.dma_attr_granular;
	}

	/*
	 * if the window's a whole multiple of granularity, go to the next
	 * window, init it, then add in the current cookie. We know the current
	 * cookie uses the copy buffer since we're in this code path.
	 */
	if (trim_sz == 0) {
		(*windowp)++;
		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);

		/* Add this cookie to the new window */
		(*windowp)->wd_cookie_cnt++;
		(*windowp)->wd_size += cookie->dmac_size;
		*copybuf_used += MMU_PAGESIZE;
		return (DDI_SUCCESS);
	}

	/*
	 * *** We figured it out, we definitely need to trim
	 */

	/*
	 * make sure the driver isn't making us do something bad...
	 * Trimming and sgllen == 1 don't go together.
	 */
	if (hp->dmai_attr.dma_attr_sgllen == 1) {
		return (DDI_DMA_NOMAPPING);
	}

	/*
	 * first, setup the current window to account for the trim. Need to go
	 * back to the last cookie for this. Some of the last cookie will be in
	 * the current window, and some of the last cookie will be in the new
	 * window. All of the current cookie will be in the new window.
	 */
	cookie--;
	(*windowp)->wd_trim.tr_trim_last = B_TRUE;
	(*windowp)->wd_trim.tr_last_cookie = cookie;
	(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
	ASSERT(cookie->dmac_size > trim_sz);
	(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
	(*windowp)->wd_size -= trim_sz;

	/*
	 * we're trimming the last cookie (not the current cookie). So that
	 * last cookie may or may not have been using the copy buffer (we
	 * know the cookie passed in uses the copy buffer since we're in
	 * the copy buffer code path).
	 *
	 * If the last cookie doesn't use the copy buffer, nothing special to
	 * do. However, if it does use the copy buffer, it will be both the
	 * last page in the current window and the first page in the next
	 * window. Since we are reusing the copy buffer (and KVA space on the
	 * 32-bit kernel), this page will use the end of the copy buffer in the
	 * current window, and the start of the copy buffer in the next window.
	 * Track that info... The cookie physical address was already set to
	 * the copy buffer physical address in setup_cookie..
	 */
	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
		pidx = (sinfo->si_buf_offset + (*windowp)->wd_offset +
		    (*windowp)->wd_size) >> MMU_PAGESHIFT;
		(*windowp)->wd_trim.tr_last_copybuf_win = B_TRUE;
		(*windowp)->wd_trim.tr_last_pidx = pidx;
		(*windowp)->wd_trim.tr_last_cbaddr =
		    dma->dp_pgmap[pidx].pm_cbaddr;
#if !defined(__amd64)
		(*windowp)->wd_trim.tr_last_kaddr =
		    dma->dp_pgmap[pidx].pm_kaddr;
#endif
	}

	/* save the buffer offsets for the next window */
	coffset = cookie->dmac_size - trim_sz;
	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;

	/*
	 * set this now in case this is the first window. all other cases are
	 * set in dma_win()
	 */
	cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;

	/*
	 * initialize the next window using what's left over in the previous
	 * cookie.
	 */
	(*windowp)++;
	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
	(*windowp)->wd_cookie_cnt++;
	(*windowp)->wd_trim.tr_trim_first = B_TRUE;
	(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + coffset;
	(*windowp)->wd_trim.tr_first_size = trim_sz;

	/*
	 * again, we're tracking if the last cookie uses the copy buffer.
	 * read the comment above for more info on why we need to track
	 * additional state.
	 *
	 * For the first cookie in the new window, we need to reset the physical
	 * address to DMA into to the start of the copy buffer plus any
	 * initial page offset which may be present.
	 */
	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
		(*windowp)->wd_dosync = B_TRUE;
		(*windowp)->wd_trim.tr_first_copybuf_win = B_TRUE;
		(*windowp)->wd_trim.tr_first_pidx = pidx;
		(*windowp)->wd_trim.tr_first_cbaddr = dma->dp_cbaddr;
		poff = (*windowp)->wd_trim.tr_first_paddr & MMU_PAGEOFFSET;

		paddr = pfn_to_pa(hat_getpfnum(kas.a_hat, dma->dp_cbaddr)) +
		    poff;
		(*windowp)->wd_trim.tr_first_paddr =
		    ROOTNEX_PADDR_TO_RBASE(paddr);

#if !defined(__amd64)
		(*windowp)->wd_trim.tr_first_kaddr = dma->dp_kva;
#endif
		/* account for the cookie copybuf usage in the new window */
		*copybuf_used += MMU_PAGESIZE;

		/*
		 * every piece of code has to have a hack, and here is this
		 * one :-)
		 *
		 * There is a complex interaction between setup_cookie and the
		 * copybuf window boundary. The complexity had to be in either
		 * the maxxfer window, or the copybuf window, and I chose the
		 * copybuf code.
		 *
		 * So in this code path, we have taken the last cookie,
		 * virtually broken it in half due to the trim, and it happens
		 * to use the copybuf which further complicates life. At the
		 * same time, we have already setup the current cookie, which
		 * is now wrong. More background info: the current cookie uses
		 * the copybuf, so it is only a page long max. So we need to
		 * fix the current cookie's copy buffer address, physical
		 * address, and kva for the 32-bit kernel. We do this by
		 * bumping them by page size (of course, we can't do this on
		 * the physical address since the copy buffer may not be
		 * physically contiguous).
		 */
		dma->dp_pgmap[pidx + 1].pm_cbaddr += MMU_PAGESIZE;
		poff = cookie->dmac_laddress & MMU_PAGEOFFSET;

		paddr = pfn_to_pa(hat_getpfnum(kas.a_hat,
		    dma->dp_pgmap[pidx + 1].pm_cbaddr)) + poff;
		cookie->dmac_laddress = ROOTNEX_PADDR_TO_RBASE(paddr);

#if !defined(__amd64)
		ASSERT(dma->dp_pgmap[pidx + 1].pm_mapped == B_FALSE);
		dma->dp_pgmap[pidx + 1].pm_kaddr += MMU_PAGESIZE;
#endif
	}

	/* go back to the current cookie */
	cookie++;

	/*
	 * add the current cookie to the new window. set the new window size to
4383 * the what was left over from the previous cookie and what's in the
4386 (*windowp
)->wd_cookie_cnt
++;
4387 (*windowp
)->wd_size
= trim_sz
+ cookie
->dmac_size
;
4388 ASSERT((*windowp
)->wd_size
< dma
->dp_maxxfer
);
4391 * we know that the cookie passed in always uses the copy buffer. We
4392 * wouldn't be here if it didn't.
4394 *copybuf_used
+= MMU_PAGESIZE
;
4396 return (DDI_SUCCESS
);
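/*
 * Illustrative example of the granularity trim above (hypothetical values,
 * not code from this driver): when dma_attr_granular is a power of two,
 * say 0x1000, and the window has accumulated wd_size of 0x2340, then
 *
 *	trim_sz = 0x2340 & (0x1000 - 1) = 0x340
 *
 * which is the same remainder the modulo path computes for non-power-of-two
 * granularities. Those 0x340 bytes are peeled off the end of the current
 * window so it stays a whole multiple of the granularity, and they become
 * the start of the next window.
 */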
/*
 * rootnex_maxxfer_window_boundary()
 *    Called in bind slowpath when we get to a window boundary because we will
 *    go over maxxfer.
 */
static int
rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
    rootnex_window_t **windowp, ddi_dma_cookie_t *cookie)
{
	size_t dmac_size;
	off_t new_offset;
	size_t trim_sz;
	off_t coffset;


	/*
	 * calculate how much we have to trim off of the current cookie to
	 * equal maxxfer. We don't have to account for granularity here since
	 * our maxxfer already takes that into account.
	 */
	trim_sz = ((*windowp)->wd_size + cookie->dmac_size) - dma->dp_maxxfer;
	ASSERT(trim_sz <= cookie->dmac_size);
	ASSERT(trim_sz <= dma->dp_maxxfer);

	/* save cookie size since we need it later and we might change it */
	dmac_size = cookie->dmac_size;

	/*
	 * if we're not trimming the entire cookie, setup the current window
	 * to account for the trim.
	 */
	if (trim_sz < cookie->dmac_size) {
		(*windowp)->wd_cookie_cnt++;
		(*windowp)->wd_trim.tr_trim_last = B_TRUE;
		(*windowp)->wd_trim.tr_last_cookie = cookie;
		(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
		(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
		(*windowp)->wd_size = dma->dp_maxxfer;

		/*
		 * set the adjusted cookie size now in case this is the first
		 * window. All other windows are taken care of in get win.
		 */
		cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
	}

	/*
	 * coffset is the current offset within the cookie, new_offset is the
	 * current offset within the entire buffer.
	 */
	coffset = dmac_size - trim_sz;
	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;

	/* initialize the next window */
	(*windowp)++;
	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
	(*windowp)->wd_cookie_cnt++;
	(*windowp)->wd_size = trim_sz;
	if (trim_sz < dmac_size) {
		(*windowp)->wd_trim.tr_trim_first = B_TRUE;
		(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress +
		    coffset;
		(*windowp)->wd_trim.tr_first_size = trim_sz;
	}

	return (DDI_SUCCESS);
}
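/*
 * Illustrative example of the maxxfer trim above (hypothetical values, not
 * code from this driver): with dp_maxxfer of 0x10000, a window that has
 * already accumulated wd_size of 0xF000, and an incoming cookie of
 * dmac_size 0x3000:
 *
 *	trim_sz = (0xF000 + 0x3000) - 0x10000 = 0x2000
 *
 * The current window keeps 0x1000 bytes of the cookie (dmac_size - trim_sz)
 * and is capped at dp_maxxfer; the trimmed 0x2000 bytes start the next
 * window at dmac_laddress + 0x1000.
 */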
/*ARGSUSED*/
static int
rootnex_coredma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
    off_t off, size_t len, uint_t cache_flags)
{
	rootnex_sglinfo_t *sinfo;
	rootnex_pgmap_t *cbpage;
	rootnex_window_t *win;
	ddi_dma_impl_t *hp;
	rootnex_dma_t *dma;
	caddr_t fromaddr;
	caddr_t toaddr;
	uint_t psize;
	off_t offset;
	uint_t pidx;
	size_t size;
	off_t poff;
	int e;


	hp = (ddi_dma_impl_t *)handle;
	dma = (rootnex_dma_t *)hp->dmai_private;
	sinfo = &dma->dp_sglinfo;

	/*
	 * if we don't have any windows, we don't need to sync. A copybuf
	 * will cause us to have at least one window.
	 */
	if (dma->dp_window == NULL) {
		return (DDI_SUCCESS);
	}

	/* This window may not need to be sync'd */
	win = &dma->dp_window[dma->dp_current_win];
	if (!win->wd_dosync) {
		return (DDI_SUCCESS);
	}

	/* handle off and len special cases */
	if ((off == 0) || (rootnex_sync_ignore_params)) {
		offset = win->wd_offset;
	} else {
		offset = off;
	}
	if ((len == 0) || (rootnex_sync_ignore_params)) {
		size = win->wd_size;
	} else {
		size = len;
	}

	/* check the sync args to make sure they make a little sense */
	if (rootnex_sync_check_parms) {
		e = rootnex_valid_sync_parms(hp, win, offset, size,
		    cache_flags);
		if (e != DDI_SUCCESS) {
			ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_SYNC_FAIL]);
			return (DDI_FAILURE);
		}
	}

	/*
	 * special case the first page to handle the offset into the page. The
	 * offset to the current page for our buffer is the offset into the
	 * first page of the buffer plus our current offset into the buffer
	 * itself, masked of course.
	 */
	poff = (sinfo->si_buf_offset + offset) & MMU_PAGEOFFSET;
	psize = MIN((MMU_PAGESIZE - poff), size);

	/* go through all the pages that we want to sync */
	while (size > 0) {
		/*
		 * Calculate the page index relative to the start of the
		 * buffer. The index to the current page for our buffer is the
		 * offset into the first page of the buffer plus our current
		 * offset into the buffer itself, shifted of course...
		 */
		pidx = (sinfo->si_buf_offset + offset) >> MMU_PAGESHIFT;
		ASSERT(pidx < sinfo->si_max_pages);

		/*
		 * if this page uses the copy buffer, we need to sync it,
		 * otherwise, go on to the next page.
		 */
		cbpage = &dma->dp_pgmap[pidx];
		ASSERT((cbpage->pm_uses_copybuf == B_TRUE) ||
		    (cbpage->pm_uses_copybuf == B_FALSE));
		if (cbpage->pm_uses_copybuf) {
			/* cbaddr and kaddr should be page aligned */
			ASSERT(((uintptr_t)cbpage->pm_cbaddr &
			    MMU_PAGEOFFSET) == 0);
			ASSERT(((uintptr_t)cbpage->pm_kaddr &
			    MMU_PAGEOFFSET) == 0);

			/*
			 * if we're copying for the device, we are going to
			 * copy from the driver's buffer to the rootnex
			 * allocated copy buffer.
			 */
			if (cache_flags == DDI_DMA_SYNC_FORDEV) {
				fromaddr = cbpage->pm_kaddr + poff;
				toaddr = cbpage->pm_cbaddr + poff;
				ROOTNEX_DPROBE2(rootnex__sync__dev,
				    dev_info_t *, dma->dp_dip, size_t, psize);

			/*
			 * if we're copying for the cpu/kernel, we are going
			 * to copy from the rootnex allocated copy buffer to
			 * the driver's buffer.
			 */
			} else {
				fromaddr = cbpage->pm_cbaddr + poff;
				toaddr = cbpage->pm_kaddr + poff;
				ROOTNEX_DPROBE2(rootnex__sync__cpu,
				    dev_info_t *, dma->dp_dip, size_t, psize);
			}

			bcopy(fromaddr, toaddr, psize);
		}

		/*
		 * decrement size until we're done, update our offset into the
		 * buffer, and get the next page size.
		 */
		size -= psize;
		offset += psize;
		psize = MIN(MMU_PAGESIZE, size);

		/* page offset is zero for the rest of this loop */
		poff = 0;
	}

	return (DDI_SUCCESS);
}
/*
 * rootnex_dma_sync()
 *    called from ddi_dma_sync() if DMP_NOSYNC is not set in hp->dmai_rflags.
 *    We set DMP_NOSYNC if we're not using the copy buffer. If DMP_NOSYNC
 *    is set, ddi_dma_sync() returns immediately passing back success.
 */
static int
rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
    off_t off, size_t len, uint_t cache_flags)
{
#if defined(__amd64) && !defined(__xpv)
	if (IOMMU_USED(rdip)) {
		return (iommulib_nexdma_sync(dip, rdip, handle, off, len,
		    cache_flags));
	}
#endif
	return (rootnex_coredma_sync(dip, rdip, handle, off, len,
	    cache_flags));
}
/*
 * rootnex_valid_sync_parms()
 *    checks the parameters passed to sync to verify they are correct.
 */
static int
rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win,
    off_t offset, size_t size, uint_t cache_flags)
{
	off_t woffset;


	/*
	 * the first part of the test to make sure the offset passed in is
	 * within the window.
	 */
	if (offset < win->wd_offset) {
		return (DDI_FAILURE);
	}

	/*
	 * second and last part of the test to make sure the offset and length
	 * passed in are within the window.
	 */
	woffset = offset - win->wd_offset;
	if ((woffset + size) > win->wd_size) {
		return (DDI_FAILURE);
	}

	/*
	 * if we are sync'ing for the device, the DDI_DMA_WRITE flag should
	 * also be set in the handle's flags.
	 */
	if ((cache_flags == DDI_DMA_SYNC_FORDEV) &&
	    (hp->dmai_rflags & DDI_DMA_WRITE)) {
		return (DDI_SUCCESS);
	}

	/*
	 * at this point, either DDI_DMA_SYNC_FORCPU or DDI_DMA_SYNC_FORKERNEL
	 * should be set. Also DDI_DMA_READ should be set in the flags.
	 */
	if (((cache_flags == DDI_DMA_SYNC_FORCPU) ||
	    (cache_flags == DDI_DMA_SYNC_FORKERNEL)) &&
	    (hp->dmai_rflags & DDI_DMA_READ)) {
		return (DDI_SUCCESS);
	}

	return (DDI_FAILURE);
}
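/*
 * In short, the checks above accept a sync only when the (offset, size)
 * range lies inside the current window and the direction matches the bind:
 * DDI_DMA_SYNC_FORDEV requires DDI_DMA_WRITE, while DDI_DMA_SYNC_FORCPU or
 * DDI_DMA_SYNC_FORKERNEL requires DDI_DMA_READ. Anything else fails.
 */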
static int
rootnex_coredma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
    uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
    uint_t *ccountp)
{
	rootnex_window_t *window;
	rootnex_trim_t *trim;
	ddi_dma_impl_t *hp;
	rootnex_dma_t *dma;
	ddi_dma_obj_t *dmao;
#if !defined(__amd64)
	rootnex_sglinfo_t *sinfo;
	rootnex_pgmap_t *pmap;
	uint_t pidx;
	uint_t pcnt;
	off_t poff;
	int i;
#endif


	hp = (ddi_dma_impl_t *)handle;
	dma = (rootnex_dma_t *)hp->dmai_private;
#if !defined(__amd64)
	sinfo = &dma->dp_sglinfo;
#endif

	/* If we try and get a window which doesn't exist, return failure */
	if (win >= hp->dmai_nwin) {
		ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]);
		return (DDI_FAILURE);
	}

	dmao = dma->dp_dvma_used ? &dma->dp_dvma : &dma->dp_dma;

	/*
	 * if we don't have any windows, and they're asking for the first
	 * window, setup the cookie pointer to the first cookie in the bind.
	 * setup our return values, then increment the cookie since we return
	 * the first cookie on the stack.
	 */
	if (dma->dp_window == NULL) {
		if (win != 0) {
			ROOTNEX_DPROF_INC(
			    &rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]);
			return (DDI_FAILURE);
		}
		hp->dmai_cookie = dma->dp_cookies;
		*offp = 0;
		*lenp = dmao->dmao_size;
		*ccountp = dma->dp_sglinfo.si_sgl_size;
		*cookiep = hp->dmai_cookie[0];
		hp->dmai_cookie++;
		return (DDI_SUCCESS);
	}

	/* sync the old window before moving on to the new one */
	window = &dma->dp_window[dma->dp_current_win];
	if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_READ)) {
		(void) rootnex_coredma_sync(dip, rdip, handle, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
	}

#if !defined(__amd64)
	/*
	 * before we move to the next window, if we need to re-map, unmap all
	 * the pages in this window.
	 */
	if (dma->dp_cb_remaping) {
		/*
		 * If we switch to this window again, we'll need to map in
		 * on the fly next time.
		 */
		window->wd_remap_copybuf = B_TRUE;

		/*
		 * calculate the page index into the buffer where this window
		 * starts, and the number of pages this window takes up.
		 */
		pidx = (sinfo->si_buf_offset + window->wd_offset) >>
		    MMU_PAGESHIFT;
		poff = (sinfo->si_buf_offset + window->wd_offset) &
		    MMU_PAGEOFFSET;
		pcnt = mmu_btopr(window->wd_size + poff);
		ASSERT((pidx + pcnt) <= sinfo->si_max_pages);

		/* unmap pages which are currently mapped in this window */
		for (i = 0; i < pcnt; i++) {
			if (dma->dp_pgmap[pidx].pm_mapped) {
				hat_unload(kas.a_hat,
				    dma->dp_pgmap[pidx].pm_kaddr, MMU_PAGESIZE,
				    HAT_UNLOAD);
				dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
			}
			pidx++;
		}
	}
#endif

	/*
	 * Move to the new window.
	 * NOTE: current_win must be set for sync to work right
	 */
	dma->dp_current_win = win;
	window = &dma->dp_window[win];

	/* if needed, adjust the first and/or last cookies for trim */
	trim = &window->wd_trim;
	if (trim->tr_trim_first) {
		window->wd_first_cookie->dmac_laddress = trim->tr_first_paddr;
		window->wd_first_cookie->dmac_size = trim->tr_first_size;
#if !defined(__amd64)
		window->wd_first_cookie->dmac_type =
		    (window->wd_first_cookie->dmac_type &
		    ROOTNEX_USES_COPYBUF) + window->wd_offset;
#endif
		if (trim->tr_first_copybuf_win) {
			dma->dp_pgmap[trim->tr_first_pidx].pm_cbaddr =
			    trim->tr_first_cbaddr;
#if !defined(__amd64)
			dma->dp_pgmap[trim->tr_first_pidx].pm_kaddr =
			    trim->tr_first_kaddr;
#endif
		}
	}
	if (trim->tr_trim_last) {
		trim->tr_last_cookie->dmac_laddress = trim->tr_last_paddr;
		trim->tr_last_cookie->dmac_size = trim->tr_last_size;
		if (trim->tr_last_copybuf_win) {
			dma->dp_pgmap[trim->tr_last_pidx].pm_cbaddr =
			    trim->tr_last_cbaddr;
#if !defined(__amd64)
			dma->dp_pgmap[trim->tr_last_pidx].pm_kaddr =
			    trim->tr_last_kaddr;
#endif
		}
	}

	/*
	 * setup the cookie pointer to the first cookie in the window. setup
	 * our return values, then increment the cookie since we return the
	 * first cookie on the stack.
	 */
	hp->dmai_cookie = window->wd_first_cookie;
	*offp = window->wd_offset;
	*lenp = window->wd_size;
	*ccountp = window->wd_cookie_cnt;
	*cookiep = hp->dmai_cookie[0];
	hp->dmai_cookie++;

#if !defined(__amd64)
	/* re-map copybuf if required for this window */
	if (dma->dp_cb_remaping) {
		/*
		 * calculate the page index into the buffer where this
		 * window starts.
		 */
		pidx = (sinfo->si_buf_offset + window->wd_offset) >>
		    MMU_PAGESHIFT;
		ASSERT(pidx < sinfo->si_max_pages);

		/*
		 * the first page can get unmapped if it's shared with the
		 * previous window. Even if the rest of this window is already
		 * mapped in, we need to still check this one.
		 */
		pmap = &dma->dp_pgmap[pidx];
		if ((pmap->pm_uses_copybuf) && (pmap->pm_mapped == B_FALSE)) {
			if (pmap->pm_pp != NULL) {
				pmap->pm_mapped = B_TRUE;
				i86_pp_map(pmap->pm_pp, pmap->pm_kaddr);
			} else if (pmap->pm_vaddr != NULL) {
				pmap->pm_mapped = B_TRUE;
				i86_va_map(pmap->pm_vaddr, sinfo->si_asp,
				    pmap->pm_kaddr);
			}
		}
		pidx++;

		/* map in the rest of the pages if required */
		if (window->wd_remap_copybuf) {
			window->wd_remap_copybuf = B_FALSE;

			/* figure out how many pages this window takes up */
			poff = (sinfo->si_buf_offset + window->wd_offset) &
			    MMU_PAGEOFFSET;
			pcnt = mmu_btopr(window->wd_size + poff);
			ASSERT(((pidx - 1) + pcnt) <= sinfo->si_max_pages);

			/* map pages which require it */
			for (i = 1; i < pcnt; i++) {
				pmap = &dma->dp_pgmap[pidx];
				if (pmap->pm_uses_copybuf) {
					ASSERT(pmap->pm_mapped == B_FALSE);
					if (pmap->pm_pp != NULL) {
						pmap->pm_mapped = B_TRUE;
						i86_pp_map(pmap->pm_pp,
						    pmap->pm_kaddr);
					} else if (pmap->pm_vaddr != NULL) {
						pmap->pm_mapped = B_TRUE;
						i86_va_map(pmap->pm_vaddr,
						    sinfo->si_asp,
						    pmap->pm_kaddr);
					}
				}
				pidx++;
			}
		}
	}
#endif

	/* if the new window uses the copy buffer, sync it for the device */
	if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_WRITE)) {
		(void) rootnex_coredma_sync(dip, rdip, handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
	}

	return (DDI_SUCCESS);
}
/*
 * rootnex_dma_win()
 *    called from ddi_dma_getwin()
 */
static int
rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
    uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
    uint_t *ccountp)
{
#if defined(__amd64) && !defined(__xpv)
	if (IOMMU_USED(rdip)) {
		return (iommulib_nexdma_win(dip, rdip, handle, win, offp, lenp,
		    cookiep, ccountp));
	}
#endif
	return (rootnex_coredma_win(dip, rdip, handle, win, offp, lenp,
	    cookiep, ccountp));
}
#if defined(__amd64) && !defined(__xpv)
/*ARGSUSED*/
static int
rootnex_coredma_hdl_setprivate(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, void *v)
{
	ddi_dma_impl_t *hp;
	rootnex_dma_t *dma;

	hp = (ddi_dma_impl_t *)handle;
	dma = (rootnex_dma_t *)hp->dmai_private;
	dma->dp_iommu_private = v;

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static void *
rootnex_coredma_hdl_getprivate(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *hp;
	rootnex_dma_t *dma;

	hp = (ddi_dma_impl_t *)handle;
	dma = (rootnex_dma_t *)hp->dmai_private;

	return (dma->dp_iommu_private);
}
#endif /* defined(__amd64) && !defined(__xpv) */
/*
 * ************************
 *  obsoleted dma routines
 * ************************
 */

/*
 * rootnex_dma_mctl()
 *
 *    We don't support this legacy interface any more on x86.
 */
/*ARGSUSED*/
static int
rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
    enum ddi_dma_ctlops request, off_t *offp, size_t *lenp, caddr_t *objpp,
    uint_t cache_flags)
{
	/*
	 * The only thing dma_mctl is used for anymore is legacy SPARC
	 * dvma and sbus-specific routines.
	 */
	return (DDI_FAILURE);
}
/*ARGSUSED*/
static int
rootnex_fm_init(dev_info_t *dip, dev_info_t *tdip, int tcap,
    ddi_iblock_cookie_t *ibc)
{
	*ibc = rootnex_state->r_err_ibc;

	return (ddi_system_fmcap);
}
/*
 * rootnex_dma_check()
 *    Function called after a dma fault occurred to find out whether the
 *    fault address is associated with a driver that is able to handle faults
 *    and recover from faults.
 */
/*ARGSUSED*/
static int
rootnex_dma_check(dev_info_t *dip, const void *handle, const void *addr,
    const void *not_used)
{
	rootnex_window_t *window;
	uint64_t start_addr;
	uint64_t fault_addr;
	uint64_t end_addr;
	ddi_dma_impl_t *hp;
	rootnex_dma_t *dma;
	size_t csize;
	int i;
	int j;


	/* The driver has to set DDI_DMA_FLAGERR to recover from dma faults */
	hp = (ddi_dma_impl_t *)handle;

	dma = (rootnex_dma_t *)hp->dmai_private;

	/* Get the address that we need to search for */
	fault_addr = *(uint64_t *)addr;

	/*
	 * if we don't have any windows, we can just walk through all the
	 * cookies.
	 */
	if (dma->dp_window == NULL) {
		/* for each cookie */
		for (i = 0; i < dma->dp_sglinfo.si_sgl_size; i++) {
			/*
			 * if the faulted address is within the physical
			 * address range of the cookie, return
			 * DDI_FM_NONFATAL.
			 */
			if ((fault_addr >= dma->dp_cookies[i].dmac_laddress) &&
			    (fault_addr <= (dma->dp_cookies[i].dmac_laddress +
			    dma->dp_cookies[i].dmac_size))) {
				return (DDI_FM_NONFATAL);
			}
		}

		/* fault_addr not within this DMA handle */
		return (DDI_FM_UNKNOWN);
	}

	/* we have multiple windows, walk through each window */
	for (i = 0; i < hp->dmai_nwin; i++) {
		window = &dma->dp_window[i];

		/* Go through all the cookies in the window */
		for (j = 0; j < window->wd_cookie_cnt; j++) {

			start_addr = window->wd_first_cookie[j].dmac_laddress;
			csize = window->wd_first_cookie[j].dmac_size;

			/*
			 * if we are trimming the first cookie in the window,
			 * and this is the first cookie, adjust the start
			 * address and size of the cookie to account for the
			 * trim.
			 */
			if (window->wd_trim.tr_trim_first && (j == 0)) {
				start_addr = window->wd_trim.tr_first_paddr;
				csize = window->wd_trim.tr_first_size;
			}

			/*
			 * if we are trimming the last cookie in the window,
			 * and this is the last cookie, adjust the start
			 * address and size of the cookie to account for the
			 * trim.
			 */
			if (window->wd_trim.tr_trim_last &&
			    (j == (window->wd_cookie_cnt - 1))) {
				start_addr = window->wd_trim.tr_last_paddr;
				csize = window->wd_trim.tr_last_size;
			}

			end_addr = start_addr + csize;

			/*
			 * if the faulted address is within the physical
			 * address range of the cookie, return
			 * DDI_FM_NONFATAL.
			 */
			if ((fault_addr >= start_addr) &&
			    (fault_addr <= end_addr)) {
				return (DDI_FM_NONFATAL);
			}
		}
	}

	/* fault_addr not within this DMA handle */
	return (DDI_FM_UNKNOWN);
}
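/*
 * Caller-side note (a sketch, not code from this driver): a driver opts in
 * to this style of recovery by setting DDI_DMA_FLAGERR in the
 * dma_attr_flags field of its ddi_dma_attr_t before allocating the handle,
 * e.g.
 *
 *	my_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
 *
 * and by registering its fault capabilities with ddi_fm_init(9F).
 */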
/*ARGSUSED*/
static int
rootnex_quiesce(dev_info_t *dip)
{
#if defined(__amd64) && !defined(__xpv)
	return (immu_quiesce());
#else
	return (DDI_SUCCESS