 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
 */
/*
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011 Bayard G. Bell. All rights reserved.
 * Copyright 2012 Garrett D'Amore <garrett@damore.org>. All rights reserved.
 */
/*
 * x86 root nexus driver
 */

#include <sys/sysmacros.h>
#include <sys/autoconf.h>
#include <sys/sysmacros.h>
#include <sys/debug.h>
#include <sys/ddidmareq.h>
#include <sys/promif.h>
#include <sys/devops.h>
#include <sys/cmn_err.h>
#include <vm/seg_kmem.h>
#include <vm/seg_dev.h>
#include <sys/avintr.h>
#include <sys/errno.h>
#include <sys/modctl.h>
#include <sys/ddi_impldefs.h>
#include <sys/sunddi.h>
#include <sys/sunndi.h>
#include <sys/mach_intr.h>
#include <sys/ontrap.h>
#include <sys/atomic.h>
#include <sys/rootnex.h>
#include <vm/hat_i86.h>
#include <sys/ddifm.h>
#include <sys/ddi_isa.h>
#include <sys/bootinfo.h>
#include <sys/hypervisor.h>
#include <sys/bootconf.h>
#include <vm/kboot_mmu.h>
#if defined(__amd64) && !defined(__xpv)
#include <sys/immu.h>
#endif

/*
 * enable/disable extra checking of function parameters. Useful for debugging
 * drivers.
 */
#ifdef	DEBUG
int rootnex_alloc_check_parms = 1;
int rootnex_bind_check_parms = 1;
int rootnex_bind_check_inuse = 1;
int rootnex_unbind_verify_buffer = 0;
int rootnex_sync_check_parms = 1;
#else
int rootnex_alloc_check_parms = 0;
int rootnex_bind_check_parms = 0;
int rootnex_bind_check_inuse = 0;
int rootnex_unbind_verify_buffer = 0;
int rootnex_sync_check_parms = 0;
#endif
boolean_t rootnex_dmar_not_setup;

/* Master Abort and Target Abort panic flag */
int rootnex_fm_ma_ta_panic_flag = 0;

/* Semi-temporary patchables to phase in bug fixes, test drivers, etc. */
int rootnex_bind_fail = 1;
int rootnex_bind_warn = 1;
uint8_t *rootnex_warn_list;
/* bitmasks for rootnex_warn_list. Up to 8 different warnings with uint8_t */
#define	ROOTNEX_BIND_WARNING	(0x1 << 0)

/*
 * revert back to old broken behavior of always sync'ing entire copy buffer.
 * This is useful if we have a buggy driver which doesn't correctly pass in
 * the offset and size into ddi_dma_sync().
 */
int rootnex_sync_ignore_params = 0;
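/*
 * Illustrative sketch only (not from the original source): rootnex_warn_list
 * holds one uint8_t of warning bits per driver major number, so a one-time
 * warning might be gated like the following, assuming "mnum" came from
 * ddi_driver_major(rdip):
 *
 *	if (!(rootnex_warn_list[mnum] & ROOTNEX_BIND_WARNING)) {
 *		rootnex_warn_list[mnum] |= ROOTNEX_BIND_WARNING;
 *		cmn_err(CE_WARN, "!%s: one-time bind warning",
 *		    ddi_driver_name(rdip));
 *	}
 */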
/*
 * For the 64-bit kernel, pre-alloc enough cookies for a 256K buffer plus 1
 * page for alignment. For the 32-bit kernel, pre-alloc enough cookies for a
 * 64K buffer plus 1 page for alignment (we have less kernel space in a 32-bit
 * kernel). Allocate enough windows to handle a 256K buffer w/ at least 65
 * sgllen DMA engine, and enough copybuf buffer state pages to handle 2 pages
 * (< 8K). We will still need to allocate the copy buffer during bind though
 * (if we need one). These can only be modified in /etc/system before rootnex
 * attach.
 */
#if defined(__amd64)
int rootnex_prealloc_cookies = 65;
int rootnex_prealloc_windows = 4;
int rootnex_prealloc_copybuf = 2;
#else
int rootnex_prealloc_cookies = 33;
int rootnex_prealloc_windows = 4;
int rootnex_prealloc_copybuf = 2;
#endif
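/*
 * Hedged usage sketch: per the comment above these are /etc/system tunables
 * and must be set before the root nexus attaches; the values below are
 * examples only, not recommendations:
 *
 *	set rootnex:rootnex_prealloc_cookies = 129
 *	set rootnex:rootnex_prealloc_windows = 8
 */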
/* driver global state */
static rootnex_state_t *rootnex_state;

/* shortcut to rootnex counters */
static uint64_t *rootnex_cnt;

/*
 * XXX - does x86 even need these or are they left over from the SPARC days?
 */
/* statically defined integer/boolean properties for the root node */
static rootnex_intprop_t rootnex_intprp[] = {
	{ "PAGESIZE",			PAGESIZE },
	{ "MMU_PAGESIZE",		MMU_PAGESIZE },
	{ "MMU_PAGEOFFSET",		MMU_PAGEOFFSET },
	{ DDI_RELATIVE_ADDRESSING,	1 },
};
#define	NROOT_INTPROPS	(sizeof (rootnex_intprp) / sizeof (rootnex_intprop_t))
/*
 * If we're dom0, we're using a real device so we need to load
 * the cookies with MFNs instead of PFNs.
 */
#ifdef __xpv
typedef maddr_t rootnex_addr_t;
#define	ROOTNEX_PADDR_TO_RBASE(pa)	\
	(DOMAIN_IS_INITDOMAIN(xen_info) ? pa_to_ma(pa) : (pa))
#else
typedef paddr_t rootnex_addr_t;
#define	ROOTNEX_PADDR_TO_RBASE(pa)	(pa)
#endif
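/*
 * Illustrative sketch (assumed usage, not a quote from this file): when a
 * cookie is loaded with the physical address "paddr" of a segment, the
 * device-visible base is derived as
 *
 *	rootnex_addr_t raddr = ROOTNEX_PADDR_TO_RBASE(paddr);
 *
 * which is the identity mapping on bare metal and a pa_to_ma() translation
 * when running as dom0, so the device is handed machine addresses (MFNs).
 */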
static struct cb_ops rootnex_cb_ops = {
	nodev,		/* open */
	nodev,		/* close */
	nodev,		/* strategy */
	nodev,		/* print */
	nodev,		/* dump */
	nodev,		/* read */
	nodev,		/* write */
	nodev,		/* ioctl */
	nodev,		/* devmap */
	nodev,		/* mmap */
	nodev,		/* segmap */
	nochpoll,	/* chpoll */
	ddi_prop_op,	/* cb_prop_op */
	NULL,		/* struct streamtab */
	D_NEW | D_MP | D_HOTPLUG, /* compatibility flags */
	CB_REV,		/* Rev this struct */
	nodev,		/* cb_aread */
	nodev		/* cb_awrite */
};
static int rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp,
    off_t offset, off_t len, caddr_t *vaddrp);
static int rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip,
    struct hat *hat, struct seg *seg, caddr_t addr,
    struct devpage *dp, pfn_t pfn, uint_t prot, uint_t lock);
static int rootnex_dma_allochdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
    ddi_dma_handle_t *handlep);
static int rootnex_dma_freehdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle);
static int rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp);
static int rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle);
static int rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags);
static int rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp);
static int rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, enum ddi_dma_ctlops request,
    off_t *offp, size_t *lenp, caddr_t *objp, uint_t cache_flags);
static int rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip,
    ddi_ctl_enum_t ctlop, void *arg, void *result);
static int rootnex_fm_init(dev_info_t *dip, dev_info_t *tdip, int tcap,
    ddi_iblock_cookie_t *ibc);
static int rootnex_intr_ops(dev_info_t *pdip, dev_info_t *rdip,
    ddi_intr_op_t intr_op, ddi_intr_handle_impl_t *hdlp, void *result);
static int rootnex_alloc_intr_fixed(dev_info_t *, ddi_intr_handle_impl_t *,
    void *);
static int rootnex_free_intr_fixed(dev_info_t *, ddi_intr_handle_impl_t *);
static int rootnex_coredma_allochdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_attr_t *attr, int (*waitfp)(caddr_t), caddr_t arg,
    ddi_dma_handle_t *handlep);
static int rootnex_coredma_freehdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle);
static int rootnex_coredma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp);
static int rootnex_coredma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle);
#if defined(__amd64) && !defined(__xpv)
static void rootnex_coredma_reset_cookies(dev_info_t *dip,
    ddi_dma_handle_t handle);
static int rootnex_coredma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
    ddi_dma_cookie_t **cookiepp, uint_t *ccountp);
static int rootnex_coredma_set_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
    ddi_dma_cookie_t *cookiep, uint_t ccount);
static int rootnex_coredma_clear_cookies(dev_info_t *dip,
    ddi_dma_handle_t handle);
static int rootnex_coredma_get_sleep_flags(ddi_dma_handle_t handle);
#endif
static int rootnex_coredma_sync(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, off_t off, size_t len, uint_t cache_flags);
static int rootnex_coredma_win(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, uint_t win, off_t *offp, size_t *lenp,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp);
#if defined(__amd64) && !defined(__xpv)
static int rootnex_coredma_hdl_setprivate(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, void *v);
static void *rootnex_coredma_hdl_getprivate(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle);
#endif
static struct bus_ops rootnex_bus_ops = {
	BUSO_REV,
	rootnex_map,
	NULL,
	NULL,
	NULL,
	rootnex_map_fault,
	0,
	rootnex_dma_allochdl,
	rootnex_dma_freehdl,
	rootnex_dma_bindhdl,
	rootnex_dma_unbindhdl,
	rootnex_dma_sync,
	rootnex_dma_win,
	rootnex_dma_mctl,
	rootnex_ctlops,
	ddi_bus_prop_op,
	i_ddi_rootnex_get_eventcookie,
	i_ddi_rootnex_add_eventcall,
	i_ddi_rootnex_remove_eventcall,
	i_ddi_rootnex_post_event,
	0,			/* bus_intr_ctl */
	0,			/* bus_config */
	0,			/* bus_unconfig */
	rootnex_fm_init,	/* bus_fm_init */
	NULL,			/* bus_fm_fini */
	NULL,			/* bus_fm_access_enter */
	NULL,			/* bus_fm_access_exit */
	NULL,			/* bus_power */
	rootnex_intr_ops	/* bus_intr_op */
};
static int rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
static int rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
static int rootnex_quiesce(dev_info_t *dip);
static struct dev_ops rootnex_ops = {
	DEVO_REV,
	0,
	ddi_no_info,
	nulldev,
	nulldev,
	rootnex_attach,
	rootnex_detach,
	nulldev,
	&rootnex_cb_ops,
	&rootnex_bus_ops,
	NULL,
	rootnex_quiesce,	/* quiesce */
};

static struct modldrv rootnex_modldrv = {
	&mod_driverops,
	"i86pc root nexus",
	&rootnex_ops
};

static struct modlinkage rootnex_modlinkage = {
	MODREV_1,
	(void *)&rootnex_modldrv,
	NULL
};
#if defined(__amd64) && !defined(__xpv)
static iommulib_nexops_t iommulib_nexops = {
	IOMMU_NEXOPS_VERSION,
	"Rootnex IOMMU ops Vers 1.1",
	NULL,
	rootnex_coredma_allochdl,
	rootnex_coredma_freehdl,
	rootnex_coredma_bindhdl,
	rootnex_coredma_unbindhdl,
	rootnex_coredma_reset_cookies,
	rootnex_coredma_get_cookies,
	rootnex_coredma_set_cookies,
	rootnex_coredma_clear_cookies,
	rootnex_coredma_get_sleep_flags,
	rootnex_coredma_sync,
	rootnex_coredma_win,
	rootnex_coredma_hdl_setprivate,
	rootnex_coredma_hdl_getprivate
};
#endif
extern struct seg_ops segdev_ops;
extern int ignore_hardware_nodes;	/* force flag from ddi_impl.c */
#ifdef	DDI_MAP_DEBUG
extern int ddi_map_debug_flag;
#define	ddi_map_debug	if (ddi_map_debug_flag) prom_printf
#endif
extern void i86_pp_map(page_t *pp, caddr_t kaddr);
extern void i86_va_map(caddr_t vaddr, struct as *asp, caddr_t kaddr);
extern int (*psm_intr_ops)(dev_info_t *, ddi_intr_handle_impl_t *,
    psm_intr_op_t, int *);
extern int impl_ddi_sunbus_initchild(dev_info_t *dip);
extern void impl_ddi_sunbus_removechild(dev_info_t *dip);

/*
 * Use the device arena for device control register mappings.
 * Various kernel memory walkers (debugger, dtrace) need to know
 * to avoid this address range to prevent undesired device activity.
 */
extern void *device_arena_alloc(size_t size, int vm_flag);
extern void device_arena_free(void * vaddr, size_t size);
static int rootnex_dma_init();
static void rootnex_add_props(dev_info_t *);
static int rootnex_ctl_reportdev(dev_info_t *dip);
static struct intrspec *rootnex_get_ispec(dev_info_t *rdip, int inum);
static int rootnex_map_regspec(ddi_map_req_t *mp, caddr_t *vaddrp);
static int rootnex_unmap_regspec(ddi_map_req_t *mp, caddr_t *vaddrp);
static int rootnex_map_handle(ddi_map_req_t *mp);
static void rootnex_clean_dmahdl(ddi_dma_impl_t *hp);
static int rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegsize);
static int rootnex_valid_bind_parms(ddi_dma_req_t *dmareq,
    ddi_dma_attr_t *attr);
static void rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
    rootnex_sglinfo_t *sglinfo);
static void rootnex_dvma_get_sgl(ddi_dma_obj_t *dmar_object,
    ddi_dma_cookie_t *sgl, rootnex_sglinfo_t *sglinfo);
static int rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
    rootnex_dma_t *dma, ddi_dma_attr_t *attr, ddi_dma_obj_t *dmao, int kmflag);
static int rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
    rootnex_dma_t *dma, ddi_dma_attr_t *attr);
static void rootnex_teardown_copybuf(rootnex_dma_t *dma);
static int rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
    ddi_dma_attr_t *attr, ddi_dma_obj_t *dmao, int kmflag);
static void rootnex_teardown_windows(rootnex_dma_t *dma);
static void rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
    rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset);
static void rootnex_setup_cookie(ddi_dma_obj_t *dmar_object,
    rootnex_dma_t *dma, ddi_dma_cookie_t *cookie, off_t cur_offset,
    size_t *copybuf_used, page_t **cur_pp);
static int rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp,
    rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie,
    ddi_dma_attr_t *attr, off_t cur_offset);
static int rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp,
    rootnex_dma_t *dma, rootnex_window_t **windowp,
    ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used);
static int rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp,
    rootnex_dma_t *dma, rootnex_window_t **windowp, ddi_dma_cookie_t *cookie);
static int rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win,
    off_t offset, size_t size, uint_t cache_flags);
static int rootnex_verify_buffer(rootnex_dma_t *dma);
static int rootnex_dma_check(dev_info_t *dip, const void *handle,
    const void *comp_addr, const void *not_used);
static boolean_t rootnex_need_bounce_seg(ddi_dma_obj_t *dmar_object,
    rootnex_sglinfo_t *sglinfo);
static struct as *rootnex_get_as(ddi_dma_obj_t *dmar_object);
/*
 * _init()
 *
 */
int
_init(void)
{
	rootnex_state = NULL;
	return (mod_install(&rootnex_modlinkage));
}

/*
 * _info()
 *
 */
int
_info(struct modinfo *modinfop)
{
	return (mod_info(&rootnex_modlinkage, modinfop));
}
static int
rootnex_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int fmcap;
	int e;

	switch (cmd) {
	case DDI_ATTACH:
		break;
	case DDI_RESUME:
#if defined(__amd64) && !defined(__xpv)
		return (immu_unquiesce());
#else
		return (DDI_SUCCESS);
#endif
	default:
		return (DDI_FAILURE);
	}

	/*
	 * We should only have one instance of rootnex. Save it away since we
	 * don't have an easy way to get it back later.
	 */
	ASSERT(rootnex_state == NULL);
	rootnex_state = kmem_zalloc(sizeof (rootnex_state_t), KM_SLEEP);

	rootnex_state->r_dip = dip;
	rootnex_state->r_err_ibc = (ddi_iblock_cookie_t)ipltospl(15);
	rootnex_state->r_reserved_msg_printed = B_FALSE;
	rootnex_cnt = &rootnex_state->r_counters[0];

	/*
	 * Set minimum fm capability level for i86pc platforms and then
	 * initialize error handling. Since we're the rootnex, we don't
	 * care what's returned in the fmcap field.
	 */
	ddi_system_fmcap = DDI_FM_EREPORT_CAPABLE | DDI_FM_ERRCB_CAPABLE |
	    DDI_FM_ACCCHK_CAPABLE | DDI_FM_DMACHK_CAPABLE;
	fmcap = ddi_system_fmcap;
	ddi_fm_init(dip, &fmcap, &rootnex_state->r_err_ibc);

	/* initialize DMA related state */
	e = rootnex_dma_init();
	if (e != DDI_SUCCESS) {
		kmem_free(rootnex_state, sizeof (rootnex_state_t));
		return (DDI_FAILURE);
	}

	/* Add static root node properties */
	rootnex_add_props(dip);

	/* since we can't call ddi_report_dev() */
	cmn_err(CE_CONT, "?root nexus = %s\n", ddi_get_name(dip));

	/* Initialize rootnex event handle */
	i_ddi_rootnex_init_events(dip);

#if defined(__amd64) && !defined(__xpv)
	e = iommulib_nexus_register(dip, &iommulib_nexops,
	    &rootnex_state->r_iommulib_handle);

	ASSERT(e == DDI_SUCCESS);
#endif

	return (DDI_SUCCESS);
}
/*ARGSUSED*/
static int
rootnex_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	switch (cmd) {
	case DDI_SUSPEND:
#if defined(__amd64) && !defined(__xpv)
		return (immu_quiesce());
#else
		return (DDI_SUCCESS);
#endif
	default:
		return (DDI_FAILURE);
	}
	/*NOTREACHED*/
}


/*
 * rootnex_dma_init()
 *
 */
static int
rootnex_dma_init(void)
{
	size_t bufsize;

	/*
	 * size of our cookie/window/copybuf state needed in dma bind that we
	 * pre-alloc in dma_alloc_handle
	 */
	rootnex_state->r_prealloc_cookies = rootnex_prealloc_cookies;
	rootnex_state->r_prealloc_size =
	    (rootnex_state->r_prealloc_cookies * sizeof (ddi_dma_cookie_t)) +
	    (rootnex_prealloc_windows * sizeof (rootnex_window_t)) +
	    (rootnex_prealloc_copybuf * sizeof (rootnex_pgmap_t));

	/*
	 * setup DDI DMA handle kmem cache, align each handle on 64 bytes,
	 * allocate 16 extra bytes for struct pointer alignment
	 * (p->dmai_private & dma->dp_prealloc_buffer)
	 */
	bufsize = sizeof (ddi_dma_impl_t) + sizeof (rootnex_dma_t) +
	    rootnex_state->r_prealloc_size + 0x10;
	rootnex_state->r_dmahdl_cache = kmem_cache_create("rootnex_dmahdl",
	    bufsize, 64, NULL, NULL, NULL, NULL, NULL, 0);
	if (rootnex_state->r_dmahdl_cache == NULL) {
		return (DDI_FAILURE);
	}

	/*
	 * allocate array to track which major numbers we have printed warnings
	 * for.
	 */
	rootnex_warn_list = kmem_zalloc(devcnt * sizeof (*rootnex_warn_list),
	    KM_SLEEP);

	return (DDI_SUCCESS);
}
/*
 * rootnex_add_props()
 *
 */
static void
rootnex_add_props(dev_info_t *dip)
{
	rootnex_intprop_t *rpp;
	int i;

	/* Add static integer/boolean properties to the root node */
	rpp = rootnex_intprp;
	for (i = 0; i < NROOT_INTPROPS; i++) {
		(void) e_ddi_prop_update_int(DDI_DEV_T_NONE, dip,
		    rpp[i].prop_name, rpp[i].prop_value);
	}
}
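/*
 * Hedged usage sketch (not part of this driver): a child driver can look up
 * one of the root-node properties published above through the standard DDI
 * property interface, e.g.
 *
 *	int pgsz = ddi_prop_get_int(DDI_DEV_T_ANY, child_dip, 0,
 *	    "MMU_PAGESIZE", 0);
 *
 * where child_dip is that driver's own dev_info pointer and the trailing 0
 * is the default returned if the property is not found.
 */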
/*
 * *************************
 *  ctlops related routines
 * *************************
 */

/*
 * rootnex_ctlops()
 *
 */
/*ARGSUSED*/
static int
rootnex_ctlops(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t ctlop,
    void *arg, void *result)
{
	int n, *ptr;
	struct ddi_parent_private_data *pdp;

	switch (ctlop) {
	case DDI_CTLOPS_DMAPMAPC:
		/*
		 * Return 'partial' to indicate that dma mapping
		 * has to be done in the main MMU.
		 */
		return (DDI_DMA_PARTIAL);

	case DDI_CTLOPS_BTOP:
		/*
		 * Convert byte count input to physical page units.
		 * (byte counts that are not a page-size multiple
		 * are rounded down)
		 */
		*(ulong_t *)result = btop(*(ulong_t *)arg);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_PTOB:
		/*
		 * Convert size in physical pages to bytes
		 */
		*(ulong_t *)result = ptob(*(ulong_t *)arg);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_BTOPR:
		/*
		 * Convert byte count input to physical page units
		 * (byte counts that are not a page-size multiple
		 * are rounded up)
		 */
		*(ulong_t *)result = btopr(*(ulong_t *)arg);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_INITCHILD:
		return (impl_ddi_sunbus_initchild(arg));

	case DDI_CTLOPS_UNINITCHILD:
		impl_ddi_sunbus_removechild(arg);
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REPORTDEV:
		return (rootnex_ctl_reportdev(rdip));

	case DDI_CTLOPS_IOMIN:
		/*
		 * Nothing to do here but reflect back..
		 */
		return (DDI_SUCCESS);

	case DDI_CTLOPS_REGSIZE:
	case DDI_CTLOPS_NREGS:
		break;

	case DDI_CTLOPS_SIDDEV:
		if (ndi_dev_is_prom_node(rdip))
			return (DDI_SUCCESS);
		if (ndi_dev_is_persistent_node(rdip))
			return (DDI_SUCCESS);
		return (DDI_FAILURE);

	case DDI_CTLOPS_POWER:
		return ((*pm_platform_power)((power_req_t *)arg));

	case DDI_CTLOPS_RESERVED0: /* Was DDI_CTLOPS_NINTRS, obsolete */
	case DDI_CTLOPS_RESERVED1: /* Was DDI_CTLOPS_POKE_INIT, obsolete */
	case DDI_CTLOPS_RESERVED2: /* Was DDI_CTLOPS_POKE_FLUSH, obsolete */
	case DDI_CTLOPS_RESERVED3: /* Was DDI_CTLOPS_POKE_FINI, obsolete */
	case DDI_CTLOPS_RESERVED4: /* Was DDI_CTLOPS_INTR_HILEVEL, obsolete */
	case DDI_CTLOPS_RESERVED5: /* Was DDI_CTLOPS_XLATE_INTRS, obsolete */
		if (!rootnex_state->r_reserved_msg_printed) {
			rootnex_state->r_reserved_msg_printed = B_TRUE;
			cmn_err(CE_WARN, "Failing ddi_ctlops call(s) for "
			    "1 or more reserved/obsolete operations.");
		}
		return (DDI_FAILURE);

	default:
		return (DDI_FAILURE);
	}
	/*
	 * The rest are for "hardware" properties
	 */
	if ((pdp = ddi_get_parent_data(rdip)) == NULL)
		return (DDI_FAILURE);

	if (ctlop == DDI_CTLOPS_NREGS) {
		ptr = (int *)result;
		*ptr = pdp->par_nreg;
	} else {
		off_t *size = (off_t *)result;

		ptr = (int *)arg;
		n = *ptr;
		if (n >= pdp->par_nreg) {
			return (DDI_FAILURE);
		}
		*size = (off_t)pdp->par_reg[n].regspec_size;
	}
	return (DDI_SUCCESS);
}
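/*
 * A small worked example of the conversions handled above, assuming the
 * usual x86 4K (0x1000 byte) MMU page size:
 *
 *	btop(0x3000)  == 3	(exact multiple)
 *	btop(0x3001)  == 3	(rounded down)
 *	btopr(0x3001) == 4	(rounded up)
 *	ptob(3)       == 0x3000
 */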
/*
 * rootnex_ctl_reportdev()
 *
 */
static int
rootnex_ctl_reportdev(dev_info_t *dev)
{
	int i, n, len, f_len = 0;
	char *buf;

	buf = kmem_alloc(REPORTDEV_BUFSIZE, KM_SLEEP);
	f_len += snprintf(buf, REPORTDEV_BUFSIZE,
	    "%s%d at root", ddi_driver_name(dev), ddi_get_instance(dev));
	len = strlen(buf);

	for (i = 0; i < sparc_pd_getnreg(dev); i++) {

		struct regspec *rp = sparc_pd_getreg(dev, i);

		if (i == 0)
			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
			    ": ");
		else
			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
			    " and ");
		len = strlen(buf);

		switch (rp->regspec_bustype) {

		case BTEISA:
			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
			    "%s 0x%x", DEVI_EISA_NEXNAME, rp->regspec_addr);
			break;

		case BTISA:
			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
			    "%s 0x%x", DEVI_ISA_NEXNAME, rp->regspec_addr);
			break;

		default:
			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
			    "space %x offset %x",
			    rp->regspec_bustype, rp->regspec_addr);
			break;
		}
		len = strlen(buf);
	}
	for (i = 0, n = sparc_pd_getnintr(dev); i < n; i++) {
		int pri;

		if (i != 0) {
			f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
			    ",");
			len = strlen(buf);
		}
		pri = INT_IPL(sparc_pd_getintr(dev, i)->intrspec_pri);
		f_len += snprintf(buf + len, REPORTDEV_BUFSIZE - len,
		    " sparc ipl %d", pri);
		len = strlen(buf);
	}
	if (f_len + 1 >= REPORTDEV_BUFSIZE) {
		cmn_err(CE_NOTE, "next message is truncated: "
		    "printed length 1024, real length %d", f_len);
	}
	cmn_err(CE_CONT, "?%s\n", buf);
	kmem_free(buf, REPORTDEV_BUFSIZE);
	return (DDI_SUCCESS);
}
static int
rootnex_map(dev_info_t *dip, dev_info_t *rdip, ddi_map_req_t *mp, off_t offset,
    off_t len, caddr_t *vaddrp)
{
	struct regspec *rp, tmp_reg;
	ddi_map_req_t mr = *mp;		/* Get private copy of request */
	int error;

	mp = &mr;

	switch (mp->map_op)  {
	case DDI_MO_MAP_LOCKED:
	case DDI_MO_UNMAP:
	case DDI_MO_MAP_HANDLE:
		break;
	default:
#ifdef	DDI_MAP_DEBUG
		cmn_err(CE_WARN, "rootnex_map: unimplemented map op %d.",
		    mp->map_op);
#endif	/* DDI_MAP_DEBUG */
		return (DDI_ME_UNIMPLEMENTED);
	}

	if (mp->map_flags & DDI_MF_USER_MAPPING)  {
#ifdef	DDI_MAP_DEBUG
		cmn_err(CE_WARN, "rootnex_map: unimplemented map type: user.");
#endif	/* DDI_MAP_DEBUG */
		return (DDI_ME_UNIMPLEMENTED);
	}

	/*
	 * First, if given an rnumber, convert it to a regspec...
	 * (Presumably, this is on behalf of a child of the root node?)
	 */
	if (mp->map_type == DDI_MT_RNUMBER)  {

		int rnumber = mp->map_obj.rnumber;
#ifdef	DDI_MAP_DEBUG
		static char *out_of_range =
		    "rootnex_map: Out of range rnumber <%d>, device <%s>";
#endif	/* DDI_MAP_DEBUG */

		rp = i_ddi_rnumber_to_regspec(rdip, rnumber);
		if (rp == NULL)  {
#ifdef	DDI_MAP_DEBUG
			cmn_err(CE_WARN, out_of_range, rnumber,
			    ddi_get_name(rdip));
#endif	/* DDI_MAP_DEBUG */
			return (DDI_ME_RNUMBER_RANGE);
		}

		/*
		 * Convert the given ddi_map_req_t from rnumber to regspec...
		 */
		mp->map_type = DDI_MT_REGSPEC;
		mp->map_obj.rp = rp;
	}

	/*
	 * Adjust offset and length corresponding to called values...
	 * XXX: A non-zero length means override the one in the regspec
	 * XXX: (regardless of what's in the parent's range?)
	 */
	tmp_reg = *(mp->map_obj.rp);		/* Preserve underlying data */
	rp = mp->map_obj.rp = &tmp_reg;		/* Use tmp_reg in request */

#ifdef	DDI_MAP_DEBUG
	cmn_err(CE_CONT, "rootnex: <%s,%s> <0x%x, 0x%x, 0x%d> offset %d len %d "
	    "handle 0x%x\n", ddi_get_name(dip), ddi_get_name(rdip),
	    rp->regspec_bustype, rp->regspec_addr, rp->regspec_size, offset,
	    len, mp->map_handlep);
#endif	/* DDI_MAP_DEBUG */

	/*
	 * I/O or memory mapping:
	 *
	 *	<bustype=0, addr=x, len=x>: memory
	 *	<bustype=1, addr=x, len=x>: i/o
	 *	<bustype>1, addr=0, len=x>: x86-compatibility i/o
	 */
	if (rp->regspec_bustype > 1 && rp->regspec_addr != 0) {
		cmn_err(CE_WARN, "<%s,%s> invalid register spec"
		    " <0x%x, 0x%x, 0x%x>", ddi_get_name(dip),
		    ddi_get_name(rdip), rp->regspec_bustype,
		    rp->regspec_addr, rp->regspec_size);
		return (DDI_ME_INVAL);
	}

	if (rp->regspec_bustype > 1 && rp->regspec_addr == 0) {
		/*
		 * compatibility i/o mapping
		 */
		rp->regspec_bustype += (uint_t)offset;
	} else {
		/*
		 * Normal memory or i/o mapping
		 */
		rp->regspec_addr += (uint_t)offset;
	}

	if (len != 0)
		rp->regspec_size = (uint_t)len;

#ifdef	DDI_MAP_DEBUG
	cmn_err(CE_CONT, "             <%s,%s> <0x%x, 0x%x, 0x%d> offset %d "
	    "len %d handle 0x%x\n", ddi_get_name(dip), ddi_get_name(rdip),
	    rp->regspec_bustype, rp->regspec_addr, rp->regspec_size,
	    offset, len, mp->map_handlep);
#endif	/* DDI_MAP_DEBUG */

	/*
	 * Apply any parent ranges at this level, if applicable.
	 * (This is where nexus specific regspec translation takes place.
	 * Use of this function is implicit agreement that translation is
	 * provided via ddi_apply_range.)
	 */
#ifdef	DDI_MAP_DEBUG
	ddi_map_debug("applying range of parent <%s> to child <%s>...\n",
	    ddi_get_name(dip), ddi_get_name(rdip));
#endif	/* DDI_MAP_DEBUG */

	if ((error = i_ddi_apply_range(dip, rdip, mp->map_obj.rp)) != 0)
		return (error);

	switch (mp->map_op)  {
	case DDI_MO_MAP_LOCKED:
		/*
		 * Set up the locked down kernel mapping to the regspec...
		 */
		return (rootnex_map_regspec(mp, vaddrp));

	case DDI_MO_UNMAP:
		return (rootnex_unmap_regspec(mp, vaddrp));

	case DDI_MO_MAP_HANDLE:
		return (rootnex_map_handle(mp));

	default:
		return (DDI_ME_UNIMPLEMENTED);
	}
}
/*
 * rootnex_map_fault()
 *
 *	fault in mappings for requestors
 */
/*ARGSUSED*/
static int
rootnex_map_fault(dev_info_t *dip, dev_info_t *rdip, struct hat *hat,
    struct seg *seg, caddr_t addr, struct devpage *dp, pfn_t pfn, uint_t prot,
    uint_t lock)
{

#ifdef	DDI_MAP_DEBUG
	ddi_map_debug("rootnex_map_fault: address <%x> pfn <%x>", addr, pfn);
	ddi_map_debug(" Seg <%s>\n",
	    seg->s_ops == &segdev_ops ? "segdev" :
	    seg == &kvseg ? "segkmem" : "NONE!");
#endif	/* DDI_MAP_DEBUG */

	/*
	 * This is all terribly broken, but it is a start
	 *
	 * XXX	Note that this test means that segdev_ops
	 *	must be exported from seg_dev.c.
	 * XXX	What about devices with their own segment drivers?
	 */
	if (seg->s_ops == &segdev_ops) {
		struct segdev_data *sdp = (struct segdev_data *)seg->s_data;

		if (hat == NULL) {
			/*
			 * This is one plausible interpretation of
			 * a null hat i.e. use the first hat on the
			 * address space hat list which by convention is
			 * the hat of the system MMU.  An alternative
			 * would be to panic .. this might well be better ..
			 */
			ASSERT(AS_READ_HELD(seg->s_as, &seg->s_as->a_lock));
			hat = seg->s_as->a_hat;
			cmn_err(CE_NOTE, "rootnex_map_fault: nil hat");
		}
		hat_devload(hat, addr, MMU_PAGESIZE, pfn, prot | sdp->hat_attr,
		    (lock ? HAT_LOAD_LOCK : HAT_LOAD));
	} else if (seg == &kvseg && dp == NULL) {
		hat_devload(kas.a_hat, addr, MMU_PAGESIZE, pfn, prot,
		    HAT_LOAD_LOCK);
	} else
		return (DDI_FAILURE);
	return (DDI_SUCCESS);
}
1026 * rootnex_map_regspec()
1027 * we don't support mapping of I/O cards above 4Gb
1030 rootnex_map_regspec(ddi_map_req_t
*mp
, caddr_t
*vaddrp
)
1032 rootnex_addr_t rbase
;
1034 uint_t npages
, pgoffset
;
1038 uint_t hat_acc_flags
;
1041 rp
= mp
->map_obj
.rp
;
1042 hp
= mp
->map_handlep
;
1044 #ifdef DDI_MAP_DEBUG
1046 "rootnex_map_regspec: <0x%x 0x%x 0x%x> handle 0x%x\n",
1047 rp
->regspec_bustype
, rp
->regspec_addr
,
1048 rp
->regspec_size
, mp
->map_handlep
);
1049 #endif /* DDI_MAP_DEBUG */
1052 * I/O or memory mapping
1054 * <bustype=0, addr=x, len=x>: memory
1055 * <bustype=1, addr=x, len=x>: i/o
1056 * <bustype>1, addr=0, len=x>: x86-compatibility i/o
1059 if (rp
->regspec_bustype
> 1 && rp
->regspec_addr
!= 0) {
1060 cmn_err(CE_WARN
, "rootnex: invalid register spec"
1061 " <0x%x, 0x%x, 0x%x>", rp
->regspec_bustype
,
1062 rp
->regspec_addr
, rp
->regspec_size
);
1063 return (DDI_FAILURE
);
1066 if (rp
->regspec_bustype
!= 0) {
1068 * I/O space - needs a handle.
1071 return (DDI_FAILURE
);
1073 ap
= (ddi_acc_impl_t
*)hp
->ah_platform_private
;
1074 ap
->ahi_acc_attr
|= DDI_ACCATTR_IO_SPACE
;
1075 impl_acc_hdl_init(hp
);
1077 if (mp
->map_flags
& DDI_MF_DEVICE_MAPPING
) {
1078 #ifdef DDI_MAP_DEBUG
1079 ddi_map_debug("rootnex_map_regspec: mmap() "
1080 "to I/O space is not supported.\n");
1081 #endif /* DDI_MAP_DEBUG */
1082 return (DDI_ME_INVAL
);
1085 * 1275-compliant vs. compatibility i/o mapping
1088 (rp
->regspec_bustype
> 1 && rp
->regspec_addr
== 0) ?
1089 ((caddr_t
)(uintptr_t)rp
->regspec_bustype
) :
1090 ((caddr_t
)(uintptr_t)rp
->regspec_addr
);
1092 if (DOMAIN_IS_INITDOMAIN(xen_info
)) {
1093 hp
->ah_pfn
= xen_assign_pfn(
1094 mmu_btop((ulong_t
)rp
->regspec_addr
&
1097 hp
->ah_pfn
= mmu_btop(
1098 (ulong_t
)rp
->regspec_addr
& MMU_PAGEMASK
);
1101 hp
->ah_pfn
= mmu_btop((ulong_t
)rp
->regspec_addr
&
1104 hp
->ah_pnum
= mmu_btopr(rp
->regspec_size
+
1105 (ulong_t
)rp
->regspec_addr
& MMU_PAGEOFFSET
);
1108 #ifdef DDI_MAP_DEBUG
1110 "rootnex_map_regspec: \"Mapping\" %d bytes I/O space at 0x%x\n",
1111 rp
->regspec_size
, *vaddrp
);
1112 #endif /* DDI_MAP_DEBUG */
1113 return (DDI_SUCCESS
);
1123 * hp->ah_acc.devacc_attr_endian_flags.
1125 switch (hp
->ah_acc
.devacc_attr_dataorder
) {
1126 case DDI_STRICTORDER_ACC
:
1127 hat_acc_flags
= HAT_STRICTORDER
;
1129 case DDI_UNORDERED_OK_ACC
:
1130 hat_acc_flags
= HAT_UNORDERED_OK
;
1132 case DDI_MERGING_OK_ACC
:
1133 hat_acc_flags
= HAT_MERGING_OK
;
1135 case DDI_LOADCACHING_OK_ACC
:
1136 hat_acc_flags
= HAT_LOADCACHING_OK
;
1138 case DDI_STORECACHING_OK_ACC
:
1139 hat_acc_flags
= HAT_STORECACHING_OK
;
1142 ap
= (ddi_acc_impl_t
*)hp
->ah_platform_private
;
1143 ap
->ahi_acc_attr
|= DDI_ACCATTR_CPU_VADDR
;
1144 impl_acc_hdl_init(hp
);
1145 hp
->ah_hat_flags
= hat_acc_flags
;
1147 hat_acc_flags
= HAT_STRICTORDER
;
1150 rbase
= (rootnex_addr_t
)(rp
->regspec_addr
& MMU_PAGEMASK
);
1153 * If we're dom0, we're using a real device so we need to translate
1156 if (DOMAIN_IS_INITDOMAIN(xen_info
)) {
1157 pbase
= pfn_to_pa(xen_assign_pfn(mmu_btop(rbase
)));
1164 pgoffset
= (ulong_t
)rp
->regspec_addr
& MMU_PAGEOFFSET
;
1166 if (rp
->regspec_size
== 0) {
1167 #ifdef DDI_MAP_DEBUG
1168 ddi_map_debug("rootnex_map_regspec: zero regspec_size\n");
1169 #endif /* DDI_MAP_DEBUG */
1170 return (DDI_ME_INVAL
);
1173 if (mp
->map_flags
& DDI_MF_DEVICE_MAPPING
) {
1174 /* extra cast to make gcc happy */
1175 *vaddrp
= (caddr_t
)((uintptr_t)mmu_btop(pbase
));
1177 npages
= mmu_btopr(rp
->regspec_size
+ pgoffset
);
1179 #ifdef DDI_MAP_DEBUG
1180 ddi_map_debug("rootnex_map_regspec: Mapping %d pages "
1181 "physical %llx", npages
, pbase
);
1182 #endif /* DDI_MAP_DEBUG */
1184 cvaddr
= device_arena_alloc(ptob(npages
), VM_NOSLEEP
);
1186 return (DDI_ME_NORESOURCES
);
1189 * Now map in the pages we've allocated...
1191 hat_devload(kas
.a_hat
, cvaddr
, mmu_ptob(npages
),
1192 mmu_btop(pbase
), mp
->map_prot
| hat_acc_flags
,
1194 *vaddrp
= (caddr_t
)cvaddr
+ pgoffset
;
1196 /* save away pfn and npages for FMA */
1197 hp
= mp
->map_handlep
;
1199 hp
->ah_pfn
= mmu_btop(pbase
);
1200 hp
->ah_pnum
= npages
;
1204 #ifdef DDI_MAP_DEBUG
1205 ddi_map_debug("at virtual 0x%x\n", *vaddrp
);
1206 #endif /* DDI_MAP_DEBUG */
1207 return (DDI_SUCCESS
);
1212 * rootnex_unmap_regspec()
1216 rootnex_unmap_regspec(ddi_map_req_t
*mp
, caddr_t
*vaddrp
)
1218 caddr_t addr
= (caddr_t
)*vaddrp
;
1219 uint_t npages
, pgoffset
;
1222 if (mp
->map_flags
& DDI_MF_DEVICE_MAPPING
)
1225 rp
= mp
->map_obj
.rp
;
1227 if (rp
->regspec_size
== 0) {
1228 #ifdef DDI_MAP_DEBUG
1229 ddi_map_debug("rootnex_unmap_regspec: zero regspec_size\n");
1230 #endif /* DDI_MAP_DEBUG */
1231 return (DDI_ME_INVAL
);
1235 * I/O or memory mapping:
1237 * <bustype=0, addr=x, len=x>: memory
1238 * <bustype=1, addr=x, len=x>: i/o
1239 * <bustype>1, addr=0, len=x>: x86-compatibility i/o
1241 if (rp
->regspec_bustype
!= 0) {
1243 * This is I/O space, which requires no particular
1244 * processing on unmap since it isn't mapped in the
1247 return (DDI_SUCCESS
);
1253 pgoffset
= (uintptr_t)addr
& MMU_PAGEOFFSET
;
1254 npages
= mmu_btopr(rp
->regspec_size
+ pgoffset
);
1255 hat_unload(kas
.a_hat
, addr
- pgoffset
, ptob(npages
), HAT_UNLOAD_UNLOCK
);
1256 device_arena_free(addr
- pgoffset
, ptob(npages
));
1259 * Destroy the pointer - the mapping has logically gone
1263 return (DDI_SUCCESS
);
1268 * rootnex_map_handle()
1272 rootnex_map_handle(ddi_map_req_t
*mp
)
1274 rootnex_addr_t rbase
;
1280 rp
= mp
->map_obj
.rp
;
1282 #ifdef DDI_MAP_DEBUG
1284 "rootnex_map_handle: <0x%x 0x%x 0x%x> handle 0x%x\n",
1285 rp
->regspec_bustype
, rp
->regspec_addr
,
1286 rp
->regspec_size
, mp
->map_handlep
);
1287 #endif /* DDI_MAP_DEBUG */
1290 * I/O or memory mapping:
1292 * <bustype=0, addr=x, len=x>: memory
1293 * <bustype=1, addr=x, len=x>: i/o
1294 * <bustype>1, addr=0, len=x>: x86-compatibility i/o
1296 if (rp
->regspec_bustype
!= 0) {
1298 * This refers to I/O space, and we don't support "mapping"
1299 * I/O space to a user.
1301 return (DDI_FAILURE
);
1305 * Set up the hat_flags for the mapping.
1307 hp
= mp
->map_handlep
;
1309 switch (hp
->ah_acc
.devacc_attr_endian_flags
) {
1310 case DDI_NEVERSWAP_ACC
:
1311 hp
->ah_hat_flags
= HAT_NEVERSWAP
| HAT_STRICTORDER
;
1313 case DDI_STRUCTURE_LE_ACC
:
1314 hp
->ah_hat_flags
= HAT_STRUCTURE_LE
;
1316 case DDI_STRUCTURE_BE_ACC
:
1317 return (DDI_FAILURE
);
1319 return (DDI_REGS_ACC_CONFLICT
);
1322 switch (hp
->ah_acc
.devacc_attr_dataorder
) {
1323 case DDI_STRICTORDER_ACC
:
1325 case DDI_UNORDERED_OK_ACC
:
1326 hp
->ah_hat_flags
|= HAT_UNORDERED_OK
;
1328 case DDI_MERGING_OK_ACC
:
1329 hp
->ah_hat_flags
|= HAT_MERGING_OK
;
1331 case DDI_LOADCACHING_OK_ACC
:
1332 hp
->ah_hat_flags
|= HAT_LOADCACHING_OK
;
1334 case DDI_STORECACHING_OK_ACC
:
1335 hp
->ah_hat_flags
|= HAT_STORECACHING_OK
;
1338 return (DDI_FAILURE
);
1341 rbase
= (rootnex_addr_t
)rp
->regspec_addr
&
1342 (~(rootnex_addr_t
)MMU_PAGEOFFSET
);
1343 pgoffset
= (ulong_t
)rp
->regspec_addr
& MMU_PAGEOFFSET
;
1345 if (rp
->regspec_size
== 0)
1346 return (DDI_ME_INVAL
);
1350 * If we're dom0, we're using a real device so we need to translate
1353 if (DOMAIN_IS_INITDOMAIN(xen_info
)) {
1354 pbase
= pfn_to_pa(xen_assign_pfn(mmu_btop(rbase
))) |
1355 (rbase
& MMU_PAGEOFFSET
);
1363 hp
->ah_pfn
= mmu_btop(pbase
);
1364 hp
->ah_pnum
= mmu_btopr(rp
->regspec_size
+ pgoffset
);
1366 return (DDI_SUCCESS
);
1372 * ************************
1373 * interrupt related code
1374 * ************************
1378 * rootnex_intr_ops()
1379 * bus_intr_op() function for interrupt support
1383 rootnex_intr_ops(dev_info_t
*pdip
, dev_info_t
*rdip
, ddi_intr_op_t intr_op
,
1384 ddi_intr_handle_impl_t
*hdlp
, void *result
)
1386 struct intrspec
*ispec
;
1388 DDI_INTR_NEXDBG((CE_CONT
,
1389 "rootnex_intr_ops: pdip = %p, rdip = %p, intr_op = %x, hdlp = %p\n",
1390 (void *)pdip
, (void *)rdip
, intr_op
, (void *)hdlp
));
1392 /* Process the interrupt operation */
1394 case DDI_INTROP_GETCAP
:
1395 /* First check with pcplusmp */
1396 if (psm_intr_ops
== NULL
)
1397 return (DDI_FAILURE
);
1399 if ((*psm_intr_ops
)(rdip
, hdlp
, PSM_INTR_OP_GET_CAP
, result
)) {
1401 return (DDI_FAILURE
);
1404 case DDI_INTROP_SETCAP
:
1405 if (psm_intr_ops
== NULL
)
1406 return (DDI_FAILURE
);
1408 if ((*psm_intr_ops
)(rdip
, hdlp
, PSM_INTR_OP_SET_CAP
, result
))
1409 return (DDI_FAILURE
);
1411 case DDI_INTROP_ALLOC
:
1412 ASSERT(hdlp
->ih_type
== DDI_INTR_TYPE_FIXED
);
1413 return (rootnex_alloc_intr_fixed(rdip
, hdlp
, result
));
1414 case DDI_INTROP_FREE
:
1415 ASSERT(hdlp
->ih_type
== DDI_INTR_TYPE_FIXED
);
1416 return (rootnex_free_intr_fixed(rdip
, hdlp
));
1417 case DDI_INTROP_GETPRI
:
1418 if ((ispec
= rootnex_get_ispec(rdip
, hdlp
->ih_inum
)) == NULL
)
1419 return (DDI_FAILURE
);
1420 *(int *)result
= ispec
->intrspec_pri
;
1422 case DDI_INTROP_SETPRI
:
1423 /* Validate the interrupt priority passed to us */
1424 if (*(int *)result
> LOCK_LEVEL
)
1425 return (DDI_FAILURE
);
1427 /* Ensure that PSM is all initialized and ispec is ok */
1428 if ((psm_intr_ops
== NULL
) ||
1429 ((ispec
= rootnex_get_ispec(rdip
, hdlp
->ih_inum
)) == NULL
))
1430 return (DDI_FAILURE
);
1432 /* Change the priority */
1433 if ((*psm_intr_ops
)(rdip
, hdlp
, PSM_INTR_OP_SET_PRI
, result
) ==
1435 return (DDI_FAILURE
);
1437 /* update the ispec with the new priority */
1438 ispec
->intrspec_pri
= *(int *)result
;
1440 case DDI_INTROP_ADDISR
:
1441 if ((ispec
= rootnex_get_ispec(rdip
, hdlp
->ih_inum
)) == NULL
)
1442 return (DDI_FAILURE
);
1443 ispec
->intrspec_func
= hdlp
->ih_cb_func
;
1445 case DDI_INTROP_REMISR
:
1446 if ((ispec
= rootnex_get_ispec(rdip
, hdlp
->ih_inum
)) == NULL
)
1447 return (DDI_FAILURE
);
1448 ispec
->intrspec_func
= (uint_t (*)()) 0;
1450 case DDI_INTROP_ENABLE
:
1451 if ((ispec
= rootnex_get_ispec(rdip
, hdlp
->ih_inum
)) == NULL
)
1452 return (DDI_FAILURE
);
1454 /* Call psmi to translate irq with the dip */
1455 if (psm_intr_ops
== NULL
)
1456 return (DDI_FAILURE
);
1458 ((ihdl_plat_t
*)hdlp
->ih_private
)->ip_ispecp
= ispec
;
1459 if ((*psm_intr_ops
)(rdip
, hdlp
, PSM_INTR_OP_XLATE_VECTOR
,
1460 (int *)&hdlp
->ih_vector
) == PSM_FAILURE
)
1461 return (DDI_FAILURE
);
1463 /* Add the interrupt handler */
1464 if (!add_avintr((void *)hdlp
, ispec
->intrspec_pri
,
1465 hdlp
->ih_cb_func
, DEVI(rdip
)->devi_name
, hdlp
->ih_vector
,
1466 hdlp
->ih_cb_arg1
, hdlp
->ih_cb_arg2
, NULL
, rdip
))
1467 return (DDI_FAILURE
);
1469 case DDI_INTROP_DISABLE
:
1470 if ((ispec
= rootnex_get_ispec(rdip
, hdlp
->ih_inum
)) == NULL
)
1471 return (DDI_FAILURE
);
1473 /* Call psm_ops() to translate irq with the dip */
1474 if (psm_intr_ops
== NULL
)
1475 return (DDI_FAILURE
);
1477 ((ihdl_plat_t
*)hdlp
->ih_private
)->ip_ispecp
= ispec
;
1478 (void) (*psm_intr_ops
)(rdip
, hdlp
,
1479 PSM_INTR_OP_XLATE_VECTOR
, (int *)&hdlp
->ih_vector
);
1481 /* Remove the interrupt handler */
1482 rem_avintr((void *)hdlp
, ispec
->intrspec_pri
,
1483 hdlp
->ih_cb_func
, hdlp
->ih_vector
);
1485 case DDI_INTROP_SETMASK
:
1486 if (psm_intr_ops
== NULL
)
1487 return (DDI_FAILURE
);
1489 if ((*psm_intr_ops
)(rdip
, hdlp
, PSM_INTR_OP_SET_MASK
, NULL
))
1490 return (DDI_FAILURE
);
1492 case DDI_INTROP_CLRMASK
:
1493 if (psm_intr_ops
== NULL
)
1494 return (DDI_FAILURE
);
1496 if ((*psm_intr_ops
)(rdip
, hdlp
, PSM_INTR_OP_CLEAR_MASK
, NULL
))
1497 return (DDI_FAILURE
);
1499 case DDI_INTROP_GETPENDING
:
1500 if (psm_intr_ops
== NULL
)
1501 return (DDI_FAILURE
);
1503 if ((*psm_intr_ops
)(rdip
, hdlp
, PSM_INTR_OP_GET_PENDING
,
1506 return (DDI_FAILURE
);
1509 case DDI_INTROP_NAVAIL
:
1510 case DDI_INTROP_NINTRS
:
1511 *(int *)result
= i_ddi_get_intx_nintrs(rdip
);
1512 if (*(int *)result
== 0) {
1514 * Special case for 'pcic' driver' only. This driver
1515 * driver is a child of 'isa' and 'rootnex' drivers.
1517 * See detailed comments on this in the function
1518 * rootnex_get_ispec().
1520 * Children of 'pcic' send 'NINITR' request all the
1521 * way to rootnex driver. But, the 'pdp->par_nintr'
1522 * field may not initialized. So, we fake it here
1523 * to return 1 (a la what PCMCIA nexus does).
1525 if (strcmp(ddi_get_name(rdip
), "pcic") == 0)
1528 return (DDI_FAILURE
);
1531 case DDI_INTROP_SUPPORTED_TYPES
:
1532 *(int *)result
= DDI_INTR_TYPE_FIXED
; /* Always ... */
1535 return (DDI_FAILURE
);
1538 return (DDI_SUCCESS
);
1543 * rootnex_get_ispec()
1544 * convert an interrupt number to an interrupt specification.
1545 * The interrupt number determines which interrupt spec will be
1546 * returned if more than one exists.
1548 * Look into the parent private data area of the 'rdip' to find out
1549 * the interrupt specification. First check to make sure there is
1550 * one that matchs "inumber" and then return a pointer to it.
1552 * Return NULL if one could not be found.
1554 * NOTE: This is needed for rootnex_intr_ops()
1556 static struct intrspec
*
1557 rootnex_get_ispec(dev_info_t
*rdip
, int inum
)
1559 struct ddi_parent_private_data
*pdp
= ddi_get_parent_data(rdip
);
1562 * Special case handling for drivers that provide their own
1563 * intrspec structures instead of relying on the DDI framework.
1565 * A broken hardware driver in ON could potentially provide its
1566 * own intrspec structure, instead of relying on the hardware.
1567 * If these drivers are children of 'rootnex' then we need to
1568 * continue to provide backward compatibility to them here.
1570 * Following check is a special case for 'pcic' driver which
1571 * was found to have broken hardwre andby provides its own intrspec.
1573 * Verbatim comments from this driver are shown here:
1574 * "Don't use the ddi_add_intr since we don't have a
1575 * default intrspec in all cases."
1577 * Since an 'ispec' may not be always created for it,
1578 * check for that and create one if so.
1580 * NOTE: Currently 'pcic' is the only driver found to do this.
1582 if (!pdp
->par_intr
&& strcmp(ddi_get_name(rdip
), "pcic") == 0) {
1584 pdp
->par_intr
= kmem_zalloc(sizeof (struct intrspec
) *
1585 pdp
->par_nintr
, KM_SLEEP
);
1588 /* Validate the interrupt number */
1589 if (inum
>= pdp
->par_nintr
)
1592 /* Get the interrupt structure pointer and return that */
1593 return ((struct intrspec
*)&pdp
->par_intr
[inum
]);
1597 * Allocate interrupt vector for FIXED (legacy) type.
1600 rootnex_alloc_intr_fixed(dev_info_t
*rdip
, ddi_intr_handle_impl_t
*hdlp
,
1603 struct intrspec
*ispec
;
1604 ddi_intr_handle_impl_t info_hdl
;
1607 apic_get_type_t type_info
;
1609 if (psm_intr_ops
== NULL
)
1610 return (DDI_FAILURE
);
1612 if ((ispec
= rootnex_get_ispec(rdip
, hdlp
->ih_inum
)) == NULL
)
1613 return (DDI_FAILURE
);
1616 * If the PSM module is "APIX" then pass the request for it
1617 * to allocate the vector now.
1619 bzero(&info_hdl
, sizeof (ddi_intr_handle_impl_t
));
1620 info_hdl
.ih_private
= &type_info
;
1621 if ((*psm_intr_ops
)(NULL
, &info_hdl
, PSM_INTR_OP_APIC_TYPE
, NULL
) ==
1622 PSM_SUCCESS
&& strcmp(type_info
.avgi_type
, APIC_APIX_NAME
) == 0) {
1623 if (hdlp
->ih_private
== NULL
) { /* allocate phdl structure */
1625 i_ddi_alloc_intr_phdl(hdlp
);
1627 ((ihdl_plat_t
*)hdlp
->ih_private
)->ip_ispecp
= ispec
;
1628 ret
= (*psm_intr_ops
)(rdip
, hdlp
,
1629 PSM_INTR_OP_ALLOC_VECTORS
, result
);
1630 if (free_phdl
) { /* free up the phdl structure */
1632 i_ddi_free_intr_phdl(hdlp
);
1633 hdlp
->ih_private
= NULL
;
1637 * No APIX module; fall back to the old scheme where the
1638 * interrupt vector is allocated during ddi_enable_intr() call.
1640 hdlp
->ih_pri
= ispec
->intrspec_pri
;
1641 *(int *)result
= hdlp
->ih_scratch1
;
1649 * Free up interrupt vector for FIXED (legacy) type.
1652 rootnex_free_intr_fixed(dev_info_t
*rdip
, ddi_intr_handle_impl_t
*hdlp
)
1654 struct intrspec
*ispec
;
1655 struct ddi_parent_private_data
*pdp
;
1656 ddi_intr_handle_impl_t info_hdl
;
1658 apic_get_type_t type_info
;
1660 if (psm_intr_ops
== NULL
)
1661 return (DDI_FAILURE
);
1664 * If the PSM module is "APIX" then pass the request for it
1665 * to free up the vector now.
1667 bzero(&info_hdl
, sizeof (ddi_intr_handle_impl_t
));
1668 info_hdl
.ih_private
= &type_info
;
1669 if ((*psm_intr_ops
)(NULL
, &info_hdl
, PSM_INTR_OP_APIC_TYPE
, NULL
) ==
1670 PSM_SUCCESS
&& strcmp(type_info
.avgi_type
, APIC_APIX_NAME
) == 0) {
1671 if ((ispec
= rootnex_get_ispec(rdip
, hdlp
->ih_inum
)) == NULL
)
1672 return (DDI_FAILURE
);
1673 ((ihdl_plat_t
*)hdlp
->ih_private
)->ip_ispecp
= ispec
;
1674 ret
= (*psm_intr_ops
)(rdip
, hdlp
,
1675 PSM_INTR_OP_FREE_VECTORS
, NULL
);
1678 * No APIX module; fall back to the old scheme where
1679 * the interrupt vector was already freed during
1680 * ddi_disable_intr() call.
1685 pdp
= ddi_get_parent_data(rdip
);
1688 * Special case for 'pcic' driver' only.
1689 * If an intrspec was created for it, clean it up here
1690 * See detailed comments on this in the function
1691 * rootnex_get_ispec().
1693 if (pdp
->par_intr
&& strcmp(ddi_get_name(rdip
), "pcic") == 0) {
1694 kmem_free(pdp
->par_intr
, sizeof (struct intrspec
) *
1697 * Set it to zero; so that
1698 * DDI framework doesn't free it again
1700 pdp
->par_intr
= NULL
;
1709 * ******************
1711 * ******************
1716 rootnex_coredma_allochdl(dev_info_t
*dip
, dev_info_t
*rdip
,
1717 ddi_dma_attr_t
*attr
, int (*waitfp
)(caddr_t
), caddr_t arg
,
1718 ddi_dma_handle_t
*handlep
)
1720 uint64_t maxsegmentsize_ll
;
1721 uint_t maxsegmentsize
;
1730 /* convert our sleep flags */
1731 if (waitfp
== DDI_DMA_SLEEP
) {
1734 kmflag
= KM_NOSLEEP
;
1738 * We try to do only one memory allocation here. We'll do a little
1739 * pointer manipulation later. If the bind ends up taking more than
1740 * our prealloc's space, we'll have to allocate more memory in the
1741 * bind operation. Not great, but much better than before and the
1742 * best we can do with the current bind interfaces.
1744 hp
= kmem_cache_alloc(rootnex_state
->r_dmahdl_cache
, kmflag
);
1746 return (DDI_DMA_NORESOURCES
);
1748 /* Do our pointer manipulation now, align the structures */
1749 hp
->dmai_private
= (void *)(((uintptr_t)hp
+
1750 (uintptr_t)sizeof (ddi_dma_impl_t
) + 0x7) & ~0x7);
1751 dma
= (rootnex_dma_t
*)hp
->dmai_private
;
1752 dma
->dp_prealloc_buffer
= (uchar_t
*)(((uintptr_t)dma
+
1753 sizeof (rootnex_dma_t
) + 0x7) & ~0x7);
1755 /* setup the handle */
1756 rootnex_clean_dmahdl(hp
);
1757 hp
->dmai_error
.err_fep
= NULL
;
1758 hp
->dmai_error
.err_cf
= NULL
;
1760 dma
->dp_sglinfo
.si_flags
= attr
->dma_attr_flags
;
1761 dma
->dp_sglinfo
.si_min_addr
= attr
->dma_attr_addr_lo
;
1764 * The BOUNCE_ON_SEG workaround is not needed when an IOMMU
1765 * is being used. Set the upper limit to the seg value.
1766 * There will be enough DVMA space to always get addresses
1767 * that will match the constraints.
1769 if (IOMMU_USED(rdip
) &&
1770 (attr
->dma_attr_flags
& _DDI_DMA_BOUNCE_ON_SEG
)) {
1771 dma
->dp_sglinfo
.si_max_addr
= attr
->dma_attr_seg
;
1772 dma
->dp_sglinfo
.si_flags
&= ~_DDI_DMA_BOUNCE_ON_SEG
;
1774 dma
->dp_sglinfo
.si_max_addr
= attr
->dma_attr_addr_hi
;
1776 hp
->dmai_minxfer
= attr
->dma_attr_minxfer
;
1777 hp
->dmai_burstsizes
= attr
->dma_attr_burstsizes
;
1778 hp
->dmai_rdip
= rdip
;
1779 hp
->dmai_attr
= *attr
;
1781 if (attr
->dma_attr_seg
>= dma
->dp_sglinfo
.si_max_addr
)
1782 dma
->dp_sglinfo
.si_cancross
= B_FALSE
;
1784 dma
->dp_sglinfo
.si_cancross
= B_TRUE
;
1786 /* we don't need to worry about the SPL since we do a tryenter */
1787 mutex_init(&dma
->dp_mutex
, NULL
, MUTEX_DRIVER
, NULL
);
1790 * Figure out our maximum segment size. If the segment size is greater
1791 * than 4G, we will limit it to (4G - 1) since the max size of a dma
1792 * object (ddi_dma_obj_t.dmao_size) is 32 bits. dma_attr_seg and
1793 * dma_attr_count_max are size-1 type values.
1795 * Maximum segment size is the largest physically contiguous chunk of
1796 * memory that we can return from a bind (i.e. the maximum size of a
1800 /* handle the rollover cases */
1801 seg
= attr
->dma_attr_seg
+ 1;
1802 if (seg
< attr
->dma_attr_seg
) {
1803 seg
= attr
->dma_attr_seg
;
1805 count_max
= attr
->dma_attr_count_max
+ 1;
1806 if (count_max
< attr
->dma_attr_count_max
) {
1807 count_max
= attr
->dma_attr_count_max
;
1811 * granularity may or may not be a power of two. If it isn't, we can't
1812 * use a simple mask.
1814 if (attr
->dma_attr_granular
& (attr
->dma_attr_granular
- 1)) {
1815 dma
->dp_granularity_power_2
= B_FALSE
;
1817 dma
->dp_granularity_power_2
= B_TRUE
;
1821 * maxxfer should be a whole multiple of granularity. If we're going to
1822 * break up a window because we're greater than maxxfer, we might as
1823 * well make sure it's maxxfer is a whole multiple so we don't have to
1824 * worry about triming the window later on for this case.
1826 if (attr
->dma_attr_granular
> 1) {
1827 if (dma
->dp_granularity_power_2
) {
1828 dma
->dp_maxxfer
= attr
->dma_attr_maxxfer
-
1829 (attr
->dma_attr_maxxfer
&
1830 (attr
->dma_attr_granular
- 1));
1832 dma
->dp_maxxfer
= attr
->dma_attr_maxxfer
-
1833 (attr
->dma_attr_maxxfer
% attr
->dma_attr_granular
);
1836 dma
->dp_maxxfer
= attr
->dma_attr_maxxfer
;
1839 maxsegmentsize_ll
= MIN(seg
, dma
->dp_maxxfer
);
1840 maxsegmentsize_ll
= MIN(maxsegmentsize_ll
, count_max
);
1841 if (maxsegmentsize_ll
== 0 || (maxsegmentsize_ll
> 0xFFFFFFFF)) {
1842 maxsegmentsize
= 0xFFFFFFFF;
1844 maxsegmentsize
= maxsegmentsize_ll
;
1846 dma
->dp_sglinfo
.si_max_cookie_size
= maxsegmentsize
;
1847 dma
->dp_sglinfo
.si_segmask
= attr
->dma_attr_seg
;
1849 /* check the ddi_dma_attr arg to make sure it makes a little sense */
1850 if (rootnex_alloc_check_parms
) {
1851 e
= rootnex_valid_alloc_parms(attr
, maxsegmentsize
);
1852 if (e
!= DDI_SUCCESS
) {
1853 ROOTNEX_DPROF_INC(&rootnex_cnt
[ROOTNEX_CNT_ALLOC_FAIL
]);
1854 (void) rootnex_dma_freehdl(dip
, rdip
,
1855 (ddi_dma_handle_t
)hp
);
1860 *handlep
= (ddi_dma_handle_t
)hp
;
1862 ROOTNEX_DPROF_INC(&rootnex_cnt
[ROOTNEX_CNT_ACTIVE_HDLS
]);
1863 ROOTNEX_DPROBE1(rootnex__alloc__handle
, uint64_t,
1864 rootnex_cnt
[ROOTNEX_CNT_ACTIVE_HDLS
]);
1866 return (DDI_SUCCESS
);
1871 * rootnex_dma_allochdl()
1872 * called from ddi_dma_alloc_handle().
1875 rootnex_dma_allochdl(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_dma_attr_t
*attr
,
1876 int (*waitfp
)(caddr_t
), caddr_t arg
, ddi_dma_handle_t
*handlep
)
1878 int retval
= DDI_SUCCESS
;
1879 #if defined(__amd64) && !defined(__xpv)
1881 if (IOMMU_UNITIALIZED(rdip
)) {
1882 retval
= iommulib_nex_open(dip
, rdip
);
1884 if (retval
!= DDI_SUCCESS
&& retval
!= DDI_ENOTSUP
)
1888 if (IOMMU_UNUSED(rdip
)) {
1889 retval
= rootnex_coredma_allochdl(dip
, rdip
, attr
, waitfp
, arg
,
1892 retval
= iommulib_nexdma_allochdl(dip
, rdip
, attr
,
1893 waitfp
, arg
, handlep
);
1896 retval
= rootnex_coredma_allochdl(dip
, rdip
, attr
, waitfp
, arg
,
1900 case DDI_DMA_NORESOURCES
:
1901 if (waitfp
!= DDI_DMA_DONTWAIT
) {
1902 ddi_set_callback(waitfp
, arg
,
1903 &rootnex_state
->r_dvma_call_list_id
);
1907 ndi_fmc_insert(rdip
, DMA_HANDLE
, *handlep
, NULL
);
1917 rootnex_coredma_freehdl(dev_info_t
*dip
, dev_info_t
*rdip
,
1918 ddi_dma_handle_t handle
)
1924 hp
= (ddi_dma_impl_t
*)handle
;
1925 dma
= (rootnex_dma_t
*)hp
->dmai_private
;
1927 /* unbind should have been called first */
1928 ASSERT(!dma
->dp_inuse
);
1930 mutex_destroy(&dma
->dp_mutex
);
1931 kmem_cache_free(rootnex_state
->r_dmahdl_cache
, hp
);
1933 ROOTNEX_DPROF_DEC(&rootnex_cnt
[ROOTNEX_CNT_ACTIVE_HDLS
]);
1934 ROOTNEX_DPROBE1(rootnex__free__handle
, uint64_t,
1935 rootnex_cnt
[ROOTNEX_CNT_ACTIVE_HDLS
]);
1937 return (DDI_SUCCESS
);
1941 * rootnex_dma_freehdl()
1942 * called from ddi_dma_free_handle().
1945 rootnex_dma_freehdl(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_dma_handle_t handle
)
1949 ndi_fmc_remove(rdip
, DMA_HANDLE
, handle
);
1950 #if defined(__amd64) && !defined(__xpv)
1951 if (IOMMU_USED(rdip
))
1952 ret
= iommulib_nexdma_freehdl(dip
, rdip
, handle
);
1955 ret
= rootnex_coredma_freehdl(dip
, rdip
, handle
);
1957 if (rootnex_state
->r_dvma_call_list_id
)
1958 ddi_run_callback(&rootnex_state
->r_dvma_call_list_id
);
1965 rootnex_coredma_bindhdl(dev_info_t
*dip
, dev_info_t
*rdip
,
1966 ddi_dma_handle_t handle
, struct ddi_dma_req
*dmareq
,
1967 ddi_dma_cookie_t
*cookiep
, uint_t
*ccountp
)
1969 rootnex_sglinfo_t
*sinfo
;
1970 ddi_dma_obj_t
*dmao
;
1971 #if defined(__amd64) && !defined(__xpv)
1972 struct dvmaseg
*dvs
;
1973 ddi_dma_cookie_t
*cookie
;
1975 ddi_dma_attr_t
*attr
;
1982 hp
= (ddi_dma_impl_t
*)handle
;
1983 dma
= (rootnex_dma_t
*)hp
->dmai_private
;
1984 dmao
= &dma
->dp_dma
;
1985 sinfo
= &dma
->dp_sglinfo
;
1986 attr
= &hp
->dmai_attr
;
1988 /* convert the sleep flags */
1989 if (dmareq
->dmar_fp
== DDI_DMA_SLEEP
) {
1990 dma
->dp_sleep_flags
= kmflag
= KM_SLEEP
;
1992 dma
->dp_sleep_flags
= kmflag
= KM_NOSLEEP
;
1995 hp
->dmai_rflags
= dmareq
->dmar_flags
& DMP_DDIFLAGS
;
1998 * This is useful for debugging a driver. Not as useful in a production
1999 * system. The only time this will fail is if you have a driver bug.
2001 if (rootnex_bind_check_inuse
) {
2003 * No one else should ever have this lock unless someone else
2004 * is trying to use this handle. So contention on the lock
2005 * is the same as inuse being set.
2007 e
= mutex_tryenter(&dma
->dp_mutex
);
2009 ROOTNEX_DPROF_INC(&rootnex_cnt
[ROOTNEX_CNT_BIND_FAIL
]);
2010 return (DDI_DMA_INUSE
);
2012 if (dma
->dp_inuse
) {
2013 mutex_exit(&dma
->dp_mutex
);
2014 ROOTNEX_DPROF_INC(&rootnex_cnt
[ROOTNEX_CNT_BIND_FAIL
]);
2015 return (DDI_DMA_INUSE
);
2017 dma
->dp_inuse
= B_TRUE
;
2018 mutex_exit(&dma
->dp_mutex
);
2021 /* check the ddi_dma_attr arg to make sure it makes a little sense */
2022 if (rootnex_bind_check_parms
) {
2023 e
= rootnex_valid_bind_parms(dmareq
, attr
);
2024 if (e
!= DDI_SUCCESS
) {
2025 ROOTNEX_DPROF_INC(&rootnex_cnt
[ROOTNEX_CNT_BIND_FAIL
]);
2026 rootnex_clean_dmahdl(hp
);
2031 /* save away the original bind info */
2032 dma
->dp_dma
= dmareq
->dmar_object
;
2034 #if defined(__amd64) && !defined(__xpv)
2035 if (IOMMU_USED(rdip
)) {
2036 dmao
= &dma
->dp_dvma
;
2037 e
= iommulib_nexdma_mapobject(dip
, rdip
, handle
, dmareq
, dmao
);
2040 if (sinfo
->si_cancross
||
2041 dmao
->dmao_obj
.dvma_obj
.dv_nseg
!= 1 ||
2042 dmao
->dmao_size
> sinfo
->si_max_cookie_size
) {
2043 dma
->dp_dvma_used
= B_TRUE
;
2046 sinfo
->si_sgl_size
= 1;
2047 hp
->dmai_rflags
|= DMP_NOSYNC
;
2049 dma
->dp_dvma_used
= B_TRUE
;
2050 dma
->dp_need_to_free_cookie
= B_FALSE
;
2052 dvs
= &dmao
->dmao_obj
.dvma_obj
.dv_seg
[0];
2053 cookie
= hp
->dmai_cookie
= dma
->dp_cookies
=
2054 (ddi_dma_cookie_t
*)dma
->dp_prealloc_buffer
;
2055 cookie
->dmac_laddress
= dvs
->dvs_start
+
2056 dmao
->dmao_obj
.dvma_obj
.dv_off
;
2057 cookie
->dmac_size
= dvs
->dvs_len
;
2058 cookie
->dmac_type
= 0;
			ROOTNEX_DPROBE1(rootnex__bind__dvmafast,
			    dev_info_t *, rdip);
			*ccountp = sinfo->si_sgl_size;
			*cookiep = dma->dp_cookies[0];
			return (DDI_DMA_MAPPED);

		case DDI_ENOTSUP:
			break;

		default:
			rootnex_clean_dmahdl(hp);
			return (e);
		}
	}
#endif

	/*
	 * Figure out a rough estimate of what maximum number of pages
	 * this buffer could use (a high estimate of course).
	 */
	sinfo->si_max_pages = mmu_btopr(dma->dp_dma.dmao_size) + 1;

	if (dma->dp_dvma_used) {
		/*
		 * The number of physical pages is the worst case.
		 *
		 * For DVMA, the worst case is the length divided
		 * by the maximum cookie length, plus 1. Add to that
		 * the number of segment boundaries potentially crossed, and
		 * the additional number of DVMA segments that was returned.
		 *
		 * In the normal case, for modern devices, si_cancross will
		 * be false, and dv_nseg will be 1, and the fast path will
		 * have been taken above.
		 */
		ncookies = (dma->dp_dma.dmao_size / sinfo->si_max_cookie_size)
		    + 1;
		if (sinfo->si_cancross)
			ncookies +=
			    (dma->dp_dma.dmao_size / attr->dma_attr_seg) + 1;
		ncookies += (dmao->dmao_obj.dvma_obj.dv_nseg - 1);

		sinfo->si_max_pages = MIN(sinfo->si_max_pages, ncookies);
	}

	/*
	 * We'll use the pre-allocated cookies for any bind that will *always*
	 * fit (more important to be consistent, we don't want to create
	 * additional degenerate cases).
	 */
	if (sinfo->si_max_pages <= rootnex_state->r_prealloc_cookies) {
		dma->dp_cookies = (ddi_dma_cookie_t *)dma->dp_prealloc_buffer;
		dma->dp_need_to_free_cookie = B_FALSE;
		ROOTNEX_DPROBE2(rootnex__bind__prealloc, dev_info_t *, rdip,
		    uint_t, sinfo->si_max_pages);

	/*
	 * For anything larger than that, we'll go ahead and allocate the
	 * maximum number of pages we expect to see. Hopefully, we won't be
	 * seeing this path in the fast path for high performance devices very
	 * often.
	 *
	 * a ddi bind interface that allowed the driver to provide storage to
	 * the bind interface would speed this case up.
	 */
	} else {
		/*
		 * Save away how much memory we allocated. If we're doing a
		 * nosleep, the alloc could fail...
		 */
		dma->dp_cookie_size = sinfo->si_max_pages *
		    sizeof (ddi_dma_cookie_t);
		dma->dp_cookies = kmem_alloc(dma->dp_cookie_size, kmflag);
		if (dma->dp_cookies == NULL) {
			ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
			rootnex_clean_dmahdl(hp);
			return (DDI_DMA_NORESOURCES);
		}
		dma->dp_need_to_free_cookie = B_TRUE;
		ROOTNEX_DPROBE2(rootnex__bind__alloc, dev_info_t *, rdip,
		    uint_t, sinfo->si_max_pages);
	}
	hp->dmai_cookie = dma->dp_cookies;

	/*
	 * Get the real sgl. rootnex_get_sgl will fill in cookie array while
	 * looking at the constraints in the dma structure. It will then put
	 * some additional state about the sgl in the dma struct (i.e. is
	 * the sgl clean, or do we need to do some munging; how many pages
	 * need to be copied, etc.)
	 */
	if (dma->dp_dvma_used)
		rootnex_dvma_get_sgl(dmao, dma->dp_cookies, &dma->dp_sglinfo);
	else
		rootnex_get_sgl(dmao, dma->dp_cookies, &dma->dp_sglinfo);

	ASSERT(sinfo->si_sgl_size <= sinfo->si_max_pages);
	/* if we don't need a copy buffer, we don't need to sync */
	if (sinfo->si_copybuf_req == 0) {
		hp->dmai_rflags |= DMP_NOSYNC;
	}

	/*
	 * if we don't need the copybuf and we don't need to do a partial, we
	 * hit the fast path. All the high performance devices should be trying
	 * to hit this path. To hit this path, a device should be able to reach
	 * all of memory, shouldn't try to bind more than it can transfer, and
	 * the buffer shouldn't require more cookies than the driver/device can
	 * handle [sgllen].
	 *
	 * Note that negative values of dma_attr_sgllen are supposed
	 * to mean unlimited, but we just cast them to mean a
	 * "ridiculously large limit". This saves some extra checks on
	 * hot paths.
	 */
	if ((sinfo->si_copybuf_req == 0) &&
	    (sinfo->si_sgl_size <= (unsigned)attr->dma_attr_sgllen) &&
	    (dmao->dmao_size < dma->dp_maxxfer)) {
		/*
		 * If the driver supports FMA, insert the handle in the FMA DMA
		 * handle cache.
		 */
		if (attr->dma_attr_flags & DDI_DMA_FLAGERR)
			hp->dmai_error.err_cf = rootnex_dma_check;

		/*
		 * copy out the first cookie and ccountp, set the cookie
		 * pointer to the second cookie. The first cookie is passed
		 * back on the stack. Additional cookies are accessed via
		 * ddi_dma_nextcookie()
		 */
		*cookiep = dma->dp_cookies[0];
		*ccountp = sinfo->si_sgl_size;
		hp->dmai_cookie++;
		hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
		ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
		ROOTNEX_DPROBE4(rootnex__bind__fast, dev_info_t *, rdip,
		    uint64_t, rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS],
		    uint_t, dmao->dmao_size, uint_t, *ccountp);
		return (DDI_DMA_MAPPED);
	}
	/*
	 * go to the slow path, we may need to alloc more memory, create
	 * multiple windows, and munge up a sgl to make the device happy.
	 */

	/*
	 * With the IOMMU mapobject method used, we should never hit
	 * the slow path. If we do, something is seriously wrong.
	 * Clean up and return an error.
	 */

#if defined(__amd64) && !defined(__xpv)
	if (dma->dp_dvma_used) {
		(void) iommulib_nexdma_unmapobject(dip, rdip, handle,
		    &dma->dp_dvma);
		e = DDI_DMA_NOMAPPING;
	} else {
#endif
		e = rootnex_bind_slowpath(hp, dmareq, dma, attr, &dma->dp_dma,
		    kmflag);
#if defined(__amd64) && !defined(__xpv)
	}
#endif
	if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) {
		if (dma->dp_need_to_free_cookie) {
			kmem_free(dma->dp_cookies, dma->dp_cookie_size);
		}
		ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
		rootnex_clean_dmahdl(hp); /* must be after free cookie */
		return (e);
	}

	/*
	 * If the driver supports FMA, insert the handle in the FMA DMA handle
	 * cache.
	 */
	if (attr->dma_attr_flags & DDI_DMA_FLAGERR)
		hp->dmai_error.err_cf = rootnex_dma_check;

	/* if the first window uses the copy buffer, sync it for the device */
	if ((dma->dp_window[dma->dp_current_win].wd_dosync) &&
	    (hp->dmai_rflags & DDI_DMA_WRITE)) {
		(void) rootnex_coredma_sync(dip, rdip, handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
	}

	/*
	 * copy out the first cookie and ccountp, set the cookie pointer to the
	 * second cookie. Make sure the partial flag is set/cleared correctly.
	 * If we have a partial map (i.e. multiple windows), the number of
	 * cookies we return is the number of cookies in the first window.
	 */
	if (e == DDI_DMA_MAPPED) {
		hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
		*ccountp = sinfo->si_sgl_size;
		hp->dmai_nwin = 1;
	} else {
		hp->dmai_rflags |= DDI_DMA_PARTIAL;
		*ccountp = dma->dp_window[dma->dp_current_win].wd_cookie_cnt;
		ASSERT(hp->dmai_nwin <= dma->dp_max_win);
	}
	*cookiep = dma->dp_cookies[0];
	hp->dmai_cookie++;

	ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
	ROOTNEX_DPROBE4(rootnex__bind__slow, dev_info_t *, rdip, uint64_t,
	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], uint_t,
	    dmao->dmao_size, uint_t, *ccountp);
	return (e);
}
/*
 * rootnex_dma_bindhdl()
 *    called from ddi_dma_addr_bind_handle() and ddi_dma_buf_bind_handle().
 */
static int
rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	int ret;

#if defined(__amd64) && !defined(__xpv)
	if (IOMMU_USED(rdip))
		ret = iommulib_nexdma_bindhdl(dip, rdip, handle, dmareq,
		    cookiep, ccountp);
	else
#endif
		ret = rootnex_coredma_bindhdl(dip, rdip, handle, dmareq,
		    cookiep, ccountp);

	if (ret == DDI_DMA_NORESOURCES && dmareq->dmar_fp != DDI_DMA_DONTWAIT) {
		ddi_set_callback(dmareq->dmar_fp, dmareq->dmar_arg,
		    &rootnex_state->r_dvma_call_list_id);
	}

	return (ret);
}
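
/*
 * Illustrative sketch (not part of the rootnex implementation): how a leaf
 * driver typically exercises the bind path above through the public DDI.
 * The "sc->xx_*" names and program_hw_sgl_entry() are hypothetical.
 *
 *	ddi_dma_cookie_t cookie;
 *	uint_t ccount, i;
 *
 *	if (ddi_dma_addr_bind_handle(sc->xx_dma_handle, NULL, sc->xx_buf,
 *	    sc->xx_buflen, DDI_DMA_RDWR | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP,
 *	    NULL, &cookie, &ccount) != DDI_DMA_MAPPED)
 *		return (DDI_FAILURE);
 *
 *	for (i = 0; i < ccount; i++) {
 *		program_hw_sgl_entry(sc, i, cookie.dmac_laddress,
 *		    cookie.dmac_size);
 *		if (i + 1 < ccount)
 *			ddi_dma_nextcookie(sc->xx_dma_handle, &cookie);
 *	}
 *	...
 *	(void) ddi_dma_unbind_handle(sc->xx_dma_handle);
 */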
/*ARGSUSED*/
static int
rootnex_coredma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *hp;
	rootnex_dma_t *dma;
	int e;

	hp = (ddi_dma_impl_t *)handle;
	dma = (rootnex_dma_t *)hp->dmai_private;

	/* make sure the buffer wasn't free'd before calling unbind */
	if (rootnex_unbind_verify_buffer) {
		e = rootnex_verify_buffer(dma);
		if (e != DDI_SUCCESS) {
			return (DDI_FAILURE);
		}
	}

	/* sync the current window before unbinding the buffer */
	if (dma->dp_window && dma->dp_window[dma->dp_current_win].wd_dosync &&
	    (hp->dmai_rflags & DDI_DMA_READ)) {
		(void) rootnex_coredma_sync(dip, rdip, handle, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
	}

	/*
	 * cleanup and copy buffer or window state. if we didn't use the copy
	 * buffer or windows, there won't be much to do :-)
	 */
	rootnex_teardown_copybuf(dma);
	rootnex_teardown_windows(dma);

#if defined(__amd64) && !defined(__xpv)
	if (IOMMU_USED(rdip))
		(void) iommulib_nexdma_unmapobject(dip, rdip, handle,
		    &dma->dp_dvma);
#endif

	/*
	 * If we had to allocate space for the worst case sgl (it didn't
	 * fit into our pre-allocated buffer), free that up now
	 */
	if (dma->dp_need_to_free_cookie) {
		kmem_free(dma->dp_cookies, dma->dp_cookie_size);
	}

	/*
	 * clean up the handle so it's ready for the next bind (i.e. if the
	 * handle is reused).
	 */
	rootnex_clean_dmahdl(hp);
	hp->dmai_error.err_cf = NULL;

	ROOTNEX_DPROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
	ROOTNEX_DPROBE1(rootnex__unbind, uint64_t,
	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);

	return (DDI_SUCCESS);
}
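
/*
 * Illustrative sketch (not part of the driver): the ddi_dma_sync(9F) calls a
 * leaf driver is expected to make around a transfer, which is what the
 * wd_dosync handling above and in the bind path relies on. "xx_handle" and
 * "len" are hypothetical.
 *
 *	before telling the device to read the buffer (host -> device):
 *		(void) ddi_dma_sync(xx_handle, 0, len, DDI_DMA_SYNC_FORDEV);
 *
 *	after the device has written the buffer (device -> host):
 *		(void) ddi_dma_sync(xx_handle, 0, len, DDI_DMA_SYNC_FORCPU);
 */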
/*
 * rootnex_dma_unbindhdl()
 *    called from ddi_dma_unbind_handle()
 */
/*ARGSUSED*/
static int
rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	int ret;

#if defined(__amd64) && !defined(__xpv)
	if (IOMMU_USED(rdip))
		ret = iommulib_nexdma_unbindhdl(dip, rdip, handle);
	else
#endif
		ret = rootnex_coredma_unbindhdl(dip, rdip, handle);

	if (rootnex_state->r_dvma_call_list_id)
		ddi_run_callback(&rootnex_state->r_dvma_call_list_id);

	return (ret);
}
#if defined(__amd64) && !defined(__xpv)

static int
rootnex_coredma_get_sleep_flags(ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;

	if (dma->dp_sleep_flags != KM_SLEEP &&
	    dma->dp_sleep_flags != KM_NOSLEEP)
		cmn_err(CE_PANIC, "kmem sleep flags not set in DMA handle");
	return (dma->dp_sleep_flags);
}
/*ARGSUSED*/
static void
rootnex_coredma_reset_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
	rootnex_window_t *window;

	if (dma->dp_window) {
		window = &dma->dp_window[dma->dp_current_win];
		hp->dmai_cookie = window->wd_first_cookie;
	} else {
		hp->dmai_cookie = dma->dp_cookies;
	}
	hp->dmai_cookie++;
}
/*ARGSUSED*/
static int
rootnex_coredma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
    ddi_dma_cookie_t **cookiepp, uint_t *ccountp)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
	rootnex_window_t *window;
	ddi_dma_cookie_t *cp;
	ddi_dma_cookie_t *cookie;
	int km_flags;
	uint_t i;

	ASSERT(*cookiepp == NULL);
	ASSERT(*ccountp == 0);

	if (dma->dp_window) {
		window = &dma->dp_window[dma->dp_current_win];
		cp = window->wd_first_cookie;
		*ccountp = window->wd_cookie_cnt;
	} else {
		cp = dma->dp_cookies;
		*ccountp = dma->dp_sglinfo.si_sgl_size;
	}

	km_flags = rootnex_coredma_get_sleep_flags(handle);
	cookie = kmem_zalloc(sizeof (ddi_dma_cookie_t) * (*ccountp), km_flags);
	if (cookie == NULL) {
		return (DDI_DMA_NORESOURCES);
	}

	for (i = 0; i < *ccountp; i++) {
		cookie[i].dmac_notused = cp[i].dmac_notused;
		cookie[i].dmac_type = cp[i].dmac_type;
		cookie[i].dmac_address = cp[i].dmac_address;
		cookie[i].dmac_size = cp[i].dmac_size;
	}

	*cookiepp = cookie;

	return (DDI_SUCCESS);
}
/*ARGSUSED*/
static int
rootnex_coredma_set_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
    ddi_dma_cookie_t *cookiep, uint_t ccount)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
	rootnex_window_t *window;
	ddi_dma_cookie_t *cur_cookiep;

	ASSERT(ccount != 0);
	ASSERT(dma->dp_need_to_switch_cookies == B_FALSE);

	if (dma->dp_window) {
		window = &dma->dp_window[dma->dp_current_win];
		dma->dp_saved_cookies = window->wd_first_cookie;
		window->wd_first_cookie = cookiep;
		ASSERT(ccount == window->wd_cookie_cnt);
		cur_cookiep = (hp->dmai_cookie - dma->dp_saved_cookies)
		    + window->wd_first_cookie;
	} else {
		dma->dp_saved_cookies = dma->dp_cookies;
		dma->dp_cookies = cookiep;
		ASSERT(ccount == dma->dp_sglinfo.si_sgl_size);
		cur_cookiep = (hp->dmai_cookie - dma->dp_saved_cookies)
		    + dma->dp_cookies;
	}

	dma->dp_need_to_switch_cookies = B_TRUE;
	hp->dmai_cookie = cur_cookiep;

	return (DDI_SUCCESS);
}
/*ARGSUSED*/
static int
rootnex_coredma_clear_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
	rootnex_window_t *window;
	ddi_dma_cookie_t *cur_cookiep;
	ddi_dma_cookie_t *cookie_array;
	uint_t ccount;

	/* check if cookies have not been switched */
	if (dma->dp_need_to_switch_cookies == B_FALSE)
		return (DDI_SUCCESS);

	ASSERT(dma->dp_saved_cookies);

	if (dma->dp_window) {
		window = &dma->dp_window[dma->dp_current_win];
		cookie_array = window->wd_first_cookie;
		window->wd_first_cookie = dma->dp_saved_cookies;
		dma->dp_saved_cookies = NULL;
		ccount = window->wd_cookie_cnt;
		cur_cookiep = (hp->dmai_cookie - cookie_array)
		    + window->wd_first_cookie;
	} else {
		cookie_array = dma->dp_cookies;
		dma->dp_cookies = dma->dp_saved_cookies;
		dma->dp_saved_cookies = NULL;
		ccount = dma->dp_sglinfo.si_sgl_size;
		cur_cookiep = (hp->dmai_cookie - cookie_array)
		    + dma->dp_cookies;
	}

	kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);

	hp->dmai_cookie = cur_cookiep;

	dma->dp_need_to_switch_cookies = B_FALSE;

	return (DDI_SUCCESS);
}

#endif
static struct as *
rootnex_get_as(ddi_dma_obj_t *dmao)
{
	struct as *asp;

	switch (dmao->dmao_type) {
	case DMA_OTYP_VADDR:
	case DMA_OTYP_BUFVADDR:
		asp = dmao->dmao_obj.virt_obj.v_as;
		if (asp == NULL)
			asp = &kas;
		break;
	default:
		asp = NULL;
		break;
	}
	return (asp);
}
/*
 * rootnex_verify_buffer()
 *   verify buffer wasn't free'd
 */
static int
rootnex_verify_buffer(rootnex_dma_t *dma)
{
	page_t **pplist;
	caddr_t vaddr;
	uint_t pcnt;
	uint_t poff;
	page_t *pp;
	char b;
	int i;

	/* Figure out how many pages this buffer occupies */
	if (dma->dp_dma.dmao_type == DMA_OTYP_PAGES) {
		poff = dma->dp_dma.dmao_obj.pp_obj.pp_offset & MMU_PAGEOFFSET;
	} else {
		vaddr = dma->dp_dma.dmao_obj.virt_obj.v_addr;
		poff = (uintptr_t)vaddr & MMU_PAGEOFFSET;
	}
	pcnt = mmu_btopr(dma->dp_dma.dmao_size + poff);

	switch (dma->dp_dma.dmao_type) {
	case DMA_OTYP_PAGES:
		/*
		 * for a linked list of pp's walk through them to make sure
		 * they're locked and not free.
		 */
		pp = dma->dp_dma.dmao_obj.pp_obj.pp_pp;
		for (i = 0; i < pcnt; i++) {
			if (PP_ISFREE(pp) || !PAGE_LOCKED(pp)) {
				return (DDI_FAILURE);
			}
			pp = pp->p_next;
		}
		break;

	case DMA_OTYP_VADDR:
	case DMA_OTYP_BUFVADDR:
		pplist = dma->dp_dma.dmao_obj.virt_obj.v_priv;
		/*
		 * for an array of pp's walk through them to make sure they're
		 * not free. It's possible that they may not be locked.
		 */
		if (pplist != NULL) {
			for (i = 0; i < pcnt; i++) {
				if (PP_ISFREE(pplist[i])) {
					return (DDI_FAILURE);
				}
			}

		/* For a virtual address, try to peek at each page */
		} else {
			if (rootnex_get_as(&dma->dp_dma) == &kas) {
				for (i = 0; i < pcnt; i++) {
					if (ddi_peek8(NULL, vaddr, &b) ==
					    DDI_FAILURE)
						return (DDI_FAILURE);
					vaddr += MMU_PAGESIZE;
				}
			}
		}
		break;

	default:
		cmn_err(CE_PANIC, "rootnex_verify_buffer: bad DMA object");
		break;
	}

	return (DDI_SUCCESS);
}
/*
 * rootnex_clean_dmahdl()
 *    Clean the dma handle. This should be called on a handle alloc and an
 *    unbind handle. Set the handle state to the default settings.
 */
static void
rootnex_clean_dmahdl(ddi_dma_impl_t *hp)
{
	rootnex_dma_t *dma;

	dma = (rootnex_dma_t *)hp->dmai_private;

	hp->dmai_nwin = 0;
	dma->dp_current_cookie = 0;
	dma->dp_copybuf_size = 0;
	dma->dp_window = NULL;
	dma->dp_cbaddr = NULL;
	dma->dp_inuse = B_FALSE;
	dma->dp_dvma_used = B_FALSE;
	dma->dp_need_to_free_cookie = B_FALSE;
	dma->dp_need_to_switch_cookies = B_FALSE;
	dma->dp_saved_cookies = NULL;
	dma->dp_sleep_flags = KM_PANIC;
	dma->dp_need_to_free_window = B_FALSE;
	dma->dp_partial_required = B_FALSE;
	dma->dp_trim_required = B_FALSE;
	dma->dp_sglinfo.si_copybuf_req = 0;
#if !defined(__amd64)
	dma->dp_cb_remaping = B_FALSE;
	dma->dp_kva = NULL;
#endif

	/* FMA related initialization */
	hp->dmai_fault = 0;
	hp->dmai_fault_check = NULL;
	hp->dmai_fault_notify = NULL;
	hp->dmai_error.err_ena = 0;
	hp->dmai_error.err_status = DDI_FM_OK;
	hp->dmai_error.err_expected = DDI_FM_ERR_UNEXPECTED;
	hp->dmai_error.err_ontrap = NULL;
}
/*
 * rootnex_valid_alloc_parms()
 *    Called in ddi_dma_alloc_handle path to validate its parameters.
 */
static int
rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegmentsize)
{
	if ((attr->dma_attr_seg < MMU_PAGEOFFSET) ||
	    (attr->dma_attr_count_max < MMU_PAGEOFFSET) ||
	    (attr->dma_attr_granular > MMU_PAGESIZE) ||
	    (attr->dma_attr_maxxfer < MMU_PAGESIZE)) {
		return (DDI_DMA_BADATTR);
	}

	if (attr->dma_attr_addr_hi <= attr->dma_attr_addr_lo) {
		return (DDI_DMA_BADATTR);
	}

	if ((attr->dma_attr_seg & MMU_PAGEOFFSET) != MMU_PAGEOFFSET ||
	    MMU_PAGESIZE & (attr->dma_attr_granular - 1) ||
	    attr->dma_attr_sgllen == 0) {
		return (DDI_DMA_BADATTR);
	}

	/* We should be able to DMA into every byte offset in a page */
	if (maxsegmentsize < MMU_PAGESIZE) {
		return (DDI_DMA_BADATTR);
	}

	/* if we're bouncing on seg, seg must be <= addr_hi */
	if ((attr->dma_attr_flags & _DDI_DMA_BOUNCE_ON_SEG) &&
	    (attr->dma_attr_seg > attr->dma_attr_addr_hi)) {
		return (DDI_DMA_BADATTR);
	}
	return (DDI_SUCCESS);
}
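
/*
 * Illustrative sketch (hypothetical values, not part of the driver): a
 * ddi_dma_attr_t that satisfies the checks above -- seg and count_max are
 * all-ones masks >= MMU_PAGEOFFSET, the granularity divides the page size,
 * maxxfer is at least a page, and sgllen is non-zero.
 *
 *	static ddi_dma_attr_t xx_dma_attr = {
 *		DMA_ATTR_V0,			(dma_attr_version)
 *		0x0000000000000000ull,		(dma_attr_addr_lo)
 *		0xFFFFFFFFFFFFFFFFull,		(dma_attr_addr_hi)
 *		0x00000000FFFFFFFFull,		(dma_attr_count_max)
 *		0x1000,				(dma_attr_align)
 *		0x7F,				(dma_attr_burstsizes)
 *		0x1,				(dma_attr_minxfer)
 *		0x00000000FFFFFFFFull,		(dma_attr_maxxfer)
 *		0xFFFFFFFFFFFFFFFFull,		(dma_attr_seg)
 *		64,				(dma_attr_sgllen)
 *		512,				(dma_attr_granular)
 *		0				(dma_attr_flags)
 *	};
 */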
/*
 * rootnex_valid_bind_parms()
 *    Called in ddi_dma_*_bind_handle path to validate its parameters.
 */
/* ARGSUSED */
static int
rootnex_valid_bind_parms(ddi_dma_req_t *dmareq, ddi_dma_attr_t *attr)
{
#if !defined(__amd64)
	/*
	 * we only support up to a 2G-1 transfer size on 32-bit kernels so
	 * we can track the offset for the obsoleted interfaces.
	 */
	if (dmareq->dmar_object.dmao_size > 0x7FFFFFFF) {
		return (DDI_DMA_TOOBIG);
	}
#endif

	return (DDI_SUCCESS);
}
/*
 * rootnex_need_bounce_seg()
 *    check to see if the buffer lives on both sides of the seg.
 */
static boolean_t
rootnex_need_bounce_seg(ddi_dma_obj_t *dmar_object, rootnex_sglinfo_t *sglinfo)
{
	ddi_dma_atyp_t buftype;
	rootnex_addr_t raddr;
	boolean_t lower_addr;
	boolean_t upper_addr;
	uint64_t offset;
	page_t **pplist;
	uint64_t paddr;
	uint32_t psize;
	caddr_t vaddr;
	uint_t pcnt;
	page_t *pp;
	uint32_t size;


	/* shortcuts */
	pplist = dmar_object->dmao_obj.virt_obj.v_priv;
	vaddr = dmar_object->dmao_obj.virt_obj.v_addr;
	buftype = dmar_object->dmao_type;
	size = dmar_object->dmao_size;

	lower_addr = B_FALSE;
	upper_addr = B_FALSE;
	pcnt = 0;

	/*
	 * Process the first page to handle the initial offset of the buffer.
	 * We'll use the base address we get later when we loop through all
	 * the pages.
	 */
	if (buftype == DMA_OTYP_PAGES) {
		pp = dmar_object->dmao_obj.pp_obj.pp_pp;
		offset = dmar_object->dmao_obj.pp_obj.pp_offset &
		    MMU_PAGEOFFSET;
		paddr = pfn_to_pa(pp->p_pagenum) + offset;
		psize = MIN(size, (MMU_PAGESIZE - offset));
		pp = pp->p_next;
		sglinfo->si_asp = NULL;
	} else if (pplist != NULL) {
		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
		if (sglinfo->si_asp == NULL) {
			sglinfo->si_asp = &kas;
		}
		paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
		paddr += offset;
		psize = MIN(size, (MMU_PAGESIZE - offset));
		pcnt++;
	} else {
		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
		if (sglinfo->si_asp == NULL) {
			sglinfo->si_asp = &kas;
		}
		paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat, vaddr));
		paddr += offset;
		psize = MIN(size, (MMU_PAGESIZE - offset));
		vaddr += psize;
	}

	raddr = ROOTNEX_PADDR_TO_RBASE(paddr);

	if ((raddr + psize) > sglinfo->si_segmask) {
		upper_addr = B_TRUE;
	} else {
		lower_addr = B_TRUE;
	}
	size -= psize;

	/*
	 * Walk through the rest of the pages in the buffer. Track to see
	 * if we have pages on both sides of the segment boundary.
	 */
	while (size > 0) {
		/* partial or full page */
		psize = MIN(size, MMU_PAGESIZE);

		if (buftype == DMA_OTYP_PAGES) {
			/* get the paddr from the page_t */
			ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
			paddr = pfn_to_pa(pp->p_pagenum);
			pp = pp->p_next;
		} else if (pplist != NULL) {
			/* index into the array of page_t's to get the paddr */
			ASSERT(!PP_ISFREE(pplist[pcnt]));
			paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
			pcnt++;
		} else {
			/* call into the VM to get the paddr */
			paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat,
			    vaddr));
			vaddr += psize;
		}

		raddr = ROOTNEX_PADDR_TO_RBASE(paddr);

		if ((raddr + psize) > sglinfo->si_segmask) {
			upper_addr = B_TRUE;
		} else {
			lower_addr = B_TRUE;
		}
		/*
		 * if the buffer lives both above and below the segment
		 * boundary, or the current page is the page immediately
		 * after the segment, we will use a copy/bounce buffer for
		 * all pages > seg.
		 */
		if ((lower_addr && upper_addr) ||
		    (raddr == (sglinfo->si_segmask + 1))) {
			return (B_TRUE);
		}

		size -= psize;
	}

	return (B_FALSE);
}
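
/*
 * Worked example (hypothetical numbers, for illustration only): with
 * si_segmask == 0x00FFFFFF (a 16MB segment) and a two-page buffer whose
 * pages translate to raddr 0x00FFE000 and 0x01000000, the first page sets
 * lower_addr while the second sets upper_addr (0x01000000 + 0x1000 >
 * 0x00FFFFFF), so the routine returns B_TRUE and the bind will bounce
 * everything above the segment through the copy buffer.
 */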
/*
 * rootnex_get_sgl()
 *    Called in bind fastpath to get the sgl. Most of this will be replaced
 *    with a call to the vm layer when vm2.0 comes around...
 */
static void
rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
    rootnex_sglinfo_t *sglinfo)
{
	ddi_dma_atyp_t buftype;
	rootnex_addr_t raddr;
	uint64_t last_page;
	uint64_t offset;
	uint64_t addrhi;
	uint64_t addrlo;
	uint64_t maxseg;
	page_t **pplist;
	uint64_t paddr;
	uint32_t psize;
	uint32_t size;
	caddr_t vaddr;
	uint_t pcnt;
	page_t *pp;
	uint_t cnt;


	/* shortcuts */
	pplist = dmar_object->dmao_obj.virt_obj.v_priv;
	vaddr = dmar_object->dmao_obj.virt_obj.v_addr;
	maxseg = sglinfo->si_max_cookie_size;
	buftype = dmar_object->dmao_type;
	addrhi = sglinfo->si_max_addr;
	addrlo = sglinfo->si_min_addr;
	size = dmar_object->dmao_size;

	pcnt = 0;
	cnt = 0;

	/*
	 * check to see if we need to use the copy buffer for pages over
	 * the segment attr.
	 */
	sglinfo->si_bounce_on_seg = B_FALSE;
	if (sglinfo->si_flags & _DDI_DMA_BOUNCE_ON_SEG) {
		sglinfo->si_bounce_on_seg = rootnex_need_bounce_seg(
		    dmar_object, sglinfo);
	}

	/*
	 * if we were passed down a linked list of pages, i.e. pointer to
	 * page_t, use this to get our physical address and buf offset.
	 */
	if (buftype == DMA_OTYP_PAGES) {
		pp = dmar_object->dmao_obj.pp_obj.pp_pp;
		ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
		offset = dmar_object->dmao_obj.pp_obj.pp_offset &
		    MMU_PAGEOFFSET;
		paddr = pfn_to_pa(pp->p_pagenum) + offset;
		psize = MIN(size, (MMU_PAGESIZE - offset));
		pp = pp->p_next;
		sglinfo->si_asp = NULL;

	/*
	 * We weren't passed down a linked list of pages, but if we were passed
	 * down an array of pages, use this to get our physical address and buf
	 * offset.
	 */
	} else if (pplist != NULL) {
		ASSERT((buftype == DMA_OTYP_VADDR) ||
		    (buftype == DMA_OTYP_BUFVADDR));

		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
		if (sglinfo->si_asp == NULL) {
			sglinfo->si_asp = &kas;
		}

		ASSERT(!PP_ISFREE(pplist[pcnt]));
		paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
		paddr += offset;
		psize = MIN(size, (MMU_PAGESIZE - offset));
		pcnt++;

	/*
	 * All we have is a virtual address, we'll need to call into the VM
	 * to get the physical address.
	 */
	} else {
		ASSERT((buftype == DMA_OTYP_VADDR) ||
		    (buftype == DMA_OTYP_BUFVADDR));

		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
		if (sglinfo->si_asp == NULL) {
			sglinfo->si_asp = &kas;
		}

		paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat, vaddr));
		paddr += offset;
		psize = MIN(size, (MMU_PAGESIZE - offset));
		vaddr += psize;
	}

	raddr = ROOTNEX_PADDR_TO_RBASE(paddr);

	/*
	 * Setup the first cookie with the physical address of the page and the
	 * size of the page (which takes into account the initial offset into
	 * the page).
	 */
	sgl[cnt].dmac_laddress = raddr;
	sgl[cnt].dmac_size = psize;
	sgl[cnt].dmac_type = 0;

	/*
	 * Save away the buffer offset into the page. We'll need this later in
	 * the copy buffer code to help figure out the page index within the
	 * buffer and the offset into the current page.
	 */
	sglinfo->si_buf_offset = offset;

	/*
	 * If we are using the copy buffer for anything over the segment
	 * boundary, and this page is over the segment boundary.
	 *   OR
	 * if the DMA engine can't reach the physical address.
	 */
	if (((sglinfo->si_bounce_on_seg) &&
	    ((raddr + psize) > sglinfo->si_segmask)) ||
	    ((raddr < addrlo) || ((raddr + psize) > addrhi))) {
		/*
		 * Increase how much copy buffer we use. We always increase by
		 * pagesize so we don't have to worry about converting offsets.
		 * Set a flag in the cookie's dmac_type to indicate that it uses
		 * the copy buffer. If this isn't the last cookie, go to the
		 * next cookie (since we separate each page which uses the copy
		 * buffer in case the copy buffer is not physically contiguous).
		 */
		sglinfo->si_copybuf_req += MMU_PAGESIZE;
		sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF;
		if ((cnt + 1) < sglinfo->si_max_pages) {
			cnt++;
			sgl[cnt].dmac_laddress = 0;
			sgl[cnt].dmac_size = 0;
			sgl[cnt].dmac_type = 0;
		}
	}

	/*
	 * save this page's physical address so we can figure out if the next
	 * page is physically contiguous. Keep decrementing size until we are
	 * done with the buffer.
	 */
	last_page = raddr & MMU_PAGEMASK;
	size -= psize;

	while (size > 0) {
		/* Get the size for this page (i.e. partial or full page) */
		psize = MIN(size, MMU_PAGESIZE);

		if (buftype == DMA_OTYP_PAGES) {
			/* get the paddr from the page_t */
			ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
			paddr = pfn_to_pa(pp->p_pagenum);
			pp = pp->p_next;
		} else if (pplist != NULL) {
			/* index into the array of page_t's to get the paddr */
			ASSERT(!PP_ISFREE(pplist[pcnt]));
			paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
			pcnt++;
		} else {
			/* call into the VM to get the paddr */
			paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat,
			    vaddr));
			vaddr += psize;
		}

		raddr = ROOTNEX_PADDR_TO_RBASE(paddr);

		/*
		 * If we are using the copy buffer for anything over the
		 * segment boundary, and this page is over the segment
		 * boundary.
		 *   OR
		 * if the DMA engine can't reach the physical address.
		 */
		if (((sglinfo->si_bounce_on_seg) &&
		    ((raddr + psize) > sglinfo->si_segmask)) ||
		    ((raddr < addrlo) || ((raddr + psize) > addrhi))) {

			sglinfo->si_copybuf_req += MMU_PAGESIZE;

			/*
			 * if there is something in the current cookie, go to
			 * the next one. We only want one page in a cookie which
			 * uses the copybuf since the copybuf doesn't have to
			 * be physically contiguous.
			 */
			if (sgl[cnt].dmac_size != 0) {
				cnt++;
			}
			sgl[cnt].dmac_laddress = raddr;
			sgl[cnt].dmac_size = psize;
#if defined(__amd64)
			sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF;
#else
			/*
			 * save the buf offset for 32-bit kernel. used in the
			 * obsoleted interfaces.
			 */
			sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF |
			    (dmar_object->dmao_size - size);
#endif
			/* if this isn't the last cookie, go to the next one */
			if ((cnt + 1) < sglinfo->si_max_pages) {
				cnt++;
				sgl[cnt].dmac_laddress = 0;
				sgl[cnt].dmac_size = 0;
				sgl[cnt].dmac_type = 0;
			}

		/*
		 * this page didn't need the copy buffer, if it's not physically
		 * contiguous, or it would put us over a segment boundary, or it
		 * puts us over the max cookie size, or the current sgl doesn't
		 * have anything in it.
		 */
		} else if (((last_page + MMU_PAGESIZE) != raddr) ||
		    !(raddr & sglinfo->si_segmask) ||
		    ((sgl[cnt].dmac_size + psize) > maxseg) ||
		    (sgl[cnt].dmac_size == 0)) {
			/*
			 * if we're not already in a new cookie, go to the next
			 * cookie.
			 */
			if (sgl[cnt].dmac_size != 0) {
				cnt++;
			}

			/* save the cookie information */
			sgl[cnt].dmac_laddress = raddr;
			sgl[cnt].dmac_size = psize;
#if defined(__amd64)
			sgl[cnt].dmac_type = 0;
#else
			/*
			 * save the buf offset for 32-bit kernel. used in the
			 * obsoleted interfaces.
			 */
			sgl[cnt].dmac_type = dmar_object->dmao_size - size;
#endif

		/*
		 * this page didn't need the copy buffer, it is physically
		 * contiguous with the last page, and it's <= the max cookie
		 * size.
		 */
		} else {
			sgl[cnt].dmac_size += psize;

			/*
			 * if this exactly == the maximum cookie size, and
			 * it isn't the last cookie, go to the next cookie.
			 */
			if (((sgl[cnt].dmac_size + psize) == maxseg) &&
			    ((cnt + 1) < sglinfo->si_max_pages)) {
				cnt++;
				sgl[cnt].dmac_laddress = 0;
				sgl[cnt].dmac_size = 0;
				sgl[cnt].dmac_type = 0;
			}
		}

		/*
		 * save this page's physical address so we can figure out if the
		 * next page is physically contiguous. Keep decrementing size
		 * until we are done with the buffer.
		 */
		last_page = raddr;
		size -= psize;
	}

	/* we're done, save away how many cookies the sgl has */
	if (sgl[cnt].dmac_size == 0) {
		ASSERT(cnt < sglinfo->si_max_pages);
		sglinfo->si_sgl_size = cnt;
	} else {
		sglinfo->si_sgl_size = cnt + 1;
	}
}
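
/*
 * Worked example (hypothetical numbers, for illustration only): an 8K buffer
 * that starts at page offset 0x800 and whose three pages translate to raddr
 * 0x10000000, 0x10001000 and 0x20000000, with a large maxseg and no copybuf
 * required, comes out of the loop above as two cookies:
 *	cookie 0: dmac_laddress 0x10000800, dmac_size 0x1800 (the first two
 *	    pages are physically contiguous and are merged),
 *	cookie 1: dmac_laddress 0x20000000, dmac_size 0x0800 (discontiguous,
 *	    so a new cookie is started).
 */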
static void
rootnex_dvma_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
    rootnex_sglinfo_t *sglinfo)
{
	uint64_t offset;
	uint64_t maxseg;
	uint64_t dvaddr;
	struct dvmaseg *dvs;
	uint64_t paddr;
	uint32_t psize, ssize;
	uint32_t size;
	uint_t cnt;
	int physcontig;

	ASSERT(dmar_object->dmao_type == DMA_OTYP_DVADDR);

	/* shortcuts */
	maxseg = sglinfo->si_max_cookie_size;
	size = dmar_object->dmao_size;

	cnt = 0;
	sglinfo->si_bounce_on_seg = B_FALSE;

	dvs = dmar_object->dmao_obj.dvma_obj.dv_seg;
	offset = dmar_object->dmao_obj.dvma_obj.dv_off;
	ssize = dvs->dvs_len;
	paddr = dvs->dvs_start + offset;
	psize = MIN(ssize, (maxseg - offset));
	dvaddr = paddr + psize;
	ssize -= psize;

	sgl[cnt].dmac_laddress = paddr;
	sgl[cnt].dmac_size = psize;
	sgl[cnt].dmac_type = 0;

	size -= psize;
	while (size > 0) {
		if (ssize == 0) {
			dvs++;
			ssize = dvs->dvs_len;
			dvaddr = dvs->dvs_start;
			physcontig = 0;
		} else {
			physcontig = 1;
		}

		paddr = dvaddr;
		psize = MIN(ssize, maxseg);
		dvaddr += psize;
		ssize -= psize;

		if (!physcontig || !(paddr & sglinfo->si_segmask) ||
		    ((sgl[cnt].dmac_size + psize) > maxseg) ||
		    (sgl[cnt].dmac_size == 0)) {
			/*
			 * if we're not already in a new cookie, go to the next
			 * cookie.
			 */
			if (sgl[cnt].dmac_size != 0) {
				cnt++;
			}

			/* save the cookie information */
			sgl[cnt].dmac_laddress = paddr;
			sgl[cnt].dmac_size = psize;
			sgl[cnt].dmac_type = 0;
		} else {
			sgl[cnt].dmac_size += psize;

			/*
			 * if this exactly == the maximum cookie size, and
			 * it isn't the last cookie, go to the next cookie.
			 */
			if (((sgl[cnt].dmac_size + psize) == maxseg) &&
			    ((cnt + 1) < sglinfo->si_max_pages)) {
				cnt++;
				sgl[cnt].dmac_laddress = 0;
				sgl[cnt].dmac_size = 0;
				sgl[cnt].dmac_type = 0;
			}
		}
		size -= psize;
	}

	/* we're done, save away how many cookies the sgl has */
	if (sgl[cnt].dmac_size == 0) {
		sglinfo->si_sgl_size = cnt;
	} else {
		sglinfo->si_sgl_size = cnt + 1;
	}
}
/*
 * rootnex_bind_slowpath()
 *    Call in the bind path if the calling driver can't use the sgl without
 *    modifying it. We either need to use the copy buffer and/or we will end up
 *    with a partial bind.
 */
static int
rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
    rootnex_dma_t *dma, ddi_dma_attr_t *attr, ddi_dma_obj_t *dmao, int kmflag)
{
	rootnex_sglinfo_t *sinfo;
	rootnex_window_t *window;
	ddi_dma_cookie_t *cookie;
	size_t copybuf_used;
	size_t dmac_size;
	off_t cur_offset;
	page_t *cur_pp;
	major_t mnum;
	int partial;
	int e;
	uint_t i;


	sinfo = &dma->dp_sglinfo;
	copybuf_used = 0;
	partial = B_FALSE;

	/*
	 * If we're using the copybuf, set the copybuf state in dma struct.
	 * Needs to be first since it sets the copy buffer size.
	 */
	if (sinfo->si_copybuf_req != 0) {
		e = rootnex_setup_copybuf(hp, dmareq, dma, attr);
		if (e != DDI_SUCCESS) {
			return (e);
		}
	} else {
		dma->dp_copybuf_size = 0;
	}

	/*
	 * Figure out if we need to do a partial mapping. If so, figure out
	 * if we need to trim the buffers when we munge the sgl.
	 */
	if ((dma->dp_copybuf_size < sinfo->si_copybuf_req) ||
	    (dmao->dmao_size > dma->dp_maxxfer) ||
	    ((unsigned)attr->dma_attr_sgllen < sinfo->si_sgl_size)) {
		dma->dp_partial_required = B_TRUE;
		if (attr->dma_attr_granular != 1) {
			dma->dp_trim_required = B_TRUE;
		}
	} else {
		dma->dp_partial_required = B_FALSE;
		dma->dp_trim_required = B_FALSE;
	}

	/* If we need to do a partial bind, make sure the driver supports it */
	if (dma->dp_partial_required &&
	    !(dmareq->dmar_flags & DDI_DMA_PARTIAL)) {

		mnum = ddi_driver_major(dma->dp_dip);
		/*
		 * patchable which allows us to print one warning per major
		 * number.
		 */
		if ((rootnex_bind_warn) &&
		    ((rootnex_warn_list[mnum] & ROOTNEX_BIND_WARNING) == 0)) {
			rootnex_warn_list[mnum] |= ROOTNEX_BIND_WARNING;
			cmn_err(CE_WARN, "!%s: coding error detected, the "
			    "driver is using ddi_dma_attr(9S) incorrectly. "
			    "There is a small risk of data corruption in "
			    "particular with large I/Os. The driver should be "
			    "replaced with a corrected version for proper "
			    "system operation. To disable this warning, add "
			    "'set rootnex:rootnex_bind_warn=0' to "
			    "/etc/system(4).", ddi_driver_name(dma->dp_dip));
		}
		return (DDI_DMA_TOOBIG);
	}

	/*
	 * we might need multiple windows, setup state to handle them. In this
	 * code path, we will have at least one window.
	 */
	e = rootnex_setup_windows(hp, dma, attr, dmao, kmflag);
	if (e != DDI_SUCCESS) {
		rootnex_teardown_copybuf(dma);
		return (e);
	}

	window = &dma->dp_window[0];
	cookie = &dma->dp_cookies[0];
	cur_offset = 0;
	rootnex_init_win(hp, dma, window, cookie, cur_offset);
	if (dmao->dmao_type == DMA_OTYP_PAGES) {
		cur_pp = dmareq->dmar_object.dmao_obj.pp_obj.pp_pp;
	}

	/* loop through all the cookies we got back from get_sgl() */
	for (i = 0; i < sinfo->si_sgl_size; i++) {
		/*
		 * If we're using the copy buffer, check this cookie and setup
		 * its associated copy buffer state. If this cookie uses the
		 * copy buffer, make sure we sync this window during dma_sync.
		 */
		if (dma->dp_copybuf_size > 0) {
			rootnex_setup_cookie(dmao, dma, cookie,
			    cur_offset, &copybuf_used, &cur_pp);
			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
				window->wd_dosync = B_TRUE;
			}
		}

		/*
		 * save away the cookie size, since it could be modified in
		 * the windowing code.
		 */
		dmac_size = cookie->dmac_size;

		/* if we went over max copybuf size */
		if (dma->dp_copybuf_size &&
		    (copybuf_used > dma->dp_copybuf_size)) {
			partial = B_TRUE;
			e = rootnex_copybuf_window_boundary(hp, dma, &window,
			    cookie, cur_offset, &copybuf_used);
			if (e != DDI_SUCCESS) {
				rootnex_teardown_copybuf(dma);
				rootnex_teardown_windows(dma);
				return (e);
			}

			/*
			 * if the cookie uses the copy buffer, make sure the
			 * new window we just moved to is set to sync.
			 */
			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
				window->wd_dosync = B_TRUE;
			}
			ROOTNEX_DPROBE1(rootnex__copybuf__window, dev_info_t *,
			    dma->dp_dip);

		/* if the cookie cnt == max sgllen, move to the next window */
		} else if (window->wd_cookie_cnt >=
		    (unsigned)attr->dma_attr_sgllen) {
			partial = B_TRUE;
			ASSERT(window->wd_cookie_cnt == attr->dma_attr_sgllen);
			e = rootnex_sgllen_window_boundary(hp, dma, &window,
			    cookie, attr, cur_offset);
			if (e != DDI_SUCCESS) {
				rootnex_teardown_copybuf(dma);
				rootnex_teardown_windows(dma);
				return (e);
			}

			/*
			 * if the cookie uses the copy buffer, make sure the
			 * new window we just moved to is set to sync.
			 */
			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
				window->wd_dosync = B_TRUE;
			}
			ROOTNEX_DPROBE1(rootnex__sgllen__window, dev_info_t *,
			    dma->dp_dip);

		/* else if we will be over maxxfer */
		} else if ((window->wd_size + dmac_size) >
		    dma->dp_maxxfer) {
			partial = B_TRUE;
			e = rootnex_maxxfer_window_boundary(hp, dma, &window,
			    cookie);
			if (e != DDI_SUCCESS) {
				rootnex_teardown_copybuf(dma);
				rootnex_teardown_windows(dma);
				return (e);
			}

			/*
			 * if the cookie uses the copy buffer, make sure the
			 * new window we just moved to is set to sync.
			 */
			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
				window->wd_dosync = B_TRUE;
			}
			ROOTNEX_DPROBE1(rootnex__maxxfer__window, dev_info_t *,
			    dma->dp_dip);

		/* else this cookie fits in the current window */
		} else {
			window->wd_cookie_cnt++;
			window->wd_size += dmac_size;
		}

		/* track our offset into the buffer, go to the next cookie */
		ASSERT(dmac_size <= dmao->dmao_size);
		ASSERT(cookie->dmac_size <= dmac_size);
		cur_offset += dmac_size;
		cookie++;
	}

	/* if we ended up with a zero sized window in the end, clean it up */
	if (window->wd_size == 0) {
		hp->dmai_nwin--;
		window--;
	}

	ASSERT(window->wd_trim.tr_trim_last == B_FALSE);

	if (!partial) {
		return (DDI_DMA_MAPPED);
	}

	ASSERT(dma->dp_partial_required);
	return (DDI_DMA_PARTIAL_MAP);
}
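
/*
 * Illustrative sketch (not part of the driver): how a leaf driver that asked
 * for DDI_DMA_PARTIAL consumes the windows produced by the slow path above.
 * "xx_handle" and xx_transfer_window() are hypothetical.
 *
 *	ddi_dma_cookie_t cookie;
 *	uint_t nwin, win, ccount;
 *	off_t off;
 *	size_t len;
 *
 *	(void) ddi_dma_numwin(xx_handle, &nwin);
 *	for (win = 0; win < nwin; win++) {
 *		if (ddi_dma_getwin(xx_handle, win, &off, &len, &cookie,
 *		    &ccount) != DDI_SUCCESS)
 *			break;
 *		xx_transfer_window(sc, off, len, &cookie, ccount);
 *	}
 */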
/*
 * rootnex_setup_copybuf()
 *    Called in bind slowpath. Figures out if we're going to use the copy
 *    buffer, and if we do, sets up the basic state to handle it.
 */
static int
rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
    rootnex_dma_t *dma, ddi_dma_attr_t *attr)
{
	rootnex_sglinfo_t *sinfo;
	ddi_dma_attr_t lattr;
	size_t max_copybuf;
	int cansleep;
	int e;
#if !defined(__amd64)
	int vmflag;
#endif

	ASSERT(!dma->dp_dvma_used);

	sinfo = &dma->dp_sglinfo;

	/* read this first so it's consistent through the routine */
	max_copybuf = i_ddi_copybuf_size() & MMU_PAGEMASK;

	/* We need to call into the rootnex on ddi_dma_sync() */
	hp->dmai_rflags &= ~DMP_NOSYNC;

	/* make sure the copybuf size <= the max size */
	dma->dp_copybuf_size = MIN(sinfo->si_copybuf_req, max_copybuf);
	ASSERT((dma->dp_copybuf_size & MMU_PAGEOFFSET) == 0);

#if !defined(__amd64)
	/*
	 * if we don't have kva space to copy to/from, allocate the KVA space
	 * now. We only do this for the 32-bit kernel. We use seg kpm space for
	 * the 64-bit kernel.
	 */
	if ((dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) ||
	    (dmareq->dmar_object.dmao_obj.virt_obj.v_as != NULL)) {

		/* convert the sleep flags */
		if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
			vmflag = VM_SLEEP;
		} else {
			vmflag = VM_NOSLEEP;
		}

		/* allocate Kernel VA space that we can bcopy to/from */
		dma->dp_kva = vmem_alloc(heap_arena, dma->dp_copybuf_size,
		    vmflag);
		if (dma->dp_kva == NULL) {
			return (DDI_DMA_NORESOURCES);
		}
	}
#endif

	/* convert the sleep flags */
	if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
		cansleep = 1;
	} else {
		cansleep = 0;
	}

	/*
	 * Allocate the actual copy buffer. This needs to fit within the DMA
	 * engine limits, so we can't use kmem_alloc... We don't need
	 * contiguous memory (sgllen) since we will be forcing windows on
	 * sgllen anyway.
	 */
	lattr = *attr;
	lattr.dma_attr_align = MMU_PAGESIZE;
	lattr.dma_attr_sgllen = -1;	/* no limit */
	/*
	 * if we're using the copy buffer because of seg, use that for our
	 * upper address limit.
	 */
	if (sinfo->si_bounce_on_seg) {
		lattr.dma_attr_addr_hi = lattr.dma_attr_seg;
	}
	e = i_ddi_mem_alloc(dma->dp_dip, &lattr, dma->dp_copybuf_size, cansleep,
	    0, NULL, &dma->dp_cbaddr, &dma->dp_cbsize, NULL);
	if (e != DDI_SUCCESS) {
#if !defined(__amd64)
		if (dma->dp_kva != NULL) {
			vmem_free(heap_arena, dma->dp_kva,
			    dma->dp_copybuf_size);
			dma->dp_kva = NULL;
		}
#endif
		return (DDI_DMA_NORESOURCES);
	}

	ROOTNEX_DPROBE2(rootnex__alloc__copybuf, dev_info_t *, dma->dp_dip,
	    size_t, dma->dp_copybuf_size);

	return (DDI_SUCCESS);
}
/*
 * rootnex_setup_windows()
 *    Called in bind slowpath to setup the window state. We always have windows
 *    in the slowpath. Even if the window count = 1.
 */
static int
rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
    ddi_dma_attr_t *attr, ddi_dma_obj_t *dmao, int kmflag)
{
	rootnex_window_t *windowp;
	rootnex_sglinfo_t *sinfo;
	size_t copy_state_size;
	size_t win_state_size;
	size_t state_available;
	size_t space_needed;
	uint_t copybuf_win;
	uint_t maxxfer_win;
	size_t space_used;
	uint_t sglwin;


	sinfo = &dma->dp_sglinfo;

	dma->dp_current_win = 0;
	hp->dmai_nwin = 0;

	/* If we don't need to do a partial, we only have one window */
	if (!dma->dp_partial_required) {
		dma->dp_max_win = 1;

	/*
	 * we need multiple windows, need to figure out the worst case number
	 * of windows.
	 */
	} else {
		/*
		 * if we need windows because we need more copy buffer than
		 * we allow, the worst case number of windows we could need
		 * here would be (copybuf space required / copybuf space that
		 * we have) plus one for remainder, and plus 2 to handle the
		 * extra pages on the trim for the first and last pages of the
		 * buffer (a page is the minimum window size so under the right
		 * attr settings, you could have a window for each page).
		 * The last page will only be hit here if the size is not a
		 * multiple of the granularity (which theoretically shouldn't
		 * be the case but never has been enforced, so we could have
		 * broken things without it).
		 */
		if (sinfo->si_copybuf_req > dma->dp_copybuf_size) {
			ASSERT(dma->dp_copybuf_size > 0);
			copybuf_win = (sinfo->si_copybuf_req /
			    dma->dp_copybuf_size) + 1 + 2;
		} else {
			copybuf_win = 0;
		}

		/*
		 * if we need windows because we have more cookies than the H/W
		 * can handle, the number of windows we would need here would
		 * be (cookie count / cookies count H/W supports minus 1[for
		 * trim]) plus one for remainder.
		 */
		if ((unsigned)attr->dma_attr_sgllen < sinfo->si_sgl_size) {
			sglwin = (sinfo->si_sgl_size /
			    (attr->dma_attr_sgllen - 1)) + 1;
		} else {
			sglwin = 0;
		}

		/*
		 * if we need windows because we're binding more memory than the
		 * H/W can transfer at once, the number of windows we would need
		 * here would be (xfer count / max xfer H/W supports) plus one
		 * for remainder, and plus 2 to handle the extra pages on the
		 * trim (see above comment about trim)
		 */
		if (dmao->dmao_size > dma->dp_maxxfer) {
			maxxfer_win = (dmao->dmao_size /
			    dma->dp_maxxfer) + 1 + 2;
		} else {
			maxxfer_win = 0;
		}
		dma->dp_max_win = copybuf_win + sglwin + maxxfer_win;
		ASSERT(dma->dp_max_win > 0);
	}
	win_state_size = dma->dp_max_win * sizeof (rootnex_window_t);

	/*
	 * Get space for window and potential copy buffer state. Before we
	 * go and allocate memory, see if we can get away with using what's
	 * left in the pre-allocated state or the dynamically allocated sgl.
	 */
	space_used = (uintptr_t)(sinfo->si_sgl_size *
	    sizeof (ddi_dma_cookie_t));

	/* if we dynamically allocated space for the cookies */
	if (dma->dp_need_to_free_cookie) {
		/* if we have more space in the pre-allocated buffer, use it */
		ASSERT(space_used <= dma->dp_cookie_size);
		if ((dma->dp_cookie_size - space_used) <=
		    rootnex_state->r_prealloc_size) {
			state_available = rootnex_state->r_prealloc_size;
			windowp = (rootnex_window_t *)dma->dp_prealloc_buffer;

		/*
		 * else, we have more free space in the dynamically allocated
		 * buffer, i.e. the buffer wasn't worst case fragmented so we
		 * didn't need a lot of cookies.
		 */
		} else {
			state_available = dma->dp_cookie_size - space_used;
			windowp = (rootnex_window_t *)
			    &dma->dp_cookies[sinfo->si_sgl_size];
		}

	/* we used the pre-alloced buffer */
	} else {
		ASSERT(space_used <= rootnex_state->r_prealloc_size);
		state_available = rootnex_state->r_prealloc_size - space_used;
		windowp = (rootnex_window_t *)
		    &dma->dp_cookies[sinfo->si_sgl_size];
	}

	/*
	 * figure out how much state we need to track the copy buffer. Add an
	 * additional 8 bytes for pointer alignment later.
	 */
	if (dma->dp_copybuf_size > 0) {
		copy_state_size = sinfo->si_max_pages *
		    sizeof (rootnex_pgmap_t);
	} else {
		copy_state_size = 0;
	}
	/* add an additional 8 bytes for pointer alignment */
	space_needed = win_state_size + copy_state_size + 0x8;

	/* if we have enough space already, use it */
	if (state_available >= space_needed) {
		dma->dp_window = windowp;
		dma->dp_need_to_free_window = B_FALSE;

	/* not enough space, need to allocate more. */
	} else {
		dma->dp_window = kmem_alloc(space_needed, kmflag);
		if (dma->dp_window == NULL) {
			return (DDI_DMA_NORESOURCES);
		}
		dma->dp_need_to_free_window = B_TRUE;
		dma->dp_window_size = space_needed;
		ROOTNEX_DPROBE2(rootnex__bind__sp__alloc, dev_info_t *,
		    dma->dp_dip, size_t, space_needed);
	}

	/*
	 * we allocate copy buffer state and window state at the same time.
	 * setup our copy buffer state pointers. Make sure it's aligned.
	 */
	if (dma->dp_copybuf_size > 0) {
		dma->dp_pgmap = (rootnex_pgmap_t *)(((uintptr_t)
		    &dma->dp_window[dma->dp_max_win] + 0x7) & ~0x7);

#if !defined(__amd64)
		/*
		 * make sure all pm_mapped, pm_vaddr, and pm_pp are set to
		 * false/NULL. Should be quicker to bzero vs loop and set.
		 */
		bzero(dma->dp_pgmap, copy_state_size);
#endif
	} else {
		dma->dp_pgmap = NULL;
	}

	return (DDI_SUCCESS);
}
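
/*
 * Worked example (hypothetical numbers, for illustration only): a 1MB bind
 * that needs 64K of copy buffer when only 16K was allocated, against hardware
 * with dma_attr_sgllen = 17 and a 256K maxxfer, and 300 cookies in the sgl:
 *	copybuf_win = (64K / 16K) + 1 + 2 = 7
 *	sglwin      = (300 / (17 - 1)) + 1 = 19
 *	maxxfer_win = (1M / 256K) + 1 + 2 = 7
 * so dp_max_win = 33 windows of state are reserved, even though the bind will
 * normally use far fewer.
 */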
/*
 * rootnex_teardown_copybuf()
 *    cleans up after rootnex_setup_copybuf()
 */
static void
rootnex_teardown_copybuf(rootnex_dma_t *dma)
{
#if !defined(__amd64)
	int i;

	/*
	 * if we allocated kernel heap VMEM space, go through all the pages and
	 * map out any of the ones that were mapped into the kernel heap VMEM
	 * arena. Then free the VMEM space.
	 */
	if (dma->dp_kva != NULL) {
		for (i = 0; i < dma->dp_sglinfo.si_max_pages; i++) {
			if (dma->dp_pgmap[i].pm_mapped) {
				hat_unload(kas.a_hat, dma->dp_pgmap[i].pm_kaddr,
				    MMU_PAGESIZE, HAT_UNLOAD);
				dma->dp_pgmap[i].pm_mapped = B_FALSE;
			}
		}

		vmem_free(heap_arena, dma->dp_kva, dma->dp_copybuf_size);
	}

#endif

	/* if we allocated a copy buffer, free it */
	if (dma->dp_cbaddr != NULL) {
		i_ddi_mem_free(dma->dp_cbaddr, NULL);
	}
}
/*
 * rootnex_teardown_windows()
 *    cleans up after rootnex_setup_windows()
 */
static void
rootnex_teardown_windows(rootnex_dma_t *dma)
{
	/*
	 * if we had to allocate window state on the last bind (because we
	 * didn't have enough pre-allocated space in the handle), free it.
	 */
	if (dma->dp_need_to_free_window) {
		kmem_free(dma->dp_window, dma->dp_window_size);
	}
}
/*
 * rootnex_init_win()
 *    Called in bind slow path during creation of a new window. Initializes
 *    window state to default values.
 */
/*ARGSUSED*/
static void
rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
    rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset)
{
	hp->dmai_nwin++;
	window->wd_dosync = B_FALSE;
	window->wd_offset = cur_offset;
	window->wd_size = 0;
	window->wd_first_cookie = cookie;
	window->wd_cookie_cnt = 0;
	window->wd_trim.tr_trim_first = B_FALSE;
	window->wd_trim.tr_trim_last = B_FALSE;
	window->wd_trim.tr_first_copybuf_win = B_FALSE;
	window->wd_trim.tr_last_copybuf_win = B_FALSE;
#if !defined(__amd64)
	window->wd_remap_copybuf = dma->dp_cb_remaping;
#endif
}
/*
 * rootnex_setup_cookie()
 *    Called in the bind slow path when the sgl uses the copy buffer. If any of
 *    the sgl uses the copy buffer, we need to go through each cookie, figure
 *    out if it uses the copy buffer, and if it does, save away everything we'll
 *    need during sync.
 */
static void
rootnex_setup_cookie(ddi_dma_obj_t *dmar_object, rootnex_dma_t *dma,
    ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used,
    page_t **cur_pp)
{
	boolean_t copybuf_sz_power_2;
	rootnex_sglinfo_t *sinfo;
	paddr_t paddr;
	uint_t pidx;
	uint_t pcnt;
	off_t poff;
#if defined(__amd64)
	pfn_t pfn;
#else
	page_t **pplist;
#endif

	ASSERT(dmar_object->dmao_type != DMA_OTYP_DVADDR);

	sinfo = &dma->dp_sglinfo;

	/*
	 * Calculate the page index relative to the start of the buffer. The
	 * index to the current page for our buffer is the offset into the
	 * first page of the buffer plus our current offset into the buffer
	 * itself, shifted of course...
	 */
	pidx = (sinfo->si_buf_offset + cur_offset) >> MMU_PAGESHIFT;
	ASSERT(pidx < sinfo->si_max_pages);

	/* if this cookie uses the copy buffer */
	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
		/*
		 * NOTE: we know that since this cookie uses the copy buffer, it
		 * is <= MMU_PAGESIZE.
		 */

		/*
		 * get the offset into the page. For the 64-bit kernel, get the
		 * pfn which we'll use with seg kpm.
		 */
		poff = cookie->dmac_laddress & MMU_PAGEOFFSET;
#if defined(__amd64)
		/* mfn_to_pfn() is a NOP on i86pc */
		pfn = mfn_to_pfn(cookie->dmac_laddress >> MMU_PAGESHIFT);
#endif /* __amd64 */

		/* figure out if the copybuf size is a power of 2 */
		if (dma->dp_copybuf_size & (dma->dp_copybuf_size - 1)) {
			copybuf_sz_power_2 = B_FALSE;
		} else {
			copybuf_sz_power_2 = B_TRUE;
		}

		/* This page uses the copy buffer */
		dma->dp_pgmap[pidx].pm_uses_copybuf = B_TRUE;

		/*
		 * save the copy buffer KVA that we'll use with this page.
		 * if we still fit within the copybuf, it's a simple add.
		 * otherwise, we need to wrap over using & or % accordingly.
		 */
		if ((*copybuf_used + MMU_PAGESIZE) <= dma->dp_copybuf_size) {
			dma->dp_pgmap[pidx].pm_cbaddr = dma->dp_cbaddr +
			    *copybuf_used;
		} else {
			if (copybuf_sz_power_2) {
				dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)(
				    (uintptr_t)dma->dp_cbaddr +
				    (*copybuf_used &
				    (dma->dp_copybuf_size - 1)));
			} else {
				dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)(
				    (uintptr_t)dma->dp_cbaddr +
				    (*copybuf_used % dma->dp_copybuf_size));
			}
		}

		/*
		 * over write the cookie physical address with the physical
		 * address of the copy buffer page that we will DMA to/from.
		 */
		paddr = pfn_to_pa(hat_getpfnum(kas.a_hat,
		    dma->dp_pgmap[pidx].pm_cbaddr)) + poff;

		cookie->dmac_laddress = ROOTNEX_PADDR_TO_RBASE(paddr);

		/* if we have a kernel VA, it's easy, just save that address */
		if ((dmar_object->dmao_type != DMA_OTYP_PAGES) &&
		    (sinfo->si_asp == &kas)) {
			/*
			 * save away the page aligned virtual address of the
			 * driver buffer. Offsets are handled in the sync code.
			 */
			dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)(((uintptr_t)
			    dmar_object->dmao_obj.virt_obj.v_addr + cur_offset)
			    & MMU_PAGEMASK);
#if !defined(__amd64)
			/*
			 * we didn't need to, and will never need to, map this
			 * page.
			 */
			dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
#endif

		/* we don't have a kernel VA. We need one for the bcopy. */
		} else {
#if defined(__amd64)
			/*
			 * for the 64-bit kernel, it's easy. We use seg kpm to
			 * get a Kernel VA for the corresponding pfn.
			 */
			dma->dp_pgmap[pidx].pm_kaddr = hat_kpm_pfn2va(pfn);
#else
			/*
			 * for the 32-bit kernel, this is a pain. First we'll
			 * save away the page_t or user VA for this page. This
			 * is needed in rootnex_dma_win() when we switch to a
			 * new window which requires us to re-map the copy
			 * buffer.
			 */
			pplist = dmar_object->dmao_obj.virt_obj.v_priv;
			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
				dma->dp_pgmap[pidx].pm_pp = *cur_pp;
				dma->dp_pgmap[pidx].pm_vaddr = NULL;
			} else if (pplist != NULL) {
				dma->dp_pgmap[pidx].pm_pp = pplist[pidx];
				dma->dp_pgmap[pidx].pm_vaddr = NULL;
			} else {
				dma->dp_pgmap[pidx].pm_pp = NULL;
				dma->dp_pgmap[pidx].pm_vaddr = (caddr_t)
				    (((uintptr_t)
				    dmar_object->dmao_obj.virt_obj.v_addr +
				    cur_offset) & MMU_PAGEMASK);
			}

			/*
			 * save away the page aligned virtual address which was
			 * allocated from the kernel heap arena (taking into
			 * account if we need more copy buffer than we alloced
			 * and use multiple windows to handle this, i.e. &,%).
			 * NOTE: there isn't any physical memory backing up this
			 * virtual address space currently.
			 */
			if ((*copybuf_used + MMU_PAGESIZE) <=
			    dma->dp_copybuf_size) {
				dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
				    (((uintptr_t)dma->dp_kva + *copybuf_used) &
				    MMU_PAGEMASK);
			} else {
				if (copybuf_sz_power_2) {
					dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
					    (((uintptr_t)dma->dp_kva +
					    (*copybuf_used &
					    (dma->dp_copybuf_size - 1))) &
					    MMU_PAGEMASK);
				} else {
					dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
					    (((uintptr_t)dma->dp_kva +
					    (*copybuf_used %
					    dma->dp_copybuf_size)) &
					    MMU_PAGEMASK);
				}
			}

			/*
			 * if we haven't used up the available copy buffer yet,
			 * map the kva to the physical page.
			 */
			if (!dma->dp_cb_remaping && ((*copybuf_used +
			    MMU_PAGESIZE) <= dma->dp_copybuf_size)) {
				dma->dp_pgmap[pidx].pm_mapped = B_TRUE;
				if (dma->dp_pgmap[pidx].pm_pp != NULL) {
					i86_pp_map(dma->dp_pgmap[pidx].pm_pp,
					    dma->dp_pgmap[pidx].pm_kaddr);
				} else {
					i86_va_map(dma->dp_pgmap[pidx].pm_vaddr,
					    sinfo->si_asp,
					    dma->dp_pgmap[pidx].pm_kaddr);
				}

			/*
			 * we've used up the available copy buffer, this page
			 * will have to be mapped during rootnex_dma_win() when
			 * we switch to a new window which requires a re-map of
			 * the copy buffer. (32-bit kernel only)
			 */
			} else {
				dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
			}
#endif
			/* go to the next page_t */
			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
				*cur_pp = (*cur_pp)->p_next;
			}
		}

		/* add to the copy buffer count */
		*copybuf_used += MMU_PAGESIZE;

	/*
	 * This cookie doesn't use the copy buffer. Walk through the pages this
	 * cookie occupies to reflect this.
	 */
	} else {
		/*
		 * figure out how many pages the cookie occupies. We need to
		 * use the original page offset of the buffer and the cookie's
		 * offset in the buffer to do this.
		 */
		poff = (sinfo->si_buf_offset + cur_offset) & MMU_PAGEOFFSET;
		pcnt = mmu_btopr(cookie->dmac_size + poff);

		while (pcnt > 0) {
#if !defined(__amd64)
			/*
			 * the 32-bit kernel doesn't have seg kpm, so we need
			 * to map in the driver buffer (if it didn't come down
			 * with a kernel VA) on the fly. Since this page doesn't
			 * use the copy buffer, it's not, nor will it ever be,
			 * mapped in.
			 */
			dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
#endif
			dma->dp_pgmap[pidx].pm_uses_copybuf = B_FALSE;

			/*
			 * we need to update pidx and cur_pp or we'll lose
			 * track of where we are.
			 */
			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
				*cur_pp = (*cur_pp)->p_next;
			}
			pidx++;
			pcnt--;
		}
	}
}
/*
 * rootnex_sgllen_window_boundary()
 *    Called in the bind slow path when the next cookie causes us to exceed (in
 *    this case == since we start at 0 and sgllen starts at 1) the maximum sgl
 *    length supported by the DMA H/W.
 */
static int
rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
    rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, ddi_dma_attr_t *attr,
    off_t cur_offset)
{
	off_t new_offset;
	size_t trim_sz;
	off_t coffset;


	/*
	 * if we know we'll never have to trim, it's pretty easy. Just move to
	 * the next window and init it. We're done.
	 */
	if (!dma->dp_trim_required) {
		(*windowp)++;
		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
		(*windowp)->wd_cookie_cnt++;
		(*windowp)->wd_size = cookie->dmac_size;
		return (DDI_SUCCESS);
	}

	/* figure out how much we need to trim from the window */
	ASSERT(attr->dma_attr_granular != 0);
	if (dma->dp_granularity_power_2) {
		trim_sz = (*windowp)->wd_size & (attr->dma_attr_granular - 1);
	} else {
		trim_sz = (*windowp)->wd_size % attr->dma_attr_granular;
	}

	/* The window's a whole multiple of granularity. We're done */
	if (trim_sz == 0) {
		(*windowp)++;
		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
		(*windowp)->wd_cookie_cnt++;
		(*windowp)->wd_size = cookie->dmac_size;
		return (DDI_SUCCESS);
	}

	/*
	 * The window's not a whole multiple of granularity, since we know this
	 * is due to the sgllen, we need to go back to the last cookie and trim
	 * that one, add the left over part of the old cookie into the new
	 * window, and then add in the new cookie into the new window.
	 */

	/*
	 * make sure the driver isn't making us do something bad... Trimming and
	 * sgllen == 1 don't go together.
	 */
	if (attr->dma_attr_sgllen == 1) {
		return (DDI_DMA_NOMAPPING);
	}

	/*
	 * first, setup the current window to account for the trim. Need to go
	 * back to the last cookie for this.
	 */
	cookie--;
	(*windowp)->wd_trim.tr_trim_last = B_TRUE;
	(*windowp)->wd_trim.tr_last_cookie = cookie;
	(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
	ASSERT(cookie->dmac_size > trim_sz);
	(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
	(*windowp)->wd_size -= trim_sz;

	/* save the buffer offsets for the next window */
	coffset = cookie->dmac_size - trim_sz;
	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;

	/*
	 * set this now in case this is the first window. all other cases are
	 * set in dma_win()
	 */
	cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;

	/*
	 * initialize the next window using what's left over in the previous
	 * cookie.
	 */
	(*windowp)++;
	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
	(*windowp)->wd_cookie_cnt++;
	(*windowp)->wd_trim.tr_trim_first = B_TRUE;
	(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + coffset;
	(*windowp)->wd_trim.tr_first_size = trim_sz;
	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
		(*windowp)->wd_dosync = B_TRUE;
	}

	/*
	 * now go back to the current cookie and add it to the new window. set
	 * the new window size to what was left over from the previous
	 * cookie and what's in the current cookie.
	 */
	cookie++;
	(*windowp)->wd_cookie_cnt++;
	(*windowp)->wd_size = trim_sz + cookie->dmac_size;

	/*
	 * trim plus the next cookie could put us over maxxfer (a cookie can be
	 * a max size of maxxfer). Handle that case.
	 */
	if ((*windowp)->wd_size > dma->dp_maxxfer) {
		/*
		 * maxxfer is already a whole multiple of granularity, and this
		 * trim will be <= the previous trim (since a cookie can't be
		 * larger than maxxfer). Make things simple here.
		 */
		trim_sz = (*windowp)->wd_size - dma->dp_maxxfer;
		(*windowp)->wd_trim.tr_trim_last = B_TRUE;
		(*windowp)->wd_trim.tr_last_cookie = cookie;
		(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
		(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
		(*windowp)->wd_size -= trim_sz;
		ASSERT((*windowp)->wd_size == dma->dp_maxxfer);

		/* save the buffer offsets for the next window */
		coffset = cookie->dmac_size - trim_sz;
		new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;

		/* setup the next window */
		(*windowp)++;
		rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
		(*windowp)->wd_cookie_cnt++;
		(*windowp)->wd_trim.tr_trim_first = B_TRUE;
		(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress +
		    coffset;
		(*windowp)->wd_trim.tr_first_size = trim_sz;
	}

	return (DDI_SUCCESS);
}
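
/*
 * Worked example (hypothetical numbers, for illustration only): with
 * dma_attr_granular = 512 and a window that has grown to wd_size = 0x10300
 * when sgllen is exhausted, trim_sz = 0x10300 % 512 = 0x100. The last cookie
 * is shortened by 0x100 bytes so the window ends on a 512-byte multiple, and
 * the trimmed 0x100 bytes become the tr_trim_first portion of the next
 * window.
 */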
/*
 * rootnex_copybuf_window_boundary()
 *    Called in bind slowpath when we get to a window boundary because we used
 *    up all the copy buffer that we have.
 */
static int
rootnex_copybuf_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
    rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, off_t cur_offset,
    size_t *copybuf_used)
{
	rootnex_sglinfo_t *sinfo;
	off_t new_offset;
	size_t trim_sz;
	paddr_t paddr;
	off_t coffset;
	uint_t pidx;
	off_t poff;


	sinfo = &dma->dp_sglinfo;

	/*
	 * the copy buffer should be a whole multiple of page size. We know
	 * that this cookie is <= MMU_PAGESIZE.
	 */
	ASSERT(cookie->dmac_size <= MMU_PAGESIZE);

	/*
	 * from now on, all new windows in this bind need to be re-mapped
	 * during ddi_dma_getwin() (32-bit kernel only). i.e. we ran out of
	 * copybuf space.
	 */
#if !defined(__amd64)
	dma->dp_cb_remaping = B_TRUE;
#endif

	/* reset copybuf used */
	*copybuf_used = 0;

	/*
	 * if we don't have to trim (since granularity is set to 1), go to the
	 * next window and add the current cookie to it. We know the current
	 * cookie uses the copy buffer since we're in this code path.
	 */
	if (!dma->dp_trim_required) {
		(*windowp)++;
		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);

		/* Add this cookie to the new window */
		(*windowp)->wd_cookie_cnt++;
		(*windowp)->wd_size += cookie->dmac_size;
		*copybuf_used += MMU_PAGESIZE;
		return (DDI_SUCCESS);
	}

	/*
	 * *** may need to trim, figure it out.
	 */

	/* figure out how much we need to trim from the window */
	if (dma->dp_granularity_power_2) {
		trim_sz = (*windowp)->wd_size &
		    (hp->dmai_attr.dma_attr_granular - 1);
	} else {
		trim_sz = (*windowp)->wd_size %
		    hp->dmai_attr.dma_attr_granular;
	}

	/*
	 * if the window's a whole multiple of granularity, go to the next
	 * window, init it, then add in the current cookie. We know the
	 * current cookie uses the copy buffer since we're in this code path.
	 */
	if (trim_sz == 0) {
		(*windowp)++;
		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);

		/* Add this cookie to the new window */
		(*windowp)->wd_cookie_cnt++;
		(*windowp)->wd_size += cookie->dmac_size;
		*copybuf_used += MMU_PAGESIZE;
		return (DDI_SUCCESS);
	}

	/*
	 * *** We figured it out, we definitely need to trim
	 */

	/*
	 * make sure the driver isn't making us do something bad...
	 * Trimming and sgllen == 1 don't go together.
	 */
	if (hp->dmai_attr.dma_attr_sgllen == 1) {
		return (DDI_DMA_NOMAPPING);
	}

	/*
	 * first, setup the current window to account for the trim. Need to
	 * go back to the last cookie for this. Some of the last cookie will
	 * be in the current window, and some of the last cookie will be in
	 * the new window. All of the current cookie will be in the new
	 * window.
	 */
	cookie--;
	(*windowp)->wd_trim.tr_trim_last = B_TRUE;
	(*windowp)->wd_trim.tr_last_cookie = cookie;
	(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
	ASSERT(cookie->dmac_size > trim_sz);
	(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
	(*windowp)->wd_size -= trim_sz;

	/*
	 * we're trimming the last cookie (not the current cookie). So that
	 * last cookie may or may not have been using the copy buffer (we
	 * know the cookie passed in uses the copy buffer since we're in
	 * this code path).
	 *
	 * If the last cookie doesn't use the copy buffer, nothing special to
	 * do. However, if it does use the copy buffer, it will be both the
	 * last page in the current window and the first page in the next
	 * window. Since we are reusing the copy buffer (and KVA space on the
	 * 32-bit kernel), this page will use the end of the copy buffer in
	 * the current window, and the start of the copy buffer in the next
	 * window. Track that info... The cookie physical address was already
	 * set to the copy buffer physical address in setup_cookie.
	 */
	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
		pidx = (sinfo->si_buf_offset + (*windowp)->wd_offset +
		    (*windowp)->wd_size) >> MMU_PAGESHIFT;
		(*windowp)->wd_trim.tr_last_copybuf_win = B_TRUE;
		(*windowp)->wd_trim.tr_last_pidx = pidx;
		(*windowp)->wd_trim.tr_last_cbaddr =
		    dma->dp_pgmap[pidx].pm_cbaddr;
#if !defined(__amd64)
		(*windowp)->wd_trim.tr_last_kaddr =
		    dma->dp_pgmap[pidx].pm_kaddr;
#endif
	}

	/* save the buffer offsets for the next window */
	coffset = cookie->dmac_size - trim_sz;
	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;

	/*
	 * set this now in case this is the first window. all other cases are
	 * set in dma_win()
	 */
	cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;

	/*
	 * initialize the next window using what's left over in the previous
	 * cookie.
	 */
	(*windowp)++;
	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
	(*windowp)->wd_cookie_cnt++;
	(*windowp)->wd_trim.tr_trim_first = B_TRUE;
	(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + coffset;
	(*windowp)->wd_trim.tr_first_size = trim_sz;

	/*
	 * again, we're tracking if the last cookie uses the copy buffer.
	 * read the comment above for more info on why we need to track
	 * additional state.
	 *
	 * For the first cookie in the new window, we need to reset the
	 * physical address we DMA into to the start of the copy buffer plus
	 * any initial page offset which may be present.
	 */
	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
		(*windowp)->wd_dosync = B_TRUE;
		(*windowp)->wd_trim.tr_first_copybuf_win = B_TRUE;
		(*windowp)->wd_trim.tr_first_pidx = pidx;
		(*windowp)->wd_trim.tr_first_cbaddr = dma->dp_cbaddr;
		poff = (*windowp)->wd_trim.tr_first_paddr & MMU_PAGEOFFSET;

		paddr = pfn_to_pa(hat_getpfnum(kas.a_hat, dma->dp_cbaddr)) +
		    poff;
		(*windowp)->wd_trim.tr_first_paddr =
		    ROOTNEX_PADDR_TO_RBASE(paddr);

#if !defined(__amd64)
		(*windowp)->wd_trim.tr_first_kaddr = dma->dp_kva;
#endif
		/* account for the cookie copybuf usage in the new window */
		*copybuf_used += MMU_PAGESIZE;

		/*
		 * every piece of code has to have a hack, and here is this
		 * code's hack!!!
		 *
		 * There is a complex interaction between setup_cookie and the
		 * copybuf window boundary. The complexity had to be in either
		 * the maxxfer window, or the copybuf window, and I chose the
		 * copybuf code.
		 *
		 * So in this code path, we have taken the last cookie,
		 * virtually broken it in half due to the trim, and it happens
		 * to use the copybuf which further complicates life. At the
		 * same time, we have already setup the current cookie, which
		 * is now wrong. More background info: the current cookie uses
		 * the copybuf, so it is only a page long max. So we need to
		 * fix the current cookie's copy buffer address, physical
		 * address, and kva for the 32-bit kernel. We do this by
		 * bumping them by page size (of course, we can't do this on
		 * the physical address since the copy buffer may not be
		 * physically contiguous).
		 */
		cookie++;
		dma->dp_pgmap[pidx + 1].pm_cbaddr += MMU_PAGESIZE;
		poff = cookie->dmac_laddress & MMU_PAGEOFFSET;

		paddr = pfn_to_pa(hat_getpfnum(kas.a_hat,
		    dma->dp_pgmap[pidx + 1].pm_cbaddr)) + poff;
		cookie->dmac_laddress = ROOTNEX_PADDR_TO_RBASE(paddr);

#if !defined(__amd64)
		ASSERT(dma->dp_pgmap[pidx + 1].pm_mapped == B_FALSE);
		dma->dp_pgmap[pidx + 1].pm_kaddr += MMU_PAGESIZE;
#endif
	} else {
		/* go back to the current cookie */
		cookie++;
	}

	/*
	 * add the current cookie to the new window. set the new window size
	 * to what was left over from the previous cookie and what's in the
	 * current cookie.
	 */
	(*windowp)->wd_cookie_cnt++;
	(*windowp)->wd_size = trim_sz + cookie->dmac_size;
	ASSERT((*windowp)->wd_size < dma->dp_maxxfer);

	/*
	 * we know that the cookie passed in always uses the copy buffer. We
	 * wouldn't be here if it didn't.
	 */
	*copybuf_used += MMU_PAGESIZE;

	return (DDI_SUCCESS);
}
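
/*
 * Illustrative note (not part of the driver logic above): when
 * dma_attr_granular is a power of two, the trim computation above uses
 * "wd_size & (granular - 1)" as a cheaper equivalent of
 * "wd_size % granular". A minimal sketch of the same idea, with assumed
 * example values:
 *
 *	size_t granular = 512;				// power of two
 *	size_t wd_size = 0x10237;
 *	size_t trim_a = wd_size & (granular - 1);	// 0x37
 *	size_t trim_b = wd_size % granular;		// 0x37, same result
 *
 * Both give the number of bytes by which the window overshoots the last
 * whole granule; the window is shrunk by that amount so it ends on a
 * granularity boundary.
 */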
/*
 * rootnex_maxxfer_window_boundary()
 *    Called in bind slowpath when we get to a window boundary because we will
 *    go over maxxfer.
 */
static int
rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
    rootnex_window_t **windowp, ddi_dma_cookie_t *cookie)
{
	size_t dmac_size;
	off_t new_offset;
	size_t trim_sz;
	off_t coffset;


	/*
	 * calculate how much we have to trim off of the current cookie to
	 * equal maxxfer. We don't have to account for granularity here since
	 * our maxxfer already takes that into account.
	 */
	trim_sz = ((*windowp)->wd_size + cookie->dmac_size) - dma->dp_maxxfer;
	ASSERT(trim_sz <= cookie->dmac_size);
	ASSERT(trim_sz <= dma->dp_maxxfer);

	/* save cookie size since we need it later and we might change it */
	dmac_size = cookie->dmac_size;

	/*
	 * if we're not trimming the entire cookie, setup the current window
	 * to account for the trim.
	 */
	if (trim_sz < cookie->dmac_size) {
		(*windowp)->wd_cookie_cnt++;
		(*windowp)->wd_trim.tr_trim_last = B_TRUE;
		(*windowp)->wd_trim.tr_last_cookie = cookie;
		(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
		(*windowp)->wd_trim.tr_last_size = cookie->dmac_size -
		    trim_sz;
		(*windowp)->wd_size = dma->dp_maxxfer;

		/*
		 * set the adjusted cookie size now in case this is the first
		 * window. All other windows are taken care of in getwin()
		 */
		cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
	}

	/*
	 * coffset is the current offset within the cookie, new_offset is the
	 * current offset within the entire buffer.
	 */
	coffset = dmac_size - trim_sz;
	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;

	/* initialize the next window */
	(*windowp)++;
	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
	(*windowp)->wd_cookie_cnt++;
	(*windowp)->wd_size = trim_sz;
	if (trim_sz < dmac_size) {
		(*windowp)->wd_trim.tr_trim_first = B_TRUE;
		(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress +
		    coffset;
		(*windowp)->wd_trim.tr_first_size = trim_sz;
	}

	return (DDI_SUCCESS);
}
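
/*
 * Illustrative note (assumed example values, not taken from the code above):
 * if the current window already holds 0x3F000 bytes, the incoming cookie is
 * 0x2000 bytes, and dp_maxxfer is 0x40000, then
 *
 *	trim_sz = (0x3F000 + 0x2000) - 0x40000 = 0x1000
 *
 * so the current window keeps the first 0x1000 bytes of the cookie
 * (tr_last_size) and is capped at exactly dp_maxxfer, while the remaining
 * 0x1000 bytes become the trimmed first cookie of the next window
 * (tr_first_size == trim_sz).
 */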
static int
rootnex_coredma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
    off_t off, size_t len, uint_t cache_flags)
{
	rootnex_sglinfo_t *sinfo;
	rootnex_pgmap_t *cbpage;
	rootnex_window_t *win;
	ddi_dma_impl_t *hp;
	rootnex_dma_t *dma;
	caddr_t fromaddr;
	caddr_t toaddr;
	size_t psize;
	off_t offset;
	uint_t pidx;
	size_t size;
	off_t poff;
	int e;


	hp = (ddi_dma_impl_t *)handle;
	dma = (rootnex_dma_t *)hp->dmai_private;
	sinfo = &dma->dp_sglinfo;

	/*
	 * if we don't have any windows, we don't need to sync. A copybuf
	 * will cause us to have at least one window.
	 */
	if (dma->dp_window == NULL) {
		return (DDI_SUCCESS);
	}

	/* This window may not need to be sync'd */
	win = &dma->dp_window[dma->dp_current_win];
	if (!win->wd_dosync) {
		return (DDI_SUCCESS);
	}

	/* handle off and len special cases */
	if ((off == 0) || (rootnex_sync_ignore_params)) {
		offset = win->wd_offset;
	} else {
		offset = off;
	}
	if ((len == 0) || (rootnex_sync_ignore_params)) {
		size = win->wd_size;
	} else {
		size = len;
	}

	/* check the sync args to make sure they make a little sense */
	if (rootnex_sync_check_parms) {
		e = rootnex_valid_sync_parms(hp, win, offset, size,
		    cache_flags);
		if (e != DDI_SUCCESS) {
			ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_SYNC_FAIL]);
			return (DDI_FAILURE);
		}
	}

	/*
	 * special case the first page to handle the offset into the page. The
	 * offset to the current page for our buffer is the offset into the
	 * first page of the buffer plus our current offset into the buffer
	 * itself, masked of course.
	 */
	poff = (sinfo->si_buf_offset + offset) & MMU_PAGEOFFSET;
	psize = MIN((MMU_PAGESIZE - poff), size);

	/* go through all the pages that we want to sync */
	while (size > 0) {
		/*
		 * Calculate the page index relative to the start of the
		 * buffer. The index to the current page for our buffer is the
		 * offset into the first page of the buffer plus our current
		 * offset into the buffer itself, shifted of course...
		 */
		pidx = (sinfo->si_buf_offset + offset) >> MMU_PAGESHIFT;
		ASSERT(pidx < sinfo->si_max_pages);

		/*
		 * if this page uses the copy buffer, we need to sync it,
		 * otherwise, go on to the next page.
		 */
		cbpage = &dma->dp_pgmap[pidx];
		ASSERT((cbpage->pm_uses_copybuf == B_TRUE) ||
		    (cbpage->pm_uses_copybuf == B_FALSE));
		if (cbpage->pm_uses_copybuf) {
			/* cbaddr and kaddr should be page aligned */
			ASSERT(((uintptr_t)cbpage->pm_cbaddr &
			    MMU_PAGEOFFSET) == 0);
			ASSERT(((uintptr_t)cbpage->pm_kaddr &
			    MMU_PAGEOFFSET) == 0);

			/*
			 * if we're copying for the device, we are going to
			 * copy from the drivers buffer and to the rootnex
			 * allocated copy buffer.
			 */
			if (cache_flags == DDI_DMA_SYNC_FORDEV) {
				fromaddr = cbpage->pm_kaddr + poff;
				toaddr = cbpage->pm_cbaddr + poff;
				ROOTNEX_DPROBE2(rootnex__sync__dev,
				    dev_info_t *, dma->dp_dip, size_t, psize);

			/*
			 * if we're copying for the cpu/kernel, we are going to
			 * copy from the rootnex allocated copy buffer to the
			 * drivers buffer.
			 */
			} else {
				fromaddr = cbpage->pm_cbaddr + poff;
				toaddr = cbpage->pm_kaddr + poff;
				ROOTNEX_DPROBE2(rootnex__sync__cpu,
				    dev_info_t *, dma->dp_dip, size_t, psize);
			}

			bcopy(fromaddr, toaddr, psize);
		}

		/*
		 * decrement size until we're done, update our offset into the
		 * buffer, and get the next page size.
		 */
		size -= psize;
		offset += psize;
		psize = MIN(MMU_PAGESIZE, size);

		/* page offset is zero for the rest of this loop */
		poff = 0;
	}

	return (DDI_SUCCESS);
}
/*
 * rootnex_dma_sync()
 *    called from ddi_dma_sync() if DMP_NOSYNC is not set in hp->dmai_rflags.
 *    We set DMP_NOSYNC if we're not using the copy buffer. If DMP_NOSYNC
 *    is set, ddi_dma_sync() returns immediately passing back success.
 */
static int
rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
    off_t off, size_t len, uint_t cache_flags)
{
#if defined(__amd64) && !defined(__xpv)
	if (IOMMU_USED(rdip)) {
		return (iommulib_nexdma_sync(dip, rdip, handle, off, len,
		    cache_flags));
	}
#endif
	return (rootnex_coredma_sync(dip, rdip, handle, off, len,
	    cache_flags));
}
/*
 * rootnex_valid_sync_parms()
 *    checks the parameters passed to sync to verify they are correct.
 */
static int
rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win,
    off_t offset, size_t size, uint_t cache_flags)
{
	off_t woffset;


	/*
	 * the first part of the test to make sure the offset passed in is
	 * within the window.
	 */
	if (offset < win->wd_offset) {
		return (DDI_FAILURE);
	}

	/*
	 * second and last part of the test to make sure the offset and length
	 * passed in are within the window.
	 */
	woffset = offset - win->wd_offset;
	if ((woffset + size) > win->wd_size) {
		return (DDI_FAILURE);
	}

	/*
	 * if we are sync'ing for the device, the DDI_DMA_WRITE flag should
	 * be set too.
	 */
	if ((cache_flags == DDI_DMA_SYNC_FORDEV) &&
	    (hp->dmai_rflags & DDI_DMA_WRITE)) {
		return (DDI_SUCCESS);
	}

	/*
	 * at this point, either DDI_DMA_SYNC_FORCPU or DDI_DMA_SYNC_FORKERNEL
	 * should be set. Also DDI_DMA_READ should be set in the flags.
	 */
	if (((cache_flags == DDI_DMA_SYNC_FORCPU) ||
	    (cache_flags == DDI_DMA_SYNC_FORKERNEL)) &&
	    (hp->dmai_rflags & DDI_DMA_READ)) {
		return (DDI_SUCCESS);
	}

	return (DDI_FAILURE);
}
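
/*
 * Illustrative sketch (hypothetical leaf driver code, not part of rootnex):
 * the checks above mirror how a well-behaved driver pairs its bind direction
 * with the sync flag. For example, after a device-to-memory transfer
 * completes on a handle that was bound with DDI_DMA_READ, the driver would
 * do something like:
 *
 *	if (ddi_dma_sync(xsp->dma_handle, 0, xsp->xfer_len,
 *	    DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
 *		return (EIO);
 *
 * and would use DDI_DMA_SYNC_FORDEV before starting a memory-to-device
 * transfer on a handle bound with DDI_DMA_WRITE. "xsp" and "xfer_len" are
 * made-up names for this example.
 */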
static int
rootnex_coredma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
    uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
    uint_t *ccountp)
{
	rootnex_window_t *window;
	rootnex_trim_t *trim;
	ddi_dma_impl_t *hp;
	rootnex_dma_t *dma;
	ddi_dma_obj_t *dmao;
#if !defined(__amd64)
	rootnex_sglinfo_t *sinfo;
	rootnex_pgmap_t *pmap;
	uint_t pidx;
	uint_t pcnt;
	off_t poff;
	int i;
#endif


	hp = (ddi_dma_impl_t *)handle;
	dma = (rootnex_dma_t *)hp->dmai_private;
#if !defined(__amd64)
	sinfo = &dma->dp_sglinfo;
#endif

	/* If we try and get a window which doesn't exist, return failure */
	if (win >= hp->dmai_nwin) {
		ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]);
		return (DDI_FAILURE);
	}

	dmao = dma->dp_dvma_used ? &dma->dp_dvma : &dma->dp_dma;

	/*
	 * if we don't have any windows, and they're asking for the first
	 * window, setup the cookie pointer to the first cookie in the bind.
	 * setup our return values, then increment the cookie since we return
	 * the first cookie on the stack.
	 */
	if (dma->dp_window == NULL) {
		if (win != 0) {
			ROOTNEX_DPROF_INC(
			    &rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]);
			return (DDI_FAILURE);
		}
		hp->dmai_cookie = dma->dp_cookies;
		*offp = 0;
		*lenp = dmao->dmao_size;
		*ccountp = dma->dp_sglinfo.si_sgl_size;
		*cookiep = hp->dmai_cookie[0];
		hp->dmai_cookie++;
		return (DDI_SUCCESS);
	}

	/* sync the old window before moving on to the new one */
	window = &dma->dp_window[dma->dp_current_win];
	if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_READ)) {
		(void) rootnex_coredma_sync(dip, rdip, handle, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
	}

#if !defined(__amd64)
	/*
	 * before we move to the next window, if we need to re-map, unmap all
	 * the pages in this window.
	 */
	if (dma->dp_cb_remaping) {
		/*
		 * If we switch to this window again, we'll need to map in
		 * on the fly next time.
		 */
		window->wd_remap_copybuf = B_TRUE;

		/*
		 * calculate the page index into the buffer where this window
		 * starts, and the number of pages this window takes up.
		 */
		pidx = (sinfo->si_buf_offset + window->wd_offset) >>
		    MMU_PAGESHIFT;
		poff = (sinfo->si_buf_offset + window->wd_offset) &
		    MMU_PAGEOFFSET;
		pcnt = mmu_btopr(window->wd_size + poff);
		ASSERT((pidx + pcnt) <= sinfo->si_max_pages);

		/* unmap pages which are currently mapped in this window */
		for (i = 0; i < pcnt; i++) {
			if (dma->dp_pgmap[pidx].pm_mapped) {
				hat_unload(kas.a_hat,
				    dma->dp_pgmap[pidx].pm_kaddr, MMU_PAGESIZE,
				    HAT_UNLOAD);
				dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
			}
			pidx++;
		}
	}
#endif

	/*
	 * Move to the new window.
	 * NOTE: current_win must be set for sync to work right
	 */
	dma->dp_current_win = win;
	window = &dma->dp_window[win];

	/* if needed, adjust the first and/or last cookies for trim */
	trim = &window->wd_trim;
	if (trim->tr_trim_first) {
		window->wd_first_cookie->dmac_laddress = trim->tr_first_paddr;
		window->wd_first_cookie->dmac_size = trim->tr_first_size;
#if !defined(__amd64)
		window->wd_first_cookie->dmac_type =
		    (window->wd_first_cookie->dmac_type &
		    ROOTNEX_USES_COPYBUF) + window->wd_offset;
#endif
		if (trim->tr_first_copybuf_win) {
			dma->dp_pgmap[trim->tr_first_pidx].pm_cbaddr =
			    trim->tr_first_cbaddr;
#if !defined(__amd64)
			dma->dp_pgmap[trim->tr_first_pidx].pm_kaddr =
			    trim->tr_first_kaddr;
#endif
		}
	}
	if (trim->tr_trim_last) {
		trim->tr_last_cookie->dmac_laddress = trim->tr_last_paddr;
		trim->tr_last_cookie->dmac_size = trim->tr_last_size;
		if (trim->tr_last_copybuf_win) {
			dma->dp_pgmap[trim->tr_last_pidx].pm_cbaddr =
			    trim->tr_last_cbaddr;
#if !defined(__amd64)
			dma->dp_pgmap[trim->tr_last_pidx].pm_kaddr =
			    trim->tr_last_kaddr;
#endif
		}
	}

	/*
	 * setup the cookie pointer to the first cookie in the window. setup
	 * our return values, then increment the cookie since we return the
	 * first cookie on the stack.
	 */
	hp->dmai_cookie = window->wd_first_cookie;
	*offp = window->wd_offset;
	*lenp = window->wd_size;
	*ccountp = window->wd_cookie_cnt;
	*cookiep = hp->dmai_cookie[0];
	hp->dmai_cookie++;

#if !defined(__amd64)
	/* re-map copybuf if required for this window */
	if (dma->dp_cb_remaping) {
		/*
		 * calculate the page index into the buffer where this
		 * window starts.
		 */
		pidx = (sinfo->si_buf_offset + window->wd_offset) >>
		    MMU_PAGESHIFT;
		ASSERT(pidx < sinfo->si_max_pages);

		/*
		 * the first page can get unmapped if it's shared with the
		 * previous window. Even if the rest of this window is already
		 * mapped in, we need to still check this one.
		 */
		pmap = &dma->dp_pgmap[pidx];
		if ((pmap->pm_uses_copybuf) && (pmap->pm_mapped == B_FALSE)) {
			if (pmap->pm_pp != NULL) {
				pmap->pm_mapped = B_TRUE;
				i86_pp_map(pmap->pm_pp, pmap->pm_kaddr);
			} else if (pmap->pm_vaddr != NULL) {
				pmap->pm_mapped = B_TRUE;
				i86_va_map(pmap->pm_vaddr, sinfo->si_asp,
				    pmap->pm_kaddr);
			}
		}
		pidx++;

		/* map in the rest of the pages if required */
		if (window->wd_remap_copybuf) {
			window->wd_remap_copybuf = B_FALSE;

			/* figure out how many pages this window takes up */
			poff = (sinfo->si_buf_offset + window->wd_offset) &
			    MMU_PAGEOFFSET;
			pcnt = mmu_btopr(window->wd_size + poff);
			ASSERT(((pidx - 1) + pcnt) <= sinfo->si_max_pages);

			/* map pages which require it */
			for (i = 1; i < pcnt; i++) {
				pmap = &dma->dp_pgmap[pidx];
				if (pmap->pm_uses_copybuf) {
					ASSERT(pmap->pm_mapped == B_FALSE);
					if (pmap->pm_pp != NULL) {
						pmap->pm_mapped = B_TRUE;
						i86_pp_map(pmap->pm_pp,
						    pmap->pm_kaddr);
					} else if (pmap->pm_vaddr != NULL) {
						pmap->pm_mapped = B_TRUE;
						i86_va_map(pmap->pm_vaddr,
						    sinfo->si_asp,
						    pmap->pm_kaddr);
					}
				}
				pidx++;
			}
		}
	}
#endif

	/* if the new window uses the copy buffer, sync it for the device */
	if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_WRITE)) {
		(void) rootnex_coredma_sync(dip, rdip, handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
	}

	return (DDI_SUCCESS);
}
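
/*
 * Illustrative sketch (hypothetical leaf driver code, not part of rootnex):
 * a driver whose bind returned DDI_DMA_PARTIAL_MAP walks the windows that
 * the routine above serves, roughly as follows ("xsp", "nwin", "off", "len",
 * "cookie", and "ccount" are made-up names for this example):
 *
 *	(void) ddi_dma_numwin(xsp->dma_handle, &nwin);
 *	for (win = 0; win < nwin; win++) {
 *		if (ddi_dma_getwin(xsp->dma_handle, win, &off, &len,
 *		    &cookie, &ccount) != DDI_SUCCESS)
 *			break;
 *		// program the device with "cookie" and the remaining
 *		// ccount - 1 cookies obtained via ddi_dma_nextcookie()
 *	}
 *
 * Each ddi_dma_getwin() call lands in rootnex_coredma_win(), which syncs the
 * old window, adjusts the trimmed first/last cookies, and (on the 32-bit
 * kernel) remaps the copy buffer for the new window.
 */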
/*
 * rootnex_dma_win()
 *    called from ddi_dma_getwin()
 */
static int
rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
    uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
    uint_t *ccountp)
{
#if defined(__amd64) && !defined(__xpv)
	if (IOMMU_USED(rdip)) {
		return (iommulib_nexdma_win(dip, rdip, handle, win, offp, lenp,
		    cookiep, ccountp));
	}
#endif
	return (rootnex_coredma_win(dip, rdip, handle, win, offp, lenp,
	    cookiep, ccountp));
}
#if defined(__amd64) && !defined(__xpv)
static int
rootnex_coredma_hdl_setprivate(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, void *v)
{
	ddi_dma_impl_t *hp;
	rootnex_dma_t *dma;

	hp = (ddi_dma_impl_t *)handle;
	dma = (rootnex_dma_t *)hp->dmai_private;
	dma->dp_iommu_private = v;

	return (DDI_SUCCESS);
}
static void *
rootnex_coredma_hdl_getprivate(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *hp;
	rootnex_dma_t *dma;

	hp = (ddi_dma_impl_t *)handle;
	dma = (rootnex_dma_t *)hp->dmai_private;

	return (dma->dp_iommu_private);
}
#endif	/* defined(__amd64) && !defined(__xpv) */
/*
 * ************************
 *  obsoleted dma routines
 * ************************
 */

/*
 * rootnex_dma_mctl()
 *
 *    We don't support this legacy interface any more on x86.
 */
static int
rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
    enum ddi_dma_ctlops request, off_t *offp, size_t *lenp, caddr_t *objpp,
    uint_t cache_flags)
{
	/*
	 * The only thing dma_mctl is used for anymore is legacy SPARC
	 * dvma and sbus-specific routines.
	 */
	return (DDI_FAILURE);
}
static int
rootnex_fm_init(dev_info_t *dip, dev_info_t *tdip, int tcap,
    ddi_iblock_cookie_t *ibc)
{
	*ibc = rootnex_state->r_err_ibc;

	return (ddi_system_fmcap);
}
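
/*
 * Illustrative sketch (hypothetical child driver, not part of rootnex): the
 * fm-capability value returned above ultimately reaches drivers that
 * negotiate their FMA capabilities during attach, e.g.:
 *
 *	xsp->fm_cap = DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE;
 *	ddi_fm_init(dip, &xsp->fm_cap, &xsp->fm_ibc);
 *
 * "xsp" is a made-up soft-state pointer for this example; the DDI routes the
 * request up the device tree until it reaches this bus op.
 */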
/*
 * rootnex_dma_check()
 *    Function called after a dma fault occurred to find out whether the
 *    fault address is associated with a driver that is able to handle faults
 *    and recover from faults.
 */
static int
rootnex_dma_check(dev_info_t *dip, const void *handle, const void *addr,
    const void *not_used)
{
	rootnex_window_t *window;
	uint64_t start_addr;
	uint64_t fault_addr;
	ddi_dma_impl_t *hp;
	rootnex_dma_t *dma;
	uint64_t end_addr;
	uint64_t csize;
	int i;
	int j;


	/* The driver has to set DDI_DMA_FLAGERR to recover from dma faults */
	hp = (ddi_dma_impl_t *)handle;
	ASSERT(hp);

	dma = (rootnex_dma_t *)hp->dmai_private;

	/* Get the address that we need to search for */
	fault_addr = *(uint64_t *)addr;

	/*
	 * if we don't have any windows, we can just walk through all the
	 * cookies.
	 */
	if (dma->dp_window == NULL) {
		/* for each cookie */
		for (i = 0; i < dma->dp_sglinfo.si_sgl_size; i++) {
			/*
			 * if the faulted address is within the physical
			 * address range of the cookie, return
			 * DDI_FM_NONFATAL.
			 */
			if ((fault_addr >= dma->dp_cookies[i].dmac_laddress) &&
			    (fault_addr <= (dma->dp_cookies[i].dmac_laddress +
			    dma->dp_cookies[i].dmac_size))) {
				return (DDI_FM_NONFATAL);
			}
		}

		/* fault_addr not within this DMA handle */
		return (DDI_FM_UNKNOWN);
	}

	/* we have multiple windows, walk through each window */
	for (i = 0; i < hp->dmai_nwin; i++) {
		window = &dma->dp_window[i];

		/* Go through all the cookies in the window */
		for (j = 0; j < window->wd_cookie_cnt; j++) {

			start_addr = window->wd_first_cookie[j].dmac_laddress;
			csize = window->wd_first_cookie[j].dmac_size;

			/*
			 * if we are trimming the first cookie in the window,
			 * and this is the first cookie, adjust the start
			 * address and size of the cookie to account for the
			 * trim.
			 */
			if (window->wd_trim.tr_trim_first && (j == 0)) {
				start_addr = window->wd_trim.tr_first_paddr;
				csize = window->wd_trim.tr_first_size;
			}

			/*
			 * if we are trimming the last cookie in the window,
			 * and this is the last cookie, adjust the start
			 * address and size of the cookie to account for the
			 * trim.
			 */
			if (window->wd_trim.tr_trim_last &&
			    (j == (window->wd_cookie_cnt - 1))) {
				start_addr = window->wd_trim.tr_last_paddr;
				csize = window->wd_trim.tr_last_size;
			}

			end_addr = start_addr + csize;

			/*
			 * if the faulted address is within the physical
			 * address of the cookie, return DDI_FM_NONFATAL.
			 */
			if ((fault_addr >= start_addr) &&
			    (fault_addr <= end_addr)) {
				return (DDI_FM_NONFATAL);
			}
		}
	}

	/* fault_addr not within this DMA handle */
	return (DDI_FM_UNKNOWN);
}
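
/*
 * Illustrative note (assumed example values): the containment test above is
 * plain interval arithmetic. For a cookie with dmac_laddress 0x1000 and
 * dmac_size 0x2000, the covered range is [0x1000, 0x3000], so a fault
 * address of 0x2800 reports DDI_FM_NONFATAL while 0x3800 falls through to
 * DDI_FM_UNKNOWN (assuming no other cookie or window covers it).
 */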
static int
rootnex_quiesce(dev_info_t *dip)
{
#if defined(__amd64) && !defined(__xpv)
	return (immu_quiesce());
#else
	return (DDI_SUCCESS);
#endif
}
immu_physmem_update(uint64_t addr, uint64_t size)