4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright (c) 1992, 2010, Oracle and/or its affiliates. All rights reserved.
25 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
29 * x86 root nexus driver
32 #include <sys/sysmacros.h>
34 #include <sys/autoconf.h>
35 #include <sys/sysmacros.h>
36 #include <sys/debug.h>
38 #include <sys/ddidmareq.h>
39 #include <sys/promif.h>
40 #include <sys/devops.h>
42 #include <sys/cmn_err.h>
44 #include <vm/seg_kmem.h>
45 #include <vm/seg_dev.h>
51 #include <sys/avintr.h>
52 #include <sys/errno.h>
53 #include <sys/modctl.h>
54 #include <sys/ddi_impldefs.h>
55 #include <sys/sunddi.h>
56 #include <sys/sunndi.h>
57 #include <sys/mach_intr.h>
59 #include <sys/ontrap.h>
60 #include <sys/atomic.h>
62 #include <sys/rootnex.h>
63 #include <vm/hat_i86.h>
64 #include <sys/ddifm.h>
65 #include <sys/ddi_isa.h>
69 #include <sys/bootinfo.h>
70 #include <sys/hypervisor.h>
71 #include <sys/bootconf.h>
72 #include <vm/kboot_mmu.h>
75 #if defined(__amd64) && !defined(__xpv)
81 * enable/disable extra checking of function parameters. Useful for debugging
85 int rootnex_alloc_check_parms
= 1;
86 int rootnex_bind_check_parms
= 1;
87 int rootnex_bind_check_inuse
= 1;
88 int rootnex_unbind_verify_buffer
= 0;
89 int rootnex_sync_check_parms
= 1;
91 int rootnex_alloc_check_parms
= 0;
92 int rootnex_bind_check_parms
= 0;
93 int rootnex_bind_check_inuse
= 0;
94 int rootnex_unbind_verify_buffer
= 0;
95 int rootnex_sync_check_parms
= 0;
98 boolean_t rootnex_dmar_not_setup
;
100 /* Master Abort and Target Abort panic flag */
101 int rootnex_fm_ma_ta_panic_flag
= 0;
103 /* Semi-temporary patchables to phase in bug fixes, test drivers, etc. */
104 int rootnex_bind_fail
= 1;
105 int rootnex_bind_warn
= 1;
106 uint8_t *rootnex_warn_list
;
107 /* bitmasks for rootnex_warn_list. Up to 8 different warnings with uint8_t */
108 #define ROOTNEX_BIND_WARNING (0x1 << 0)
111 * revert back to old broken behavior of always sync'ing entire copy buffer.
112 * This is useful if be have a buggy driver which doesn't correctly pass in
113 * the offset and size into ddi_dma_sync().
115 int rootnex_sync_ignore_params
= 0;
118 * For the 64-bit kernel, pre-alloc enough cookies for a 256K buffer plus 1
119 * page for alignment. For the 32-bit kernel, pre-alloc enough cookies for a
120 * 64K buffer plus 1 page for alignment (we have less kernel space in a 32-bit
121 * kernel). Allocate enough windows to handle a 256K buffer w/ at least 65
122 * sgllen DMA engine, and enough copybuf buffer state pages to handle 2 pages
123 * (< 8K). We will still need to allocate the copy buffer during bind though
124 * (if we need one). These can only be modified in /etc/system before rootnex
128 int rootnex_prealloc_cookies
= 65;
129 int rootnex_prealloc_windows
= 4;
130 int rootnex_prealloc_copybuf
= 2;
132 int rootnex_prealloc_cookies
= 33;
133 int rootnex_prealloc_windows
= 4;
134 int rootnex_prealloc_copybuf
= 2;
137 /* driver global state */
138 static rootnex_state_t
*rootnex_state
;
141 /* shortcut to rootnex counters */
142 static uint64_t *rootnex_cnt
;
146 * XXX - does x86 even need these or are they left over from the SPARC days?
148 /* statically defined integer/boolean properties for the root node */
149 static rootnex_intprop_t rootnex_intprp
[] = {
150 { "PAGESIZE", PAGESIZE
},
151 { "MMU_PAGESIZE", MMU_PAGESIZE
},
152 { "MMU_PAGEOFFSET", MMU_PAGEOFFSET
},
153 { DDI_RELATIVE_ADDRESSING
, 1 },
155 #define NROOT_INTPROPS (sizeof (rootnex_intprp) / sizeof (rootnex_intprop_t))
158 * If we're dom0, we're using a real device so we need to load
159 * the cookies with MFNs instead of PFNs.
162 typedef maddr_t rootnex_addr_t
;
163 #define ROOTNEX_PADDR_TO_RBASE(pa) \
164 (DOMAIN_IS_INITDOMAIN(xen_info) ? pa_to_ma(pa) : (pa))
166 typedef paddr_t rootnex_addr_t
;
167 #define ROOTNEX_PADDR_TO_RBASE(pa) (pa)
171 char _depends_on
[] = "misc/iommulib misc/acpica";
174 static struct cb_ops rootnex_cb_ops
= {
177 nodev
, /* strategy */
186 nochpoll
, /* chpoll */
187 ddi_prop_op
, /* cb_prop_op */
188 NULL
, /* struct streamtab */
189 D_NEW
| D_MP
| D_HOTPLUG
, /* compatibility flags */
191 nodev
, /* cb_aread */
192 nodev
/* cb_awrite */
195 static int rootnex_map(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_map_req_t
*mp
,
196 off_t offset
, off_t len
, caddr_t
*vaddrp
);
197 static int rootnex_map_fault(dev_info_t
*dip
, dev_info_t
*rdip
,
198 struct hat
*hat
, struct seg
*seg
, caddr_t addr
,
199 struct devpage
*dp
, pfn_t pfn
, uint_t prot
, uint_t lock
);
200 static int rootnex_dma_map(dev_info_t
*dip
, dev_info_t
*rdip
,
201 struct ddi_dma_req
*dmareq
, ddi_dma_handle_t
*handlep
);
202 static int rootnex_dma_allochdl(dev_info_t
*dip
, dev_info_t
*rdip
,
203 ddi_dma_attr_t
*attr
, int (*waitfp
)(caddr_t
), caddr_t arg
,
204 ddi_dma_handle_t
*handlep
);
205 static int rootnex_dma_freehdl(dev_info_t
*dip
, dev_info_t
*rdip
,
206 ddi_dma_handle_t handle
);
207 static int rootnex_dma_bindhdl(dev_info_t
*dip
, dev_info_t
*rdip
,
208 ddi_dma_handle_t handle
, struct ddi_dma_req
*dmareq
,
209 ddi_dma_cookie_t
*cookiep
, uint_t
*ccountp
);
210 static int rootnex_dma_unbindhdl(dev_info_t
*dip
, dev_info_t
*rdip
,
211 ddi_dma_handle_t handle
);
212 static int rootnex_dma_sync(dev_info_t
*dip
, dev_info_t
*rdip
,
213 ddi_dma_handle_t handle
, off_t off
, size_t len
, uint_t cache_flags
);
214 static int rootnex_dma_win(dev_info_t
*dip
, dev_info_t
*rdip
,
215 ddi_dma_handle_t handle
, uint_t win
, off_t
*offp
, size_t *lenp
,
216 ddi_dma_cookie_t
*cookiep
, uint_t
*ccountp
);
217 static int rootnex_dma_mctl(dev_info_t
*dip
, dev_info_t
*rdip
,
218 ddi_dma_handle_t handle
, enum ddi_dma_ctlops request
,
219 off_t
*offp
, size_t *lenp
, caddr_t
*objp
, uint_t cache_flags
);
220 static int rootnex_ctlops(dev_info_t
*dip
, dev_info_t
*rdip
,
221 ddi_ctl_enum_t ctlop
, void *arg
, void *result
);
222 static int rootnex_fm_init(dev_info_t
*dip
, dev_info_t
*tdip
, int tcap
,
223 ddi_iblock_cookie_t
*ibc
);
224 static int rootnex_intr_ops(dev_info_t
*pdip
, dev_info_t
*rdip
,
225 ddi_intr_op_t intr_op
, ddi_intr_handle_impl_t
*hdlp
, void *result
);
226 static int rootnex_alloc_intr_fixed(dev_info_t
*, ddi_intr_handle_impl_t
*,
228 static int rootnex_free_intr_fixed(dev_info_t
*, ddi_intr_handle_impl_t
*);
230 static int rootnex_coredma_allochdl(dev_info_t
*dip
, dev_info_t
*rdip
,
231 ddi_dma_attr_t
*attr
, int (*waitfp
)(caddr_t
), caddr_t arg
,
232 ddi_dma_handle_t
*handlep
);
233 static int rootnex_coredma_freehdl(dev_info_t
*dip
, dev_info_t
*rdip
,
234 ddi_dma_handle_t handle
);
235 static int rootnex_coredma_bindhdl(dev_info_t
*dip
, dev_info_t
*rdip
,
236 ddi_dma_handle_t handle
, struct ddi_dma_req
*dmareq
,
237 ddi_dma_cookie_t
*cookiep
, uint_t
*ccountp
);
238 static int rootnex_coredma_unbindhdl(dev_info_t
*dip
, dev_info_t
*rdip
,
239 ddi_dma_handle_t handle
);
240 #if defined(__amd64) && !defined(__xpv)
241 static void rootnex_coredma_reset_cookies(dev_info_t
*dip
,
242 ddi_dma_handle_t handle
);
243 static int rootnex_coredma_get_cookies(dev_info_t
*dip
, ddi_dma_handle_t handle
,
244 ddi_dma_cookie_t
**cookiepp
, uint_t
*ccountp
);
245 static int rootnex_coredma_set_cookies(dev_info_t
*dip
, ddi_dma_handle_t handle
,
246 ddi_dma_cookie_t
*cookiep
, uint_t ccount
);
247 static int rootnex_coredma_clear_cookies(dev_info_t
*dip
,
248 ddi_dma_handle_t handle
);
249 static int rootnex_coredma_get_sleep_flags(ddi_dma_handle_t handle
);
251 static int rootnex_coredma_sync(dev_info_t
*dip
, dev_info_t
*rdip
,
252 ddi_dma_handle_t handle
, off_t off
, size_t len
, uint_t cache_flags
);
253 static int rootnex_coredma_win(dev_info_t
*dip
, dev_info_t
*rdip
,
254 ddi_dma_handle_t handle
, uint_t win
, off_t
*offp
, size_t *lenp
,
255 ddi_dma_cookie_t
*cookiep
, uint_t
*ccountp
);
257 #if defined(__amd64) && !defined(__xpv)
258 static int rootnex_coredma_hdl_setprivate(dev_info_t
*dip
, dev_info_t
*rdip
,
259 ddi_dma_handle_t handle
, void *v
);
260 static void *rootnex_coredma_hdl_getprivate(dev_info_t
*dip
, dev_info_t
*rdip
,
261 ddi_dma_handle_t handle
);
265 static struct bus_ops rootnex_bus_ops
= {
273 rootnex_dma_allochdl
,
276 rootnex_dma_unbindhdl
,
282 i_ddi_rootnex_get_eventcookie
,
283 i_ddi_rootnex_add_eventcall
,
284 i_ddi_rootnex_remove_eventcall
,
285 i_ddi_rootnex_post_event
,
286 0, /* bus_intr_ctl */
288 0, /* bus_unconfig */
289 rootnex_fm_init
, /* bus_fm_init */
290 NULL
, /* bus_fm_fini */
291 NULL
, /* bus_fm_access_enter */
292 NULL
, /* bus_fm_access_exit */
294 rootnex_intr_ops
/* bus_intr_op */
297 static int rootnex_attach(dev_info_t
*dip
, ddi_attach_cmd_t cmd
);
298 static int rootnex_detach(dev_info_t
*dip
, ddi_detach_cmd_t cmd
);
299 static int rootnex_quiesce(dev_info_t
*dip
);
301 static struct dev_ops rootnex_ops
= {
313 rootnex_quiesce
, /* quiesce */
316 static struct modldrv rootnex_modldrv
= {
322 static struct modlinkage rootnex_modlinkage
= {
324 (void *)&rootnex_modldrv
,
328 #if defined(__amd64) && !defined(__xpv)
329 static iommulib_nexops_t iommulib_nexops
= {
330 IOMMU_NEXOPS_VERSION
,
331 "Rootnex IOMMU ops Vers 1.1",
333 rootnex_coredma_allochdl
,
334 rootnex_coredma_freehdl
,
335 rootnex_coredma_bindhdl
,
336 rootnex_coredma_unbindhdl
,
337 rootnex_coredma_reset_cookies
,
338 rootnex_coredma_get_cookies
,
339 rootnex_coredma_set_cookies
,
340 rootnex_coredma_clear_cookies
,
341 rootnex_coredma_get_sleep_flags
,
342 rootnex_coredma_sync
,
346 rootnex_coredma_hdl_setprivate
,
347 rootnex_coredma_hdl_getprivate
354 extern struct seg_ops segdev_ops
;
355 extern int ignore_hardware_nodes
; /* force flag from ddi_impl.c */
357 extern int ddi_map_debug_flag
;
358 #define ddi_map_debug if (ddi_map_debug_flag) prom_printf
360 extern void i86_pp_map(page_t
*pp
, caddr_t kaddr
);
361 extern void i86_va_map(caddr_t vaddr
, struct as
*asp
, caddr_t kaddr
);
362 extern int (*psm_intr_ops
)(dev_info_t
*, ddi_intr_handle_impl_t
*,
363 psm_intr_op_t
, int *);
364 extern int impl_ddi_sunbus_initchild(dev_info_t
*dip
);
365 extern void impl_ddi_sunbus_removechild(dev_info_t
*dip
);
368 * Use device arena to use for device control register mappings.
369 * Various kernel memory walkers (debugger, dtrace) need to know
370 * to avoid this address range to prevent undesired device activity.
372 extern void *device_arena_alloc(size_t size
, int vm_flag
);
373 extern void device_arena_free(void * vaddr
, size_t size
);
379 static int rootnex_dma_init();
380 static void rootnex_add_props(dev_info_t
*);
381 static int rootnex_ctl_reportdev(dev_info_t
*dip
);
382 static struct intrspec
*rootnex_get_ispec(dev_info_t
*rdip
, int inum
);
383 static int rootnex_map_regspec(ddi_map_req_t
*mp
, caddr_t
*vaddrp
);
384 static int rootnex_unmap_regspec(ddi_map_req_t
*mp
, caddr_t
*vaddrp
);
385 static int rootnex_map_handle(ddi_map_req_t
*mp
);
386 static void rootnex_clean_dmahdl(ddi_dma_impl_t
*hp
);
387 static int rootnex_valid_alloc_parms(ddi_dma_attr_t
*attr
, uint_t maxsegsize
);
388 static int rootnex_valid_bind_parms(ddi_dma_req_t
*dmareq
,
389 ddi_dma_attr_t
*attr
);
390 static void rootnex_get_sgl(ddi_dma_obj_t
*dmar_object
, ddi_dma_cookie_t
*sgl
,
391 rootnex_sglinfo_t
*sglinfo
);
392 static void rootnex_dvma_get_sgl(ddi_dma_obj_t
*dmar_object
,
393 ddi_dma_cookie_t
*sgl
, rootnex_sglinfo_t
*sglinfo
);
394 static int rootnex_bind_slowpath(ddi_dma_impl_t
*hp
, struct ddi_dma_req
*dmareq
,
395 rootnex_dma_t
*dma
, ddi_dma_attr_t
*attr
, ddi_dma_obj_t
*dmao
, int kmflag
);
396 static int rootnex_setup_copybuf(ddi_dma_impl_t
*hp
, struct ddi_dma_req
*dmareq
,
397 rootnex_dma_t
*dma
, ddi_dma_attr_t
*attr
);
398 static void rootnex_teardown_copybuf(rootnex_dma_t
*dma
);
399 static int rootnex_setup_windows(ddi_dma_impl_t
*hp
, rootnex_dma_t
*dma
,
400 ddi_dma_attr_t
*attr
, ddi_dma_obj_t
*dmao
, int kmflag
);
401 static void rootnex_teardown_windows(rootnex_dma_t
*dma
);
402 static void rootnex_init_win(ddi_dma_impl_t
*hp
, rootnex_dma_t
*dma
,
403 rootnex_window_t
*window
, ddi_dma_cookie_t
*cookie
, off_t cur_offset
);
404 static void rootnex_setup_cookie(ddi_dma_obj_t
*dmar_object
,
405 rootnex_dma_t
*dma
, ddi_dma_cookie_t
*cookie
, off_t cur_offset
,
406 size_t *copybuf_used
, page_t
**cur_pp
);
407 static int rootnex_sgllen_window_boundary(ddi_dma_impl_t
*hp
,
408 rootnex_dma_t
*dma
, rootnex_window_t
**windowp
, ddi_dma_cookie_t
*cookie
,
409 ddi_dma_attr_t
*attr
, off_t cur_offset
);
410 static int rootnex_copybuf_window_boundary(ddi_dma_impl_t
*hp
,
411 rootnex_dma_t
*dma
, rootnex_window_t
**windowp
,
412 ddi_dma_cookie_t
*cookie
, off_t cur_offset
, size_t *copybuf_used
);
413 static int rootnex_maxxfer_window_boundary(ddi_dma_impl_t
*hp
,
414 rootnex_dma_t
*dma
, rootnex_window_t
**windowp
, ddi_dma_cookie_t
*cookie
);
415 static int rootnex_valid_sync_parms(ddi_dma_impl_t
*hp
, rootnex_window_t
*win
,
416 off_t offset
, size_t size
, uint_t cache_flags
);
417 static int rootnex_verify_buffer(rootnex_dma_t
*dma
);
418 static int rootnex_dma_check(dev_info_t
*dip
, const void *handle
,
419 const void *comp_addr
, const void *not_used
);
420 static boolean_t
rootnex_need_bounce_seg(ddi_dma_obj_t
*dmar_object
,
421 rootnex_sglinfo_t
*sglinfo
);
422 static struct as
*rootnex_get_as(ddi_dma_obj_t
*dmar_object
);
432 rootnex_state
= NULL
;
433 return (mod_install(&rootnex_modlinkage
));
442 _info(struct modinfo
*modinfop
)
444 return (mod_info(&rootnex_modlinkage
, modinfop
));
464 rootnex_attach(dev_info_t
*dip
, ddi_attach_cmd_t cmd
)
473 #if defined(__amd64) && !defined(__xpv)
474 return (immu_unquiesce());
476 return (DDI_SUCCESS
);
479 return (DDI_FAILURE
);
483 * We should only have one instance of rootnex. Save it away since we
484 * don't have an easy way to get it back later.
486 ASSERT(rootnex_state
== NULL
);
487 rootnex_state
= kmem_zalloc(sizeof (rootnex_state_t
), KM_SLEEP
);
489 rootnex_state
->r_dip
= dip
;
490 rootnex_state
->r_err_ibc
= (ddi_iblock_cookie_t
)ipltospl(15);
491 rootnex_state
->r_reserved_msg_printed
= B_FALSE
;
493 rootnex_cnt
= &rootnex_state
->r_counters
[0];
497 * Set minimum fm capability level for i86pc platforms and then
498 * initialize error handling. Since we're the rootnex, we don't
499 * care what's returned in the fmcap field.
501 ddi_system_fmcap
= DDI_FM_EREPORT_CAPABLE
| DDI_FM_ERRCB_CAPABLE
|
502 DDI_FM_ACCCHK_CAPABLE
| DDI_FM_DMACHK_CAPABLE
;
503 fmcap
= ddi_system_fmcap
;
504 ddi_fm_init(dip
, &fmcap
, &rootnex_state
->r_err_ibc
);
506 /* initialize DMA related state */
507 e
= rootnex_dma_init();
508 if (e
!= DDI_SUCCESS
) {
509 kmem_free(rootnex_state
, sizeof (rootnex_state_t
));
510 return (DDI_FAILURE
);
513 /* Add static root node properties */
514 rootnex_add_props(dip
);
516 /* since we can't call ddi_report_dev() */
517 cmn_err(CE_CONT
, "?root nexus = %s\n", ddi_get_name(dip
));
519 /* Initialize rootnex event handle */
520 i_ddi_rootnex_init_events(dip
);
522 #if defined(__amd64) && !defined(__xpv)
523 e
= iommulib_nexus_register(dip
, &iommulib_nexops
,
524 &rootnex_state
->r_iommulib_handle
);
526 ASSERT(e
== DDI_SUCCESS
);
529 return (DDI_SUCCESS
);
539 rootnex_detach(dev_info_t
*dip
, ddi_detach_cmd_t cmd
)
543 #if defined(__amd64) && !defined(__xpv)
544 return (immu_quiesce());
546 return (DDI_SUCCESS
);
549 return (DDI_FAILURE
);
568 * size of our cookie/window/copybuf state needed in dma bind that we
569 * pre-alloc in dma_alloc_handle
571 rootnex_state
->r_prealloc_cookies
= rootnex_prealloc_cookies
;
572 rootnex_state
->r_prealloc_size
=
573 (rootnex_state
->r_prealloc_cookies
* sizeof (ddi_dma_cookie_t
)) +
574 (rootnex_prealloc_windows
* sizeof (rootnex_window_t
)) +
575 (rootnex_prealloc_copybuf
* sizeof (rootnex_pgmap_t
));
578 * setup DDI DMA handle kmem cache, align each handle on 64 bytes,
579 * allocate 16 extra bytes for struct pointer alignment
580 * (p->dmai_private & dma->dp_prealloc_buffer)
582 bufsize
= sizeof (ddi_dma_impl_t
) + sizeof (rootnex_dma_t
) +
583 rootnex_state
->r_prealloc_size
+ 0x10;
584 rootnex_state
->r_dmahdl_cache
= kmem_cache_create("rootnex_dmahdl",
585 bufsize
, 64, NULL
, NULL
, NULL
, NULL
, NULL
, 0);
586 if (rootnex_state
->r_dmahdl_cache
== NULL
) {
587 return (DDI_FAILURE
);
591 * allocate array to track which major numbers we have printed warnings
594 rootnex_warn_list
= kmem_zalloc(devcnt
* sizeof (*rootnex_warn_list
),
597 return (DDI_SUCCESS
);
602 * rootnex_add_props()
606 rootnex_add_props(dev_info_t
*dip
)
608 rootnex_intprop_t
*rpp
;
611 /* Add static integer/boolean properties to the root node */
612 rpp
= rootnex_intprp
;
613 for (i
= 0; i
< NROOT_INTPROPS
; i
++) {
614 (void) e_ddi_prop_update_int(DDI_DEV_T_NONE
, dip
,
615 rpp
[i
].prop_name
, rpp
[i
].prop_value
);
622 * *************************
623 * ctlops related routines
624 * *************************
633 rootnex_ctlops(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_ctl_enum_t ctlop
,
634 void *arg
, void *result
)
637 struct ddi_parent_private_data
*pdp
;
640 case DDI_CTLOPS_DMAPMAPC
:
642 * Return 'partial' to indicate that dma mapping
643 * has to be done in the main MMU.
645 return (DDI_DMA_PARTIAL
);
647 case DDI_CTLOPS_BTOP
:
649 * Convert byte count input to physical page units.
650 * (byte counts that are not a page-size multiple
653 *(ulong_t
*)result
= btop(*(ulong_t
*)arg
);
654 return (DDI_SUCCESS
);
656 case DDI_CTLOPS_PTOB
:
658 * Convert size in physical pages to bytes
660 *(ulong_t
*)result
= ptob(*(ulong_t
*)arg
);
661 return (DDI_SUCCESS
);
663 case DDI_CTLOPS_BTOPR
:
665 * Convert byte count input to physical page units
666 * (byte counts that are not a page-size multiple
669 *(ulong_t
*)result
= btopr(*(ulong_t
*)arg
);
670 return (DDI_SUCCESS
);
672 case DDI_CTLOPS_INITCHILD
:
673 return (impl_ddi_sunbus_initchild(arg
));
675 case DDI_CTLOPS_UNINITCHILD
:
676 impl_ddi_sunbus_removechild(arg
);
677 return (DDI_SUCCESS
);
679 case DDI_CTLOPS_REPORTDEV
:
680 return (rootnex_ctl_reportdev(rdip
));
682 case DDI_CTLOPS_IOMIN
:
684 * Nothing to do here but reflect back..
686 return (DDI_SUCCESS
);
688 case DDI_CTLOPS_REGSIZE
:
689 case DDI_CTLOPS_NREGS
:
692 case DDI_CTLOPS_SIDDEV
:
693 if (ndi_dev_is_prom_node(rdip
))
694 return (DDI_SUCCESS
);
695 if (ndi_dev_is_persistent_node(rdip
))
696 return (DDI_SUCCESS
);
697 return (DDI_FAILURE
);
699 case DDI_CTLOPS_POWER
:
700 return ((*pm_platform_power
)((power_req_t
*)arg
));
702 case DDI_CTLOPS_RESERVED0
: /* Was DDI_CTLOPS_NINTRS, obsolete */
703 case DDI_CTLOPS_RESERVED1
: /* Was DDI_CTLOPS_POKE_INIT, obsolete */
704 case DDI_CTLOPS_RESERVED2
: /* Was DDI_CTLOPS_POKE_FLUSH, obsolete */
705 case DDI_CTLOPS_RESERVED3
: /* Was DDI_CTLOPS_POKE_FINI, obsolete */
706 case DDI_CTLOPS_RESERVED4
: /* Was DDI_CTLOPS_INTR_HILEVEL, obsolete */
707 case DDI_CTLOPS_RESERVED5
: /* Was DDI_CTLOPS_XLATE_INTRS, obsolete */
708 if (!rootnex_state
->r_reserved_msg_printed
) {
709 rootnex_state
->r_reserved_msg_printed
= B_TRUE
;
710 cmn_err(CE_WARN
, "Failing ddi_ctlops call(s) for "
711 "1 or more reserved/obsolete operations.");
713 return (DDI_FAILURE
);
716 return (DDI_FAILURE
);
719 * The rest are for "hardware" properties
721 if ((pdp
= ddi_get_parent_data(rdip
)) == NULL
)
722 return (DDI_FAILURE
);
724 if (ctlop
== DDI_CTLOPS_NREGS
) {
726 *ptr
= pdp
->par_nreg
;
728 off_t
*size
= (off_t
*)result
;
732 if (n
>= pdp
->par_nreg
) {
733 return (DDI_FAILURE
);
735 *size
= (off_t
)pdp
->par_reg
[n
].regspec_size
;
737 return (DDI_SUCCESS
);
742 * rootnex_ctl_reportdev()
746 rootnex_ctl_reportdev(dev_info_t
*dev
)
748 int i
, n
, len
, f_len
= 0;
751 buf
= kmem_alloc(REPORTDEV_BUFSIZE
, KM_SLEEP
);
752 f_len
+= snprintf(buf
, REPORTDEV_BUFSIZE
,
753 "%s%d at root", ddi_driver_name(dev
), ddi_get_instance(dev
));
756 for (i
= 0; i
< sparc_pd_getnreg(dev
); i
++) {
758 struct regspec
*rp
= sparc_pd_getreg(dev
, i
);
761 f_len
+= snprintf(buf
+ len
, REPORTDEV_BUFSIZE
- len
,
764 f_len
+= snprintf(buf
+ len
, REPORTDEV_BUFSIZE
- len
,
768 switch (rp
->regspec_bustype
) {
771 f_len
+= snprintf(buf
+ len
, REPORTDEV_BUFSIZE
- len
,
772 "%s 0x%x", DEVI_EISA_NEXNAME
, rp
->regspec_addr
);
776 f_len
+= snprintf(buf
+ len
, REPORTDEV_BUFSIZE
- len
,
777 "%s 0x%x", DEVI_ISA_NEXNAME
, rp
->regspec_addr
);
781 f_len
+= snprintf(buf
+ len
, REPORTDEV_BUFSIZE
- len
,
782 "space %x offset %x",
783 rp
->regspec_bustype
, rp
->regspec_addr
);
788 for (i
= 0, n
= sparc_pd_getnintr(dev
); i
< n
; i
++) {
792 f_len
+= snprintf(buf
+ len
, REPORTDEV_BUFSIZE
- len
,
796 pri
= INT_IPL(sparc_pd_getintr(dev
, i
)->intrspec_pri
);
797 f_len
+= snprintf(buf
+ len
, REPORTDEV_BUFSIZE
- len
,
798 " sparc ipl %d", pri
);
802 if (f_len
+ 1 >= REPORTDEV_BUFSIZE
) {
803 cmn_err(CE_NOTE
, "next message is truncated: "
804 "printed length 1024, real length %d", f_len
);
807 cmn_err(CE_CONT
, "?%s\n", buf
);
808 kmem_free(buf
, REPORTDEV_BUFSIZE
);
809 return (DDI_SUCCESS
);
824 rootnex_map(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_map_req_t
*mp
, off_t offset
,
825 off_t len
, caddr_t
*vaddrp
)
827 struct regspec
*rp
, tmp_reg
;
828 ddi_map_req_t mr
= *mp
; /* Get private copy of request */
833 switch (mp
->map_op
) {
834 case DDI_MO_MAP_LOCKED
:
836 case DDI_MO_MAP_HANDLE
:
840 cmn_err(CE_WARN
, "rootnex_map: unimplemented map op %d.",
842 #endif /* DDI_MAP_DEBUG */
843 return (DDI_ME_UNIMPLEMENTED
);
846 if (mp
->map_flags
& DDI_MF_USER_MAPPING
) {
848 cmn_err(CE_WARN
, "rootnex_map: unimplemented map type: user.");
849 #endif /* DDI_MAP_DEBUG */
850 return (DDI_ME_UNIMPLEMENTED
);
854 * First, if given an rnumber, convert it to a regspec...
855 * (Presumably, this is on behalf of a child of the root node?)
858 if (mp
->map_type
== DDI_MT_RNUMBER
) {
860 int rnumber
= mp
->map_obj
.rnumber
;
862 static char *out_of_range
=
863 "rootnex_map: Out of range rnumber <%d>, device <%s>";
864 #endif /* DDI_MAP_DEBUG */
866 rp
= i_ddi_rnumber_to_regspec(rdip
, rnumber
);
869 cmn_err(CE_WARN
, out_of_range
, rnumber
,
871 #endif /* DDI_MAP_DEBUG */
872 return (DDI_ME_RNUMBER_RANGE
);
876 * Convert the given ddi_map_req_t from rnumber to regspec...
879 mp
->map_type
= DDI_MT_REGSPEC
;
884 * Adjust offset and length correspnding to called values...
885 * XXX: A non-zero length means override the one in the regspec
886 * XXX: (regardless of what's in the parent's range?)
889 tmp_reg
= *(mp
->map_obj
.rp
); /* Preserve underlying data */
890 rp
= mp
->map_obj
.rp
= &tmp_reg
; /* Use tmp_reg in request */
893 cmn_err(CE_CONT
, "rootnex: <%s,%s> <0x%x, 0x%x, 0x%d> offset %d len %d "
894 "handle 0x%x\n", ddi_get_name(dip
), ddi_get_name(rdip
),
895 rp
->regspec_bustype
, rp
->regspec_addr
, rp
->regspec_size
, offset
,
896 len
, mp
->map_handlep
);
897 #endif /* DDI_MAP_DEBUG */
900 * I/O or memory mapping:
902 * <bustype=0, addr=x, len=x>: memory
903 * <bustype=1, addr=x, len=x>: i/o
904 * <bustype>1, addr=0, len=x>: x86-compatibility i/o
907 if (rp
->regspec_bustype
> 1 && rp
->regspec_addr
!= 0) {
908 cmn_err(CE_WARN
, "<%s,%s> invalid register spec"
909 " <0x%x, 0x%x, 0x%x>", ddi_get_name(dip
),
910 ddi_get_name(rdip
), rp
->regspec_bustype
,
911 rp
->regspec_addr
, rp
->regspec_size
);
912 return (DDI_ME_INVAL
);
915 if (rp
->regspec_bustype
> 1 && rp
->regspec_addr
== 0) {
917 * compatibility i/o mapping
919 rp
->regspec_bustype
+= (uint_t
)offset
;
922 * Normal memory or i/o mapping
924 rp
->regspec_addr
+= (uint_t
)offset
;
928 rp
->regspec_size
= (uint_t
)len
;
931 cmn_err(CE_CONT
, " <%s,%s> <0x%x, 0x%x, 0x%d> offset %d "
932 "len %d handle 0x%x\n", ddi_get_name(dip
), ddi_get_name(rdip
),
933 rp
->regspec_bustype
, rp
->regspec_addr
, rp
->regspec_size
,
934 offset
, len
, mp
->map_handlep
);
935 #endif /* DDI_MAP_DEBUG */
938 * Apply any parent ranges at this level, if applicable.
939 * (This is where nexus specific regspec translation takes place.
940 * Use of this function is implicit agreement that translation is
941 * provided via ddi_apply_range.)
945 ddi_map_debug("applying range of parent <%s> to child <%s>...\n",
946 ddi_get_name(dip
), ddi_get_name(rdip
));
947 #endif /* DDI_MAP_DEBUG */
949 if ((error
= i_ddi_apply_range(dip
, rdip
, mp
->map_obj
.rp
)) != 0)
952 switch (mp
->map_op
) {
953 case DDI_MO_MAP_LOCKED
:
956 * Set up the locked down kernel mapping to the regspec...
959 return (rootnex_map_regspec(mp
, vaddrp
));
967 return (rootnex_unmap_regspec(mp
, vaddrp
));
969 case DDI_MO_MAP_HANDLE
:
971 return (rootnex_map_handle(mp
));
974 return (DDI_ME_UNIMPLEMENTED
);
980 * rootnex_map_fault()
982 * fault in mappings for requestors
986 rootnex_map_fault(dev_info_t
*dip
, dev_info_t
*rdip
, struct hat
*hat
,
987 struct seg
*seg
, caddr_t addr
, struct devpage
*dp
, pfn_t pfn
, uint_t prot
,
992 ddi_map_debug("rootnex_map_fault: address <%x> pfn <%x>", addr
, pfn
);
993 ddi_map_debug(" Seg <%s>\n",
994 seg
->s_ops
== &segdev_ops
? "segdev" :
995 seg
== &kvseg
? "segkmem" : "NONE!");
996 #endif /* DDI_MAP_DEBUG */
999 * This is all terribly broken, but it is a start
1001 * XXX Note that this test means that segdev_ops
1002 * must be exported from seg_dev.c.
1003 * XXX What about devices with their own segment drivers?
1005 if (seg
->s_ops
== &segdev_ops
) {
1006 struct segdev_data
*sdp
= (struct segdev_data
*)seg
->s_data
;
1010 * This is one plausible interpretation of
1011 * a null hat i.e. use the first hat on the
1012 * address space hat list which by convention is
1013 * the hat of the system MMU. At alternative
1014 * would be to panic .. this might well be better ..
1016 ASSERT(AS_READ_HELD(seg
->s_as
, &seg
->s_as
->a_lock
));
1017 hat
= seg
->s_as
->a_hat
;
1018 cmn_err(CE_NOTE
, "rootnex_map_fault: nil hat");
1020 hat_devload(hat
, addr
, MMU_PAGESIZE
, pfn
, prot
| sdp
->hat_attr
,
1021 (lock
? HAT_LOAD_LOCK
: HAT_LOAD
));
1022 } else if (seg
== &kvseg
&& dp
== NULL
) {
1023 hat_devload(kas
.a_hat
, addr
, MMU_PAGESIZE
, pfn
, prot
,
1026 return (DDI_FAILURE
);
1027 return (DDI_SUCCESS
);
1032 * rootnex_map_regspec()
1033 * we don't support mapping of I/O cards above 4Gb
1036 rootnex_map_regspec(ddi_map_req_t
*mp
, caddr_t
*vaddrp
)
1038 rootnex_addr_t rbase
;
1040 uint_t npages
, pgoffset
;
1044 uint_t hat_acc_flags
;
1047 rp
= mp
->map_obj
.rp
;
1048 hp
= mp
->map_handlep
;
1050 #ifdef DDI_MAP_DEBUG
1052 "rootnex_map_regspec: <0x%x 0x%x 0x%x> handle 0x%x\n",
1053 rp
->regspec_bustype
, rp
->regspec_addr
,
1054 rp
->regspec_size
, mp
->map_handlep
);
1055 #endif /* DDI_MAP_DEBUG */
1058 * I/O or memory mapping
1060 * <bustype=0, addr=x, len=x>: memory
1061 * <bustype=1, addr=x, len=x>: i/o
1062 * <bustype>1, addr=0, len=x>: x86-compatibility i/o
1065 if (rp
->regspec_bustype
> 1 && rp
->regspec_addr
!= 0) {
1066 cmn_err(CE_WARN
, "rootnex: invalid register spec"
1067 " <0x%x, 0x%x, 0x%x>", rp
->regspec_bustype
,
1068 rp
->regspec_addr
, rp
->regspec_size
);
1069 return (DDI_FAILURE
);
1072 if (rp
->regspec_bustype
!= 0) {
1074 * I/O space - needs a handle.
1077 return (DDI_FAILURE
);
1079 ap
= (ddi_acc_impl_t
*)hp
->ah_platform_private
;
1080 ap
->ahi_acc_attr
|= DDI_ACCATTR_IO_SPACE
;
1081 impl_acc_hdl_init(hp
);
1083 if (mp
->map_flags
& DDI_MF_DEVICE_MAPPING
) {
1084 #ifdef DDI_MAP_DEBUG
1085 ddi_map_debug("rootnex_map_regspec: mmap() "
1086 "to I/O space is not supported.\n");
1087 #endif /* DDI_MAP_DEBUG */
1088 return (DDI_ME_INVAL
);
1091 * 1275-compliant vs. compatibility i/o mapping
1094 (rp
->regspec_bustype
> 1 && rp
->regspec_addr
== 0) ?
1095 ((caddr_t
)(uintptr_t)rp
->regspec_bustype
) :
1096 ((caddr_t
)(uintptr_t)rp
->regspec_addr
);
1098 if (DOMAIN_IS_INITDOMAIN(xen_info
)) {
1099 hp
->ah_pfn
= xen_assign_pfn(
1100 mmu_btop((ulong_t
)rp
->regspec_addr
&
1103 hp
->ah_pfn
= mmu_btop(
1104 (ulong_t
)rp
->regspec_addr
& MMU_PAGEMASK
);
1107 hp
->ah_pfn
= mmu_btop((ulong_t
)rp
->regspec_addr
&
1110 hp
->ah_pnum
= mmu_btopr(rp
->regspec_size
+
1111 (ulong_t
)rp
->regspec_addr
& MMU_PAGEOFFSET
);
1114 #ifdef DDI_MAP_DEBUG
1116 "rootnex_map_regspec: \"Mapping\" %d bytes I/O space at 0x%x\n",
1117 rp
->regspec_size
, *vaddrp
);
1118 #endif /* DDI_MAP_DEBUG */
1119 return (DDI_SUCCESS
);
1129 * hp->ah_acc.devacc_attr_endian_flags.
1131 switch (hp
->ah_acc
.devacc_attr_dataorder
) {
1132 case DDI_STRICTORDER_ACC
:
1133 hat_acc_flags
= HAT_STRICTORDER
;
1135 case DDI_UNORDERED_OK_ACC
:
1136 hat_acc_flags
= HAT_UNORDERED_OK
;
1138 case DDI_MERGING_OK_ACC
:
1139 hat_acc_flags
= HAT_MERGING_OK
;
1141 case DDI_LOADCACHING_OK_ACC
:
1142 hat_acc_flags
= HAT_LOADCACHING_OK
;
1144 case DDI_STORECACHING_OK_ACC
:
1145 hat_acc_flags
= HAT_STORECACHING_OK
;
1148 ap
= (ddi_acc_impl_t
*)hp
->ah_platform_private
;
1149 ap
->ahi_acc_attr
|= DDI_ACCATTR_CPU_VADDR
;
1150 impl_acc_hdl_init(hp
);
1151 hp
->ah_hat_flags
= hat_acc_flags
;
1153 hat_acc_flags
= HAT_STRICTORDER
;
1156 rbase
= (rootnex_addr_t
)(rp
->regspec_addr
& MMU_PAGEMASK
);
1159 * If we're dom0, we're using a real device so we need to translate
1162 if (DOMAIN_IS_INITDOMAIN(xen_info
)) {
1163 pbase
= pfn_to_pa(xen_assign_pfn(mmu_btop(rbase
)));
1170 pgoffset
= (ulong_t
)rp
->regspec_addr
& MMU_PAGEOFFSET
;
1172 if (rp
->regspec_size
== 0) {
1173 #ifdef DDI_MAP_DEBUG
1174 ddi_map_debug("rootnex_map_regspec: zero regspec_size\n");
1175 #endif /* DDI_MAP_DEBUG */
1176 return (DDI_ME_INVAL
);
1179 if (mp
->map_flags
& DDI_MF_DEVICE_MAPPING
) {
1180 /* extra cast to make gcc happy */
1181 *vaddrp
= (caddr_t
)((uintptr_t)mmu_btop(pbase
));
1183 npages
= mmu_btopr(rp
->regspec_size
+ pgoffset
);
1185 #ifdef DDI_MAP_DEBUG
1186 ddi_map_debug("rootnex_map_regspec: Mapping %d pages "
1187 "physical %llx", npages
, pbase
);
1188 #endif /* DDI_MAP_DEBUG */
1190 cvaddr
= device_arena_alloc(ptob(npages
), VM_NOSLEEP
);
1192 return (DDI_ME_NORESOURCES
);
1195 * Now map in the pages we've allocated...
1197 hat_devload(kas
.a_hat
, cvaddr
, mmu_ptob(npages
),
1198 mmu_btop(pbase
), mp
->map_prot
| hat_acc_flags
,
1200 *vaddrp
= (caddr_t
)cvaddr
+ pgoffset
;
1202 /* save away pfn and npages for FMA */
1203 hp
= mp
->map_handlep
;
1205 hp
->ah_pfn
= mmu_btop(pbase
);
1206 hp
->ah_pnum
= npages
;
1210 #ifdef DDI_MAP_DEBUG
1211 ddi_map_debug("at virtual 0x%x\n", *vaddrp
);
1212 #endif /* DDI_MAP_DEBUG */
1213 return (DDI_SUCCESS
);
1218 * rootnex_unmap_regspec()
1222 rootnex_unmap_regspec(ddi_map_req_t
*mp
, caddr_t
*vaddrp
)
1224 caddr_t addr
= (caddr_t
)*vaddrp
;
1225 uint_t npages
, pgoffset
;
1228 if (mp
->map_flags
& DDI_MF_DEVICE_MAPPING
)
1231 rp
= mp
->map_obj
.rp
;
1233 if (rp
->regspec_size
== 0) {
1234 #ifdef DDI_MAP_DEBUG
1235 ddi_map_debug("rootnex_unmap_regspec: zero regspec_size\n");
1236 #endif /* DDI_MAP_DEBUG */
1237 return (DDI_ME_INVAL
);
1241 * I/O or memory mapping:
1243 * <bustype=0, addr=x, len=x>: memory
1244 * <bustype=1, addr=x, len=x>: i/o
1245 * <bustype>1, addr=0, len=x>: x86-compatibility i/o
1247 if (rp
->regspec_bustype
!= 0) {
1249 * This is I/O space, which requires no particular
1250 * processing on unmap since it isn't mapped in the
1253 return (DDI_SUCCESS
);
1259 pgoffset
= (uintptr_t)addr
& MMU_PAGEOFFSET
;
1260 npages
= mmu_btopr(rp
->regspec_size
+ pgoffset
);
1261 hat_unload(kas
.a_hat
, addr
- pgoffset
, ptob(npages
), HAT_UNLOAD_UNLOCK
);
1262 device_arena_free(addr
- pgoffset
, ptob(npages
));
1265 * Destroy the pointer - the mapping has logically gone
1269 return (DDI_SUCCESS
);
1274 * rootnex_map_handle()
1278 rootnex_map_handle(ddi_map_req_t
*mp
)
1280 rootnex_addr_t rbase
;
1286 rp
= mp
->map_obj
.rp
;
1288 #ifdef DDI_MAP_DEBUG
1290 "rootnex_map_handle: <0x%x 0x%x 0x%x> handle 0x%x\n",
1291 rp
->regspec_bustype
, rp
->regspec_addr
,
1292 rp
->regspec_size
, mp
->map_handlep
);
1293 #endif /* DDI_MAP_DEBUG */
1296 * I/O or memory mapping:
1298 * <bustype=0, addr=x, len=x>: memory
1299 * <bustype=1, addr=x, len=x>: i/o
1300 * <bustype>1, addr=0, len=x>: x86-compatibility i/o
1302 if (rp
->regspec_bustype
!= 0) {
1304 * This refers to I/O space, and we don't support "mapping"
1305 * I/O space to a user.
1307 return (DDI_FAILURE
);
1311 * Set up the hat_flags for the mapping.
1313 hp
= mp
->map_handlep
;
1315 switch (hp
->ah_acc
.devacc_attr_endian_flags
) {
1316 case DDI_NEVERSWAP_ACC
:
1317 hp
->ah_hat_flags
= HAT_NEVERSWAP
| HAT_STRICTORDER
;
1319 case DDI_STRUCTURE_LE_ACC
:
1320 hp
->ah_hat_flags
= HAT_STRUCTURE_LE
;
1322 case DDI_STRUCTURE_BE_ACC
:
1323 return (DDI_FAILURE
);
1325 return (DDI_REGS_ACC_CONFLICT
);
1328 switch (hp
->ah_acc
.devacc_attr_dataorder
) {
1329 case DDI_STRICTORDER_ACC
:
1331 case DDI_UNORDERED_OK_ACC
:
1332 hp
->ah_hat_flags
|= HAT_UNORDERED_OK
;
1334 case DDI_MERGING_OK_ACC
:
1335 hp
->ah_hat_flags
|= HAT_MERGING_OK
;
1337 case DDI_LOADCACHING_OK_ACC
:
1338 hp
->ah_hat_flags
|= HAT_LOADCACHING_OK
;
1340 case DDI_STORECACHING_OK_ACC
:
1341 hp
->ah_hat_flags
|= HAT_STORECACHING_OK
;
1344 return (DDI_FAILURE
);
1347 rbase
= (rootnex_addr_t
)rp
->regspec_addr
&
1348 (~(rootnex_addr_t
)MMU_PAGEOFFSET
);
1349 pgoffset
= (ulong_t
)rp
->regspec_addr
& MMU_PAGEOFFSET
;
1351 if (rp
->regspec_size
== 0)
1352 return (DDI_ME_INVAL
);
1356 * If we're dom0, we're using a real device so we need to translate
1359 if (DOMAIN_IS_INITDOMAIN(xen_info
)) {
1360 pbase
= pfn_to_pa(xen_assign_pfn(mmu_btop(rbase
))) |
1361 (rbase
& MMU_PAGEOFFSET
);
1369 hp
->ah_pfn
= mmu_btop(pbase
);
1370 hp
->ah_pnum
= mmu_btopr(rp
->regspec_size
+ pgoffset
);
1372 return (DDI_SUCCESS
);
1378 * ************************
1379 * interrupt related code
1380 * ************************
1384 * rootnex_intr_ops()
1385 * bus_intr_op() function for interrupt support
1389 rootnex_intr_ops(dev_info_t
*pdip
, dev_info_t
*rdip
, ddi_intr_op_t intr_op
,
1390 ddi_intr_handle_impl_t
*hdlp
, void *result
)
1392 struct intrspec
*ispec
;
1394 DDI_INTR_NEXDBG((CE_CONT
,
1395 "rootnex_intr_ops: pdip = %p, rdip = %p, intr_op = %x, hdlp = %p\n",
1396 (void *)pdip
, (void *)rdip
, intr_op
, (void *)hdlp
));
1398 /* Process the interrupt operation */
1400 case DDI_INTROP_GETCAP
:
1401 /* First check with pcplusmp */
1402 if (psm_intr_ops
== NULL
)
1403 return (DDI_FAILURE
);
1405 if ((*psm_intr_ops
)(rdip
, hdlp
, PSM_INTR_OP_GET_CAP
, result
)) {
1407 return (DDI_FAILURE
);
1410 case DDI_INTROP_SETCAP
:
1411 if (psm_intr_ops
== NULL
)
1412 return (DDI_FAILURE
);
1414 if ((*psm_intr_ops
)(rdip
, hdlp
, PSM_INTR_OP_SET_CAP
, result
))
1415 return (DDI_FAILURE
);
1417 case DDI_INTROP_ALLOC
:
1418 ASSERT(hdlp
->ih_type
== DDI_INTR_TYPE_FIXED
);
1419 return (rootnex_alloc_intr_fixed(rdip
, hdlp
, result
));
1420 case DDI_INTROP_FREE
:
1421 ASSERT(hdlp
->ih_type
== DDI_INTR_TYPE_FIXED
);
1422 return (rootnex_free_intr_fixed(rdip
, hdlp
));
1423 case DDI_INTROP_GETPRI
:
1424 if ((ispec
= rootnex_get_ispec(rdip
, hdlp
->ih_inum
)) == NULL
)
1425 return (DDI_FAILURE
);
1426 *(int *)result
= ispec
->intrspec_pri
;
1428 case DDI_INTROP_SETPRI
:
1429 /* Validate the interrupt priority passed to us */
1430 if (*(int *)result
> LOCK_LEVEL
)
1431 return (DDI_FAILURE
);
1433 /* Ensure that PSM is all initialized and ispec is ok */
1434 if ((psm_intr_ops
== NULL
) ||
1435 ((ispec
= rootnex_get_ispec(rdip
, hdlp
->ih_inum
)) == NULL
))
1436 return (DDI_FAILURE
);
1438 /* Change the priority */
1439 if ((*psm_intr_ops
)(rdip
, hdlp
, PSM_INTR_OP_SET_PRI
, result
) ==
1441 return (DDI_FAILURE
);
1443 /* update the ispec with the new priority */
1444 ispec
->intrspec_pri
= *(int *)result
;
1446 case DDI_INTROP_ADDISR
:
1447 if ((ispec
= rootnex_get_ispec(rdip
, hdlp
->ih_inum
)) == NULL
)
1448 return (DDI_FAILURE
);
1449 ispec
->intrspec_func
= hdlp
->ih_cb_func
;
1451 case DDI_INTROP_REMISR
:
1452 if ((ispec
= rootnex_get_ispec(rdip
, hdlp
->ih_inum
)) == NULL
)
1453 return (DDI_FAILURE
);
1454 ispec
->intrspec_func
= (uint_t (*)()) 0;
1456 case DDI_INTROP_ENABLE
:
1457 if ((ispec
= rootnex_get_ispec(rdip
, hdlp
->ih_inum
)) == NULL
)
1458 return (DDI_FAILURE
);
1460 /* Call psmi to translate irq with the dip */
1461 if (psm_intr_ops
== NULL
)
1462 return (DDI_FAILURE
);
1464 ((ihdl_plat_t
*)hdlp
->ih_private
)->ip_ispecp
= ispec
;
1465 if ((*psm_intr_ops
)(rdip
, hdlp
, PSM_INTR_OP_XLATE_VECTOR
,
1466 (int *)&hdlp
->ih_vector
) == PSM_FAILURE
)
1467 return (DDI_FAILURE
);
1469 /* Add the interrupt handler */
1470 if (!add_avintr((void *)hdlp
, ispec
->intrspec_pri
,
1471 hdlp
->ih_cb_func
, DEVI(rdip
)->devi_name
, hdlp
->ih_vector
,
1472 hdlp
->ih_cb_arg1
, hdlp
->ih_cb_arg2
, NULL
, rdip
))
1473 return (DDI_FAILURE
);
1475 case DDI_INTROP_DISABLE
:
1476 if ((ispec
= rootnex_get_ispec(rdip
, hdlp
->ih_inum
)) == NULL
)
1477 return (DDI_FAILURE
);
1479 /* Call psm_ops() to translate irq with the dip */
1480 if (psm_intr_ops
== NULL
)
1481 return (DDI_FAILURE
);
1483 ((ihdl_plat_t
*)hdlp
->ih_private
)->ip_ispecp
= ispec
;
1484 (void) (*psm_intr_ops
)(rdip
, hdlp
,
1485 PSM_INTR_OP_XLATE_VECTOR
, (int *)&hdlp
->ih_vector
);
1487 /* Remove the interrupt handler */
1488 rem_avintr((void *)hdlp
, ispec
->intrspec_pri
,
1489 hdlp
->ih_cb_func
, hdlp
->ih_vector
);
1491 case DDI_INTROP_SETMASK
:
1492 if (psm_intr_ops
== NULL
)
1493 return (DDI_FAILURE
);
1495 if ((*psm_intr_ops
)(rdip
, hdlp
, PSM_INTR_OP_SET_MASK
, NULL
))
1496 return (DDI_FAILURE
);
1498 case DDI_INTROP_CLRMASK
:
1499 if (psm_intr_ops
== NULL
)
1500 return (DDI_FAILURE
);
1502 if ((*psm_intr_ops
)(rdip
, hdlp
, PSM_INTR_OP_CLEAR_MASK
, NULL
))
1503 return (DDI_FAILURE
);
1505 case DDI_INTROP_GETPENDING
:
1506 if (psm_intr_ops
== NULL
)
1507 return (DDI_FAILURE
);
1509 if ((*psm_intr_ops
)(rdip
, hdlp
, PSM_INTR_OP_GET_PENDING
,
1512 return (DDI_FAILURE
);
1515 case DDI_INTROP_NAVAIL
:
1516 case DDI_INTROP_NINTRS
:
1517 *(int *)result
= i_ddi_get_intx_nintrs(rdip
);
1518 if (*(int *)result
== 0) {
1520 * Special case for 'pcic' driver' only. This driver
1521 * driver is a child of 'isa' and 'rootnex' drivers.
1523 * See detailed comments on this in the function
1524 * rootnex_get_ispec().
1526 * Children of 'pcic' send 'NINITR' request all the
1527 * way to rootnex driver. But, the 'pdp->par_nintr'
1528 * field may not initialized. So, we fake it here
1529 * to return 1 (a la what PCMCIA nexus does).
1531 if (strcmp(ddi_get_name(rdip
), "pcic") == 0)
1534 return (DDI_FAILURE
);
1537 case DDI_INTROP_SUPPORTED_TYPES
:
1538 *(int *)result
= DDI_INTR_TYPE_FIXED
; /* Always ... */
1541 return (DDI_FAILURE
);
1544 return (DDI_SUCCESS
);
1549 * rootnex_get_ispec()
1550 * convert an interrupt number to an interrupt specification.
1551 * The interrupt number determines which interrupt spec will be
1552 * returned if more than one exists.
1554 * Look into the parent private data area of the 'rdip' to find out
1555 * the interrupt specification. First check to make sure there is
1556 * one that matchs "inumber" and then return a pointer to it.
1558 * Return NULL if one could not be found.
1560 * NOTE: This is needed for rootnex_intr_ops()
1562 static struct intrspec
*
1563 rootnex_get_ispec(dev_info_t
*rdip
, int inum
)
1565 struct ddi_parent_private_data
*pdp
= ddi_get_parent_data(rdip
);
1568 * Special case handling for drivers that provide their own
1569 * intrspec structures instead of relying on the DDI framework.
1571 * A broken hardware driver in ON could potentially provide its
1572 * own intrspec structure, instead of relying on the hardware.
1573 * If these drivers are children of 'rootnex' then we need to
1574 * continue to provide backward compatibility to them here.
1576 * Following check is a special case for 'pcic' driver which
1577 * was found to have broken hardwre andby provides its own intrspec.
1579 * Verbatim comments from this driver are shown here:
1580 * "Don't use the ddi_add_intr since we don't have a
1581 * default intrspec in all cases."
1583 * Since an 'ispec' may not be always created for it,
1584 * check for that and create one if so.
1586 * NOTE: Currently 'pcic' is the only driver found to do this.
1588 if (!pdp
->par_intr
&& strcmp(ddi_get_name(rdip
), "pcic") == 0) {
1590 pdp
->par_intr
= kmem_zalloc(sizeof (struct intrspec
) *
1591 pdp
->par_nintr
, KM_SLEEP
);
1594 /* Validate the interrupt number */
1595 if (inum
>= pdp
->par_nintr
)
1598 /* Get the interrupt structure pointer and return that */
1599 return ((struct intrspec
*)&pdp
->par_intr
[inum
]);
1603 * Allocate interrupt vector for FIXED (legacy) type.
1606 rootnex_alloc_intr_fixed(dev_info_t
*rdip
, ddi_intr_handle_impl_t
*hdlp
,
1609 struct intrspec
*ispec
;
1610 ddi_intr_handle_impl_t info_hdl
;
1613 apic_get_type_t type_info
;
1615 if (psm_intr_ops
== NULL
)
1616 return (DDI_FAILURE
);
1618 if ((ispec
= rootnex_get_ispec(rdip
, hdlp
->ih_inum
)) == NULL
)
1619 return (DDI_FAILURE
);
1622 * If the PSM module is "APIX" then pass the request for it
1623 * to allocate the vector now.
1625 bzero(&info_hdl
, sizeof (ddi_intr_handle_impl_t
));
1626 info_hdl
.ih_private
= &type_info
;
1627 if ((*psm_intr_ops
)(NULL
, &info_hdl
, PSM_INTR_OP_APIC_TYPE
, NULL
) ==
1628 PSM_SUCCESS
&& strcmp(type_info
.avgi_type
, APIC_APIX_NAME
) == 0) {
1629 if (hdlp
->ih_private
== NULL
) { /* allocate phdl structure */
1631 i_ddi_alloc_intr_phdl(hdlp
);
1633 ((ihdl_plat_t
*)hdlp
->ih_private
)->ip_ispecp
= ispec
;
1634 ret
= (*psm_intr_ops
)(rdip
, hdlp
,
1635 PSM_INTR_OP_ALLOC_VECTORS
, result
);
1636 if (free_phdl
) { /* free up the phdl structure */
1638 i_ddi_free_intr_phdl(hdlp
);
1639 hdlp
->ih_private
= NULL
;
1643 * No APIX module; fall back to the old scheme where the
1644 * interrupt vector is allocated during ddi_enable_intr() call.
1646 hdlp
->ih_pri
= ispec
->intrspec_pri
;
1647 *(int *)result
= hdlp
->ih_scratch1
;
1655 * Free up interrupt vector for FIXED (legacy) type.
1658 rootnex_free_intr_fixed(dev_info_t
*rdip
, ddi_intr_handle_impl_t
*hdlp
)
1660 struct intrspec
*ispec
;
1661 struct ddi_parent_private_data
*pdp
;
1662 ddi_intr_handle_impl_t info_hdl
;
1664 apic_get_type_t type_info
;
1666 if (psm_intr_ops
== NULL
)
1667 return (DDI_FAILURE
);
1670 * If the PSM module is "APIX" then pass the request for it
1671 * to free up the vector now.
1673 bzero(&info_hdl
, sizeof (ddi_intr_handle_impl_t
));
1674 info_hdl
.ih_private
= &type_info
;
1675 if ((*psm_intr_ops
)(NULL
, &info_hdl
, PSM_INTR_OP_APIC_TYPE
, NULL
) ==
1676 PSM_SUCCESS
&& strcmp(type_info
.avgi_type
, APIC_APIX_NAME
) == 0) {
1677 if ((ispec
= rootnex_get_ispec(rdip
, hdlp
->ih_inum
)) == NULL
)
1678 return (DDI_FAILURE
);
1679 ((ihdl_plat_t
*)hdlp
->ih_private
)->ip_ispecp
= ispec
;
1680 ret
= (*psm_intr_ops
)(rdip
, hdlp
,
1681 PSM_INTR_OP_FREE_VECTORS
, NULL
);
1684 * No APIX module; fall back to the old scheme where
1685 * the interrupt vector was already freed during
1686 * ddi_disable_intr() call.
1691 pdp
= ddi_get_parent_data(rdip
);
1694 * Special case for 'pcic' driver' only.
1695 * If an intrspec was created for it, clean it up here
1696 * See detailed comments on this in the function
1697 * rootnex_get_ispec().
1699 if (pdp
->par_intr
&& strcmp(ddi_get_name(rdip
), "pcic") == 0) {
1700 kmem_free(pdp
->par_intr
, sizeof (struct intrspec
) *
1703 * Set it to zero; so that
1704 * DDI framework doesn't free it again
1706 pdp
->par_intr
= NULL
;
1715 * ******************
1717 * ******************
1722 rootnex_coredma_allochdl(dev_info_t
*dip
, dev_info_t
*rdip
,
1723 ddi_dma_attr_t
*attr
, int (*waitfp
)(caddr_t
), caddr_t arg
,
1724 ddi_dma_handle_t
*handlep
)
1726 uint64_t maxsegmentsize_ll
;
1727 uint_t maxsegmentsize
;
1736 /* convert our sleep flags */
1737 if (waitfp
== DDI_DMA_SLEEP
) {
1740 kmflag
= KM_NOSLEEP
;
1744 * We try to do only one memory allocation here. We'll do a little
1745 * pointer manipulation later. If the bind ends up taking more than
1746 * our prealloc's space, we'll have to allocate more memory in the
1747 * bind operation. Not great, but much better than before and the
1748 * best we can do with the current bind interfaces.
1750 hp
= kmem_cache_alloc(rootnex_state
->r_dmahdl_cache
, kmflag
);
1752 return (DDI_DMA_NORESOURCES
);
1754 /* Do our pointer manipulation now, align the structures */
1755 hp
->dmai_private
= (void *)(((uintptr_t)hp
+
1756 (uintptr_t)sizeof (ddi_dma_impl_t
) + 0x7) & ~0x7);
1757 dma
= (rootnex_dma_t
*)hp
->dmai_private
;
1758 dma
->dp_prealloc_buffer
= (uchar_t
*)(((uintptr_t)dma
+
1759 sizeof (rootnex_dma_t
) + 0x7) & ~0x7);
1761 /* setup the handle */
1762 rootnex_clean_dmahdl(hp
);
1763 hp
->dmai_error
.err_fep
= NULL
;
1764 hp
->dmai_error
.err_cf
= NULL
;
1766 dma
->dp_sglinfo
.si_flags
= attr
->dma_attr_flags
;
1767 dma
->dp_sglinfo
.si_min_addr
= attr
->dma_attr_addr_lo
;
1770 * The BOUNCE_ON_SEG workaround is not needed when an IOMMU
1771 * is being used. Set the upper limit to the seg value.
1772 * There will be enough DVMA space to always get addresses
1773 * that will match the constraints.
1775 if (IOMMU_USED(rdip
) &&
1776 (attr
->dma_attr_flags
& _DDI_DMA_BOUNCE_ON_SEG
)) {
1777 dma
->dp_sglinfo
.si_max_addr
= attr
->dma_attr_seg
;
1778 dma
->dp_sglinfo
.si_flags
&= ~_DDI_DMA_BOUNCE_ON_SEG
;
1780 dma
->dp_sglinfo
.si_max_addr
= attr
->dma_attr_addr_hi
;
1782 hp
->dmai_minxfer
= attr
->dma_attr_minxfer
;
1783 hp
->dmai_burstsizes
= attr
->dma_attr_burstsizes
;
1784 hp
->dmai_rdip
= rdip
;
1785 hp
->dmai_attr
= *attr
;
1787 if (attr
->dma_attr_seg
>= dma
->dp_sglinfo
.si_max_addr
)
1788 dma
->dp_sglinfo
.si_cancross
= B_FALSE
;
1790 dma
->dp_sglinfo
.si_cancross
= B_TRUE
;
1792 /* we don't need to worry about the SPL since we do a tryenter */
1793 mutex_init(&dma
->dp_mutex
, NULL
, MUTEX_DRIVER
, NULL
);
1796 * Figure out our maximum segment size. If the segment size is greater
1797 * than 4G, we will limit it to (4G - 1) since the max size of a dma
1798 * object (ddi_dma_obj_t.dmao_size) is 32 bits. dma_attr_seg and
1799 * dma_attr_count_max are size-1 type values.
1801 * Maximum segment size is the largest physically contiguous chunk of
1802 * memory that we can return from a bind (i.e. the maximum size of a
1806 /* handle the rollover cases */
1807 seg
= attr
->dma_attr_seg
+ 1;
1808 if (seg
< attr
->dma_attr_seg
) {
1809 seg
= attr
->dma_attr_seg
;
1811 count_max
= attr
->dma_attr_count_max
+ 1;
1812 if (count_max
< attr
->dma_attr_count_max
) {
1813 count_max
= attr
->dma_attr_count_max
;
1817 * granularity may or may not be a power of two. If it isn't, we can't
1818 * use a simple mask.
1820 if (attr
->dma_attr_granular
& (attr
->dma_attr_granular
- 1)) {
1821 dma
->dp_granularity_power_2
= B_FALSE
;
1823 dma
->dp_granularity_power_2
= B_TRUE
;
1827 * maxxfer should be a whole multiple of granularity. If we're going to
1828 * break up a window because we're greater than maxxfer, we might as
1829 * well make sure it's maxxfer is a whole multiple so we don't have to
1830 * worry about triming the window later on for this case.
1832 if (attr
->dma_attr_granular
> 1) {
1833 if (dma
->dp_granularity_power_2
) {
1834 dma
->dp_maxxfer
= attr
->dma_attr_maxxfer
-
1835 (attr
->dma_attr_maxxfer
&
1836 (attr
->dma_attr_granular
- 1));
1838 dma
->dp_maxxfer
= attr
->dma_attr_maxxfer
-
1839 (attr
->dma_attr_maxxfer
% attr
->dma_attr_granular
);
1842 dma
->dp_maxxfer
= attr
->dma_attr_maxxfer
;
1845 maxsegmentsize_ll
= MIN(seg
, dma
->dp_maxxfer
);
1846 maxsegmentsize_ll
= MIN(maxsegmentsize_ll
, count_max
);
1847 if (maxsegmentsize_ll
== 0 || (maxsegmentsize_ll
> 0xFFFFFFFF)) {
1848 maxsegmentsize
= 0xFFFFFFFF;
1850 maxsegmentsize
= maxsegmentsize_ll
;
1852 dma
->dp_sglinfo
.si_max_cookie_size
= maxsegmentsize
;
1853 dma
->dp_sglinfo
.si_segmask
= attr
->dma_attr_seg
;
1855 /* check the ddi_dma_attr arg to make sure it makes a little sense */
1856 if (rootnex_alloc_check_parms
) {
1857 e
= rootnex_valid_alloc_parms(attr
, maxsegmentsize
);
1858 if (e
!= DDI_SUCCESS
) {
1859 ROOTNEX_DPROF_INC(&rootnex_cnt
[ROOTNEX_CNT_ALLOC_FAIL
]);
1860 (void) rootnex_dma_freehdl(dip
, rdip
,
1861 (ddi_dma_handle_t
)hp
);
1866 *handlep
= (ddi_dma_handle_t
)hp
;
1868 ROOTNEX_DPROF_INC(&rootnex_cnt
[ROOTNEX_CNT_ACTIVE_HDLS
]);
1869 ROOTNEX_DPROBE1(rootnex__alloc__handle
, uint64_t,
1870 rootnex_cnt
[ROOTNEX_CNT_ACTIVE_HDLS
]);
1872 return (DDI_SUCCESS
);
1877 * rootnex_dma_allochdl()
1878 * called from ddi_dma_alloc_handle().
1881 rootnex_dma_allochdl(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_dma_attr_t
*attr
,
1882 int (*waitfp
)(caddr_t
), caddr_t arg
, ddi_dma_handle_t
*handlep
)
1884 int retval
= DDI_SUCCESS
;
1885 #if defined(__amd64) && !defined(__xpv)
1887 if (IOMMU_UNITIALIZED(rdip
)) {
1888 retval
= iommulib_nex_open(dip
, rdip
);
1890 if (retval
!= DDI_SUCCESS
&& retval
!= DDI_ENOTSUP
)
1894 if (IOMMU_UNUSED(rdip
)) {
1895 retval
= rootnex_coredma_allochdl(dip
, rdip
, attr
, waitfp
, arg
,
1898 retval
= iommulib_nexdma_allochdl(dip
, rdip
, attr
,
1899 waitfp
, arg
, handlep
);
1902 retval
= rootnex_coredma_allochdl(dip
, rdip
, attr
, waitfp
, arg
,
1906 case DDI_DMA_NORESOURCES
:
1907 if (waitfp
!= DDI_DMA_DONTWAIT
) {
1908 ddi_set_callback(waitfp
, arg
,
1909 &rootnex_state
->r_dvma_call_list_id
);
1913 ndi_fmc_insert(rdip
, DMA_HANDLE
, *handlep
, NULL
);
1923 rootnex_coredma_freehdl(dev_info_t
*dip
, dev_info_t
*rdip
,
1924 ddi_dma_handle_t handle
)
1930 hp
= (ddi_dma_impl_t
*)handle
;
1931 dma
= (rootnex_dma_t
*)hp
->dmai_private
;
1933 /* unbind should have been called first */
1934 ASSERT(!dma
->dp_inuse
);
1936 mutex_destroy(&dma
->dp_mutex
);
1937 kmem_cache_free(rootnex_state
->r_dmahdl_cache
, hp
);
1939 ROOTNEX_DPROF_DEC(&rootnex_cnt
[ROOTNEX_CNT_ACTIVE_HDLS
]);
1940 ROOTNEX_DPROBE1(rootnex__free__handle
, uint64_t,
1941 rootnex_cnt
[ROOTNEX_CNT_ACTIVE_HDLS
]);
1943 return (DDI_SUCCESS
);
1947 * rootnex_dma_freehdl()
1948 * called from ddi_dma_free_handle().
1951 rootnex_dma_freehdl(dev_info_t
*dip
, dev_info_t
*rdip
, ddi_dma_handle_t handle
)
1955 ndi_fmc_remove(rdip
, DMA_HANDLE
, handle
);
1956 #if defined(__amd64) && !defined(__xpv)
1957 if (IOMMU_USED(rdip
))
1958 ret
= iommulib_nexdma_freehdl(dip
, rdip
, handle
);
1961 ret
= rootnex_coredma_freehdl(dip
, rdip
, handle
);
1963 if (rootnex_state
->r_dvma_call_list_id
)
1964 ddi_run_callback(&rootnex_state
->r_dvma_call_list_id
);
1971 rootnex_coredma_bindhdl(dev_info_t
*dip
, dev_info_t
*rdip
,
1972 ddi_dma_handle_t handle
, struct ddi_dma_req
*dmareq
,
1973 ddi_dma_cookie_t
*cookiep
, uint_t
*ccountp
)
1975 rootnex_sglinfo_t
*sinfo
;
1976 ddi_dma_obj_t
*dmao
;
1977 #if defined(__amd64) && !defined(__xpv)
1978 struct dvmaseg
*dvs
;
1979 ddi_dma_cookie_t
*cookie
;
1981 ddi_dma_attr_t
*attr
;
1988 hp
= (ddi_dma_impl_t
*)handle
;
1989 dma
= (rootnex_dma_t
*)hp
->dmai_private
;
1990 dmao
= &dma
->dp_dma
;
1991 sinfo
= &dma
->dp_sglinfo
;
1992 attr
= &hp
->dmai_attr
;
1994 /* convert the sleep flags */
1995 if (dmareq
->dmar_fp
== DDI_DMA_SLEEP
) {
1996 dma
->dp_sleep_flags
= kmflag
= KM_SLEEP
;
1998 dma
->dp_sleep_flags
= kmflag
= KM_NOSLEEP
;
2001 hp
->dmai_rflags
= dmareq
->dmar_flags
& DMP_DDIFLAGS
;
2004 * This is useful for debugging a driver. Not as useful in a production
2005 * system. The only time this will fail is if you have a driver bug.
2007 if (rootnex_bind_check_inuse
) {
2009 * No one else should ever have this lock unless someone else
2010 * is trying to use this handle. So contention on the lock
2011 * is the same as inuse being set.
2013 e
= mutex_tryenter(&dma
->dp_mutex
);
2015 ROOTNEX_DPROF_INC(&rootnex_cnt
[ROOTNEX_CNT_BIND_FAIL
]);
2016 return (DDI_DMA_INUSE
);
2018 if (dma
->dp_inuse
) {
2019 mutex_exit(&dma
->dp_mutex
);
2020 ROOTNEX_DPROF_INC(&rootnex_cnt
[ROOTNEX_CNT_BIND_FAIL
]);
2021 return (DDI_DMA_INUSE
);
2023 dma
->dp_inuse
= B_TRUE
;
2024 mutex_exit(&dma
->dp_mutex
);
2027 /* check the ddi_dma_attr arg to make sure it makes a little sense */
2028 if (rootnex_bind_check_parms
) {
2029 e
= rootnex_valid_bind_parms(dmareq
, attr
);
2030 if (e
!= DDI_SUCCESS
) {
2031 ROOTNEX_DPROF_INC(&rootnex_cnt
[ROOTNEX_CNT_BIND_FAIL
]);
2032 rootnex_clean_dmahdl(hp
);
2037 /* save away the original bind info */
2038 dma
->dp_dma
= dmareq
->dmar_object
;
2040 #if defined(__amd64) && !defined(__xpv)
2041 if (IOMMU_USED(rdip
)) {
2042 dmao
= &dma
->dp_dvma
;
2043 e
= iommulib_nexdma_mapobject(dip
, rdip
, handle
, dmareq
, dmao
);
2046 if (sinfo
->si_cancross
||
2047 dmao
->dmao_obj
.dvma_obj
.dv_nseg
!= 1 ||
2048 dmao
->dmao_size
> sinfo
->si_max_cookie_size
) {
2049 dma
->dp_dvma_used
= B_TRUE
;
2052 sinfo
->si_sgl_size
= 1;
2053 hp
->dmai_rflags
|= DMP_NOSYNC
;
2055 dma
->dp_dvma_used
= B_TRUE
;
2056 dma
->dp_need_to_free_cookie
= B_FALSE
;
2058 dvs
= &dmao
->dmao_obj
.dvma_obj
.dv_seg
[0];
2059 cookie
= hp
->dmai_cookie
= dma
->dp_cookies
=
2060 (ddi_dma_cookie_t
*)dma
->dp_prealloc_buffer
;
2061 cookie
->dmac_laddress
= dvs
->dvs_start
+
2062 dmao
->dmao_obj
.dvma_obj
.dv_off
;
2063 cookie
->dmac_size
= dvs
->dvs_len
;
2064 cookie
->dmac_type
= 0;
2066 ROOTNEX_DPROBE1(rootnex__bind__dvmafast
, dev_info_t
*,
2072 rootnex_clean_dmahdl(hp
);
2079 * Figure out a rough estimate of what maximum number of pages
2080 * this buffer could use (a high estimate of course).
2082 sinfo
->si_max_pages
= mmu_btopr(dma
->dp_dma
.dmao_size
) + 1;
2084 if (dma
->dp_dvma_used
) {
2086 * The number of physical pages is the worst case.
2088 * For DVMA, the worst case is the length divided
2089 * by the maximum cookie length, plus 1. Add to that
2090 * the number of segment boundaries potentially crossed, and
2091 * the additional number of DVMA segments that was returned.
2093 * In the normal case, for modern devices, si_cancross will
2094 * be false, and dv_nseg will be 1, and the fast path will
2095 * have been taken above.
2097 ncookies
= (dma
->dp_dma
.dmao_size
/ sinfo
->si_max_cookie_size
)
2099 if (sinfo
->si_cancross
)
2101 (dma
->dp_dma
.dmao_size
/ attr
->dma_attr_seg
) + 1;
2102 ncookies
+= (dmao
->dmao_obj
.dvma_obj
.dv_nseg
- 1);
2104 sinfo
->si_max_pages
= MIN(sinfo
->si_max_pages
, ncookies
);
2108 * We'll use the pre-allocated cookies for any bind that will *always*
2109 * fit (more important to be consistent, we don't want to create
2110 * additional degenerate cases).
2112 if (sinfo
->si_max_pages
<= rootnex_state
->r_prealloc_cookies
) {
2113 dma
->dp_cookies
= (ddi_dma_cookie_t
*)dma
->dp_prealloc_buffer
;
2114 dma
->dp_need_to_free_cookie
= B_FALSE
;
2115 ROOTNEX_DPROBE2(rootnex__bind__prealloc
, dev_info_t
*, rdip
,
2116 uint_t
, sinfo
->si_max_pages
);
2119 * For anything larger than that, we'll go ahead and allocate the
2120 * maximum number of pages we expect to see. Hopefuly, we won't be
2121 * seeing this path in the fast path for high performance devices very
2124 * a ddi bind interface that allowed the driver to provide storage to
2125 * the bind interface would speed this case up.
2129 * Save away how much memory we allocated. If we're doing a
2130 * nosleep, the alloc could fail...
2132 dma
->dp_cookie_size
= sinfo
->si_max_pages
*
2133 sizeof (ddi_dma_cookie_t
);
2134 dma
->dp_cookies
= kmem_alloc(dma
->dp_cookie_size
, kmflag
);
2135 if (dma
->dp_cookies
== NULL
) {
2136 ROOTNEX_DPROF_INC(&rootnex_cnt
[ROOTNEX_CNT_BIND_FAIL
]);
2137 rootnex_clean_dmahdl(hp
);
2138 return (DDI_DMA_NORESOURCES
);
2140 dma
->dp_need_to_free_cookie
= B_TRUE
;
2141 ROOTNEX_DPROBE2(rootnex__bind__alloc
, dev_info_t
*, rdip
,
2142 uint_t
, sinfo
->si_max_pages
);
2144 hp
->dmai_cookie
= dma
->dp_cookies
;
2147 * Get the real sgl. rootnex_get_sgl will fill in cookie array while
2148 * looking at the constraints in the dma structure. It will then put
2149 * some additional state about the sgl in the dma struct (i.e. is
2150 * the sgl clean, or do we need to do some munging; how many pages
2151 * need to be copied, etc.)
2153 if (dma
->dp_dvma_used
)
2154 rootnex_dvma_get_sgl(dmao
, dma
->dp_cookies
, &dma
->dp_sglinfo
);
2156 rootnex_get_sgl(dmao
, dma
->dp_cookies
, &dma
->dp_sglinfo
);
2159 ASSERT(sinfo
->si_sgl_size
<= sinfo
->si_max_pages
);
2160 /* if we don't need a copy buffer, we don't need to sync */
2161 if (sinfo
->si_copybuf_req
== 0) {
2162 hp
->dmai_rflags
|= DMP_NOSYNC
;
	/*
	 * if we don't need the copybuf and we don't need to do a partial, we
	 * hit the fast path. All the high performance devices should be trying
	 * to hit this path. To hit this path, a device should be able to reach
	 * all of memory, shouldn't try to bind more than it can transfer, and
	 * the buffer shouldn't require more cookies than the driver/device can
	 * handle (sgllen).
	 */
	if ((sinfo->si_copybuf_req == 0) &&
	    (sinfo->si_sgl_size <= attr->dma_attr_sgllen) &&
	    (dmao->dmao_size < dma->dp_maxxfer)) {
fast:
		/*
		 * If the driver supports FMA, insert the handle in the FMA DMA
		 * handle cache.
		 */
		if (attr->dma_attr_flags & DDI_DMA_FLAGERR)
			hp->dmai_error.err_cf = rootnex_dma_check;

		/*
		 * copy out the first cookie and ccountp, set the cookie
		 * pointer to the second cookie. The first cookie is passed
		 * back on the stack. Additional cookies are accessed via
		 * ddi_dma_nextcookie()
		 */
		*cookiep = dma->dp_cookies[0];
		*ccountp = sinfo->si_sgl_size;
		hp->dmai_cookie++;
		hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
		ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
		ROOTNEX_DPROBE4(rootnex__bind__fast, dev_info_t *, rdip,
		    uint64_t, rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS],
		    uint_t, dmao->dmao_size, uint_t, *ccountp);
		return (DDI_DMA_MAPPED);
	}
	/*
	 * go to the slow path, we may need to alloc more memory, create
	 * multiple windows, and munge up a sgl to make the device happy.
	 */

	/*
	 * With the IOMMU mapobject method used, we should never hit
	 * the slow path. If we do, something is seriously wrong.
	 * Clean up and return an error.
	 */
#if defined(__amd64) && !defined(__xpv)
	if (dma->dp_dvma_used) {
		(void) iommulib_nexdma_unmapobject(dip, rdip, handle,
		    &dma->dp_dvma);
		e = DDI_DMA_NOMAPPING;
	} else {
#endif
		e = rootnex_bind_slowpath(hp, dmareq, dma, attr, &dma->dp_dma,
		    kmflag);
#if defined(__amd64) && !defined(__xpv)
	}
#endif

	if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) {
		if (dma->dp_need_to_free_cookie) {
			kmem_free(dma->dp_cookies, dma->dp_cookie_size);
		}
		ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_BIND_FAIL]);
		rootnex_clean_dmahdl(hp); /* must be after free cookie */
		return (e);
	}
	/*
	 * If the driver supports FMA, insert the handle in the FMA DMA handle
	 * cache.
	 */
	if (attr->dma_attr_flags & DDI_DMA_FLAGERR)
		hp->dmai_error.err_cf = rootnex_dma_check;

	/* if the first window uses the copy buffer, sync it for the device */
	if ((dma->dp_window[dma->dp_current_win].wd_dosync) &&
	    (hp->dmai_rflags & DDI_DMA_WRITE)) {
		(void) rootnex_coredma_sync(dip, rdip, handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
	}
	/*
	 * copy out the first cookie and ccountp, set the cookie pointer to the
	 * second cookie. Make sure the partial flag is set/cleared correctly.
	 * If we have a partial map (i.e. multiple windows), the number of
	 * cookies we return is the number of cookies in the first window.
	 */
	if (e == DDI_DMA_MAPPED) {
		hp->dmai_rflags &= ~DDI_DMA_PARTIAL;
		*ccountp = sinfo->si_sgl_size;
		hp->dmai_nwin = 1;
	} else {
		hp->dmai_rflags |= DDI_DMA_PARTIAL;
		*ccountp = dma->dp_window[dma->dp_current_win].wd_cookie_cnt;
		ASSERT(hp->dmai_nwin <= dma->dp_max_win);
	}
	*cookiep = dma->dp_cookies[0];
	hp->dmai_cookie++;

	ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
	ROOTNEX_DPROBE4(rootnex__bind__slow, dev_info_t *, rdip, uint64_t,
	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS], uint_t,
	    dmao->dmao_size, uint_t, *ccountp);
	return (e);
}
/*
 * rootnex_dma_bindhdl()
 *    called from ddi_dma_addr_bind_handle() and ddi_dma_buf_bind_handle().
 */
static int
rootnex_dma_bindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, struct ddi_dma_req *dmareq,
    ddi_dma_cookie_t *cookiep, uint_t *ccountp)
{
	int ret;

#if defined(__amd64) && !defined(__xpv)
	if (IOMMU_USED(rdip))
		ret = iommulib_nexdma_bindhdl(dip, rdip, handle, dmareq,
		    cookiep, ccountp);
	else
#endif
	ret = rootnex_coredma_bindhdl(dip, rdip, handle, dmareq,
	    cookiep, ccountp);

	if (ret == DDI_DMA_NORESOURCES && dmareq->dmar_fp != DDI_DMA_DONTWAIT) {
		ddi_set_callback(dmareq->dmar_fp, dmareq->dmar_arg,
		    &rootnex_state->r_dvma_call_list_id);
	}

	return (ret);
}
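/*
 * Illustrative caller's view (hypothetical leaf driver code, not part of
 * this file): a bind that lands here typically looks like
 *
 *	ddi_dma_cookie_t cookie;
 *	uint_t ccount, i;
 *
 *	if (ddi_dma_addr_bind_handle(hdl, NULL, kaddr, len,
 *	    DDI_DMA_READ | DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, NULL,
 *	    &cookie, &ccount) == DDI_DMA_MAPPED) {
 *		program_device(&cookie);	// hypothetical helper
 *		for (i = 1; i < ccount; i++) {
 *			ddi_dma_nextcookie(hdl, &cookie);
 *			program_device(&cookie);
 *		}
 *	}
 *
 * The first cookie is returned on the stack; the rest come back through
 * ddi_dma_nextcookie(), which is why the bind paths above advance
 * hp->dmai_cookie to the second cookie before returning.
 */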
/*ARGSUSED*/
static int
rootnex_coredma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *hp;
	rootnex_dma_t *dma;
	int e;

	hp = (ddi_dma_impl_t *)handle;
	dma = (rootnex_dma_t *)hp->dmai_private;

	/* make sure the buffer wasn't free'd before calling unbind */
	if (rootnex_unbind_verify_buffer) {
		e = rootnex_verify_buffer(dma);
		if (e != DDI_SUCCESS) {
			ASSERT(0);
			return (DDI_FAILURE);
		}
	}

	/* sync the current window before unbinding the buffer */
	if (dma->dp_window && dma->dp_window[dma->dp_current_win].wd_dosync &&
	    (hp->dmai_rflags & DDI_DMA_READ)) {
		(void) rootnex_coredma_sync(dip, rdip, handle, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
	}

	/*
	 * cleanup and copy buffer or window state. if we didn't use the copy
	 * buffer or windows, there won't be much to do :-)
	 */
	rootnex_teardown_copybuf(dma);
	rootnex_teardown_windows(dma);

#if defined(__amd64) && !defined(__xpv)
	if (IOMMU_USED(rdip))
		(void) iommulib_nexdma_unmapobject(dip, rdip, handle,
		    &dma->dp_dvma);
#endif

	/*
	 * If we had to allocate space for the worst case sgl (it didn't
	 * fit into our pre-allocated buffer), free that up now
	 */
	if (dma->dp_need_to_free_cookie) {
		kmem_free(dma->dp_cookies, dma->dp_cookie_size);
	}

	/*
	 * clean up the handle so it's ready for the next bind (i.e. if the
	 * handle is reused).
	 */
	rootnex_clean_dmahdl(hp);
	hp->dmai_error.err_cf = NULL;

	ROOTNEX_DPROF_DEC(&rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);
	ROOTNEX_DPROBE1(rootnex__unbind, uint64_t,
	    rootnex_cnt[ROOTNEX_CNT_ACTIVE_BINDS]);

	return (DDI_SUCCESS);
}
/*
 * rootnex_dma_unbindhdl()
 *    called from ddi_dma_unbind_handle()
 */
/*ARGSUSED*/
static int
rootnex_dma_unbindhdl(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	int ret;

#if defined(__amd64) && !defined(__xpv)
	if (IOMMU_USED(rdip))
		ret = iommulib_nexdma_unbindhdl(dip, rdip, handle);
	else
#endif
	ret = rootnex_coredma_unbindhdl(dip, rdip, handle);

	if (rootnex_state->r_dvma_call_list_id)
		ddi_run_callback(&rootnex_state->r_dvma_call_list_id);

	return (ret);
}
#if defined(__amd64) && !defined(__xpv)

static int
rootnex_coredma_get_sleep_flags(ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;

	if (dma->dp_sleep_flags != KM_SLEEP &&
	    dma->dp_sleep_flags != KM_NOSLEEP)
		cmn_err(CE_PANIC, "kmem sleep flags not set in DMA handle");
	return (dma->dp_sleep_flags);
}
/*ARGSUSED*/
static void
rootnex_coredma_reset_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
	rootnex_window_t *window;

	if (dma->dp_window) {
		window = &dma->dp_window[dma->dp_current_win];
		hp->dmai_cookie = window->wd_first_cookie;
	} else {
		hp->dmai_cookie = dma->dp_cookies;
	}
	hp->dmai_cookie++;
}
/*ARGSUSED*/
static int
rootnex_coredma_get_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
    ddi_dma_cookie_t **cookiepp, uint_t *ccountp)
{
	int i;
	int km_flags;
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
	rootnex_window_t *window;
	ddi_dma_cookie_t *cp;
	ddi_dma_cookie_t *cookie;

	ASSERT(*cookiepp == NULL);
	ASSERT(*ccountp == 0);

	if (dma->dp_window) {
		window = &dma->dp_window[dma->dp_current_win];
		cp = window->wd_first_cookie;
		*ccountp = window->wd_cookie_cnt;
	} else {
		cp = dma->dp_cookies;
		*ccountp = dma->dp_sglinfo.si_sgl_size;
	}

	km_flags = rootnex_coredma_get_sleep_flags(handle);
	cookie = kmem_zalloc(sizeof (ddi_dma_cookie_t) * (*ccountp), km_flags);
	if (cookie == NULL) {
		return (DDI_DMA_NORESOURCES);
	}

	for (i = 0; i < *ccountp; i++) {
		cookie[i].dmac_notused = cp[i].dmac_notused;
		cookie[i].dmac_type = cp[i].dmac_type;
		cookie[i].dmac_address = cp[i].dmac_address;
		cookie[i].dmac_size = cp[i].dmac_size;
	}

	*cookiepp = cookie;

	return (DDI_SUCCESS);
}
/*ARGSUSED*/
static int
rootnex_coredma_set_cookies(dev_info_t *dip, ddi_dma_handle_t handle,
    ddi_dma_cookie_t *cookiep, uint_t ccount)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
	rootnex_window_t *window;
	ddi_dma_cookie_t *cur_cookiep;

	ASSERT(cookiep);
	ASSERT(ccount != 0);
	ASSERT(dma->dp_need_to_switch_cookies == B_FALSE);

	if (dma->dp_window) {
		window = &dma->dp_window[dma->dp_current_win];
		dma->dp_saved_cookies = window->wd_first_cookie;
		window->wd_first_cookie = cookiep;
		ASSERT(ccount == window->wd_cookie_cnt);
		cur_cookiep = (hp->dmai_cookie - dma->dp_saved_cookies)
		    + window->wd_first_cookie;
	} else {
		dma->dp_saved_cookies = dma->dp_cookies;
		dma->dp_cookies = cookiep;
		ASSERT(ccount == dma->dp_sglinfo.si_sgl_size);
		cur_cookiep = (hp->dmai_cookie - dma->dp_saved_cookies)
		    + dma->dp_cookies;
	}

	dma->dp_need_to_switch_cookies = B_TRUE;
	hp->dmai_cookie = cur_cookiep;

	return (DDI_SUCCESS);
}
/*ARGSUSED*/
static int
rootnex_coredma_clear_cookies(dev_info_t *dip, ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *hp = (ddi_dma_impl_t *)handle;
	rootnex_dma_t *dma = (rootnex_dma_t *)hp->dmai_private;
	rootnex_window_t *window;
	ddi_dma_cookie_t *cur_cookiep;
	ddi_dma_cookie_t *cookie_array;
	uint_t ccount;

	/* check if cookies have not been switched */
	if (dma->dp_need_to_switch_cookies == B_FALSE)
		return (DDI_SUCCESS);

	ASSERT(dma->dp_saved_cookies);

	if (dma->dp_window) {
		window = &dma->dp_window[dma->dp_current_win];
		cookie_array = window->wd_first_cookie;
		window->wd_first_cookie = dma->dp_saved_cookies;
		dma->dp_saved_cookies = NULL;
		ccount = window->wd_cookie_cnt;
		cur_cookiep = (hp->dmai_cookie - cookie_array)
		    + window->wd_first_cookie;
	} else {
		cookie_array = dma->dp_cookies;
		dma->dp_cookies = dma->dp_saved_cookies;
		dma->dp_saved_cookies = NULL;
		ccount = dma->dp_sglinfo.si_sgl_size;
		cur_cookiep = (hp->dmai_cookie - cookie_array)
		    + dma->dp_cookies;
	}

	kmem_free(cookie_array, sizeof (ddi_dma_cookie_t) * ccount);

	hp->dmai_cookie = cur_cookiep;

	dma->dp_need_to_switch_cookies = B_FALSE;

	return (DDI_SUCCESS);
}

#endif
static struct as *
rootnex_get_as(ddi_dma_obj_t *dmao)
{
	struct as *asp;

	switch (dmao->dmao_type) {
	case DMA_OTYP_VADDR:
	case DMA_OTYP_BUFVADDR:
		asp = dmao->dmao_obj.virt_obj.v_as;
		if (asp == NULL)
			asp = &kas;
		break;
	default:
		asp = NULL;
		break;
	}
	return (asp);
}
/*
 * rootnex_verify_buffer()
 *   verify buffer wasn't free'd
 */
static int
rootnex_verify_buffer(rootnex_dma_t *dma)
{
	page_t **pplist;
	caddr_t vaddr;
	uint_t pcnt;
	uint_t poff;
	page_t *pp;
	char b;
	int i;

	/* Figure out how many pages this buffer occupies */
	if (dma->dp_dma.dmao_type == DMA_OTYP_PAGES) {
		poff = dma->dp_dma.dmao_obj.pp_obj.pp_offset & MMU_PAGEOFFSET;
	} else {
		vaddr = dma->dp_dma.dmao_obj.virt_obj.v_addr;
		poff = (uintptr_t)vaddr & MMU_PAGEOFFSET;
	}
	pcnt = mmu_btopr(dma->dp_dma.dmao_size + poff);

	switch (dma->dp_dma.dmao_type) {
	case DMA_OTYP_PAGES:
		/*
		 * for a linked list of pp's walk through them to make sure
		 * they're locked and not free.
		 */
		pp = dma->dp_dma.dmao_obj.pp_obj.pp_pp;
		for (i = 0; i < pcnt; i++) {
			if (PP_ISFREE(pp) || !PAGE_LOCKED(pp)) {
				return (DDI_FAILURE);
			}
			pp = pp->p_next;
		}
		break;

	case DMA_OTYP_VADDR:
	case DMA_OTYP_BUFVADDR:
		pplist = dma->dp_dma.dmao_obj.virt_obj.v_priv;
		/*
		 * for an array of pp's walk through them to make sure they're
		 * not free. It's possible that they may not be locked.
		 */
		if (pplist != NULL) {
			for (i = 0; i < pcnt; i++) {
				if (PP_ISFREE(pplist[i])) {
					return (DDI_FAILURE);
				}
			}

		/* For a virtual address, try to peek at each page */
		} else {
			if (rootnex_get_as(&dma->dp_dma) == &kas) {
				for (i = 0; i < pcnt; i++) {
					if (ddi_peek8(NULL, vaddr, &b) ==
					    DDI_FAILURE)
						return (DDI_FAILURE);
					vaddr += MMU_PAGESIZE;
				}
			}
		}
		break;

	default:
		cmn_err(CE_PANIC, "rootnex_verify_buffer: bad DMA object");
		break;
	}

	return (DDI_SUCCESS);
}
/*
 * rootnex_clean_dmahdl()
 *    Clean the dma handle. This should be called on a handle alloc and an
 *    unbind handle. Set the handle state to the default settings.
 */
static void
rootnex_clean_dmahdl(ddi_dma_impl_t *hp)
{
	rootnex_dma_t *dma;

	dma = (rootnex_dma_t *)hp->dmai_private;

	hp->dmai_nwin = 0;
	dma->dp_current_cookie = 0;
	dma->dp_copybuf_size = 0;
	dma->dp_window = NULL;
	dma->dp_cbaddr = NULL;
	dma->dp_inuse = B_FALSE;
	dma->dp_dvma_used = B_FALSE;
	dma->dp_need_to_free_cookie = B_FALSE;
	dma->dp_need_to_switch_cookies = B_FALSE;
	dma->dp_saved_cookies = NULL;
	dma->dp_sleep_flags = KM_PANIC;
	dma->dp_need_to_free_window = B_FALSE;
	dma->dp_partial_required = B_FALSE;
	dma->dp_trim_required = B_FALSE;
	dma->dp_sglinfo.si_copybuf_req = 0;
#if !defined(__amd64)
	dma->dp_cb_remaping = B_FALSE;
	dma->dp_kva = NULL;
#endif

	/* FMA related initialization */
	hp->dmai_fault = 0;
	hp->dmai_fault_check = NULL;
	hp->dmai_fault_notify = NULL;
	hp->dmai_error.err_ena = 0;
	hp->dmai_error.err_status = DDI_FM_OK;
	hp->dmai_error.err_expected = DDI_FM_ERR_UNEXPECTED;
	hp->dmai_error.err_ontrap = NULL;
}
/*
 * rootnex_valid_alloc_parms()
 *    Called in ddi_dma_alloc_handle path to validate its parameters.
 */
static int
rootnex_valid_alloc_parms(ddi_dma_attr_t *attr, uint_t maxsegmentsize)
{
	if ((attr->dma_attr_seg < MMU_PAGEOFFSET) ||
	    (attr->dma_attr_count_max < MMU_PAGEOFFSET) ||
	    (attr->dma_attr_granular > MMU_PAGESIZE) ||
	    (attr->dma_attr_maxxfer < MMU_PAGESIZE)) {
		return (DDI_DMA_BADATTR);
	}

	if (attr->dma_attr_addr_hi <= attr->dma_attr_addr_lo) {
		return (DDI_DMA_BADATTR);
	}

	if ((attr->dma_attr_seg & MMU_PAGEOFFSET) != MMU_PAGEOFFSET ||
	    MMU_PAGESIZE & (attr->dma_attr_granular - 1) ||
	    attr->dma_attr_sgllen <= 0) {
		return (DDI_DMA_BADATTR);
	}

	/* We should be able to DMA into every byte offset in a page */
	if (maxsegmentsize < MMU_PAGESIZE) {
		return (DDI_DMA_BADATTR);
	}

	/* if we're bouncing on seg, seg must be <= addr_hi */
	if ((attr->dma_attr_flags & _DDI_DMA_BOUNCE_ON_SEG) &&
	    (attr->dma_attr_seg > attr->dma_attr_addr_hi)) {
		return (DDI_DMA_BADATTR);
	}

	return (DDI_SUCCESS);
}
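/*
 * For example (illustrative values, assuming 4K pages): an attribute set
 * with dma_attr_granular = 0x2000 fails the granularity check above, a
 * dma_attr_seg of 0xFFF0 fails the low-bits test (0xFFF0 & MMU_PAGEOFFSET
 * != MMU_PAGEOFFSET), and dma_attr_sgllen = 0 is rejected outright.
 */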
/*
 * rootnex_valid_bind_parms()
 *    Called in ddi_dma_*_bind_handle path to validate its parameters.
 */
/* ARGSUSED */
static int
rootnex_valid_bind_parms(ddi_dma_req_t *dmareq, ddi_dma_attr_t *attr)
{
#if !defined(__amd64)
	/*
	 * we only support up to a 2G-1 transfer size on 32-bit kernels so
	 * we can track the offset for the obsoleted interfaces.
	 */
	if (dmareq->dmar_object.dmao_size > 0x7FFFFFFF) {
		return (DDI_DMA_TOOBIG);
	}
#endif

	return (DDI_SUCCESS);
}
/*
 * rootnex_need_bounce_seg()
 *    check to see if the buffer lives on both sides of the seg.
 */
static boolean_t
rootnex_need_bounce_seg(ddi_dma_obj_t *dmar_object, rootnex_sglinfo_t *sglinfo)
{
	ddi_dma_atyp_t buftype;
	rootnex_addr_t raddr;
	boolean_t lower_addr;
	boolean_t upper_addr;
	uint64_t offset;
	page_t **pplist;
	uint64_t paddr;
	uint32_t psize;
	uint32_t size;
	caddr_t vaddr;
	uint_t pcnt;
	page_t *pp;

	/* shortcuts */
	pplist = dmar_object->dmao_obj.virt_obj.v_priv;
	vaddr = dmar_object->dmao_obj.virt_obj.v_addr;
	buftype = dmar_object->dmao_type;
	size = dmar_object->dmao_size;

	lower_addr = B_FALSE;
	upper_addr = B_FALSE;
	pcnt = 0;

	/*
	 * Process the first page to handle the initial offset of the buffer.
	 * We'll use the base address we get later when we loop through all
	 * the pages.
	 */
	if (buftype == DMA_OTYP_PAGES) {
		pp = dmar_object->dmao_obj.pp_obj.pp_pp;
		offset = dmar_object->dmao_obj.pp_obj.pp_offset &
		    MMU_PAGEOFFSET;
		paddr = pfn_to_pa(pp->p_pagenum) + offset;
		psize = MIN(size, (MMU_PAGESIZE - offset));
		pp = pp->p_next;
		sglinfo->si_asp = NULL;
	} else if (pplist != NULL) {
		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
		if (sglinfo->si_asp == NULL) {
			sglinfo->si_asp = &kas;
		}
		paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
		paddr += offset;
		psize = MIN(size, (MMU_PAGESIZE - offset));
		pcnt++;
	} else {
		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
		if (sglinfo->si_asp == NULL) {
			sglinfo->si_asp = &kas;
		}
		paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat, vaddr));
		paddr += offset;
		psize = MIN(size, (MMU_PAGESIZE - offset));
		vaddr += psize;
	}

	raddr = ROOTNEX_PADDR_TO_RBASE(paddr);

	if ((raddr + psize) > sglinfo->si_segmask) {
		upper_addr = B_TRUE;
	} else {
		lower_addr = B_TRUE;
	}
	size -= psize;
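	/*
	 * Worked example (illustrative): with si_segmask = 0xFFFFFFFF
	 * (a 4G dma_attr_seg), a buffer with one page at 0xFFFFE000 and a
	 * later page at 0x100001000 has pages on both sides of the
	 * boundary, so the walk below returns B_TRUE and pages above the
	 * seg get bounced through the copy buffer.
	 */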
	/*
	 * Walk through the rest of the pages in the buffer. Track to see
	 * if we have pages on both sides of the segment boundary.
	 */
	while (size > 0) {
		/* partial or full page */
		psize = MIN(size, MMU_PAGESIZE);

		if (buftype == DMA_OTYP_PAGES) {
			/* get the paddr from the page_t */
			ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
			paddr = pfn_to_pa(pp->p_pagenum);
			pp = pp->p_next;
		} else if (pplist != NULL) {
			/* index into the array of page_t's to get the paddr */
			ASSERT(!PP_ISFREE(pplist[pcnt]));
			paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
			pcnt++;
		} else {
			/* call into the VM to get the paddr */
			paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat,
			    vaddr));
			vaddr += psize;
		}

		raddr = ROOTNEX_PADDR_TO_RBASE(paddr);

		if ((raddr + psize) > sglinfo->si_segmask) {
			upper_addr = B_TRUE;
		} else {
			lower_addr = B_TRUE;
		}

		/*
		 * if the buffer lives both above and below the segment
		 * boundary, or the current page is the page immediately
		 * after the segment, we will use a copy/bounce buffer for
		 * all pages > seg.
		 */
		if ((lower_addr && upper_addr) ||
		    (raddr == (sglinfo->si_segmask + 1))) {
			return (B_TRUE);
		}

		size -= psize;
	}

	return (B_FALSE);
}
/*
 * rootnex_get_sgl()
 *    Called in bind fastpath to get the sgl. Most of this will be replaced
 *    with a call to the vm layer when vm2.0 comes around...
 */
static void
rootnex_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
    rootnex_sglinfo_t *sglinfo)
{
	ddi_dma_atyp_t buftype;
	rootnex_addr_t raddr;
	uint64_t last_page;
	uint64_t offset;
	uint64_t addrhi;
	uint64_t addrlo;
	uint64_t maxseg;
	page_t **pplist;
	uint64_t paddr;
	uint32_t psize;
	uint32_t size;
	caddr_t vaddr;
	uint_t pcnt;
	page_t *pp;
	uint_t cnt;

	/* shortcuts */
	pplist = dmar_object->dmao_obj.virt_obj.v_priv;
	vaddr = dmar_object->dmao_obj.virt_obj.v_addr;
	maxseg = sglinfo->si_max_cookie_size;
	buftype = dmar_object->dmao_type;
	addrhi = sglinfo->si_max_addr;
	addrlo = sglinfo->si_min_addr;
	size = dmar_object->dmao_size;

	pcnt = 0;
	cnt = 0;

	/*
	 * check to see if we need to use the copy buffer for pages over
	 * the segment attr.
	 */
	sglinfo->si_bounce_on_seg = B_FALSE;
	if (sglinfo->si_flags & _DDI_DMA_BOUNCE_ON_SEG) {
		sglinfo->si_bounce_on_seg = rootnex_need_bounce_seg(
		    dmar_object, sglinfo);
	}
	/*
	 * if we were passed down a linked list of pages, i.e. pointer to
	 * page_t, use this to get our physical address and buf offset.
	 */
	if (buftype == DMA_OTYP_PAGES) {
		pp = dmar_object->dmao_obj.pp_obj.pp_pp;
		ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
		offset = dmar_object->dmao_obj.pp_obj.pp_offset &
		    MMU_PAGEOFFSET;
		paddr = pfn_to_pa(pp->p_pagenum) + offset;
		psize = MIN(size, (MMU_PAGESIZE - offset));
		pp = pp->p_next;
		sglinfo->si_asp = NULL;

	/*
	 * We weren't passed down a linked list of pages, but if we were passed
	 * down an array of pages, use this to get our physical address and buf
	 * offset.
	 */
	} else if (pplist != NULL) {
		ASSERT((buftype == DMA_OTYP_VADDR) ||
		    (buftype == DMA_OTYP_BUFVADDR));

		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
		if (sglinfo->si_asp == NULL) {
			sglinfo->si_asp = &kas;
		}

		ASSERT(!PP_ISFREE(pplist[pcnt]));
		paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
		paddr += offset;
		psize = MIN(size, (MMU_PAGESIZE - offset));
		pcnt++;

	/*
	 * All we have is a virtual address, we'll need to call into the VM
	 * to get the physical address.
	 */
	} else {
		ASSERT((buftype == DMA_OTYP_VADDR) ||
		    (buftype == DMA_OTYP_BUFVADDR));

		offset = (uintptr_t)vaddr & MMU_PAGEOFFSET;
		sglinfo->si_asp = dmar_object->dmao_obj.virt_obj.v_as;
		if (sglinfo->si_asp == NULL) {
			sglinfo->si_asp = &kas;
		}

		paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat, vaddr));
		paddr += offset;
		psize = MIN(size, (MMU_PAGESIZE - offset));
		vaddr += psize;
	}

	raddr = ROOTNEX_PADDR_TO_RBASE(paddr);
	/*
	 * Setup the first cookie with the physical address of the page and the
	 * size of the page (which takes into account the initial offset into
	 * the page).
	 */
	sgl[cnt].dmac_laddress = raddr;
	sgl[cnt].dmac_size = psize;
	sgl[cnt].dmac_type = 0;

	/*
	 * Save away the buffer offset into the page. We'll need this later in
	 * the copy buffer code to help figure out the page index within the
	 * buffer and the offset into the current page.
	 */
	sglinfo->si_buf_offset = offset;

	/*
	 * If we are using the copy buffer for anything over the segment
	 * boundary, and this page is over the segment boundary.
	 *   OR
	 * if the DMA engine can't reach the physical address.
	 */
	if (((sglinfo->si_bounce_on_seg) &&
	    ((raddr + psize) > sglinfo->si_segmask)) ||
	    ((raddr < addrlo) || ((raddr + psize) > addrhi))) {
		/*
		 * Increase how much copy buffer we use. We always increase by
		 * pagesize so we don't have to worry about converting offsets.
		 * Set a flag in the cookies dmac_type to indicate that it uses
		 * the copy buffer. If this isn't the last cookie, go to the
		 * next cookie (since we separate each page which uses the copy
		 * buffer in case the copy buffer is not physically contiguous).
		 */
		sglinfo->si_copybuf_req += MMU_PAGESIZE;
		sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF;
		if ((cnt + 1) < sglinfo->si_max_pages) {
			cnt++;
			sgl[cnt].dmac_laddress = 0;
			sgl[cnt].dmac_size = 0;
			sgl[cnt].dmac_type = 0;
		}
	}

	/*
	 * save this page's physical address so we can figure out if the next
	 * page is physically contiguous. Keep decrementing size until we are
	 * done with the buffer.
	 */
	last_page = raddr & MMU_PAGEMASK;
	size -= psize;
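	/*
	 * Note (illustrative): every page routed through the copy buffer
	 * charges a full MMU_PAGESIZE to si_copybuf_req, even for a
	 * partial page, so a 10-byte buffer the device can't reach still
	 * reserves one whole copy buffer page.
	 */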
	while (size > 0) {
		/* Get the size for this page (i.e. partial or full page) */
		psize = MIN(size, MMU_PAGESIZE);

		if (buftype == DMA_OTYP_PAGES) {
			/* get the paddr from the page_t */
			ASSERT(!PP_ISFREE(pp) && PAGE_LOCKED(pp));
			paddr = pfn_to_pa(pp->p_pagenum);
			pp = pp->p_next;
		} else if (pplist != NULL) {
			/* index into the array of page_t's to get the paddr */
			ASSERT(!PP_ISFREE(pplist[pcnt]));
			paddr = pfn_to_pa(pplist[pcnt]->p_pagenum);
			pcnt++;
		} else {
			/* call into the VM to get the paddr */
			paddr = pfn_to_pa(hat_getpfnum(sglinfo->si_asp->a_hat,
			    vaddr));
			vaddr += psize;
		}

		raddr = ROOTNEX_PADDR_TO_RBASE(paddr);

		/*
		 * If we are using the copy buffer for anything over the
		 * segment boundary, and this page is over the segment
		 * boundary.
		 *   OR
		 * if the DMA engine can't reach the physical address.
		 */
		if (((sglinfo->si_bounce_on_seg) &&
		    ((raddr + psize) > sglinfo->si_segmask)) ||
		    ((raddr < addrlo) || ((raddr + psize) > addrhi))) {

			sglinfo->si_copybuf_req += MMU_PAGESIZE;

			/*
			 * if there is something in the current cookie, go to
			 * the next one. We only want one page in a cookie
			 * which uses the copybuf since the copybuf doesn't
			 * have to be physically contiguous.
			 */
			if (sgl[cnt].dmac_size != 0) {
				cnt++;
			}
			sgl[cnt].dmac_laddress = raddr;
			sgl[cnt].dmac_size = psize;
#if defined(__amd64)
			sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF;
#else
			/*
			 * save the buf offset for 32-bit kernel. used in the
			 * obsoleted interfaces.
			 */
			sgl[cnt].dmac_type = ROOTNEX_USES_COPYBUF |
			    (dmar_object->dmao_size - size);
#endif
			/* if this isn't the last cookie, go to the next one */
			if ((cnt + 1) < sglinfo->si_max_pages) {
				cnt++;
				sgl[cnt].dmac_laddress = 0;
				sgl[cnt].dmac_size = 0;
				sgl[cnt].dmac_type = 0;
			}

		/*
		 * this page didn't need the copy buffer, if it's not
		 * physically contiguous, or it would put us over a segment
		 * boundary, or it puts us over the max cookie size, or the
		 * current sgl doesn't have anything in it.
		 */
		} else if (((last_page + MMU_PAGESIZE) != raddr) ||
		    !(raddr & sglinfo->si_segmask) ||
		    ((sgl[cnt].dmac_size + psize) > maxseg) ||
		    (sgl[cnt].dmac_size == 0)) {
			/*
			 * if we're not already in a new cookie, go to the next
			 * cookie.
			 */
			if (sgl[cnt].dmac_size != 0) {
				cnt++;
			}

			/* save the cookie information */
			sgl[cnt].dmac_laddress = raddr;
			sgl[cnt].dmac_size = psize;
#if defined(__amd64)
			sgl[cnt].dmac_type = 0;
#else
			/*
			 * save the buf offset for 32-bit kernel. used in the
			 * obsoleted interfaces.
			 */
			sgl[cnt].dmac_type = dmar_object->dmao_size - size;
#endif

		/*
		 * this page didn't need the copy buffer, it is physically
		 * contiguous with the last page, and it's <= the max cookie
		 * size.
		 */
		} else {
			sgl[cnt].dmac_size += psize;

			/*
			 * if this exactly == the maximum cookie size, and
			 * it isn't the last cookie, go to the next cookie.
			 */
			if (((sgl[cnt].dmac_size + psize) == maxseg) &&
			    ((cnt + 1) < sglinfo->si_max_pages)) {
				cnt++;
				sgl[cnt].dmac_laddress = 0;
				sgl[cnt].dmac_size = 0;
				sgl[cnt].dmac_type = 0;
			}
		}

		/*
		 * save this page's physical address so we can figure out if
		 * the next page is physically contiguous. Keep decrementing
		 * size until we are done with the buffer.
		 */
		last_page = raddr;
		size -= psize;
	}

	/* we're done, save away how many cookies the sgl has */
	if (sgl[cnt].dmac_size == 0) {
		ASSERT(cnt < sglinfo->si_max_pages);
		sglinfo->si_sgl_size = cnt;
	} else {
		sglinfo->si_sgl_size = cnt + 1;
	}
}
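/*
 * Unlike rootnex_get_sgl() above, the DVMA variant below walks IOMMU
 * translated segments, so there is no copy buffer accounting; the only
 * work is splitting segments on the maximum cookie size and merging
 * contiguous ones.
 */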
static void
rootnex_dvma_get_sgl(ddi_dma_obj_t *dmar_object, ddi_dma_cookie_t *sgl,
    rootnex_sglinfo_t *sglinfo)
{
	uint64_t offset;
	uint64_t maxseg;
	uint64_t dvaddr;
	struct dvmaseg *dvs;
	uint64_t paddr;
	uint32_t psize, ssize;
	uint32_t size;
	uint_t cnt;
	int physcontig;

	ASSERT(dmar_object->dmao_type == DMA_OTYP_DVADDR);

	/* shortcuts */
	maxseg = sglinfo->si_max_cookie_size;
	size = dmar_object->dmao_size;

	cnt = 0;
	sglinfo->si_bounce_on_seg = B_FALSE;

	dvs = dmar_object->dmao_obj.dvma_obj.dv_seg;
	offset = dmar_object->dmao_obj.dvma_obj.dv_off;
	ssize = dvs->dvs_len;
	paddr = dvs->dvs_start;
	paddr += offset;
	psize = MIN(ssize, (maxseg - offset));
	dvaddr = paddr + psize;
	ssize -= psize;

	sgl[cnt].dmac_laddress = paddr;
	sgl[cnt].dmac_size = psize;
	sgl[cnt].dmac_type = 0;

	size -= psize;
	while (size > 0) {
		if (ssize == 0) {
			dvs++;
			ssize = dvs->dvs_len;
			dvaddr = dvs->dvs_start;
			physcontig = 0;
		} else {
			physcontig = 1;
		}

		paddr = dvaddr;
		psize = MIN(ssize, maxseg);
		dvaddr += psize;
		ssize -= psize;

		if (!physcontig || !(paddr & sglinfo->si_segmask) ||
		    ((sgl[cnt].dmac_size + psize) > maxseg) ||
		    (sgl[cnt].dmac_size == 0)) {
			/*
			 * if we're not already in a new cookie, go to the next
			 * cookie.
			 */
			if (sgl[cnt].dmac_size != 0) {
				cnt++;
			}

			/* save the cookie information */
			sgl[cnt].dmac_laddress = paddr;
			sgl[cnt].dmac_size = psize;
			sgl[cnt].dmac_type = 0;
		} else {
			sgl[cnt].dmac_size += psize;

			/*
			 * if this exactly == the maximum cookie size, and
			 * it isn't the last cookie, go to the next cookie.
			 */
			if (((sgl[cnt].dmac_size + psize) == maxseg) &&
			    ((cnt + 1) < sglinfo->si_max_pages)) {
				cnt++;
				sgl[cnt].dmac_laddress = 0;
				sgl[cnt].dmac_size = 0;
				sgl[cnt].dmac_type = 0;
			}
		}
		size -= psize;
	}

	/* we're done, save away how many cookies the sgl has */
	if (sgl[cnt].dmac_size == 0) {
		sglinfo->si_sgl_size = cnt;
	} else {
		sglinfo->si_sgl_size = cnt + 1;
	}
}
/*
 * rootnex_bind_slowpath()
 *    Call in the bind path if the calling driver can't use the sgl without
 *    modifying it. We either need to use the copy buffer and/or we will end up
 *    with a partial bind.
 */
static int
rootnex_bind_slowpath(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
    rootnex_dma_t *dma, ddi_dma_attr_t *attr, ddi_dma_obj_t *dmao, int kmflag)
{
	rootnex_sglinfo_t *sinfo;
	rootnex_window_t *window;
	ddi_dma_cookie_t *cookie;
	size_t copybuf_used;
	size_t dmac_size;
	off_t cur_offset;
	page_t *cur_pp;
	major_t mnum;
	int e;
	int i;

	sinfo = &dma->dp_sglinfo;
	copybuf_used = 0;
	/*
	 * If we're using the copybuf, set the copybuf state in dma struct.
	 * Needs to be first since it sets the copy buffer size.
	 */
	if (sinfo->si_copybuf_req != 0) {
		e = rootnex_setup_copybuf(hp, dmareq, dma, attr);
		if (e != DDI_SUCCESS) {
			return (e);
		}
	} else {
		dma->dp_copybuf_size = 0;
	}

	/*
	 * Figure out if we need to do a partial mapping. If so, figure out
	 * if we need to trim the buffers when we munge the sgl.
	 */
	if ((dma->dp_copybuf_size < sinfo->si_copybuf_req) ||
	    (dmao->dmao_size > dma->dp_maxxfer) ||
	    (attr->dma_attr_sgllen < sinfo->si_sgl_size)) {
		dma->dp_partial_required = B_TRUE;
		if (attr->dma_attr_granular != 1) {
			dma->dp_trim_required = B_TRUE;
		}
	} else {
		dma->dp_partial_required = B_FALSE;
		dma->dp_trim_required = B_FALSE;
	}
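	/*
	 * For example (illustrative): a 1MB bind whose sgl came back with
	 * 20 cookies against dma_attr_sgllen = 8 forces a partial mapping;
	 * if dma_attr_granular is, say, 512 rather than 1, each window must
	 * additionally be trimmed to a multiple of 512 bytes.
	 */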
	/* If we need to do a partial bind, make sure the driver supports it */
	if (dma->dp_partial_required &&
	    !(dmareq->dmar_flags & DDI_DMA_PARTIAL)) {

		mnum = ddi_driver_major(dma->dp_dip);
		/*
		 * patchable which allows us to print one warning per major
		 * number.
		 */
		if ((rootnex_bind_warn) &&
		    ((rootnex_warn_list[mnum] & ROOTNEX_BIND_WARNING) == 0)) {
			rootnex_warn_list[mnum] |= ROOTNEX_BIND_WARNING;
			cmn_err(CE_WARN, "!%s: coding error detected, the "
			    "driver is using ddi_dma_attr(9S) incorrectly. "
			    "There is a small risk of data corruption in "
			    "particular with large I/Os. The driver should be "
			    "replaced with a corrected version for proper "
			    "system operation. To disable this warning, add "
			    "'set rootnex:rootnex_bind_warn=0' to "
			    "/etc/system(4).", ddi_driver_name(dma->dp_dip));
		}
		return (DDI_DMA_TOOBIG);
	}
	/*
	 * we might need multiple windows, setup state to handle them. In this
	 * code path, we will have at least one window.
	 */
	e = rootnex_setup_windows(hp, dma, attr, dmao, kmflag);
	if (e != DDI_SUCCESS) {
		rootnex_teardown_copybuf(dma);
		return (e);
	}

	window = &dma->dp_window[0];
	cookie = &dma->dp_cookies[0];
	cur_offset = 0;
	rootnex_init_win(hp, dma, window, cookie, cur_offset);
	if (dmao->dmao_type == DMA_OTYP_PAGES) {
		cur_pp = dmareq->dmar_object.dmao_obj.pp_obj.pp_pp;
	}
	/* loop through all the cookies we got back from get_sgl() */
	for (i = 0; i < sinfo->si_sgl_size; i++) {
		/*
		 * If we're using the copy buffer, check this cookie and setup
		 * its associated copy buffer state. If this cookie uses the
		 * copy buffer, make sure we sync this window during dma_sync.
		 */
		if (dma->dp_copybuf_size > 0) {
			rootnex_setup_cookie(dmao, dma, cookie,
			    cur_offset, &copybuf_used, &cur_pp);
			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
				window->wd_dosync = B_TRUE;
			}
		}

		/*
		 * save away the cookie size, since it could be modified in
		 * the windowing code.
		 */
		dmac_size = cookie->dmac_size;

		/* if we went over max copybuf size */
		if (dma->dp_copybuf_size &&
		    (copybuf_used > dma->dp_copybuf_size)) {
			e = rootnex_copybuf_window_boundary(hp, dma, &window,
			    cookie, cur_offset, &copybuf_used);
			if (e != DDI_SUCCESS) {
				rootnex_teardown_copybuf(dma);
				rootnex_teardown_windows(dma);
				return (e);
			}

			/*
			 * if the cookie uses the copy buffer, make sure the
			 * new window we just moved to is set to sync.
			 */
			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
				window->wd_dosync = B_TRUE;
			}
			ROOTNEX_DPROBE1(rootnex__copybuf__window, dev_info_t *,
			    dma->dp_dip);

		/* if the cookie cnt == max sgllen, move to the next window */
		} else if (window->wd_cookie_cnt >= attr->dma_attr_sgllen) {
			ASSERT(window->wd_cookie_cnt == attr->dma_attr_sgllen);
			e = rootnex_sgllen_window_boundary(hp, dma, &window,
			    cookie, attr, cur_offset);
			if (e != DDI_SUCCESS) {
				rootnex_teardown_copybuf(dma);
				rootnex_teardown_windows(dma);
				return (e);
			}

			/*
			 * if the cookie uses the copy buffer, make sure the
			 * new window we just moved to is set to sync.
			 */
			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
				window->wd_dosync = B_TRUE;
			}
			ROOTNEX_DPROBE1(rootnex__sgllen__window, dev_info_t *,
			    dma->dp_dip);

		/* else if we will be over maxxfer */
		} else if ((window->wd_size + dmac_size) >
		    dma->dp_maxxfer) {
			e = rootnex_maxxfer_window_boundary(hp, dma, &window,
			    cookie);
			if (e != DDI_SUCCESS) {
				rootnex_teardown_copybuf(dma);
				rootnex_teardown_windows(dma);
				return (e);
			}

			/*
			 * if the cookie uses the copy buffer, make sure the
			 * new window we just moved to is set to sync.
			 */
			if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
				window->wd_dosync = B_TRUE;
			}
			ROOTNEX_DPROBE1(rootnex__maxxfer__window, dev_info_t *,
			    dma->dp_dip);

		/* else this cookie fits in the current window */
		} else {
			window->wd_cookie_cnt++;
			window->wd_size += dmac_size;
		}

		/* track our offset into the buffer, go to the next cookie */
		ASSERT(dmac_size <= dmao->dmao_size);
		ASSERT(cookie->dmac_size <= dmac_size);
		cur_offset += dmac_size;
		cookie++;
	}
	/* if we ended up with a zero sized window in the end, clean it up */
	if (window->wd_size == 0) {
		hp->dmai_nwin--;
		window--;
	}

	ASSERT(window->wd_trim.tr_trim_last == B_FALSE);

	if (!dma->dp_partial_required) {
		return (DDI_DMA_MAPPED);
	}

	ASSERT(dma->dp_partial_required);
	return (DDI_DMA_PARTIAL_MAP);
}
/*
 * rootnex_setup_copybuf()
 *    Called in bind slowpath. Figures out if we're going to use the copy
 *    buffer, and if we do, sets up the basic state to handle it.
 */
static int
rootnex_setup_copybuf(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
    rootnex_dma_t *dma, ddi_dma_attr_t *attr)
{
	rootnex_sglinfo_t *sinfo;
	ddi_dma_attr_t lattr;
	size_t max_copybuf;
	int cansleep;
	int e;
#if !defined(__amd64)
	int vmflag;
#endif

	ASSERT(!dma->dp_dvma_used);

	sinfo = &dma->dp_sglinfo;

	/* read this first so it's consistent through the routine */
	max_copybuf = i_ddi_copybuf_size() & MMU_PAGEMASK;

	/* We need to call into the rootnex on ddi_dma_sync() */
	hp->dmai_rflags &= ~DMP_NOSYNC;

	/* make sure the copybuf size <= the max size */
	dma->dp_copybuf_size = MIN(sinfo->si_copybuf_req, max_copybuf);
	ASSERT((dma->dp_copybuf_size & MMU_PAGEOFFSET) == 0);

#if !defined(__amd64)
	/*
	 * if we don't have kva space to copy to/from, allocate the KVA space
	 * now. We only do this for the 32-bit kernel. We use seg kpm space for
	 * the 64-bit kernel.
	 */
	if ((dmareq->dmar_object.dmao_type == DMA_OTYP_PAGES) ||
	    (dmareq->dmar_object.dmao_obj.virt_obj.v_as != NULL)) {

		/* convert the sleep flags */
		if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
			vmflag = VM_SLEEP;
		} else {
			vmflag = VM_NOSLEEP;
		}

		/* allocate Kernel VA space that we can bcopy to/from */
		dma->dp_kva = vmem_alloc(heap_arena, dma->dp_copybuf_size,
		    vmflag);
		if (dma->dp_kva == NULL) {
			return (DDI_DMA_NORESOURCES);
		}
	}
#endif

	/* convert the sleep flags */
	if (dmareq->dmar_fp == DDI_DMA_SLEEP) {
		cansleep = 1;
	} else {
		cansleep = 0;
	}

	/*
	 * Allocate the actual copy buffer. This needs to fit within the DMA
	 * engine limits, so we can't use kmem_alloc... We don't need
	 * contiguous memory (sgllen) since we will be forcing windows on
	 * sgllen anyway.
	 */
	lattr = *attr;
	lattr.dma_attr_align = MMU_PAGESIZE;
	/*
	 * this should be < 0 to indicate no limit, but due to a bug in
	 * the rootnex, we'll set it to the maximum positive int.
	 */
	lattr.dma_attr_sgllen = 0x7fffffff;
	/*
	 * if we're using the copy buffer because of seg, use that for our
	 * upper address limit.
	 */
	if (sinfo->si_bounce_on_seg) {
		lattr.dma_attr_addr_hi = lattr.dma_attr_seg;
	}
	e = i_ddi_mem_alloc(dma->dp_dip, &lattr, dma->dp_copybuf_size, cansleep,
	    0, NULL, &dma->dp_cbaddr, &dma->dp_cbsize, NULL);
	if (e != DDI_SUCCESS) {
#if !defined(__amd64)
		if (dma->dp_kva != NULL) {
			vmem_free(heap_arena, dma->dp_kva,
			    dma->dp_copybuf_size);
		}
#endif
		return (DDI_DMA_NORESOURCES);
	}

	ROOTNEX_DPROBE2(rootnex__alloc__copybuf, dev_info_t *, dma->dp_dip,
	    size_t, dma->dp_copybuf_size);

	return (DDI_SUCCESS);
}
/*
 * rootnex_setup_windows()
 *    Called in bind slowpath to setup the window state. We always have windows
 *    in the slowpath. Even if the window count = 1.
 */
static int
rootnex_setup_windows(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
    ddi_dma_attr_t *attr, ddi_dma_obj_t *dmao, int kmflag)
{
	rootnex_window_t *windowp;
	rootnex_sglinfo_t *sinfo;
	size_t copy_state_size;
	size_t win_state_size;
	size_t state_available;
	size_t space_needed;
	uint_t copybuf_win;
	uint_t maxxfer_win;
	size_t space_used;
	uint_t sglwin;

	sinfo = &dma->dp_sglinfo;

	dma->dp_current_win = 0;
	hp->dmai_nwin = 0;

	/* If we don't need to do a partial, we only have one window */
	if (!dma->dp_partial_required) {
		dma->dp_max_win = 1;

	/*
	 * we need multiple windows, need to figure out the worst case number
	 * of windows.
	 */
	} else {
		/*
		 * if we need windows because we need more copy buffer than
		 * we allow, the worst case number of windows we could need
		 * here would be (copybuf space required / copybuf space that
		 * we have) plus one for remainder, and plus 2 to handle the
		 * extra pages on the trim for the first and last pages of the
		 * buffer (a page is the minimum window size so under the right
		 * attr settings, you could have a window for each page).
		 * The last page will only be hit here if the size is not a
		 * multiple of the granularity (which theoretically shouldn't
		 * be the case but never has been enforced, so we could have
		 * broken things without it).
		 */
		if (sinfo->si_copybuf_req > dma->dp_copybuf_size) {
			ASSERT(dma->dp_copybuf_size > 0);
			copybuf_win = (sinfo->si_copybuf_req /
			    dma->dp_copybuf_size) + 1 + 2;
		} else {
			copybuf_win = 0;
		}

		/*
		 * if we need windows because we have more cookies than the H/W
		 * can handle, the number of windows we would need here would
		 * be (cookie count / cookies count H/W supports minus 1[for
		 * trim]) plus one for remainder.
		 */
		if (attr->dma_attr_sgllen < sinfo->si_sgl_size) {
			sglwin = (sinfo->si_sgl_size /
			    (attr->dma_attr_sgllen - 1)) + 1;
		} else {
			sglwin = 0;
		}

		/*
		 * if we need windows because we're binding more memory than
		 * the H/W can transfer at once, the number of windows we would
		 * need here would be (xfer count / max xfer H/W supports) plus
		 * one for remainder, and plus 2 to handle the extra pages on
		 * the trim (see above comment about trim)
		 */
		if (dmao->dmao_size > dma->dp_maxxfer) {
			maxxfer_win = (dmao->dmao_size /
			    dma->dp_maxxfer) + 1 + 2;
		} else {
			maxxfer_win = 0;
		}

		dma->dp_max_win = copybuf_win + sglwin + maxxfer_win;
		ASSERT(dma->dp_max_win > 0);
	}
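	/*
	 * Worked example (illustrative): binding 1MB when si_copybuf_req is
	 * 256K but dp_copybuf_size is only 64K gives copybuf_win =
	 * (256K / 64K) + 1 + 2 = 7; an sgl of 100 cookies against
	 * dma_attr_sgllen = 9 gives sglwin = (100 / 8) + 1 = 13; and
	 * dp_maxxfer = 256K gives maxxfer_win = (1M / 256K) + 1 + 2 = 7.
	 */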
	win_state_size = dma->dp_max_win * sizeof (rootnex_window_t);

	/*
	 * Get space for window and potential copy buffer state. Before we
	 * go and allocate memory, see if we can get away with using what's
	 * left in the pre-allocated state or the dynamically allocated sgl.
	 */
	space_used = (uintptr_t)(sinfo->si_sgl_size *
	    sizeof (ddi_dma_cookie_t));

	/* if we dynamically allocated space for the cookies */
	if (dma->dp_need_to_free_cookie) {
		/* if we have more space in the pre-allocated buffer, use it */
		ASSERT(space_used <= dma->dp_cookie_size);
		if ((dma->dp_cookie_size - space_used) <=
		    rootnex_state->r_prealloc_size) {
			state_available = rootnex_state->r_prealloc_size;
			windowp = (rootnex_window_t *)dma->dp_prealloc_buffer;

		/*
		 * else, we have more free space in the dynamically allocated
		 * buffer, i.e. the buffer wasn't worst case fragmented so we
		 * didn't need a lot of cookies.
		 */
		} else {
			state_available = dma->dp_cookie_size - space_used;
			windowp = (rootnex_window_t *)
			    &dma->dp_cookies[sinfo->si_sgl_size];
		}

	/* we used the pre-allocated buffer */
	} else {
		ASSERT(space_used <= rootnex_state->r_prealloc_size);
		state_available = rootnex_state->r_prealloc_size - space_used;
		windowp = (rootnex_window_t *)
		    &dma->dp_cookies[sinfo->si_sgl_size];
	}

	/*
	 * figure out how much state we need to track the copy buffer. Add an
	 * additional 8 bytes for pointer alignment later.
	 */
	if (dma->dp_copybuf_size > 0) {
		copy_state_size = sinfo->si_max_pages *
		    sizeof (rootnex_pgmap_t);
	} else {
		copy_state_size = 0;
	}

	/* add an additional 8 bytes for pointer alignment */
	space_needed = win_state_size + copy_state_size + 0x8;

	/* if we have enough space already, use it */
	if (state_available >= space_needed) {
		dma->dp_window = windowp;
		dma->dp_need_to_free_window = B_FALSE;

	/* not enough space, need to allocate more. */
	} else {
		dma->dp_window = kmem_alloc(space_needed, kmflag);
		if (dma->dp_window == NULL) {
			return (DDI_DMA_NORESOURCES);
		}
		dma->dp_need_to_free_window = B_TRUE;
		dma->dp_window_size = space_needed;
		ROOTNEX_DPROBE2(rootnex__bind__sp__alloc, dev_info_t *,
		    dma->dp_dip, size_t, space_needed);
	}

	/*
	 * we allocate copy buffer state and window state at the same time.
	 * setup our copy buffer state pointers. Make sure it's aligned.
	 */
	if (dma->dp_copybuf_size > 0) {
		dma->dp_pgmap = (rootnex_pgmap_t *)(((uintptr_t)
		    &dma->dp_window[dma->dp_max_win] + 0x7) & ~0x7);

#if !defined(__amd64)
		/*
		 * make sure all pm_mapped, pm_vaddr, and pm_pp are set to
		 * false/NULL. Should be quicker to bzero vs loop and set.
		 */
		bzero(dma->dp_pgmap, copy_state_size);
#endif
	} else {
		dma->dp_pgmap = NULL;
	}

	return (DDI_SUCCESS);
}
/*
 * rootnex_teardown_copybuf()
 *    cleans up after rootnex_setup_copybuf()
 */
static void
rootnex_teardown_copybuf(rootnex_dma_t *dma)
{
#if !defined(__amd64)
	int i;

	/*
	 * if we allocated kernel heap VMEM space, go through all the pages and
	 * map out any of the ones that were mapped into the kernel heap VMEM
	 * arena. Then free the VMEM space.
	 */
	if (dma->dp_kva != NULL) {
		for (i = 0; i < dma->dp_sglinfo.si_max_pages; i++) {
			if (dma->dp_pgmap[i].pm_mapped) {
				hat_unload(kas.a_hat, dma->dp_pgmap[i].pm_kaddr,
				    MMU_PAGESIZE, HAT_UNLOAD);
				dma->dp_pgmap[i].pm_mapped = B_FALSE;
			}
		}

		vmem_free(heap_arena, dma->dp_kva, dma->dp_copybuf_size);
	}
#endif

	/* if we allocated a copy buffer, free it */
	if (dma->dp_cbaddr != NULL) {
		i_ddi_mem_free(dma->dp_cbaddr, NULL);
	}
}
);
3779 * rootnex_teardown_windows()
3780 * cleans up after rootnex_setup_windows()
3783 rootnex_teardown_windows(rootnex_dma_t
*dma
)
3786 * if we had to allocate window state on the last bind (because we
3787 * didn't have enough pre-allocated space in the handle), free it.
3789 if (dma
->dp_need_to_free_window
) {
3790 kmem_free(dma
->dp_window
, dma
->dp_window_size
);
/*
 * rootnex_init_win()
 *    Called in bind slow path during creation of a new window. Initializes
 *    window state to default values.
 */
/*ARGSUSED*/
static void
rootnex_init_win(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
    rootnex_window_t *window, ddi_dma_cookie_t *cookie, off_t cur_offset)
{
	hp->dmai_nwin++;
	window->wd_dosync = B_FALSE;
	window->wd_offset = cur_offset;
	window->wd_size = 0;
	window->wd_first_cookie = cookie;
	window->wd_cookie_cnt = 0;
	window->wd_trim.tr_trim_first = B_FALSE;
	window->wd_trim.tr_trim_last = B_FALSE;
	window->wd_trim.tr_first_copybuf_win = B_FALSE;
	window->wd_trim.tr_last_copybuf_win = B_FALSE;
#if !defined(__amd64)
	window->wd_remap_copybuf = dma->dp_cb_remaping;
#endif
}
/*
 * rootnex_setup_cookie()
 *    Called in the bind slow path when the sgl uses the copy buffer. If any of
 *    the sgl uses the copy buffer, we need to go through each cookie, figure
 *    out if it uses the copy buffer, and if it does, save away everything
 *    we'll need during sync.
 */
static void
rootnex_setup_cookie(ddi_dma_obj_t *dmar_object, rootnex_dma_t *dma,
    ddi_dma_cookie_t *cookie, off_t cur_offset, size_t *copybuf_used,
    page_t **cur_pp)
{
	boolean_t copybuf_sz_power_2;
	rootnex_sglinfo_t *sinfo;
	paddr_t paddr;
	uint_t pidx;
	uint_t pcnt;
	off_t poff;
#if defined(__amd64)
	pfn_t pfn;
#else
	page_t **pplist;
#endif

	ASSERT(dmar_object->dmao_type != DMA_OTYP_DVADDR);

	sinfo = &dma->dp_sglinfo;

	/*
	 * Calculate the page index relative to the start of the buffer. The
	 * index to the current page for our buffer is the offset into the
	 * first page of the buffer plus our current offset into the buffer
	 * itself, shifted of course...
	 */
	pidx = (sinfo->si_buf_offset + cur_offset) >> MMU_PAGESHIFT;
	ASSERT(pidx < sinfo->si_max_pages);

	/* if this cookie uses the copy buffer */
	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
		/*
		 * NOTE: we know that since this cookie uses the copy buffer,
		 * it is <= MMU_PAGESIZE.
		 */

		/*
		 * get the offset into the page. For the 64-bit kernel, get the
		 * pfn which we'll use with seg kpm.
		 */
		poff = cookie->dmac_laddress & MMU_PAGEOFFSET;
#if defined(__amd64)
		/* mfn_to_pfn() is a NOP on i86pc */
		pfn = mfn_to_pfn(cookie->dmac_laddress >> MMU_PAGESHIFT);
#endif /* __amd64 */

		/* figure out if the copybuf size is a power of 2 */
		if (dma->dp_copybuf_size & (dma->dp_copybuf_size - 1)) {
			copybuf_sz_power_2 = B_FALSE;
		} else {
			copybuf_sz_power_2 = B_TRUE;
		}
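		/*
		 * e.g. (illustrative): a 64K copy buffer is a power of 2,
		 * so wrapping below can use (*copybuf_used & 0xFFFF); a
		 * 48K copy buffer is not, so the slower
		 * (*copybuf_used % 0xC000) is used instead.
		 */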
		/* This page uses the copy buffer */
		dma->dp_pgmap[pidx].pm_uses_copybuf = B_TRUE;

		/*
		 * save the copy buffer KVA that we'll use with this page.
		 * if we still fit within the copybuf, it's a simple add.
		 * otherwise, we need to wrap over using & or % accordingly.
		 */
		if ((*copybuf_used + MMU_PAGESIZE) <= dma->dp_copybuf_size) {
			dma->dp_pgmap[pidx].pm_cbaddr = dma->dp_cbaddr +
			    *copybuf_used;
		} else {
			if (copybuf_sz_power_2) {
				dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)(
				    (uintptr_t)dma->dp_cbaddr +
				    (*copybuf_used &
				    (dma->dp_copybuf_size - 1)));
			} else {
				dma->dp_pgmap[pidx].pm_cbaddr = (caddr_t)(
				    (uintptr_t)dma->dp_cbaddr +
				    (*copybuf_used % dma->dp_copybuf_size));
			}
		}

		/*
		 * over write the cookie physical address with the address of
		 * the physical address of the copy buffer page that we will
		 * use.
		 */
		paddr = pfn_to_pa(hat_getpfnum(kas.a_hat,
		    dma->dp_pgmap[pidx].pm_cbaddr)) + poff;

		cookie->dmac_laddress = ROOTNEX_PADDR_TO_RBASE(paddr);

		/* if we have a kernel VA, it's easy, just save that address */
		if ((dmar_object->dmao_type != DMA_OTYP_PAGES) &&
		    (sinfo->si_asp == &kas)) {
			/*
			 * save away the page aligned virtual address of the
			 * driver buffer. Offsets are handled in the sync code.
			 */
			dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)(((uintptr_t)
			    dmar_object->dmao_obj.virt_obj.v_addr + cur_offset)
			    & MMU_PAGEMASK);
#if !defined(__amd64)
			/*
			 * we didn't need to, and will never need to map this
			 * page.
			 */
			dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
#endif

		/* we don't have a kernel VA. We need one for the bcopy. */
		} else {
#if defined(__amd64)
			/*
			 * for the 64-bit kernel, it's easy. We use seg kpm to
			 * get a Kernel VA for the corresponding pfn.
			 */
			dma->dp_pgmap[pidx].pm_kaddr = hat_kpm_pfn2va(pfn);
#else
			/*
			 * for the 32-bit kernel, this is a pain. First we'll
			 * save away the page_t or user VA for this page. This
			 * is needed in rootnex_dma_win() when we switch to a
			 * new window which requires us to re-map the copy
			 * buffer.
			 */
			pplist = dmar_object->dmao_obj.virt_obj.v_priv;
			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
				dma->dp_pgmap[pidx].pm_pp = *cur_pp;
				dma->dp_pgmap[pidx].pm_vaddr = NULL;
			} else if (pplist != NULL) {
				dma->dp_pgmap[pidx].pm_pp = pplist[pidx];
				dma->dp_pgmap[pidx].pm_vaddr = NULL;
			} else {
				dma->dp_pgmap[pidx].pm_pp = NULL;
				dma->dp_pgmap[pidx].pm_vaddr = (caddr_t)
				    (((uintptr_t)
				    dmar_object->dmao_obj.virt_obj.v_addr +
				    cur_offset) & MMU_PAGEMASK);
			}

			/*
			 * save away the page aligned virtual address which was
			 * allocated from the kernel heap arena (taking into
			 * account if we need more copy buffer than we alloced
			 * and use multiple windows to handle this, i.e. &,%).
			 * NOTE: there isn't any physical memory backing this
			 * virtual address space currently.
			 */
			if ((*copybuf_used + MMU_PAGESIZE) <=
			    dma->dp_copybuf_size) {
				dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
				    (((uintptr_t)dma->dp_kva + *copybuf_used) &
				    MMU_PAGEMASK);
			} else {
				if (copybuf_sz_power_2) {
					dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
					    (((uintptr_t)dma->dp_kva +
					    (*copybuf_used &
					    (dma->dp_copybuf_size - 1))) &
					    MMU_PAGEMASK);
				} else {
					dma->dp_pgmap[pidx].pm_kaddr = (caddr_t)
					    (((uintptr_t)dma->dp_kva +
					    (*copybuf_used %
					    dma->dp_copybuf_size)) &
					    MMU_PAGEMASK);
				}
			}

			/*
			 * if we haven't used up the available copy buffer yet,
			 * map the kva to the physical page.
			 */
			if (!dma->dp_cb_remaping && ((*copybuf_used +
			    MMU_PAGESIZE) <= dma->dp_copybuf_size)) {
				dma->dp_pgmap[pidx].pm_mapped = B_TRUE;
				if (dma->dp_pgmap[pidx].pm_pp != NULL) {
					i86_pp_map(dma->dp_pgmap[pidx].pm_pp,
					    dma->dp_pgmap[pidx].pm_kaddr);
				} else {
					i86_va_map(dma->dp_pgmap[pidx].pm_vaddr,
					    sinfo->si_asp,
					    dma->dp_pgmap[pidx].pm_kaddr);
				}

			/*
			 * we've used up the available copy buffer, this page
			 * will have to be mapped during rootnex_dma_win() when
			 * we switch to a new window which requires a re-map
			 * of the copy buffer. (32-bit kernel only)
			 */
			} else {
				dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
			}
#endif
			/* go to the next page_t */
			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
				*cur_pp = (*cur_pp)->p_next;
			}
		}

		/* add to the copy buffer count */
		*copybuf_used += MMU_PAGESIZE;

	/*
	 * This cookie doesn't use the copy buffer. Walk through the pages this
	 * cookie occupies to reflect this.
	 */
	} else {
		/*
		 * figure out how many pages the cookie occupies. We need to
		 * use the original page offset of the buffer and the cookies
		 * offset in the buffer to do this.
		 */
		poff = (sinfo->si_buf_offset + cur_offset) & MMU_PAGEOFFSET;
		pcnt = mmu_btopr(cookie->dmac_size + poff);

		while (pcnt > 0) {
#if !defined(__amd64)
			/*
			 * the 32-bit kernel doesn't have seg kpm, so we need
			 * to map in the driver buffer (if it didn't come down
			 * with a kernel VA) on the fly. Since this page doesn't
			 * use the copy buffer, it's not, nor will it ever, be
			 * mapped in.
			 */
			dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
#endif
			dma->dp_pgmap[pidx].pm_uses_copybuf = B_FALSE;

			/*
			 * we need to update pidx and cur_pp or we'll lose
			 * track of where we are.
			 */
			if (dmar_object->dmao_type == DMA_OTYP_PAGES) {
				*cur_pp = (*cur_pp)->p_next;
			}
			pidx++;
			pcnt--;
		}
	}
}
/*
 * rootnex_sgllen_window_boundary()
 *    Called in the bind slow path when the next cookie causes us to exceed (in
 *    this case == since we start at 0 and sgllen starts at 1) the maximum sgl
 *    length supported by the DMA H/W.
 */
static int
rootnex_sgllen_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
    rootnex_window_t **windowp, ddi_dma_cookie_t *cookie, ddi_dma_attr_t *attr,
    off_t cur_offset)
{
	off_t new_offset;
	size_t trim_sz;
	off_t coffset;

	/*
	 * if we know we'll never have to trim, it's pretty easy. Just move to
	 * the next window and init it. We're done.
	 */
	if (!dma->dp_trim_required) {
		(*windowp)++;
		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
		(*windowp)->wd_cookie_cnt++;
		(*windowp)->wd_size = cookie->dmac_size;
		return (DDI_SUCCESS);
	}

	/* figure out how much we need to trim from the window */
	ASSERT(attr->dma_attr_granular != 0);
	if (dma->dp_granularity_power_2) {
		trim_sz = (*windowp)->wd_size & (attr->dma_attr_granular - 1);
	} else {
		trim_sz = (*windowp)->wd_size % attr->dma_attr_granular;
	}

	/* The window's a whole multiple of granularity. We're done */
	if (trim_sz == 0) {
		(*windowp)++;
		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);
		(*windowp)->wd_cookie_cnt++;
		(*windowp)->wd_size = cookie->dmac_size;
		return (DDI_SUCCESS);
	}
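	/*
	 * e.g. (illustrative): with dma_attr_granular = 512 and
	 * wd_size = 66000, trim_sz = 66000 % 512 = 464, so the last cookie
	 * is shortened by 464 bytes and those bytes start the next window.
	 */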
	/*
	 * The window's not a whole multiple of granularity, since we know this
	 * is due to the sgllen, we need to go back to the last cookie and trim
	 * that one, add the left over part of the old cookie into the new
	 * window, and then add in the new cookie into the new window.
	 */

	/*
	 * make sure the driver isn't making us do something bad... Trimming
	 * and sgllen == 1 don't go together.
	 */
	if (attr->dma_attr_sgllen == 1) {
		return (DDI_DMA_NOMAPPING);
	}

	/*
	 * first, setup the current window to account for the trim. Need to go
	 * back to the last cookie for this.
	 */
	cookie--;
	(*windowp)->wd_trim.tr_trim_last = B_TRUE;
	(*windowp)->wd_trim.tr_last_cookie = cookie;
	(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
	ASSERT(cookie->dmac_size > trim_sz);
	(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
	(*windowp)->wd_size -= trim_sz;

	/* save the buffer offsets for the next window */
	coffset = cookie->dmac_size - trim_sz;
	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;

	/*
	 * set this now in case this is the first window. all other cases are
	 * set in dma_win()
	 */
	cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;

	/*
	 * initialize the next window using what's left over in the previous
	 * cookie.
	 */
	(*windowp)++;
	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
	(*windowp)->wd_cookie_cnt++;
	(*windowp)->wd_trim.tr_trim_first = B_TRUE;
	(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + coffset;
	(*windowp)->wd_trim.tr_first_size = trim_sz;
	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
		(*windowp)->wd_dosync = B_TRUE;
	}

	/*
	 * now go back to the current cookie and add it to the new window. set
	 * the new window size to the what was left over from the previous
	 * cookie and what's in the current cookie.
	 */
	cookie++;
	(*windowp)->wd_cookie_cnt++;
	(*windowp)->wd_size = trim_sz + cookie->dmac_size;

	/*
	 * trim plus the next cookie could put us over maxxfer (a cookie can be
	 * a max size of maxxfer). Handle that case.
	 */
	if ((*windowp)->wd_size > dma->dp_maxxfer) {
		/*
		 * maxxfer is already a whole multiple of granularity, and this
		 * trim will be <= the previous trim (since a cookie can't be
		 * larger than maxxfer). Make things simple here.
		 */
		trim_sz = (*windowp)->wd_size - dma->dp_maxxfer;
		(*windowp)->wd_trim.tr_trim_last = B_TRUE;
		(*windowp)->wd_trim.tr_last_cookie = cookie;
		(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
		(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
		(*windowp)->wd_size -= trim_sz;
		ASSERT((*windowp)->wd_size == dma->dp_maxxfer);

		/* save the buffer offsets for the next window */
		coffset = cookie->dmac_size - trim_sz;
		new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;

		/* setup the next window */
		(*windowp)++;
		rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
		(*windowp)->wd_cookie_cnt++;
		(*windowp)->wd_trim.tr_trim_first = B_TRUE;
		(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress +
		    coffset;
		(*windowp)->wd_trim.tr_first_size = trim_sz;
	}

	return (DDI_SUCCESS);
}
4211 * rootnex_copybuf_window_boundary()
4212 * Called in bind slowpath when we get to a window boundary because we used
4213 * up all the copy buffer that we have.
4216 rootnex_copybuf_window_boundary(ddi_dma_impl_t
*hp
, rootnex_dma_t
*dma
,
4217 rootnex_window_t
**windowp
, ddi_dma_cookie_t
*cookie
, off_t cur_offset
,
4218 size_t *copybuf_used
)
4220 rootnex_sglinfo_t
*sinfo
;
4229 sinfo
= &dma
->dp_sglinfo
;
4232 * the copy buffer should be a whole multiple of page size. We know that
4233 * this cookie is <= MMU_PAGESIZE.
4235 ASSERT(cookie
->dmac_size
<= MMU_PAGESIZE
);
4238 * from now on, all new windows in this bind need to be re-mapped during
4239 * ddi_dma_getwin() (32-bit kernel only). i.e. we ran out out copybuf
4242 #if !defined(__amd64)
4243 dma
->dp_cb_remaping
= B_TRUE
;
4246 /* reset copybuf used */
4250 * if we don't have to trim (since granularity is set to 1), go to the
4251 * next window and add the current cookie to it. We know the current
4252 * cookie uses the copy buffer since we're in this code path.
4254 if (!dma
->dp_trim_required
) {
4256 rootnex_init_win(hp
, dma
, *windowp
, cookie
, cur_offset
);
4258 /* Add this cookie to the new window */
4259 (*windowp
)->wd_cookie_cnt
++;
4260 (*windowp
)->wd_size
+= cookie
->dmac_size
;
4261 *copybuf_used
+= MMU_PAGESIZE
;
4262 return (DDI_SUCCESS
);
4266 * *** may need to trim, figure it out.
4269 /* figure out how much we need to trim from the window */
4270 if (dma
->dp_granularity_power_2
) {
4271 trim_sz
= (*windowp
)->wd_size
&
4272 (hp
->dmai_attr
.dma_attr_granular
- 1);
4274 trim_sz
= (*windowp
)->wd_size
% hp
->dmai_attr
.dma_attr_granular
;

	/*
	 * if the window's a whole multiple of granularity, go to the next
	 * window, init it, then add in the current cookie. We know the
	 * current cookie uses the copy buffer since we're in this code path.
	 */
	if (trim_sz == 0) {
		(*windowp)++;
		rootnex_init_win(hp, dma, *windowp, cookie, cur_offset);

		/* Add this cookie to the new window */
		(*windowp)->wd_cookie_cnt++;
		(*windowp)->wd_size += cookie->dmac_size;
		*copybuf_used += MMU_PAGESIZE;
		return (DDI_SUCCESS);
	}

	/*
	 * *** We figured it out, we definitely need to trim
	 */

	/*
	 * make sure the driver isn't making us do something bad...
	 * Trimming and sgllen == 1 don't go together.
	 */
	if (hp->dmai_attr.dma_attr_sgllen == 1) {
		return (DDI_DMA_NOMAPPING);
	}

	/*
	 * first, setup the current window to account for the trim. Need to go
	 * back to the last cookie for this. Some of the last cookie will be in
	 * the current window, and some of the last cookie will be in the new
	 * window. All of the current cookie will be in the new window.
	 */
	cookie--;
	(*windowp)->wd_trim.tr_trim_last = B_TRUE;
	(*windowp)->wd_trim.tr_last_cookie = cookie;
	(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
	ASSERT(cookie->dmac_size > trim_sz);
	(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
	(*windowp)->wd_size -= trim_sz;

	/*
	 * we're trimming the last cookie (not the current cookie). So that
	 * last cookie may or may not have been using the copy buffer (we
	 * know the cookie passed in uses the copy buffer since we're in
	 * this code path).
	 *
	 * If the last cookie doesn't use the copy buffer, nothing special to
	 * do. However, if it does use the copy buffer, it will be both the
	 * last page in the current window and the first page in the next
	 * window. Since we are reusing the copy buffer (and KVA space on the
	 * 32-bit kernel), this page will use the end of the copy buffer in the
	 * current window, and the start of the copy buffer in the next window.
	 * Track that info... The cookie physical address was already set to
	 * the copy buffer physical address in setup_cookie..
	 */
	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
		pidx = (sinfo->si_buf_offset + (*windowp)->wd_offset +
		    (*windowp)->wd_size) >> MMU_PAGESHIFT;
		(*windowp)->wd_trim.tr_last_copybuf_win = B_TRUE;
		(*windowp)->wd_trim.tr_last_pidx = pidx;
		(*windowp)->wd_trim.tr_last_cbaddr =
		    dma->dp_pgmap[pidx].pm_cbaddr;
#if !defined(__amd64)
		(*windowp)->wd_trim.tr_last_kaddr =
		    dma->dp_pgmap[pidx].pm_kaddr;
#endif
	}

	/* save the buffer offsets for the next window */
	coffset = cookie->dmac_size - trim_sz;
	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;

	/*
	 * set this now in case this is the first window. all other cases are
	 * set in dma_win()
	 */
	cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;

	/*
	 * initialize the next window using what's left over in the previous
	 * cookie.
	 */
	(*windowp)++;
	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
	(*windowp)->wd_cookie_cnt++;
	(*windowp)->wd_trim.tr_trim_first = B_TRUE;
	(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress + coffset;
	(*windowp)->wd_trim.tr_first_size = trim_sz;

	/*
	 * again, we're tracking if the last cookie uses the copy buffer.
	 * read the comment above for more info on why we need to track
	 * additional state.
	 *
	 * For the first cookie in the new window, we need to reset the
	 * physical address we DMA into to the start of the copy buffer plus
	 * any initial page offset which may be present.
	 */
	if (cookie->dmac_type & ROOTNEX_USES_COPYBUF) {
		(*windowp)->wd_dosync = B_TRUE;
		(*windowp)->wd_trim.tr_first_copybuf_win = B_TRUE;
		(*windowp)->wd_trim.tr_first_pidx = pidx;
		(*windowp)->wd_trim.tr_first_cbaddr = dma->dp_cbaddr;
		poff = (*windowp)->wd_trim.tr_first_paddr & MMU_PAGEOFFSET;

		paddr = pfn_to_pa(hat_getpfnum(kas.a_hat, dma->dp_cbaddr)) +
		    poff;
		(*windowp)->wd_trim.tr_first_paddr =
		    ROOTNEX_PADDR_TO_RBASE(paddr);

#if !defined(__amd64)
		(*windowp)->wd_trim.tr_first_kaddr = dma->dp_kva;
#endif
		/* account for the cookie copybuf usage in the new window */
		*copybuf_used += MMU_PAGESIZE;

		/*
		 * every piece of code has to have a hack, and here is this
		 * code's hack!!!
		 *
		 * There is a complex interaction between setup_cookie and the
		 * copybuf window boundary. The complexity had to be in either
		 * the maxxfer window, or the copybuf window, and I chose the
		 * copybuf code.
		 *
		 * So in this code path, we have taken the last cookie,
		 * virtually broken it in half due to the trim, and it happens
		 * to use the copybuf which further complicates life. At the
		 * same time, we have already setup the current cookie, which
		 * is now wrong. More background info: the current cookie uses
		 * the copybuf, so it is only a page long max. So we need to
		 * fix the current cookie's copy buffer address, physical
		 * address, and kva for the 32-bit kernel. We do this by
		 * bumping them by page size (of course, we can't do this on
		 * the physical address since the copy buffer may not be
		 * physically contiguous).
		 */
		cookie++;
		dma->dp_pgmap[pidx + 1].pm_cbaddr += MMU_PAGESIZE;
		poff = cookie->dmac_laddress & MMU_PAGEOFFSET;

		paddr = pfn_to_pa(hat_getpfnum(kas.a_hat,
		    dma->dp_pgmap[pidx + 1].pm_cbaddr)) + poff;
		cookie->dmac_laddress = ROOTNEX_PADDR_TO_RBASE(paddr);

#if !defined(__amd64)
		ASSERT(dma->dp_pgmap[pidx + 1].pm_mapped == B_FALSE);
		dma->dp_pgmap[pidx + 1].pm_kaddr += MMU_PAGESIZE;
#endif
	} else {
		/* go back to the current cookie */
		cookie++;
	}

	/*
	 * add the current cookie to the new window. set the new window size
	 * to what was left over from the previous cookie and what's in the
	 * current cookie.
	 */
	(*windowp)->wd_cookie_cnt++;
	(*windowp)->wd_size = trim_sz + cookie->dmac_size;
	ASSERT((*windowp)->wd_size < dma->dp_maxxfer);

	/*
	 * we know that the cookie passed in always uses the copy buffer. We
	 * wouldn't be here if it didn't.
	 */
	*copybuf_used += MMU_PAGESIZE;

	return (DDI_SUCCESS);
}

/*
 * rootnex_maxxfer_window_boundary()
 *    Called in bind slowpath when we get to a window boundary because we will
 *    go over maxxfer.
 */
static int
rootnex_maxxfer_window_boundary(ddi_dma_impl_t *hp, rootnex_dma_t *dma,
    rootnex_window_t **windowp, ddi_dma_cookie_t *cookie)
{
	size_t dmac_size;
	off_t new_offset;
	size_t trim_sz;
	off_t coffset;


	/*
	 * calculate how much we have to trim off of the current cookie to
	 * equal maxxfer. We don't have to account for granularity here since
	 * our maxxfer already takes that into account.
	 */
	trim_sz = ((*windowp)->wd_size + cookie->dmac_size) - dma->dp_maxxfer;
	ASSERT(trim_sz <= cookie->dmac_size);
	ASSERT(trim_sz <= dma->dp_maxxfer);

	/* save cookie size since we need it later and we might change it */
	dmac_size = cookie->dmac_size;

	/*
	 * if we're not trimming the entire cookie, setup the current window
	 * to account for the trim.
	 */
	if (trim_sz < cookie->dmac_size) {
		(*windowp)->wd_cookie_cnt++;
		(*windowp)->wd_trim.tr_trim_last = B_TRUE;
		(*windowp)->wd_trim.tr_last_cookie = cookie;
		(*windowp)->wd_trim.tr_last_paddr = cookie->dmac_laddress;
		(*windowp)->wd_trim.tr_last_size = cookie->dmac_size - trim_sz;
		(*windowp)->wd_size = dma->dp_maxxfer;

		/*
		 * set the adjusted cookie size now in case this is the first
		 * window. All other windows are taken care of in get win
		 */
		cookie->dmac_size = (*windowp)->wd_trim.tr_last_size;
	}

	/*
	 * coffset is the current offset within the cookie, new_offset is the
	 * current offset within the entire buffer.
	 */
	coffset = dmac_size - trim_sz;
	new_offset = (*windowp)->wd_offset + (*windowp)->wd_size;

	/* initialize the next window */
	(*windowp)++;
	rootnex_init_win(hp, dma, *windowp, cookie, new_offset);
	(*windowp)->wd_cookie_cnt++;
	(*windowp)->wd_size = trim_sz;
	if (trim_sz < dmac_size) {
		(*windowp)->wd_trim.tr_trim_first = B_TRUE;
		(*windowp)->wd_trim.tr_first_paddr = cookie->dmac_laddress +
		    coffset;
		(*windowp)->wd_trim.tr_first_size = trim_sz;
	}

	return (DDI_SUCCESS);
}
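
/*
 * Illustration (hypothetical numbers): with dp_maxxfer == 0x10000, a window
 * of wd_size 0xF000, and an incoming cookie of 0x2000, trim_sz is (0xF000 +
 * 0x2000) - 0x10000 = 0x1000. The current window keeps 0x1000 bytes of the
 * cookie (its wd_size becomes exactly maxxfer) and the new window starts
 * with the remaining 0x1000 bytes.
 */
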
/*ARGSUSED*/
static int
rootnex_coredma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
    off_t off, size_t len, uint_t cache_flags)
{
	rootnex_sglinfo_t *sinfo;
	rootnex_pgmap_t *cbpage;
	rootnex_window_t *win;
	ddi_dma_impl_t *hp;
	rootnex_dma_t *dma;
	caddr_t fromaddr;
	caddr_t toaddr;
	uint_t psize;
	off_t offset;
	uint_t pidx;
	size_t size;
	off_t poff;
	int e;


	hp = (ddi_dma_impl_t *)handle;
	dma = (rootnex_dma_t *)hp->dmai_private;
	sinfo = &dma->dp_sglinfo;

	/*
	 * if we don't have any windows, we don't need to sync. A copybuf
	 * will cause us to have at least one window.
	 */
	if (dma->dp_window == NULL) {
		return (DDI_SUCCESS);
	}

	/* This window may not need to be sync'd */
	win = &dma->dp_window[dma->dp_current_win];
	if (!win->wd_dosync) {
		return (DDI_SUCCESS);
	}

	/* handle off and len special cases */
	if ((off == 0) || (rootnex_sync_ignore_params)) {
		offset = win->wd_offset;
	} else {
		offset = off;
	}
	if ((len == 0) || (rootnex_sync_ignore_params)) {
		size = win->wd_size;
	} else {
		size = len;
	}

	/* check the sync args to make sure they make a little sense */
	if (rootnex_sync_check_parms) {
		e = rootnex_valid_sync_parms(hp, win, offset, size,
		    cache_flags);
		if (e != DDI_SUCCESS) {
			ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_SYNC_FAIL]);
			return (DDI_FAILURE);
		}
	}

	/*
	 * special case the first page to handle the offset into the page. The
	 * offset to the current page for our buffer is the offset into the
	 * first page of the buffer plus our current offset into the buffer
	 * itself, masked of course.
	 */
	poff = (sinfo->si_buf_offset + offset) & MMU_PAGEOFFSET;
	psize = MIN((MMU_PAGESIZE - poff), size);
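
	/*
	 * Worked example (hypothetical values): with si_buf_offset == 0x830
	 * and offset == 0, poff is 0x830 and the first copy is at most
	 * MMU_PAGESIZE - 0x830 bytes; every following iteration starts page
	 * aligned, so poff drops to 0 below.
	 */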

	/* go through all the pages that we want to sync */
	while (size > 0) {
		/*
		 * Calculate the page index relative to the start of the
		 * buffer. The index to the current page for our buffer is
		 * the offset into the first page of the buffer plus our
		 * current offset into the buffer itself, shifted of course...
		 */
		pidx = (sinfo->si_buf_offset + offset) >> MMU_PAGESHIFT;
		ASSERT(pidx < sinfo->si_max_pages);

		/*
		 * if this page uses the copy buffer, we need to sync it,
		 * otherwise, go on to the next page.
		 */
		cbpage = &dma->dp_pgmap[pidx];
		ASSERT((cbpage->pm_uses_copybuf == B_TRUE) ||
		    (cbpage->pm_uses_copybuf == B_FALSE));
		if (cbpage->pm_uses_copybuf) {
			/* cbaddr and kaddr should be page aligned */
			ASSERT(((uintptr_t)cbpage->pm_cbaddr &
			    MMU_PAGEOFFSET) == 0);
			ASSERT(((uintptr_t)cbpage->pm_kaddr &
			    MMU_PAGEOFFSET) == 0);

			/*
			 * if we're copying for the device, we are going to
			 * copy from the driver's buffer and to the rootnex
			 * allocated copy buffer.
			 */
			if (cache_flags == DDI_DMA_SYNC_FORDEV) {
				fromaddr = cbpage->pm_kaddr + poff;
				toaddr = cbpage->pm_cbaddr + poff;
				ROOTNEX_DPROBE2(rootnex__sync__dev,
				    dev_info_t *, dma->dp_dip, size_t, psize);

			/*
			 * if we're copying for the cpu/kernel, we are going
			 * to copy from the rootnex allocated copy buffer to
			 * the driver's buffer.
			 */
			} else {
				fromaddr = cbpage->pm_cbaddr + poff;
				toaddr = cbpage->pm_kaddr + poff;
				ROOTNEX_DPROBE2(rootnex__sync__cpu,
				    dev_info_t *, dma->dp_dip, size_t, psize);
			}

			bcopy(fromaddr, toaddr, psize);
		}

		/*
		 * decrement size until we're done, update our offset into
		 * the buffer, and get the next page size.
		 */
		size -= psize;
		offset += psize;
		psize = MIN(MMU_PAGESIZE, size);

		/* page offset is zero for the rest of this loop */
		poff = 0;
	}

	return (DDI_SUCCESS);
}

/*
 * rootnex_dma_sync()
 *    called from ddi_dma_sync() if DMP_NOSYNC is not set in hp->dmai_rflags.
 *    We set DMP_NOSYNC if we're not using the copy buffer. If DMP_NOSYNC
 *    is set, ddi_dma_sync() returns immediately passing back success.
 */
/*ARGSUSED*/
static int
rootnex_dma_sync(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
    off_t off, size_t len, uint_t cache_flags)
{
#if defined(__amd64) && !defined(__xpv)
	if (IOMMU_USED(rdip)) {
		return (iommulib_nexdma_sync(dip, rdip, handle, off, len,
		    cache_flags));
	}
#endif
	return (rootnex_coredma_sync(dip, rdip, handle, off, len,
	    cache_flags));
}
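
/*
 * For reference, a leaf driver reaches this code through ddi_dma_sync(9F);
 * a hypothetical caller (names invented for illustration):
 *
 *	if (ddi_dma_sync(xsp->dma_handle, 0, 0,
 *	    DDI_DMA_SYNC_FORKERNEL) != DDI_SUCCESS)
 *		return (EIO);
 *
 * Passing 0 for both off and len syncs the entire current window, matching
 * the off/len special cases handled in rootnex_coredma_sync() above.
 */
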
/*
 * rootnex_valid_sync_parms()
 *    checks the parameters passed to sync to verify they are correct.
 */
static int
rootnex_valid_sync_parms(ddi_dma_impl_t *hp, rootnex_window_t *win,
    off_t offset, size_t size, uint_t cache_flags)
{
	off_t woffset;


	/*
	 * the first part of the test to make sure the offset passed in is
	 * within the window.
	 */
	if (offset < win->wd_offset) {
		return (DDI_FAILURE);
	}

	/*
	 * second and last part of the test to make sure the offset and length
	 * passed in are within the window.
	 */
	woffset = offset - win->wd_offset;
	if ((woffset + size) > win->wd_size) {
		return (DDI_FAILURE);
	}

	/*
	 * if we are sync'ing for the device, the DDI_DMA_WRITE flag should
	 * be set too.
	 */
	if ((cache_flags == DDI_DMA_SYNC_FORDEV) &&
	    (hp->dmai_rflags & DDI_DMA_WRITE)) {
		return (DDI_SUCCESS);
	}

	/*
	 * at this point, either DDI_DMA_SYNC_FORCPU or DDI_DMA_SYNC_FORKERNEL
	 * should be set. Also DDI_DMA_READ should be set in the flags.
	 */
	if (((cache_flags == DDI_DMA_SYNC_FORCPU) ||
	    (cache_flags == DDI_DMA_SYNC_FORKERNEL)) &&
	    (hp->dmai_rflags & DDI_DMA_READ)) {
		return (DDI_SUCCESS);
	}

	return (DDI_FAILURE);
}
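
/*
 * Summary of the checks above (derived directly from the code): the range
 * [offset, offset + size) must fall inside the current window, and then:
 *
 *	cache_flags			required dmai_rflags	result
 *	DDI_DMA_SYNC_FORDEV		DDI_DMA_WRITE		DDI_SUCCESS
 *	DDI_DMA_SYNC_FORCPU		DDI_DMA_READ		DDI_SUCCESS
 *	DDI_DMA_SYNC_FORKERNEL		DDI_DMA_READ		DDI_SUCCESS
 *	anything else			-			DDI_FAILURE
 */
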
/*ARGSUSED*/
static int
rootnex_coredma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
    uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
    uint_t *ccountp)
{
	rootnex_window_t *window;
	rootnex_trim_t *trim;
	ddi_dma_impl_t *hp;
	rootnex_dma_t *dma;
	ddi_dma_obj_t *dmao;
#if !defined(__amd64)
	rootnex_sglinfo_t *sinfo;
	rootnex_pgmap_t *pmap;
	uint_t pidx;
	uint_t pcnt;
	off_t poff;
	int i;
#endif


	hp = (ddi_dma_impl_t *)handle;
	dma = (rootnex_dma_t *)hp->dmai_private;
#if !defined(__amd64)
	sinfo = &dma->dp_sglinfo;
#endif

	/* If we try and get a window which doesn't exist, return failure */
	if (win >= hp->dmai_nwin) {
		ROOTNEX_DPROF_INC(&rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]);
		return (DDI_FAILURE);
	}

	dmao = dma->dp_dvma_used ? &dma->dp_dvma : &dma->dp_dma;

	/*
	 * if we don't have any windows, and they're asking for the first
	 * window, setup the cookie pointer to the first cookie in the bind.
	 * setup our return values, then increment the cookie since we return
	 * the first cookie on the stack.
	 */
	if (dma->dp_window == NULL) {
		if (win != 0) {
			ROOTNEX_DPROF_INC(
			    &rootnex_cnt[ROOTNEX_CNT_GETWIN_FAIL]);
			return (DDI_FAILURE);
		}
		hp->dmai_cookie = dma->dp_cookies;
		*offp = 0;
		*lenp = dmao->dmao_size;
		*ccountp = dma->dp_sglinfo.si_sgl_size;
		*cookiep = hp->dmai_cookie[0];
		hp->dmai_cookie++;
		return (DDI_SUCCESS);
	}

	/* sync the old window before moving on to the new one */
	window = &dma->dp_window[dma->dp_current_win];
	if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_READ)) {
		(void) rootnex_coredma_sync(dip, rdip, handle, 0, 0,
		    DDI_DMA_SYNC_FORCPU);
	}

#if !defined(__amd64)
	/*
	 * before we move to the next window, if we need to re-map, unmap all
	 * the pages in this window.
	 */
	if (dma->dp_cb_remaping) {
		/*
		 * If we switch to this window again, we'll need to map in
		 * on the fly next time.
		 */
		window->wd_remap_copybuf = B_TRUE;

		/*
		 * calculate the page index into the buffer where this window
		 * starts, and the number of pages this window takes up.
		 */
		pidx = (sinfo->si_buf_offset + window->wd_offset) >>
		    MMU_PAGESHIFT;
		poff = (sinfo->si_buf_offset + window->wd_offset) &
		    MMU_PAGEOFFSET;
		pcnt = mmu_btopr(window->wd_size + poff);
		ASSERT((pidx + pcnt) <= sinfo->si_max_pages);

		/* unmap pages which are currently mapped in this window */
		for (i = 0; i < pcnt; i++) {
			if (dma->dp_pgmap[pidx].pm_mapped) {
				hat_unload(kas.a_hat,
				    dma->dp_pgmap[pidx].pm_kaddr, MMU_PAGESIZE,
				    HAT_UNLOAD);
				dma->dp_pgmap[pidx].pm_mapped = B_FALSE;
			}
			pidx++;
		}
	}
#endif

	/*
	 * Move to the new window.
	 * NOTE: current_win must be set for sync to work right
	 */
	dma->dp_current_win = win;
	window = &dma->dp_window[win];

	/* if needed, adjust the first and/or last cookies for trim */
	trim = &window->wd_trim;
	if (trim->tr_trim_first) {
		window->wd_first_cookie->dmac_laddress = trim->tr_first_paddr;
		window->wd_first_cookie->dmac_size = trim->tr_first_size;
#if !defined(__amd64)
		window->wd_first_cookie->dmac_type =
		    (window->wd_first_cookie->dmac_type &
		    ROOTNEX_USES_COPYBUF) + window->wd_offset;
#endif
		if (trim->tr_first_copybuf_win) {
			dma->dp_pgmap[trim->tr_first_pidx].pm_cbaddr =
			    trim->tr_first_cbaddr;
#if !defined(__amd64)
			dma->dp_pgmap[trim->tr_first_pidx].pm_kaddr =
			    trim->tr_first_kaddr;
#endif
		}
	}
	if (trim->tr_trim_last) {
		trim->tr_last_cookie->dmac_laddress = trim->tr_last_paddr;
		trim->tr_last_cookie->dmac_size = trim->tr_last_size;
		if (trim->tr_last_copybuf_win) {
			dma->dp_pgmap[trim->tr_last_pidx].pm_cbaddr =
			    trim->tr_last_cbaddr;
#if !defined(__amd64)
			dma->dp_pgmap[trim->tr_last_pidx].pm_kaddr =
			    trim->tr_last_kaddr;
#endif
		}
	}

	/*
	 * setup the cookie pointer to the first cookie in the window. setup
	 * our return values, then increment the cookie since we return the
	 * first cookie on the stack.
	 */
	hp->dmai_cookie = window->wd_first_cookie;
	*offp = window->wd_offset;
	*lenp = window->wd_size;
	*ccountp = window->wd_cookie_cnt;
	*cookiep = hp->dmai_cookie[0];
	hp->dmai_cookie++;

#if !defined(__amd64)
	/* re-map copybuf if required for this window */
	if (dma->dp_cb_remaping) {
		/*
		 * calculate the page index into the buffer where this
		 * window starts.
		 */
		pidx = (sinfo->si_buf_offset + window->wd_offset) >>
		    MMU_PAGESHIFT;
		ASSERT(pidx < sinfo->si_max_pages);

		/*
		 * the first page can get unmapped if it's shared with the
		 * previous window. Even if the rest of this window is already
		 * mapped in, we need to still check this one.
		 */
		pmap = &dma->dp_pgmap[pidx];
		if ((pmap->pm_uses_copybuf) && (pmap->pm_mapped == B_FALSE)) {
			if (pmap->pm_pp != NULL) {
				pmap->pm_mapped = B_TRUE;
				i86_pp_map(pmap->pm_pp, pmap->pm_kaddr);
			} else if (pmap->pm_vaddr != NULL) {
				pmap->pm_mapped = B_TRUE;
				i86_va_map(pmap->pm_vaddr, sinfo->si_asp,
				    pmap->pm_kaddr);
			}
		}
		pidx++;

		/* map in the rest of the pages if required */
		if (window->wd_remap_copybuf) {
			window->wd_remap_copybuf = B_FALSE;

			/* figure out how many pages this window takes up */
			poff = (sinfo->si_buf_offset + window->wd_offset) &
			    MMU_PAGEOFFSET;
			pcnt = mmu_btopr(window->wd_size + poff);
			ASSERT(((pidx - 1) + pcnt) <= sinfo->si_max_pages);

			/* map pages which require it */
			for (i = 1; i < pcnt; i++) {
				pmap = &dma->dp_pgmap[pidx];
				if (pmap->pm_uses_copybuf) {
					ASSERT(pmap->pm_mapped == B_FALSE);
					if (pmap->pm_pp != NULL) {
						pmap->pm_mapped = B_TRUE;
						i86_pp_map(pmap->pm_pp,
						    pmap->pm_kaddr);
					} else if (pmap->pm_vaddr != NULL) {
						pmap->pm_mapped = B_TRUE;
						i86_va_map(pmap->pm_vaddr,
						    sinfo->si_asp,
						    pmap->pm_kaddr);
					}
				}
				pidx++;
			}
		}
	}
#endif

	/* if the new window uses the copy buffer, sync it for the device */
	if ((window->wd_dosync) && (hp->dmai_rflags & DDI_DMA_WRITE)) {
		(void) rootnex_coredma_sync(dip, rdip, handle, 0, 0,
		    DDI_DMA_SYNC_FORDEV);
	}

	return (DDI_SUCCESS);
}

/*
 * rootnex_dma_win()
 *    called from ddi_dma_getwin()
 */
/*ARGSUSED*/
static int
rootnex_dma_win(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
    uint_t win, off_t *offp, size_t *lenp, ddi_dma_cookie_t *cookiep,
    uint_t *ccountp)
{
#if defined(__amd64) && !defined(__xpv)
	if (IOMMU_USED(rdip)) {
		return (iommulib_nexdma_win(dip, rdip, handle, win, offp, lenp,
		    cookiep, ccountp));
	}
#endif

	return (rootnex_coredma_win(dip, rdip, handle, win, offp, lenp,
	    cookiep, ccountp));
}
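
/*
 * For reference, the window interface is driven from a leaf driver doing
 * partial binds, roughly like this hypothetical loop (names invented for
 * illustration; see ddi_dma_numwin(9F) and ddi_dma_getwin(9F)):
 *
 *	(void) ddi_dma_numwin(handle, &nwin);
 *	for (win = 0; win < nwin; win++) {
 *		if (ddi_dma_getwin(handle, win, &off, &len, &cookie,
 *		    &ccount) != DDI_SUCCESS)
 *			break;
 *		program the device with the ccount cookies of this window;
 *	}
 *
 * Each ddi_dma_getwin() call lands in rootnex_coredma_win() above, which
 * syncs the old window, applies any trim, and re-maps the copy buffer as
 * needed.
 */
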
#if defined(__amd64) && !defined(__xpv)
/*ARGSUSED*/
static int
rootnex_coredma_hdl_setprivate(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle, void *v)
{
	ddi_dma_impl_t *hp;
	rootnex_dma_t *dma;

	hp = (ddi_dma_impl_t *)handle;
	dma = (rootnex_dma_t *)hp->dmai_private;
	dma->dp_iommu_private = v;

	return (DDI_SUCCESS);
}

/*ARGSUSED*/
static void *
rootnex_coredma_hdl_getprivate(dev_info_t *dip, dev_info_t *rdip,
    ddi_dma_handle_t handle)
{
	ddi_dma_impl_t *hp;
	rootnex_dma_t *dma;

	hp = (ddi_dma_impl_t *)handle;
	dma = (rootnex_dma_t *)hp->dmai_private;

	return (dma->dp_iommu_private);
}
#endif /* defined(__amd64) && !defined(__xpv) */

/*
 * ************************
 *  obsoleted dma routines
 * ************************
 */

/*
 * rootnex_dma_map()
 *    called from ddi_dma_setup()
 *    NO IOMMU in 32 bit mode. The routine below doesn't work in 64 bit mode.
 */
/* ARGSUSED */
static int
rootnex_dma_map(dev_info_t *dip, dev_info_t *rdip,
    struct ddi_dma_req *dmareq, ddi_dma_handle_t *handlep)
{
#if defined(__amd64)
	/*
	 * this interface is not supported in 64-bit x86 kernel. See comment
	 * in rootnex_dma_mctl()
	 */
	return (DDI_DMA_NORESOURCES);

#else /* 32-bit x86 kernel */
	ddi_dma_handle_t *lhandlep;
	ddi_dma_handle_t lhandle;
	ddi_dma_cookie_t cookie;
	ddi_dma_attr_t dma_attr;
	ddi_dma_lim_t *dma_lim;
	uint_t ccnt;
	int e;


	/*
	 * if the driver is just testing to see if it's possible to do the
	 * bind, we'll use local state. Otherwise, use the handle pointer
	 * passed in.
	 */
	if (handlep == NULL) {
		lhandlep = &lhandle;
	} else {
		lhandlep = handlep;
	}

	/* convert the limit structure to a dma_attr one */
	dma_lim = dmareq->dmar_limits;
	dma_attr.dma_attr_version = DMA_ATTR_V0;
	dma_attr.dma_attr_addr_lo = dma_lim->dlim_addr_lo;
	dma_attr.dma_attr_addr_hi = dma_lim->dlim_addr_hi;
	dma_attr.dma_attr_minxfer = dma_lim->dlim_minxfer;
	dma_attr.dma_attr_seg = dma_lim->dlim_adreg_max;
	dma_attr.dma_attr_count_max = dma_lim->dlim_ctreg_max;
	dma_attr.dma_attr_granular = dma_lim->dlim_granular;
	dma_attr.dma_attr_sgllen = dma_lim->dlim_sgllen;
	dma_attr.dma_attr_maxxfer = dma_lim->dlim_reqsize;
	dma_attr.dma_attr_burstsizes = dma_lim->dlim_burstsizes;
	dma_attr.dma_attr_align = MMU_PAGESIZE;
	dma_attr.dma_attr_flags = 0;

	e = rootnex_dma_allochdl(dip, rdip, &dma_attr, dmareq->dmar_fp,
	    dmareq->dmar_arg, lhandlep);
	if (e != DDI_SUCCESS) {
		return (e);
	}

	e = rootnex_dma_bindhdl(dip, rdip, *lhandlep, dmareq, &cookie, &ccnt);
	if ((e != DDI_DMA_MAPPED) && (e != DDI_DMA_PARTIAL_MAP)) {
		(void) rootnex_dma_freehdl(dip, rdip, *lhandlep);
		return (e);
	}

	/*
	 * if the driver is just testing to see if it's possible to do the
	 * bind, free up the local state and return the result.
	 */
	if (handlep == NULL) {
		(void) rootnex_dma_unbindhdl(dip, rdip, *lhandlep);
		(void) rootnex_dma_freehdl(dip, rdip, *lhandlep);
		if (e == DDI_DMA_MAPPED) {
			return (DDI_DMA_MAPOK);
		} else {
			return (DDI_DMA_NOMAPPING);
		}
	}

	return (e);
#endif /* defined(__amd64) */
}
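
/*
 * For reference, this path is only reachable through the obsolete
 * ddi_dma_setup(9F) interface; a hypothetical caller:
 *
 *	if (ddi_dma_setup(dip, &dmareq, &handle) != DDI_DMA_MAPPED)
 *		...
 *
 * New drivers should use ddi_dma_alloc_handle(9F) with
 * ddi_dma_buf_bind_handle(9F) or ddi_dma_addr_bind_handle(9F) instead.
 */
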
/*
 * rootnex_dma_mctl()
 *
 * No IOMMU in 32 bit mode. The routine below doesn't work in 64 bit mode.
 */
/* ARGSUSED */
static int
rootnex_dma_mctl(dev_info_t *dip, dev_info_t *rdip, ddi_dma_handle_t handle,
    enum ddi_dma_ctlops request, off_t *offp, size_t *lenp, caddr_t *objpp,
    uint_t cache_flags)
{
#if defined(__amd64)
	/*
	 * DDI_DMA_SMEM_ALLOC & DDI_DMA_IOPB_ALLOC were changed to have a
	 * common implementation in genunix, so they no longer have x86
	 * specific functionality which called into dma_ctl.
	 *
	 * The rest of the obsoleted interfaces were never supported in the
	 * 64-bit x86 kernel. For s10, the obsoleted DDI_DMA_SEGTOC interface
	 * was not ported to the x86 64-bit kernel due to serious x86 rootnex
	 * implementation issues.
	 *
	 * If you can't use DDI_DMA_SEGTOC; DDI_DMA_NEXTSEG, DDI_DMA_FREE, and
	 * DDI_DMA_NEXTWIN are useless since you can get to the cookie, so we
	 * reflect that now too...
	 *
	 * Even though we fixed the pointer problem in DDI_DMA_SEGTOC, we are
	 * not going to put this functionality into the 64-bit x86 kernel now.
	 * It wasn't ported to the 64-bit kernel for s10, no reason to change
	 * that in a future release.
	 */
	return (DDI_FAILURE);

#else /* 32-bit x86 kernel */
	ddi_dma_cookie_t lcookie;
	ddi_dma_cookie_t *cookie;
	rootnex_window_t *window;
	ddi_dma_impl_t *hp;
	rootnex_dma_t *dma;
	uint_t nwin;
	uint_t ccnt;
	size_t len;
	off_t off;
	int e;


	/*
	 * DDI_DMA_SEGTOC, DDI_DMA_NEXTSEG, and DDI_DMA_NEXTWIN are a little
	 * hacky since we're optimizing for the current interfaces and so we
	 * can cleanup the mess in genunix. Hopefully we will remove these
	 * obsoleted routines someday soon.
	 */
	switch (request) {

	case DDI_DMA_SEGTOC: /* ddi_dma_segtocookie() */
		hp = (ddi_dma_impl_t *)handle;
		cookie = (ddi_dma_cookie_t *)objpp;

		/*
		 * convert segment to cookie. We don't distinguish between the
		 * two :-)
		 */
		*cookie = *hp->dmai_cookie;
		*lenp = cookie->dmac_size;
		*offp = cookie->dmac_type & ~ROOTNEX_USES_COPYBUF;
		return (DDI_SUCCESS);

	case DDI_DMA_NEXTSEG: /* ddi_dma_nextseg() */
		hp = (ddi_dma_impl_t *)handle;
		dma = (rootnex_dma_t *)hp->dmai_private;

		if ((*lenp != NULL) && ((uintptr_t)*lenp != (uintptr_t)hp)) {
			return (DDI_DMA_STALE);
		}

		/* handle the case where we don't have any windows */
		if (dma->dp_window == NULL) {
			/*
			 * if seg == NULL, and we don't have any windows,
			 * return the first cookie in the sgl.
			 */
			if (*lenp == NULL) {
				dma->dp_current_cookie = 0;
				hp->dmai_cookie = dma->dp_cookies;
				*objpp = (caddr_t)handle;
				return (DDI_SUCCESS);

			/* if we have more cookies, go to the next cookie */
			} else {
				if ((dma->dp_current_cookie + 1) >=
				    dma->dp_sglinfo.si_sgl_size) {
					return (DDI_DMA_DONE);
				}
				dma->dp_current_cookie++;
				hp->dmai_cookie++;
				*objpp = (caddr_t)handle;
				return (DDI_SUCCESS);
			}
		}

		/* We have one or more windows */
		window = &dma->dp_window[dma->dp_current_win];

		/*
		 * if seg == NULL, return the first cookie in the current
		 * window
		 */
		if (*lenp == NULL) {
			dma->dp_current_cookie = 0;
			hp->dmai_cookie = window->wd_first_cookie;

		/*
		 * go to the next cookie in the window then see if we're done
		 * with this window.
		 */
		} else {
			if ((dma->dp_current_cookie + 1) >=
			    window->wd_cookie_cnt) {
				return (DDI_DMA_DONE);
			}
			dma->dp_current_cookie++;
			hp->dmai_cookie++;
		}

		*objpp = (caddr_t)handle;
		return (DDI_SUCCESS);

	case DDI_DMA_NEXTWIN: /* ddi_dma_nextwin() */
		hp = (ddi_dma_impl_t *)handle;
		dma = (rootnex_dma_t *)hp->dmai_private;

		if ((*offp != NULL) && ((uintptr_t)*offp != (uintptr_t)hp)) {
			return (DDI_DMA_STALE);
		}

		/* if win == NULL, return the first window in the bind */
		if (*offp == NULL) {
			nwin = 0;

		/*
		 * else, go to the next window then see if we're done with all
		 * the windows.
		 */
		} else {
			nwin = dma->dp_current_win + 1;
			if (nwin >= hp->dmai_nwin) {
				return (DDI_DMA_DONE);
			}
		}

		/* switch to the next window */
		e = rootnex_dma_win(dip, rdip, handle, nwin, &off, &len,
		    &lcookie, &ccnt);
		ASSERT(e == DDI_SUCCESS);
		if (e != DDI_SUCCESS) {
			return (DDI_DMA_STALE);
		}

		/* reset the cookie back to the first cookie in the window */
		if (dma->dp_window != NULL) {
			window = &dma->dp_window[dma->dp_current_win];
			hp->dmai_cookie = window->wd_first_cookie;
		} else {
			hp->dmai_cookie = dma->dp_cookies;
		}

		*objpp = (caddr_t)handle;
		return (DDI_SUCCESS);

	case DDI_DMA_FREE: /* ddi_dma_free() */
		(void) rootnex_dma_unbindhdl(dip, rdip, handle);
		(void) rootnex_dma_freehdl(dip, rdip, handle);
		if (rootnex_state->r_dvma_call_list_id) {
			ddi_run_callback(&rootnex_state->r_dvma_call_list_id);
		}
		return (DDI_SUCCESS);

	case DDI_DMA_IOPB_ALLOC:	/* get contiguous DMA-able memory */
	case DDI_DMA_SMEM_ALLOC:	/* get contiguous DMA-able memory */
		/* should never get here, handled in genunix */
		ASSERT(0);
		return (DDI_FAILURE);

	case DDI_DMA_KVADDR:
	case DDI_DMA_GETERR:
	case DDI_DMA_COFF:
		return (DDI_FAILURE);
	}

	return (DDI_FAILURE);
#endif /* defined(__amd64) */
}

/*
 * rootnex_fm_init()
 *    FMA init busop
 */
/* ARGSUSED */
static int
rootnex_fm_init(dev_info_t *dip, dev_info_t *tdip, int tcap,
    ddi_iblock_cookie_t *ibc)
{
	*ibc = rootnex_state->r_err_ibc;

	return (ddi_system_fmcap);
}

/*
 * rootnex_dma_check()
 *    Function called after a dma fault occurred to find out whether the
 *    fault address is associated with a driver that is able to handle faults
 *    and recover from faults.
 */
/* ARGSUSED */
static int
rootnex_dma_check(dev_info_t *dip, const void *handle, const void *addr,
    const void *not_used)
{
	rootnex_window_t *window;
	uint64_t start_addr;
	uint64_t fault_addr;
	ddi_dma_impl_t *hp;
	rootnex_dma_t *dma;
	uint64_t end_addr;
	size_t csize;
	int i;
	int j;


	/* The driver has to set DDI_DMA_FLAGERR to recover from dma faults */
	hp = (ddi_dma_impl_t *)handle;
	ASSERT(hp);

	dma = (rootnex_dma_t *)hp->dmai_private;

	/* Get the address that we need to search for */
	fault_addr = *(uint64_t *)addr;

	/*
	 * if we don't have any windows, we can just walk through all the
	 * cookies.
	 */
	if (dma->dp_window == NULL) {
		/* for each cookie */
		for (i = 0; i < dma->dp_sglinfo.si_sgl_size; i++) {
			/*
			 * if the faulted address is within the physical
			 * address range of the cookie, return
			 * DDI_FM_NONFATAL.
			 */
			if ((fault_addr >= dma->dp_cookies[i].dmac_laddress) &&
			    (fault_addr <= (dma->dp_cookies[i].dmac_laddress +
			    dma->dp_cookies[i].dmac_size))) {
				return (DDI_FM_NONFATAL);
			}
		}

		/* fault_addr not within this DMA handle */
		return (DDI_FM_UNKNOWN);
	}

	/* we have multiple windows, walk through each window */
	for (i = 0; i < hp->dmai_nwin; i++) {
		window = &dma->dp_window[i];

		/* Go through all the cookies in the window */
		for (j = 0; j < window->wd_cookie_cnt; j++) {

			start_addr = window->wd_first_cookie[j].dmac_laddress;
			csize = window->wd_first_cookie[j].dmac_size;

			/*
			 * if we are trimming the first cookie in the window,
			 * and this is the first cookie, adjust the start
			 * address and size of the cookie to account for the
			 * trim.
			 */
			if (window->wd_trim.tr_trim_first && (j == 0)) {
				start_addr = window->wd_trim.tr_first_paddr;
				csize = window->wd_trim.tr_first_size;
			}

			/*
			 * if we are trimming the last cookie in the window,
			 * and this is the last cookie, adjust the start
			 * address and size of the cookie to account for the
			 * trim.
			 */
			if (window->wd_trim.tr_trim_last &&
			    (j == (window->wd_cookie_cnt - 1))) {
				start_addr = window->wd_trim.tr_last_paddr;
				csize = window->wd_trim.tr_last_size;
			}

			end_addr = start_addr + csize;

			/*
			 * if the faulted address is within the physical
			 * address of the cookie, return DDI_FM_NONFATAL.
			 */
			if ((fault_addr >= start_addr) &&
			    (fault_addr <= end_addr)) {
				return (DDI_FM_NONFATAL);
			}
		}
	}

	/* fault_addr not within this DMA handle */
	return (DDI_FM_UNKNOWN);
}
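
/*
 * A DDI_DMA_FLAGERR-aware driver typically triggers this check through
 * ddi_check_dma_handle(9F) in its error path; a hypothetical sketch (names
 * invented for illustration):
 *
 *	ddi_fm_error_t de;
 *
 *	ddi_check_dma_handle(dip, xsp->dma_handle, &de);
 *	if (de.fme_status != DDI_FM_OK)
 *		retry or fault the transfer;
 */
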
/*
 * rootnex_quiesce()
 *    quiesce(9E) entry point.
 */
/*ARGSUSED*/
static int
rootnex_quiesce(dev_info_t *dip)
{
#if defined(__amd64) && !defined(__xpv)
	return (immu_quiesce());
#else
	return (DDI_SUCCESS);
#endif
}

#if defined(__xpv)
/*ARGSUSED*/
void
immu_physmem_update(uint64_t addr, uint64_t size)
{
	;
}
#endif