/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2006 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#ifndef	_SYS_DDIDMAREQ_H
#define	_SYS_DDIDMAREQ_H

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#ifdef	__cplusplus
extern "C" {
#endif
/*
 * Memory Objects
 *
 * Definitions of structures that can describe
 * an object that can be mapped for DMA.
 */

/*
 * Structure describing a virtual address
 */
struct v_address {
	caddr_t		v_addr;		/* base virtual address */
	struct as	*v_as;		/* pointer to address space */
	void		*v_priv;	/* priv data for shadow I/O */
};

/*
 * Structure describing a page-based address
 */
struct pp_address {
	/*
	 * A pointer to a circularly linked list of page structures.
	 */
	struct page	*pp_pp;
	uint_t		pp_offset;	/* offset within first page */
};

/*
 * Structure to describe a physical memory address.
 */
struct phy_address {
	ulong_t		p_addr;		/* base physical address */
	ulong_t		p_memtype;	/* memory type */
};

/*
 * A union of all of the above structures.
 *
 * This union describes the relationship between
 * the kind of an address description and an object.
 */
typedef union {
	struct v_address	virt_obj;	/* Some virtual address */
	struct pp_address	pp_obj;		/* Some page-based address */
	struct phy_address	phys_obj;	/* Some physical address */
} ddi_dma_aobj_t;
/*
 * DMA object types - used to select how the object
 * being mapped is being addressed by the IU.
 */
typedef enum {
	DMA_OTYP_VADDR = 0,	/* enforce starting value of zero */
	DMA_OTYP_PAGES,
	DMA_OTYP_PADDR,
	DMA_OTYP_BUFVADDR
} ddi_dma_atyp_t;
/*
 * A compact package to describe an object that is to be mapped for DMA.
 */
typedef struct {
	uint_t		dmao_size;	/* size, in bytes, of the object */
	ddi_dma_atyp_t	dmao_type;	/* type of object */
	ddi_dma_aobj_t	dmao_obj;	/* the object described */
} ddi_dma_obj_t;
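
/*
 * For illustration only (not part of this interface): a caller
 * describing a kernel virtual buffer for DMA might fill in the
 * object as sketched below. The names buf and len are hypothetical,
 * and a NULL v_as is assumed here to denote the kernel address space.
 *
 *	ddi_dma_obj_t obj;
 *
 *	obj.dmao_size = len;
 *	obj.dmao_type = DMA_OTYP_VADDR;
 *	obj.dmao_obj.virt_obj.v_addr = buf;
 *	obj.dmao_obj.virt_obj.v_as = NULL;
 *	obj.dmao_obj.virt_obj.v_priv = NULL;
 */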
/*
 * DMA addressing limits.
 *
 * This structure describes the constraints that a particular device's
 * DMA engine has to its parent so that the parent may correctly set
 * things up for a DMA mapping. Each parent may in turn modify the
 * constraints listed in a DMA request structure in order to describe
 * to its parent any changed or additional constraints. The rules
 * are that each parent may modify a constraint in order to further
 * constrain things (e.g., picking a more limited address range than
 * that permitted by the child), but that the parent may not ignore
 * a child's constraints.
 *
 * A particular constraint that we do *not* address is whether or not
 * a requested mapping is too large for a DMA engine's counter to
 * correctly track. It is still up to each driver to explicitly handle
 * transfers that are too large for its own hardware to deal with directly.
 *
 * The mapping routines that are cognizant of this structure will
 * copy any user defined limits structure if they need to modify
 * the fields (as alluded to above).
 *
 * A note as to how to define constraints:
 *
 * How you define the constraints for your device depends on how you
 * define your device. For example, you may have an SBus card with a
 * device on it that addresses only the bottom 16mb of virtual DMA space.
 * However, if the card also has ancillary circuitry that pulls the high 8
 * bits of address lines high, the more correct expression for your device
 * is that it addresses [0xff000000..0xffffffff] rather than [0..0x00ffffff].
 */
#if defined(__sparc)
typedef struct ddi_dma_lim {

	/*
	 * Low range of 32 bit addressing capability.
	 */
	uint_t	dlim_addr_lo;

	/*
	 * Upper inclusive bound of addressing capability. It is an
	 * inclusive boundary limit to allow for the addressing range
	 * [0..0xffffffff] to be specified in preference to [0..0].
	 */
	uint_t	dlim_addr_hi;

	/*
	 * Inclusive upper bound with which the DMA engine's counter acts as
	 * a register.
	 *
	 * This handles the case where an upper portion of a DMA address
	 * register is a latch instead of being a full 32 bit register
	 * (e.g., the upper 8 bits may remain constant while the lower
	 * 24 bits are the real address register).
	 *
	 * This essentially gives a hint about segment limitations
	 * to the mapping routines.
	 */
	uint_t	dlim_cntr_max;

	/*
	 * DMA burst sizes.
	 *
	 * At the time of a mapping request, this tag defines the possible
	 * DMA burst cycle sizes that the requestor's DMA engine can
	 * emit. The format of the data is binary encoding of burst sizes
	 * assumed to be powers of two. That is, if a DMA engine is capable
	 * of doing 1, 2, 4 and 16 byte transfers, the encoding would be 0x17.
	 *
	 * As the mapping request is handled by intervening nexi, the
	 * burstsizes value may be modified. Prior to enabling DMA for
	 * the specific device, the driver that owns the DMA engine should
	 * check (via ddi_dma_burstsizes(9F)) what the allowed burstsizes
	 * have become and program their DMA engine appropriately.
	 */
	uint_t	dlim_burstsizes;

	/*
	 * Minimum effective DMA transfer size, in units of bytes.
	 *
	 * This value specifies the minimum effective granularity of the
	 * DMA engine. It is distinct from dlim_burstsizes in that it
	 * describes the minimum amount of access a DMA transfer will
	 * effect. dlim_burstsizes describes in what electrical fashion
	 * the DMA engine might perform its accesses, while dlim_minxfer
	 * describes the minimum amount of memory that can be touched by
	 * the DMA transfer.
	 *
	 * As the mapping request is handled by intervening nexi, the
	 * dlim_minxfer value may be modified contingent upon the presence
	 * (and use) of I/O caches and DMA write buffers in between the
	 * DMA engine and the object that DMA is being performed on.
	 */
	uint_t	dlim_minxfer;

	/*
	 * Expected average data rate for this DMA engine
	 * while transferring data.
	 *
	 * This is used as a hint for a number of operations that might
	 * want to know the possible optimal latency requirements of this
	 * device. A value of zero will be interpreted as a 'do not care'.
	 */
	uint_t	dlim_dmaspeed;

} ddi_dma_lim_t;
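
/*
 * Purely as an illustration of the encoding described above (the
 * device and all values here are hypothetical): a DMA engine that can
 * address all of 32-bit space, latches the upper 8 address bits (so
 * its counter wraps every 16 Mb), bursts 1, 2, 4 or 16 bytes, and has
 * no minimum-transfer or speed constraints might be described as:
 *
 *	static ddi_dma_lim_t xx_dma_lim = {
 *		0x00000000,
 *		0xffffffff,
 *		0x00ffffff,
 *		0x17,
 *		1,
 *		0
 *	};
 */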
#elif defined(__x86)

/*
 * values for dlim_minxfer
 */
#define	DMA_UNIT_8	1
#define	DMA_UNIT_16	2
#define	DMA_UNIT_32	4

/*
 * Version number
 */
#define	DMALIM_VER0	((0x86000000) + 0)
typedef struct ddi_dma_lim {

	/*
	 * Low range of 32 bit addressing capability.
	 */
	uint_t	dlim_addr_lo;

	/*
	 * Upper inclusive bound of 32 bit addressing capability.
	 *
	 * The ISA nexus restricts this to 0x00ffffff, since this bus has
	 * only 24 address lines. This enforces the 16 Mb address limitation.
	 * The EISA nexus restricts this to 0xffffffff.
	 */
	uint_t	dlim_addr_hi;

	/*
	 * DMA engine counter not used; set to 0
	 */
	uint_t	dlim_cntr_max;

	/*
	 * DMA burst sizes not used; set to 1
	 */
	uint_t	dlim_burstsizes;

	/*
	 * Minimum effective DMA transfer size.
	 *
	 * This value specifies the minimum effective granularity of the
	 * DMA engine. It is distinct from dlim_burstsizes in that it
	 * describes the minimum amount of access a DMA transfer will
	 * effect. dlim_burstsizes describes in what electrical fashion
	 * the DMA engine might perform its accesses, while dlim_minxfer
	 * describes the minimum amount of memory that can be touched by
	 * the DMA transfer.
	 *
	 * This value also implies the required address alignment.
	 * The number of bytes transferred is assumed to be
	 *	dlim_minxfer * (DMA engine count)
	 *
	 * It should be set to DMA_UNIT_8, DMA_UNIT_16, or DMA_UNIT_32.
	 */
	uint_t	dlim_minxfer;

	/*
	 * Expected average data rate for this DMA engine
	 * while transferring data.
	 *
	 * This is used as a hint for a number of operations that might
	 * want to know the possible optimal latency requirements of this
	 * device. A value of zero will be interpreted as a 'do not care'.
	 */
	uint_t	dlim_dmaspeed;

	/*
	 * Version number of this structure
	 */
	uint_t	dlim_version;	/* = 0x86 << 24 + 0 */

	/*
	 * Inclusive upper bound with which the DMA engine's address acts as
	 * a register.
	 * This handles the case where an upper portion of a DMA address
	 * register is a latch instead of being a full 32 bit register
	 * (e.g., the upper 16 bits remain constant while the lower 16 bits
	 * are incremented for each DMA transfer).
	 *
	 * The ISA nexus restricts only 3rd-party DMA requests to 0x0000ffff,
	 * since the ISA DMA engine has a 16-bit register for low address and
	 * an 8-bit latch for high address. This enforces the first 64 Kb
	 * limitation (address boundary).
	 * The EISA nexus restricts only 3rd-party DMA requests to 0xffffffff.
	 */
	uint_t	dlim_adreg_max;

	/*
	 * Maximum transfer count that the DMA engine can handle.
	 *
	 * The ISA nexus restricts only 3rd-party DMA requests to 0x0000ffff,
	 * since the ISA DMA engine has a 16-bit register for counting.
	 * This enforces the other 64 Kb limitation (count size).
	 * The EISA nexus restricts only 3rd-party DMA requests to 0x00ffffff,
	 * since the EISA DMA engine has a 24-bit register for counting.
	 *
	 * This transfer count limitation is a per segment limitation.
	 * It can also be used to restrict the size of segments.
	 *
	 * This is used as a bit mask, so it must be a power of 2, minus 1.
	 */
	uint_t	dlim_ctreg_max;

	/*
	 * Granularity of DMA transfer, in units of bytes.
	 *
	 * Breakup sizes must be multiples of this value.
	 * If no scatter/gather capability is specified, then the size of
	 * each DMA transfer must be a multiple of this value.
	 *
	 * If there is scatter/gather capability, then a single cookie cannot
	 * be smaller in size than the minimum xfer value, and may be less
	 * than the granularity value. The total transfer length of the
	 * scatter/gather list should be a multiple of the granularity value;
	 * use dlim_sgllen to specify the length of the scatter/gather list.
	 *
	 * This value should be equal to the sector size of the device.
	 */
	uint_t	dlim_granular;

	/*
	 * Length of scatter/gather list
	 *
	 * This value specifies the number of segments or cookies that a DMA
	 * engine can consume in one i/o request to the device. For 3rd-party
	 * DMA that uses the bus nexus this should be set to 1. Devices with
	 * 1st-party DMA capability should specify the number of entries in
	 * its scatter/gather list. The breakup routine will ensure that each
	 * group of dlim_sgllen cookies (within a DMA window) will have a
	 * total transfer length that is a multiple of dlim_granular.
	 *
	 *	< 0  :	tbd
	 *	= 0  :	breakup is for PIO.
	 *	= 1  :	breakup is for DMA engine with no scatter/gather
	 *		capability.
	 *	>= 2 :	breakup is for DMA engine with scatter/gather
	 *		capability; value is max number of entries in list.
	 *
	 * Note that this list length is not dependent on the DMA window
	 * size. The size of the DMA window is based on resources consumed,
	 * such as intermediate buffers. Several s/g lists may exist within
	 * a window. But the end of a window does imply the end of the s/g
	 * list.
	 */
	short	dlim_sgllen;

	/*
	 * Size of device i/o request
	 *
	 * This value indicates the maximum number of bytes the device
	 * can transmit/receive for one i/o command. This limitation is
	 * significant only if it is less than (dlim_ctreg_max * dlim_sgllen).
	 */
	uint_t	dlim_reqsize;

} ddi_dma_lim_t;
#else
#error "struct ddi_dma_lim not defined for this architecture"
#endif	/* defined(__sparc) */
/*
 * Flags definition for dma_attr_flags
 */

/*
 * return physical DMA address on platforms
 * which support DVMA
 */
#define	DDI_DMA_FORCE_PHYSICAL		0x0100

/*
 * An error will be flagged for DMA data path errors
 */
#define	DDI_DMA_FLAGERR			0x200

/*
 * Enable relaxed ordering
 */
#define	DDI_DMA_RELAXED_ORDERING	0x400

#define	DMA_ATTR_V0		0
#define	DMA_ATTR_VERSION	DMA_ATTR_V0

typedef struct ddi_dma_attr {
	uint_t		dma_attr_version;	/* version number */
	uint64_t	dma_attr_addr_lo;	/* low DMA address range */
	uint64_t	dma_attr_addr_hi;	/* high DMA address range */
	uint64_t	dma_attr_count_max;	/* DMA counter register */
	uint64_t	dma_attr_align;		/* DMA address alignment */
	uint_t		dma_attr_burstsizes;	/* DMA burstsizes */
	uint32_t	dma_attr_minxfer;	/* min effective DMA size */
	uint64_t	dma_attr_maxxfer;	/* max DMA xfer size */
	uint64_t	dma_attr_seg;		/* segment boundary */
	int		dma_attr_sgllen;	/* s/g length */
	uint32_t	dma_attr_granular;	/* granularity of device */
	uint_t		dma_attr_flags;		/* Bus specific DMA flags */
} ddi_dma_attr_t;
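
/*
 * For illustration only: a driver for a (hypothetical) device that
 * can address the full 32-bit range, needs 8-byte alignment, bursts
 * 1, 2, 4 and 16 bytes, and supports up to 17 scatter/gather entries
 * on a 512-byte-sector disk might declare its attributes as below,
 * one initializer per field in declaration order; every value here
 * is illustrative, not prescriptive.
 *
 *	static ddi_dma_attr_t xx_dma_attr = {
 *		DMA_ATTR_V0,
 *		0x0000000000000000ull,
 *		0x00000000ffffffffull,
 *		0x00000000ffffffffull,
 *		0x8,
 *		0x17,
 *		0x1,
 *		0x00000000ffffffffull,
 *		0x00000000ffffffffull,
 *		17,
 *		512,
 *		0
 *	};
 */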
/*
 * Handy macro to set a maximum bit value (should be elsewhere)
 *
 * Clear off all bits lower than 'mybit' in val; if there are no
 * bits higher than or equal to mybit in val then set mybit. Assumes
 * mybit equals some power of 2 and is not zero.
 */
#define	maxbit(val, mybit)	\
	((val) & ~((mybit)-1)) | ((((val) & ~((mybit)-1)) == 0) ? (mybit) : 0)

/*
 * Handy macro to set a minimum bit value (should be elsewhere)
 *
 * Clear off all bits higher than 'mybit' in val; if there are no
 * bits lower than or equal to mybit in val then set mybit. Assumes
 * mybit equals some power of 2 and is not zero.
 */
#define	minbit(val, mybit)	\
	(((val)&((mybit)|((mybit)-1))) | \
	((((val) & ((mybit)-1)) == 0) ? (mybit) : 0))
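
/*
 * Worked examples (illustrative) with a burstsize-style bit mask:
 *
 *	maxbit(0x17, 0x4) == 0x14	bits 0x1 and 0x2 cleared
 *	maxbit(0x03, 0x4) == 0x04	no bit >= 0x4, so 0x4 is set
 *	minbit(0x17, 0x4) == 0x07	bit 0x10 cleared
 *	minbit(0x18, 0x4) == 0x04	no bit <= 0x4, so 0x4 is set
 */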
/*
 * Structure of a request to map an object for DMA.
 */
typedef struct ddi_dma_req {
	/*
	 * Caller's DMA engine constraints.
	 *
	 * If there are no particular constraints to the caller's DMA
	 * engine, this field may be set to NULL. The implementation DMA
	 * setup functions will then select a set of standard beginning
	 * constraints.
	 *
	 * In either case, as the mapping proceeds, the initial DMA
	 * constraints may become more restrictive as each intervening
	 * nexus might add further restrictions.
	 */
	ddi_dma_lim_t	*dmar_limits;

	/*
	 * Contains the information passed to the DMA mapping allocation
	 * routine(s).
	 */
	uint_t		dmar_flags;

	/*
	 * Callback function. A caller of the DMA mapping functions must
	 * specify by filling in this field whether the allocation routines
	 * can sleep awaiting mapping resources, must *not* sleep awaiting
	 * resources, or may *not* sleep awaiting any resources and must
	 * call the function specified by dmar_fp with the argument
	 * dmar_arg when resources might have become available at a future
	 * time.
	 */
	int		(*dmar_fp)();

	caddr_t		dmar_arg;	/* Callback function argument */

	/*
	 * Description of the object to be mapped for DMA.
	 * Must be last in this structure in case that the
	 * union ddi_dma_obj_t changes in the future.
	 */
	ddi_dma_obj_t	dmar_object;

} ddi_dma_req_t;
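
/*
 * For illustration only: a request to map an object for a
 * memory-to-device transfer, sleeping for resources if necessary,
 * might be filled in as sketched below. xx_dma_lim and obj are the
 * hypothetical declarations sketched earlier in this file; the flag
 * and callback values are defined just below.
 *
 *	ddi_dma_req_t req;
 *
 *	req.dmar_limits = &xx_dma_lim;
 *	req.dmar_flags = DDI_DMA_WRITE;
 *	req.dmar_fp = DDI_DMA_SLEEP;
 *	req.dmar_arg = NULL;
 *	req.dmar_object = obj;
 */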
/*
 * Defines for the DMA mapping allocation functions
 *
 * If a DMA callback function is set to anything other than the following
 * defines then it is assumed that one wishes a callback and is providing
 * a function address.
 */
#ifdef __STDC__
#define	DDI_DMA_DONTWAIT	((int (*)(caddr_t))0)
#define	DDI_DMA_SLEEP		((int (*)(caddr_t))1)
#else
#define	DDI_DMA_DONTWAIT	((int (*)())0)
#define	DDI_DMA_SLEEP		((int (*)())1)
#endif
/*
 * Return values from callback functions.
 */
#define	DDI_DMA_CALLBACK_RUNOUT	0
#define	DDI_DMA_CALLBACK_DONE	1

/*
 * Flag definitions for the allocation functions.
 */
#define	DDI_DMA_WRITE		0x0001	/* Direction memory --> IO */
#define	DDI_DMA_READ		0x0002	/* Direction IO --> memory */
#define	DDI_DMA_RDWR		(DDI_DMA_READ | DDI_DMA_WRITE)

/*
 * If possible, establish an MMU redzone after the mapping (to protect
 * against cheap DMA hardware that might get out of control).
 */
#define	DDI_DMA_REDZONE		0x0004

/*
 * A partial allocation is allowed. That is, if the size of the object
 * exceeds the mapping resources available, only map a portion of the
 * object and return status indicating that this took place. The caller
 * can use the functions ddi_dma_numwin(9F) and ddi_dma_getwin(9F) to
 * change, at a later point, the actual mapped portion of the object.
 *
 * The mapped portion begins at offset 0 of the object.
 */
#define	DDI_DMA_PARTIAL		0x0008

/*
 * Map the object for byte consistent access. Note that explicit
 * synchronization (via ddi_dma_sync(9F)) will still be required.
 * Consider this flag to be a hint to the mapping routines as to
 * the intended use of the mapping.
 *
 * Normal data transfers can usually be considered to use 'streaming'
 * modes of operation. They start at a specific point, transfer a
 * fairly large amount of data sequentially, and then stop (usually
 * on a well aligned boundary).
 *
 * Control mode data transfers (for memory resident device control blocks,
 * e.g., ethernet message descriptors) do not access memory in such
 * a streaming sequential fashion. Instead, they tend to modify a few
 * words or bytes, move around and maybe modify a few more.
 *
 * There are many machine implementations that make this difficult to
 * control in a generic and seamless fashion. Therefore, explicit synch-
 * ronization steps (via ddi_dma_sync(9F)) are still required (even if you
 * ask for a byte-consistent mapping) in order to make the view of the
 * memory object shared between a CPU and a DMA master consistent.
 * However, judicious use of this flag can give sufficient hints to
 * the mapping routines to attempt to pick the most efficacious mapping
 * such that the synchronization steps are as efficient as possible.
 */
#define	DDI_DMA_CONSISTENT	0x0010

/*
 * Some DMA mappings have to be 'exclusive' access.
 */
#define	DDI_DMA_EXCLUSIVE	0x0020

/*
 * Sequential, unidirectional, block-sized and block aligned transfers
 */
#define	DDI_DMA_STREAMING	0x0040

/*
 * Support for 64-bit SBus devices
 */
#define	DDI_DMA_SBUS_64BIT	0x2000
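
/*
 * For illustration only, a sketch of how these flags are typically
 * combined when binding memory via the DDI interfaces declared
 * elsewhere, here ddi_dma_addr_bind_handle(9F); handle, buf and len
 * are hypothetical:
 *
 *	ddi_dma_cookie_t cookie;
 *	uint_t ccount;
 *
 *	if (ddi_dma_addr_bind_handle(handle, NULL, buf, len,
 *	    DDI_DMA_READ | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
 *	    &cookie, &ccount) != DDI_DMA_MAPPED) {
 *		... handle the failure ...
 *	}
 */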
/*
 * Return values from the mapping allocation functions.
 */

/*
 * succeeded in satisfying request
 */
#define	DDI_DMA_MAPPED		0

/*
 * Mapping is legitimate (for advisory calls).
 */
#define	DDI_DMA_MAPOK		0

/*
 * Succeeded in mapping a portion of the request.
 */
#define	DDI_DMA_PARTIAL_MAP	1

/*
 * indicates end of window/segment list
 */
#define	DDI_DMA_DONE		2

/*
 * No resources to map request.
 */
#define	DDI_DMA_NORESOURCES	-1

/*
 * Can't establish a mapping to the specified object
 * (no specific reason).
 */
#define	DDI_DMA_NOMAPPING	-2

/*
 * The request is too big to be mapped.
 */
#define	DDI_DMA_TOOBIG		-3

/*
 * The request is too small to be mapped.
 */
#define	DDI_DMA_TOOSMALL	-4

/*
 * The request cannot be mapped because the object
 * is locked against mapping by another DMA master.
 */
#define	DDI_DMA_LOCKED		-5

/*
 * The request cannot be mapped because the limits
 * structure has bogus values.
 */
#define	DDI_DMA_BADLIMITS	-6

/*
 * the segment/window pointer is stale
 */
#define	DDI_DMA_STALE		-7

/*
 * The system can't allocate DMA resources using
 * the given DMA attributes
 */
#define	DDI_DMA_BADATTR		-8

/*
 * A DMA handle is already used for a DMA
 */
#define	DDI_DMA_INUSE		-9
/*
 * In order for the access to a memory object to be consistent
 * between a device and a CPU, the function ddi_dma_sync(9F)
 * must be called upon the DMA handle. The following flags
 * define whose view of the object should be made consistent.
 * There are different flags here because on different machines
 * there are definite performance implications of how long
 * such synchronization takes.
 *
 * DDI_DMA_SYNC_FORDEV makes all device references to the object
 * mapped by the DMA handle up to date. It should be used by a
 * driver after a cpu modifies the memory object (over the range
 * specified by the other arguments to the ddi_dma_sync(9F) call).
 *
 * DDI_DMA_SYNC_FORCPU makes all cpu references to the object
 * mapped by the DMA handle up to date. It should be used
 * by a driver after the receipt of data from the device to
 * the memory object is done (over the range specified by
 * the other arguments to the ddi_dma_sync(9F) call).
 *
 * If the only mapping that concerns the driver is one for the
 * kernel (such as memory allocated by ddi_iopb_alloc(9F)), the
 * flag DDI_DMA_SYNC_FORKERNEL can be used. This is a hint to the
 * system that if it can synchronize the kernel's view faster
 * than the CPU's view, it can do so; otherwise it acts the
 * same as DDI_DMA_SYNC_FORCPU. DDI_DMA_SYNC_FORKERNEL might
 * speed up the synchronization of kernel mappings in case of
 * non IO-coherent CPU caches.
 */
#define	DDI_DMA_SYNC_FORDEV	0x0
#define	DDI_DMA_SYNC_FORCPU	0x1
#define	DDI_DMA_SYNC_FORKERNEL	0x2
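
/*
 * For illustration only, the usual pattern with a receive buffer
 * (handle and length are hypothetical): sync before the CPU reads
 * data the device has DMAed into memory.
 *
 *	if (ddi_dma_sync(handle, 0, length,
 *	    DDI_DMA_SYNC_FORCPU) != DDI_SUCCESS) {
 *		... handle the failure ...
 *	}
 */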
/*
 * Bus nexus control functions for DMA
 */

/*
 * Control operations, defined here so that devops.h can be included
 * by drivers without having to include a specific SYSDDI implementation
 * header file.
 */

enum ddi_dma_ctlops {
	DDI_DMA_FREE,		/* free reference to object		*/
	DDI_DMA_SYNC,		/* synchronize cache references		*/
	DDI_DMA_HTOC,		/* return DMA cookie for handle		*/
	DDI_DMA_KVADDR,		/* return kernel virtual address	*/
	DDI_DMA_MOVWIN,		/* change mapped DMA window on object	*/
	DDI_DMA_REPWIN,		/* report current window on DMA object	*/
	DDI_DMA_GETERR,		/* report any post-transfer DMA errors	*/
	DDI_DMA_COFF,		/* convert a DMA cookie to an offset	*/
	DDI_DMA_NEXTWIN,	/* get next window within object	*/
	DDI_DMA_NEXTSEG,	/* get next segment within window	*/
	DDI_DMA_SEGTOC,		/* return segment DMA cookie		*/
	DDI_DMA_RESERVE,	/* reserve some DVMA range		*/
	DDI_DMA_RELEASE,	/* free preallocated DVMA range		*/
	DDI_DMA_RESETH,		/* reset next cookie ptr in handle	*/
	DDI_DMA_CKSYNC,		/* sync intermediate buffer to cookies	*/
	DDI_DMA_IOPB_ALLOC,	/* get contiguous DMA-able memory	*/
	DDI_DMA_IOPB_FREE,	/* return contiguous DMA-able memory	*/
	DDI_DMA_SMEM_ALLOC,	/* get contiguous DMA-able memory	*/
	DDI_DMA_SMEM_FREE,	/* return contiguous DMA-able memory	*/
	DDI_DMA_SET_SBUS64,	/* 64 bit SBus support			*/
	DDI_DMA_REMAP,		/* remap DMA buffers after relocation	*/

	/*
	 * control ops for DMA engine on motherboard
	 */
	DDI_DMA_E_ACQUIRE,	/* get channel for exclusive use	*/
	DDI_DMA_E_FREE,		/* release channel			*/
	DDI_DMA_E_1STPTY,	/* setup channel for 1st party DMA	*/
	DDI_DMA_E_GETCB,	/* get control block for DMA engine	*/
	DDI_DMA_E_FREECB,	/* free control blk for DMA engine	*/
	DDI_DMA_E_PROG,		/* program channel of DMA engine	*/
	DDI_DMA_E_SWSETUP,	/* setup channel for software control	*/
	DDI_DMA_E_SWSTART,	/* software operation of DMA channel	*/
	DDI_DMA_E_ENABLE,	/* enable channel of DMA engine		*/
	DDI_DMA_E_STOP,		/* stop a channel of DMA engine		*/
	DDI_DMA_E_DISABLE,	/* disable channel of DMA engine	*/
	DDI_DMA_E_GETCNT,	/* get remaining xfer count		*/
	DDI_DMA_E_GETLIM,	/* get DMA engine limits		*/
	DDI_DMA_E_GETATTR	/* get DMA engine attributes		*/
};
/*
 * Cache attribute flags:
 *
 * IOMEM_DATA_CACHED
 *   The CPU can cache the data it fetches and push it to memory at a later
 *   time. This is the default attribute and is used if no cache attributes
 *   are specified.
 *
 * IOMEM_DATA_UC_WR_COMBINE
 *   The CPU never caches the data, but writes may occur out of order or be
 *   combined. It implies re-ordering.
 *
 * IOMEM_DATA_UNCACHED
 *   The CPU never caches the data and has uncacheable access to memory.
 *   It also implies strict ordering.
 *
 * The cache attributes are mutually exclusive, and any combination of the
 * values leads to a failure. On the sparc architecture, only
 * IOMEM_DATA_CACHED is meaningful; the others lead to a failure.
 */
#define	IOMEM_DATA_CACHED		0x10000	/* data is cached */
#define	IOMEM_DATA_UC_WR_COMBINE	0x20000	/* data is not cached, but */
						/* writes might be combined */
#define	IOMEM_DATA_UNCACHED		0x40000	/* data is not cached. */
#define	IOMEM_DATA_MASK			0xF0000	/* cache attrs mask */
/*
 * Check if either uncacheable or write-combining is specified (the two
 * flags are mutually exclusive). This macro is used to override hat
 * attributes if either one is set.
 */
#define	OVERRIDE_CACHE_ATTR(attr)	\
	(attr & (IOMEM_DATA_UNCACHED | IOMEM_DATA_UC_WR_COMBINE))

/*
 * Get the cache attribute from flags. If there are no attributes,
 * return IOMEM_DATA_CACHED (default attribute).
 */
#define	IOMEM_CACHE_ATTR(flags)	\
	((flags & IOMEM_DATA_MASK) ? (flags & IOMEM_DATA_MASK) : \
	    IOMEM_DATA_CACHED)
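
/*
 * Worked example (illustrative): given flags of
 * (DDI_DMA_READ | IOMEM_DATA_UNCACHED), IOMEM_CACHE_ATTR(flags)
 * yields IOMEM_DATA_UNCACHED and OVERRIDE_CACHE_ATTR(flags) is
 * nonzero; with no cache attribute bits set, IOMEM_CACHE_ATTR(flags)
 * falls back to IOMEM_DATA_CACHED.
 */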
#ifdef	__cplusplus
}
#endif

#endif	/* _SYS_DDIDMAREQ_H */