/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2014 Garrett D'Amore <garrett@damore.org>
 */
/*
 * Copyright (c) 1990, 2010, Oracle and/or its affiliates. All rights reserved.
 */
#ifndef	_SYS_DDIDMAREQ_H
#define	_SYS_DDIDMAREQ_H
/*
 * Memory Objects
 *
 * Definitions of structures that can describe
 * an object that can be mapped for DMA.
 */
/*
 * Structure describing a virtual address.
 */
struct v_address {
	caddr_t		v_addr;		/* base virtual address */
	struct as	*v_as;		/* pointer to address space */
	void		*v_priv;	/* priv data for shadow I/O */
};
/*
 * Structure describing a page-based address.
 */
struct pp_address {
	/*
	 * A pointer to a circularly linked list of page structures.
	 */
	struct page *pp_pp;
	uint_t pp_offset;	/* offset within first page */
};
/*
 * Structure to describe a physical memory address.
 */
struct phy_address {
	ulong_t	p_addr;		/* base physical address */
	ulong_t	p_memtype;	/* memory type */
};
/*
 * Structure to describe an array of DVMA addresses.
 * Under normal circumstances, dv_nseg will be 1.
 * dvs_start is always page aligned.
 */
struct dvma_address {
	size_t dv_off;
	size_t dv_nseg;
	struct dvmaseg {
		uint64_t dvs_start;
		size_t dvs_len;
	} *dv_seg;
};
/*
 * A union of all of the above structures.
 *
 * This union describes the relationship between
 * the kind of an address description and an object.
 */
typedef union {
	struct v_address virt_obj;	/* Some virtual address */
	struct pp_address pp_obj;	/* Some page-based address */
	struct phy_address phys_obj;	/* Some physical address */
	struct dvma_address dvma_obj;
} ddi_dma_aobj_t;
/*
 * DMA object types - used to select how the object
 * being mapped is being addressed by the IU.
 */
typedef enum {
	DMA_OTYP_VADDR = 0,	/* enforce starting value of zero */
	DMA_OTYP_PAGES,
	DMA_OTYP_PADDR,
	DMA_OTYP_BUFVADDR,
	DMA_OTYP_DVADDR
} ddi_dma_atyp_t;
/*
 * A compact package to describe an object that is to be mapped for DMA.
 */
typedef struct {
	uint_t		dmao_size;	/* size, in bytes, of the object */
	ddi_dma_atyp_t	dmao_type;	/* type of object */
	ddi_dma_aobj_t	dmao_obj;	/* the object described */
} ddi_dma_obj_t;
/*
 * DMA addressing limits.
 *
 * This structure describes the constraints that a particular device's
 * DMA engine has to its parent so that the parent may correctly set
 * things up for a DMA mapping. Each parent may in turn modify the
 * constraints listed in a DMA request structure in order to describe
 * to its parent any changed or additional constraints. The rules
 * are that each parent may modify a constraint in order to further
 * constrain things (e.g., picking a more limited address range than
 * that permitted by the child), but that the parent may not ignore
 * a child's constraints.
 *
 * A particular constraint that we do *not* address is whether or not
 * a requested mapping is too large for a DMA engine's counter to
 * correctly track. It is still up to each driver to explicitly handle
 * transfers that are too large for its own hardware to deal with directly.
 *
 * The mapping routines that are cognizant of this structure will
 * copy any user-defined limits structure if they need to modify
 * the fields (as alluded to above).
 *
 * A note as to how to define constraints:
 *
 * How you define the constraints for your device depends on how you
 * define your device. For example, you may have an SBus card with a
 * device on it that addresses only the bottom 16 Mb of virtual DMA space.
 * However, if the card also has ancillary circuitry that pulls the high 8
 * bits of the address lines high, the more correct expression for your
 * device is that it addresses [0xff000000..0xffffffff] rather than
 * [0..0x00ffffff].
 */
#if defined(__sparc)
typedef struct ddi_dma_lim {

	/*
	 * Low range of 32 bit addressing capability.
	 */
	uint_t	dlim_addr_lo;

	/*
	 * Upper inclusive bound of addressing capability. It is an
	 * inclusive boundary limit to allow for the addressing range
	 * [0..0xffffffff] to be specified in preference to [0..0].
	 */
	uint_t	dlim_addr_hi;
	/*
	 * Inclusive upper bound with which the DMA engine's counter acts as
	 * a register.
	 *
	 * This handles the case where an upper portion of a DMA address
	 * register is a latch instead of being a full 32 bit register
	 * (e.g., the upper 8 bits may remain constant while the lower
	 * 24 bits are the real address register).
	 *
	 * This essentially gives a hint about segment limitations
	 * to the mapping routines.
	 */
	uint_t	dlim_cntr_max;
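
	/*
	 * Illustrative value (hypothetical hardware): for the latched
	 * case above, where only the low 24 bits of the address register
	 * actually increment, this field would be set as
	 *
	 *	dlim_cntr_max = 0x00FFFFFF;
	 *
	 * so that no single DMA segment is allowed to cross a 16 Mb
	 * boundary.
	 */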
	/*
	 * DMA burst sizes.
	 *
	 * At the time of a mapping request, this tag defines the possible
	 * DMA burst cycle sizes that the requestor's DMA engine can
	 * emit. The format of the data is binary encoding of burst sizes
	 * assumed to be powers of two. That is, if a DMA engine is capable
	 * of doing 1, 2, 4 and 16 byte transfers, the encoding would be 0x17.
	 *
	 * As the mapping request is handled by intervening nexi, the
	 * burstsizes value may be modified. Prior to enabling DMA for
	 * the specific device, the driver that owns the DMA engine should
	 * check (via ddi_dma_burstsizes(9F)) what the allowed burstsizes
	 * have become and program its DMA engine appropriately.
	 */
	uint_t	dlim_burstsizes;
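
	/*
	 * Worked example of the encoding (hypothetical engine): a DMA
	 * engine capable of 1-, 2-, 4- and 16-byte bursts would set
	 *
	 *	dlim_burstsizes = (1 << 0) | (1 << 1) | (1 << 2) | (1 << 4)
	 *			= 0x17
	 *
	 * i.e., bit N set means that bursts of 2^N bytes are supported.
	 */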
	/*
	 * Minimum effective DMA transfer size, in units of bytes.
	 *
	 * This value specifies the minimum effective granularity of the
	 * DMA engine. It is distinct from dlim_burstsizes in that it
	 * describes the minimum amount of access a DMA transfer will
	 * effect. dlim_burstsizes describes in what electrical fashion
	 * the DMA engine might perform its accesses, while dlim_minxfer
	 * describes the minimum amount of memory that can be touched by
	 * the DMA transfer.
	 *
	 * As the mapping request is handled by intervening nexi, the
	 * dlim_minxfer value may be modified contingent upon the presence
	 * (and use) of I/O caches and DMA write buffers in between the
	 * DMA engine and the object that DMA is being performed on.
	 */
	uint_t	dlim_minxfer;
	/*
	 * Expected average data rate for this DMA engine
	 * while transferring data.
	 *
	 * This is used as a hint for a number of operations that might
	 * want to know the possible optimal latency requirements of this
	 * device. A value of zero will be interpreted as a 'do not care'.
	 */
	uint_t	dlim_dmaspeed;

} ddi_dma_lim_t;
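
/*
 * A minimal sketch of a filled-in limits structure (all values are
 * hypothetical; a real driver must derive them from its own hardware):
 *
 *	static ddi_dma_lim_t example_lim = {
 *		0x00000000,		dlim_addr_lo
 *		0xFFFFFFFF,		dlim_addr_hi
 *		0x00FFFFFF,		dlim_cntr_max (24-bit counter)
 *		0x17,			dlim_burstsizes (1, 2, 4, 16 byte)
 *		4,			dlim_minxfer
 *		0			dlim_dmaspeed (do not care)
 *	};
 */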
#elif defined(__x86)

/*
 * Values for dlim_minxfer.
 */
#define	DMA_UNIT_8	1
#define	DMA_UNIT_16	2
#define	DMA_UNIT_32	4

/*
 * Version number.
 */
#define	DMALIM_VER0	((0x86000000) + 0)
typedef struct ddi_dma_lim {

	/*
	 * Low range of 32 bit addressing capability.
	 */
	uint_t	dlim_addr_lo;
	/*
	 * Upper inclusive bound of 32 bit addressing capability.
	 *
	 * The ISA nexus restricts this to 0x00ffffff, since this bus has
	 * only 24 address lines. This enforces the 16 Mb address limitation.
	 * The EISA nexus restricts this to 0xffffffff.
	 */
	uint_t	dlim_addr_hi;
	/*
	 * DMA engine counter not used; set to 0.
	 */
	uint_t	dlim_cntr_max;
	/*
	 * DMA burst sizes not used; set to 1.
	 */
	uint_t	dlim_burstsizes;
	/*
	 * Minimum effective DMA transfer size.
	 *
	 * This value specifies the minimum effective granularity of the
	 * DMA engine. It is distinct from dlim_burstsizes in that it
	 * describes the minimum amount of access a DMA transfer will
	 * effect. dlim_burstsizes describes in what electrical fashion
	 * the DMA engine might perform its accesses, while dlim_minxfer
	 * describes the minimum amount of memory that can be touched by
	 * the DMA transfer.
	 *
	 * This value also implies the required address alignment.
	 * The number of bytes transferred is assumed to be
	 *	dlim_minxfer * (DMA engine count)
	 *
	 * It should be set to DMA_UNIT_8, DMA_UNIT_16, or DMA_UNIT_32.
	 */
	uint_t	dlim_minxfer;
	/*
	 * Expected average data rate for this DMA engine
	 * while transferring data.
	 *
	 * This is used as a hint for a number of operations that might
	 * want to know the possible optimal latency requirements of this
	 * device. A value of zero will be interpreted as a 'do not care'.
	 */
	uint_t	dlim_dmaspeed;
	/*
	 * Version number of this structure.
	 */
	uint_t	dlim_version;	/* = 0x86 << 24 + 0 */
	/*
	 * Inclusive upper bound with which the DMA engine's address acts as
	 * a register.
	 *
	 * This handles the case where an upper portion of a DMA address
	 * register is a latch instead of being a full 32 bit register
	 * (e.g., the upper 16 bits remain constant while the lower 16 bits
	 * are incremented for each DMA transfer).
	 *
	 * The ISA nexus restricts only 3rd-party DMA requests to 0x0000ffff,
	 * since the ISA DMA engine has a 16-bit register for low address and
	 * an 8-bit latch for high address. This enforces the first 64 Kb
	 * limitation (address boundary).
	 * The EISA nexus restricts only 3rd-party DMA requests to 0xffffffff.
	 */
	uint_t	dlim_adreg_max;
	/*
	 * Maximum transfer count that the DMA engine can handle.
	 *
	 * The ISA nexus restricts only 3rd-party DMA requests to 0x0000ffff,
	 * since the ISA DMA engine has a 16-bit register for counting.
	 * This enforces the other 64 Kb limitation (count size).
	 * The EISA nexus restricts only 3rd-party DMA requests to 0x00ffffff,
	 * since the EISA DMA engine has a 24-bit register for counting.
	 *
	 * This transfer count limitation is a per-segment limitation.
	 * It can also be used to restrict the size of segments.
	 *
	 * This is used as a bit mask, so it must be a power of 2, minus 1.
	 */
	uint_t	dlim_ctreg_max;
	/*
	 * Granularity of DMA transfer, in units of bytes.
	 *
	 * Breakup sizes must be multiples of this value.
	 * If no scatter/gather capability is specified, then the size of
	 * each DMA transfer must be a multiple of this value.
	 *
	 * If there is scatter/gather capability, then a single cookie cannot
	 * be smaller in size than the minimum xfer value, and may be less
	 * than the granularity value. The total transfer length of the
	 * scatter/gather list should be a multiple of the granularity value;
	 * use dlim_sgllen to specify the length of the scatter/gather list.
	 *
	 * This value should be equal to the sector size of the device.
	 */
	uint_t	dlim_granular;
	/*
	 * Length of scatter/gather list.
	 *
	 * This value specifies the number of segments or cookies that a DMA
	 * engine can consume in one i/o request to the device. For 3rd-party
	 * DMA that uses the bus nexus this should be set to 1. Devices with
	 * 1st-party DMA capability should specify the number of entries in
	 * their scatter/gather list. The breakup routine will ensure that
	 * each group of dlim_sgllen cookies (within a DMA window) will have
	 * a total transfer length that is a multiple of dlim_granular.
	 *
	 *	= 0  :	breakup is for PIO.
	 *	= 1  :	breakup is for DMA engine with no scatter/gather
	 *		capability.
	 *	>= 2 :	breakup is for DMA engine with scatter/gather
	 *		capability; value is max number of entries in list.
	 *
	 * Note that this list length is not dependent on the DMA window
	 * size. The size of the DMA window is based on resources consumed,
	 * such as intermediate buffers. Several s/g lists may exist within
	 * a window. But the end of a window does imply the end of the s/g
	 * list.
	 */
	short	dlim_sgllen;
	/*
	 * Size of device i/o request.
	 *
	 * This value indicates the maximum number of bytes the device
	 * can transmit/receive for one i/o command. This limitation is
	 * significant only if it is less than (dlim_ctreg_max * dlim_sgllen).
	 */
	uint_t	dlim_reqsize;

} ddi_dma_lim_t;
#else
#error "struct ddi_dma_lim not defined for this architecture"
#endif	/* defined(__sparc) */
/*
 * Flags definition for dma_attr_flags
 */

/*
 * Return physical DMA address on platforms
 * which support DVMA.
 */
#define	DDI_DMA_FORCE_PHYSICAL		0x0100
/*
 * An error will be flagged for DMA data path errors.
 */
#define	DDI_DMA_FLAGERR			0x200
/*
 * Enable relaxed ordering.
 */
#define	DDI_DMA_RELAXED_ORDERING	0x400
/*
 * Consolidation-private x86 only flag which will cause a bounce buffer
 * (paddr < dma_attr_seg) to be used if the buffer passed to the bind
 * operation contains pages both above and below dma_attr_seg. If this flag
 * is set, dma_attr_seg must be <= dma_attr_addr_hi.
 */
#define	_DDI_DMA_BOUNCE_ON_SEG		0x8000
#define	DMA_ATTR_V0		0
#define	DMA_ATTR_VERSION	DMA_ATTR_V0
typedef struct ddi_dma_attr {
	uint_t		dma_attr_version;	/* version number */
	uint64_t	dma_attr_addr_lo;	/* low DMA address range */
	uint64_t	dma_attr_addr_hi;	/* high DMA address range */
	uint64_t	dma_attr_count_max;	/* DMA counter register */
	uint64_t	dma_attr_align;		/* DMA address alignment */
	uint_t		dma_attr_burstsizes;	/* DMA burstsizes */
	uint32_t	dma_attr_minxfer;	/* min effective DMA size */
	uint64_t	dma_attr_maxxfer;	/* max DMA xfer size */
	uint64_t	dma_attr_seg;		/* segment boundary */
	int		dma_attr_sgllen;	/* s/g length */
	uint32_t	dma_attr_granular;	/* granularity of device */
	uint_t		dma_attr_flags;		/* Bus specific DMA flags */
} ddi_dma_attr_t;
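
/*
 * A minimal sketch of a DMA attribute initializer (every value here is
 * hypothetical and must be replaced with the real constraints of the
 * device being supported):
 *
 *	static ddi_dma_attr_t example_dma_attr = {
 *		DMA_ATTR_V0,		dma_attr_version
 *		0x0000000000000000ull,	dma_attr_addr_lo
 *		0xFFFFFFFFFFFFFFFFull,	dma_attr_addr_hi
 *		0x00FFFFFF,		dma_attr_count_max
 *		4,			dma_attr_align
 *		0x17,			dma_attr_burstsizes
 *		1,			dma_attr_minxfer
 *		0x00FFFFFF,		dma_attr_maxxfer
 *		0xFFFFFFFF,		dma_attr_seg
 *		17,			dma_attr_sgllen
 *		512,			dma_attr_granular
 *		0			dma_attr_flags
 *	};
 */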
/*
 * Handy macro to set a maximum bit value (should be elsewhere).
 *
 * Clear off all bits lower than 'mybit' in val; if there are no
 * bits higher than or equal to mybit in val then set mybit. Assumes
 * mybit equals some power of 2 and is not zero.
 */
#define	maxbit(val, mybit)	\
	((val) & ~((mybit)-1)) | ((((val) & ~((mybit)-1)) == 0) ? (mybit) : 0)
/*
 * Handy macro to set a minimum bit value (should be elsewhere).
 *
 * Clear off all bits higher than 'mybit' in val; if there are no
 * bits lower than or equal to mybit in val then set mybit. Assumes
 * mybit equals some power of 2 and is not zero.
 */
#define	minbit(val, mybit)	\
	(((val) & ((mybit)|((mybit)-1))) | \
	((((val) & ((mybit)-1)) == 0) ? (mybit) : 0))
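
/*
 * Worked examples (hypothetical values): with val = 0x16 (bursts of
 * 2, 4 and 16 bytes) and mybit = 0x4,
 *
 *	maxbit(0x16, 0x4) == 0x14	(bits below 0x4 cleared)
 *	minbit(0x16, 0x4) == 0x06	(bits above 0x4 cleared)
 *
 * and when val has no bits in the retained range, mybit itself is
 * returned, e.g. maxbit(0x3, 0x8) == 0x8.
 */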
/*
 * Structure of a request to map an object for DMA.
 */
typedef struct ddi_dma_req {
	/*
	 * Caller's DMA engine constraints.
	 *
	 * If there are no particular constraints to the caller's DMA
	 * engine, this field may be set to NULL. The implementation DMA
	 * setup functions will then select a set of standard beginning
	 * constraints.
	 *
	 * In either case, as the mapping proceeds, the initial DMA
	 * constraints may become more restrictive as each intervening
	 * nexus might add further restrictions.
	 */
	ddi_dma_lim_t	*dmar_limits;
	/*
	 * Contains the information passed to the DMA mapping allocation
	 * routine(s).
	 */
	uint_t		dmar_flags;
	/*
	 * Callback function. A caller of the DMA mapping functions must
	 * specify by filling in this field whether the allocation routines
	 * can sleep awaiting mapping resources, must *not* sleep awaiting
	 * resources, or may *not* sleep awaiting any resources and must
	 * call the function specified by dmar_fp with the argument
	 * dmar_arg when resources might have become available at a future
	 * time.
	 */
	int		(*dmar_fp)();

	caddr_t		dmar_arg;	/* Callback function argument */
	/*
	 * Description of the object to be mapped for DMA.
	 * Must be last in this structure in case
	 * the union ddi_dma_obj_t changes in the future.
	 */
	ddi_dma_obj_t	dmar_object;

} ddi_dma_req_t;
/*
 * Defines for the DMA mapping allocation functions.
 *
 * If a DMA callback function is set to anything other than the following
 * defines then it is assumed that one wishes a callback and is providing
 * a function address.
 */
#define	DDI_DMA_DONTWAIT	((int (*)(caddr_t))0)
#define	DDI_DMA_SLEEP		((int (*)(caddr_t))1)
/*
 * Return values from callback functions.
 */
#define	DDI_DMA_CALLBACK_RUNOUT	0
#define	DDI_DMA_CALLBACK_DONE	1
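
/*
 * A minimal sketch of a resource callback (all names other than the
 * DDI defines are hypothetical). The callback retries whatever
 * operation previously failed for want of resources and returns
 * DDI_DMA_CALLBACK_DONE when it no longer needs to be called back,
 * or DDI_DMA_CALLBACK_RUNOUT to be called again later:
 *
 *	static int
 *	xx_dma_callback(caddr_t arg)
 *	{
 *		struct xx_softc *sc = (struct xx_softc *)arg;
 *
 *		if (xx_retry_binding(sc) == 0)
 *			return (DDI_DMA_CALLBACK_DONE);
 *		return (DDI_DMA_CALLBACK_RUNOUT);
 *	}
 */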
/*
 * Flag definitions for the allocation functions.
 */
#define	DDI_DMA_WRITE		0x0001	/* Direction memory --> IO */
#define	DDI_DMA_READ		0x0002	/* Direction IO --> memory */
#define	DDI_DMA_RDWR		(DDI_DMA_READ | DDI_DMA_WRITE)
/*
 * If possible, establish an MMU redzone after the mapping (to protect
 * against cheap DMA hardware that might get out of control).
 */
#define	DDI_DMA_REDZONE		0x0004
/*
 * A partial allocation is allowed. That is, if the size of the object
 * exceeds the mapping resources available, only map a portion of the
 * object and return status indicating that this took place. The caller
 * can use the functions ddi_dma_numwin(9F) and ddi_dma_getwin(9F) to
 * change, at a later point, the actual mapped portion of the object.
 *
 * The mapped portion begins at offset 0 of the object.
 */
#define	DDI_DMA_PARTIAL		0x0008
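
/*
 * A sketch of consuming a partial mapping by walking its windows with
 * ddi_dma_numwin(9F) and ddi_dma_getwin(9F) (handle setup and error
 * handling omitted; names other than the DDI calls are hypothetical):
 *
 *	uint_t nwin, win, ccount;
 *	off_t off;
 *	size_t len;
 *	ddi_dma_cookie_t cookie;
 *
 *	(void) ddi_dma_numwin(handle, &nwin);
 *	for (win = 0; win < nwin; win++) {
 *		if (ddi_dma_getwin(handle, win, &off, &len,
 *		    &cookie, &ccount) != DDI_SUCCESS)
 *			break;
 *		(program the device with this window's cookies)
 *	}
 */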
/*
 * Map the object for byte-consistent access. Note that explicit
 * synchronization (via ddi_dma_sync(9F)) will still be required.
 * Consider this flag to be a hint to the mapping routines as to
 * the intended use of the mapping.
 *
 * Normal data transfers can usually be considered to use 'streaming'
 * modes of operation. They start at a specific point, transfer a
 * fairly large amount of data sequentially, and then stop (usually
 * on a well aligned boundary).
 *
 * Control mode data transfers (for memory resident device control blocks,
 * e.g., ethernet message descriptors) do not access memory in such
 * a streaming sequential fashion. Instead, they tend to modify a few
 * words or bytes, move around and maybe modify a few more.
 *
 * There are many machine implementations that make this difficult to
 * control in a generic and seamless fashion. Therefore, explicit synch-
 * ronization steps (via ddi_dma_sync(9F)) are still required (even if you
 * ask for a byte-consistent mapping) in order to make the view of the
 * memory object shared between a CPU and a DMA master consistent.
 * However, judicious use of this flag can give sufficient hints to
 * the mapping routines to attempt to pick the most efficacious mapping
 * such that the synchronization steps are as efficient as possible.
 */
#define	DDI_DMA_CONSISTENT	0x0010
/*
 * Some DMA mappings have to be 'exclusive' access.
 */
#define	DDI_DMA_EXCLUSIVE	0x0020
/*
 * Sequential, unidirectional, block-sized and block-aligned transfers.
 */
#define	DDI_DMA_STREAMING	0x0040
/*
 * Support for 64-bit SBus devices.
 */
#define	DDI_DMA_SBUS_64BIT	0x2000
/*
 * Return values from the mapping allocation functions.
 */

/*
 * Succeeded in satisfying request.
 */
#define	DDI_DMA_MAPPED		0

/*
 * Mapping is legitimate (for advisory calls).
 */
#define	DDI_DMA_MAPOK		0

/*
 * Succeeded in mapping a portion of the request.
 */
#define	DDI_DMA_PARTIAL_MAP	1

/*
 * Indicates end of window/segment list.
 */
#define	DDI_DMA_DONE		2

/*
 * No resources to map request.
 */
#define	DDI_DMA_NORESOURCES	-1

/*
 * Can't establish a mapping to the specified object
 * (no specific reason).
 */
#define	DDI_DMA_NOMAPPING	-2

/*
 * The request is too big to be mapped.
 */
#define	DDI_DMA_TOOBIG		-3

/*
 * The request is too small to be mapped.
 */
#define	DDI_DMA_TOOSMALL	-4

/*
 * The request cannot be mapped because the object
 * is locked against mapping by another DMA master.
 */
#define	DDI_DMA_LOCKED		-5

/*
 * The request cannot be mapped because the limits
 * structure has bogus values.
 */
#define	DDI_DMA_BADLIMITS	-6

/*
 * The segment/window pointer is stale.
 */
#define	DDI_DMA_STALE		-7

/*
 * The system can't allocate DMA resources using
 * the given DMA attributes.
 */
#define	DDI_DMA_BADATTR		-8

/*
 * A DMA handle is already used for a DMA.
 */
#define	DDI_DMA_INUSE		-9

/*
 * DVMA disabled or not supported; use physical DMA.
 */
#define	DDI_DMA_USE_PHYSICAL	-10
/*
 * In order for the access to a memory object to be consistent
 * between a device and a CPU, the function ddi_dma_sync(9F)
 * must be called upon the DMA handle. The following flags
 * define whose view of the object should be made consistent.
 * There are different flags here because on different machines
 * there are definite performance implications of how long
 * such synchronization takes.
 *
 * DDI_DMA_SYNC_FORDEV makes all device references to the object
 * mapped by the DMA handle up to date. It should be used by a
 * driver after a cpu modifies the memory object (over the range
 * specified by the other arguments to the ddi_dma_sync(9F) call).
 *
 * DDI_DMA_SYNC_FORCPU makes all cpu references to the object
 * mapped by the DMA handle up to date. It should be used
 * by a driver after the receipt of data from the device to
 * the memory object is done (over the range specified by
 * the other arguments to the ddi_dma_sync(9F) call).
 *
 * If the only mapping that concerns the driver is one for the
 * kernel (such as memory allocated by ddi_iopb_alloc(9F)), the
 * flag DDI_DMA_SYNC_FORKERNEL can be used. This is a hint to the
 * system that if it can synchronize the kernel's view faster
 * than the CPU's view, it can do so; otherwise it acts the
 * same as DDI_DMA_SYNC_FORCPU. DDI_DMA_SYNC_FORKERNEL might
 * speed up the synchronization of kernel mappings in case of
 * non IO-coherent CPU caches.
 */
#define	DDI_DMA_SYNC_FORDEV	0x0
#define	DDI_DMA_SYNC_FORCPU	0x1
#define	DDI_DMA_SYNC_FORKERNEL	0x2
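
/*
 * Typical usage sketch (handle, offsets and lengths are the driver's
 * own): after the CPU fills in a descriptor, push it toward the
 * device; after the device deposits receive data, make the CPU's
 * view current before reading it:
 *
 *	(void) ddi_dma_sync(handle, desc_off, desc_len,
 *	    DDI_DMA_SYNC_FORDEV);
 *	...
 *	(void) ddi_dma_sync(handle, buf_off, buf_len,
 *	    DDI_DMA_SYNC_FORCPU);
 */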
/*
 * Bus nexus control functions for DMA.
 */

/*
 * Control operations, defined here so that devops.h can be included
 * by drivers without having to include a specific SYSDDI implementation
 * header file.
 */
enum ddi_dma_ctlops {
	DDI_DMA_FREE,		/* obsolete - do not use */
	DDI_DMA_SYNC,		/* obsolete - do not use */
	DDI_DMA_HTOC,		/* obsolete - do not use */
	DDI_DMA_KVADDR,		/* obsolete - do not use */
	DDI_DMA_MOVWIN,		/* obsolete - do not use */
	DDI_DMA_REPWIN,		/* obsolete - do not use */
	DDI_DMA_GETERR,		/* obsolete - do not use */
	DDI_DMA_COFF,		/* obsolete - do not use */
	DDI_DMA_NEXTWIN,	/* obsolete - do not use */
	DDI_DMA_NEXTSEG,	/* obsolete - do not use */
	DDI_DMA_SEGTOC,		/* obsolete - do not use */
	DDI_DMA_RESERVE,	/* reserve some DVMA range */
	DDI_DMA_RELEASE,	/* free preallocated DVMA range */
	DDI_DMA_RESETH,		/* obsolete - do not use */
	DDI_DMA_CKSYNC,		/* obsolete - do not use */
	DDI_DMA_IOPB_ALLOC,	/* obsolete - do not use */
	DDI_DMA_IOPB_FREE,	/* obsolete - do not use */
	DDI_DMA_SMEM_ALLOC,	/* obsolete - do not use */
	DDI_DMA_SMEM_FREE,	/* obsolete - do not use */
	DDI_DMA_SET_SBUS64,	/* 64 bit SBus support */
	DDI_DMA_REMAP,		/* remap DVMA buffers after relocation */

	/*
	 * Control ops for DMA engine on motherboard.
	 */
	DDI_DMA_E_ACQUIRE,	/* get channel for exclusive use */
	DDI_DMA_E_FREE,		/* release channel */
	DDI_DMA_E_1STPTY,	/* setup channel for 1st party DMA */
	DDI_DMA_E_GETCB,	/* get control block for DMA engine */
	DDI_DMA_E_FREECB,	/* free control blk for DMA engine */
	DDI_DMA_E_PROG,		/* program channel of DMA engine */
	DDI_DMA_E_SWSETUP,	/* setup channel for software control */
	DDI_DMA_E_SWSTART,	/* software operation of DMA channel */
	DDI_DMA_E_ENABLE,	/* enable channel of DMA engine */
	DDI_DMA_E_STOP,		/* stop a channel of DMA engine */
	DDI_DMA_E_DISABLE,	/* disable channel of DMA engine */
	DDI_DMA_E_GETCNT,	/* get remaining xfer count */
	DDI_DMA_E_GETLIM,	/* obsolete - do not use */
	DDI_DMA_E_GETATTR	/* get DMA engine attributes */
};
/*
 * Cache attribute flags:
 *
 * IOMEM_DATA_CACHED
 *	The CPU can cache the data it fetches and push it to memory at a later
 *	time. This is the default attribute and is used if no cache attribute
 *	is specified.
 *
 * IOMEM_DATA_UC_WR_COMBINE
 *	The CPU never caches the data but writes may occur out of order or be
 *	combined. It implies re-ordering.
 *
 * IOMEM_DATA_UNCACHED
 *	The CPU never caches the data and has uncacheable access to memory.
 *	It also implies strict ordering.
 *
 * The cache attributes are mutually exclusive, and any combination of the
 * values leads to a failure. On the sparc architecture, only
 * IOMEM_DATA_CACHED is meaningful, and the others lead to a failure.
 */
#define	IOMEM_DATA_CACHED		0x10000	/* data is cached */
#define	IOMEM_DATA_UC_WR_COMBINE	0x20000	/* data is not cached, but */
						/* writes might be combined */
#define	IOMEM_DATA_UNCACHED		0x40000	/* data is not cached */
#define	IOMEM_DATA_MASK			0xF0000	/* cache attrs mask */
/*
 * Check if either uncacheable or write-combining is specified (these flags
 * are mutually exclusive). This macro is used to override hat attributes
 * if either one is set.
 */
#define	OVERRIDE_CACHE_ATTR(attr)	\
	(attr & (IOMEM_DATA_UNCACHED | IOMEM_DATA_UC_WR_COMBINE))
/*
 * Get the cache attribute from flags. If there is no attribute,
 * return IOMEM_DATA_CACHED (the default attribute).
 */
#define	IOMEM_CACHE_ATTR(flags)	\
	((flags & IOMEM_DATA_MASK) ? (flags & IOMEM_DATA_MASK) : \
	    IOMEM_DATA_CACHED)
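
/*
 * Example (hypothetical 'flags' variable): extract the effective cache
 * attribute, falling back to the default when none was requested:
 *
 *	uint_t cache_attr = IOMEM_CACHE_ATTR(flags);
 *	if (cache_attr == IOMEM_DATA_CACHED)
 *		(set up an ordinary cacheable mapping)
 */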
#endif	/* _SYS_DDIDMAREQ_H */