 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
#include <sys/sunddi.h>
#include <sys/modctl.h>
#include <sys/sysmacros.h>
#include <sys/atomic.h>

#include <sys/dcopy.h>
#include <sys/dcopy_device.h>

/* Number of entries per channel to allocate */
uint_t dcopy_channel_size = 1024;

typedef struct dcopy_list_s {
	list_t		dl_list;
	kmutex_t	dl_mutex;
	uint_t		dl_cnt;		/* num entries on list */
} dcopy_list_t;

/* device state for register/unregister */
struct dcopy_device_s {
	/* DMA device driver's private pointer */
	void			*dc_device_private;

	/* to track list of channels from this DMA device */
	dcopy_list_t		dc_devchan_list;
	list_node_t		dc_device_list_node;

	/*
	 * dc_removing_cnt tracks how many channels still have to be freed up
	 * before it's safe to allow the DMA device driver to detach.
	 */
	uint_t			dc_removing_cnt;

	dcopy_device_cb_t	*dc_cb;

	dcopy_device_info_t	dc_info;
};

typedef struct dcopy_stats_s {
	kstat_named_t	cs_bytes_xfer;
	kstat_named_t	cs_cmd_alloc;
	kstat_named_t	cs_cmd_post;
	kstat_named_t	cs_cmd_poll;
	kstat_named_t	cs_notify_poll;
	kstat_named_t	cs_notify_pending;
	kstat_named_t	cs_id;
	kstat_named_t	cs_capabilities;
} dcopy_stats_t;

/* DMA channel state */
struct dcopy_channel_s {
	/* DMA driver channel private pointer */
	void			*ch_channel_private;

	/* shortcut to device callbacks */
	dcopy_device_cb_t	*ch_cb;

	/*
	 * number of outstanding allocs for this channel. used to track when
	 * it's safe to free up this channel so the DMA device driver can
	 * detach.
	 */
	uint_t			ch_ref_cnt;

	/* state for if channel needs to be removed when ch_ref_cnt gets to 0 */
	boolean_t		ch_removing;

	list_node_t		ch_devchan_list_node;
	list_node_t		ch_globalchan_list_node;

	/*
	 * per channel list of commands actively blocking waiting for
	 * completion.
	 */
	dcopy_list_t		ch_poll_list;

	/* pointer back to our device */
	struct dcopy_device_s	*ch_device;

	dcopy_query_channel_t	ch_info;

	kstat_t			*ch_kstat;
	dcopy_stats_t		ch_stat;
};

/*
 * If grabbing both the device_list mutex and the globalchan_list mutex,
 * always grab the globalchan_list mutex before the device_list mutex.
 */
typedef struct dcopy_state_s {
	dcopy_list_t	d_device_list;
	dcopy_list_t	d_globalchan_list;
} dcopy_state_t;

dcopy_state_t *dcopy_statep;
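
/*
 * Illustration (sketch, not part of the original source): any path that needs
 * both list locks should follow the ordering rule above, e.g.
 *
 *	mutex_enter(&dcopy_statep->d_globalchan_list.dl_mutex);
 *	mutex_enter(&dcopy_statep->d_device_list.dl_mutex);
 *	...
 *	mutex_exit(&dcopy_statep->d_device_list.dl_mutex);
 *	mutex_exit(&dcopy_statep->d_globalchan_list.dl_mutex);
 *
 * dcopy_device_register() below acquires the two locks in exactly this order.
 */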

/* Module Driver Info */
static struct modlmisc dcopy_modlmisc = {
	&mod_miscops,
	"dcopy kernel module"
};

/* Module Linkage */
static struct modlinkage dcopy_modlinkage = {
	MODREV_1,
	{ &dcopy_modlmisc, NULL }
};

static int dcopy_init();
static void dcopy_fini();

static int dcopy_list_init(dcopy_list_t *list, size_t node_size,
    offset_t link_offset);
static void dcopy_list_fini(dcopy_list_t *list);
static void dcopy_list_push(dcopy_list_t *list, void *list_node);
static void *dcopy_list_pop(dcopy_list_t *list);

static void dcopy_device_cleanup(dcopy_device_handle_t device,
    boolean_t do_callback);

static int dcopy_stats_init(dcopy_handle_t channel);
static void dcopy_stats_fini(dcopy_handle_t channel);

	return (mod_install(&dcopy_modlinkage));

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&dcopy_modlinkage, modinfop));
}

	e = mod_remove(&dcopy_modlinkage);

	dcopy_statep = kmem_zalloc(sizeof (*dcopy_statep), KM_SLEEP);

	/* Initialize the list we use to track device register/unregister */
	e = dcopy_list_init(&dcopy_statep->d_device_list,
	    sizeof (struct dcopy_device_s),
	    offsetof(struct dcopy_device_s, dc_device_list_node));
	if (e != DCOPY_SUCCESS) {
		goto dcopyinitfail_device;
	}

	/* Initialize the list we use to track all DMA channels */
	e = dcopy_list_init(&dcopy_statep->d_globalchan_list,
	    sizeof (struct dcopy_channel_s),
	    offsetof(struct dcopy_channel_s, ch_globalchan_list_node));
	if (e != DCOPY_SUCCESS) {
		goto dcopyinitfail_global;
	}

	dcopy_list_fini(&dcopy_statep->d_globalchan_list);
dcopyinitfail_global:
	dcopy_list_fini(&dcopy_statep->d_device_list);
dcopyinitfail_device:
	kmem_free(dcopy_statep, sizeof (*dcopy_statep));

	/*
	 * if mod_remove was successful, we shouldn't have any
	 * devices/channels to worry about.
	 */
	ASSERT(list_head(&dcopy_statep->d_globalchan_list.dl_list) == NULL);
	ASSERT(list_head(&dcopy_statep->d_device_list.dl_list) == NULL);

	dcopy_list_fini(&dcopy_statep->d_globalchan_list);
	dcopy_list_fini(&dcopy_statep->d_device_list);
	kmem_free(dcopy_statep, sizeof (*dcopy_statep));


/* *** EXTERNAL INTERFACE *** */

void
dcopy_query(dcopy_query_t *query)
{
	query->dq_version = DCOPY_QUERY_V0;
	query->dq_num_channels = dcopy_statep->d_globalchan_list.dl_cnt;
}
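
/*
 * Example (sketch, not in the original source): a consumer would typically
 * query for the number of available DMA channels before trying to allocate
 * one; the error handling shown here is illustrative.
 *
 *	dcopy_query_t q;
 *
 *	dcopy_query(&q);
 *	if (q.dq_num_channels == 0)
 *		return (ENODEV);	// no DMA engines registered
 */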

int
dcopy_alloc(int flags, dcopy_handle_t *handle)
{
	dcopy_handle_t channel;
	dcopy_list_t *list;

	/*
	 * we don't use the dcopy_list_* code here because we need to do
	 * some non-standard stuff.
	 */
	list = &dcopy_statep->d_globalchan_list;

	/*
	 * if nothing is on the channel list, return DCOPY_NORESOURCES. This
	 * can happen if there aren't any DMA devices registered.
	 */
	mutex_enter(&list->dl_mutex);
	channel = list_head(&list->dl_list);
	if (channel == NULL) {
		mutex_exit(&list->dl_mutex);
		return (DCOPY_NORESOURCES);
	}

	/*
	 * increment the reference count, and pop the channel off the head and
	 * push it on the tail. This ensures we rotate through the channels.
	 * DMA channels are shared.
	 */
	channel->ch_ref_cnt++;
	list_remove(&list->dl_list, channel);
	list_insert_tail(&list->dl_list, channel);
	mutex_exit(&list->dl_mutex);

	*handle = (dcopy_handle_t)channel;
	return (DCOPY_SUCCESS);
}

void
dcopy_free(dcopy_handle_t *channel)
{
	dcopy_device_handle_t device;
	dcopy_list_t *list;
	boolean_t cleanup = B_FALSE;

	ASSERT(*channel != NULL);

	/*
	 * we don't need to add the channel back to the list since we never
	 * removed it. decrement the reference count.
	 */
	list = &dcopy_statep->d_globalchan_list;
	mutex_enter(&list->dl_mutex);
	(*channel)->ch_ref_cnt--;

	/*
	 * if we need to remove this channel, and the reference count is down
	 * to 0, decrement the number of channels which still need to be
	 * removed on the device.
	 */
	if ((*channel)->ch_removing && ((*channel)->ch_ref_cnt == 0)) {
		device = (*channel)->ch_device;
		mutex_enter(&device->dc_devchan_list.dl_mutex);
		device->dc_removing_cnt--;
		if (device->dc_removing_cnt == 0) {
			cleanup = B_TRUE;
		}
		mutex_exit(&device->dc_devchan_list.dl_mutex);
	}
	mutex_exit(&list->dl_mutex);

	/*
	 * if there are no channels which still need to be removed, cleanup the
	 * device state and call back into the DMA device driver to tell it
	 * the device is free.
	 */
	if (cleanup) {
		dcopy_device_cleanup(device, B_TRUE);
	}
}

/*
 * dcopy_query_channel()
 */
void
dcopy_query_channel(dcopy_handle_t channel, dcopy_query_channel_t *query)
{
	*query = channel->ch_info;
}

int
dcopy_cmd_alloc(dcopy_handle_t handle, int flags, dcopy_cmd_t *cmd)
{
	dcopy_handle_t channel;
	dcopy_cmd_priv_t priv;
	int e;

	channel = handle;

	atomic_inc_64(&channel->ch_stat.cs_cmd_alloc.value.ui64);
	e = channel->ch_cb->cb_cmd_alloc(channel->ch_channel_private, flags,
	    cmd);
	if (e == DCOPY_SUCCESS) {
		priv = (*cmd)->dp_private;
		priv->pr_channel = channel;
		/*
		 * we won't initialize the blocking state until we actually
		 * need to block.
		 */
		priv->pr_block_init = B_FALSE;
	}

	return (e);
}

void
dcopy_cmd_free(dcopy_cmd_t *cmd)
{
	dcopy_handle_t channel;
	dcopy_cmd_priv_t priv;

	ASSERT(*cmd != NULL);

	priv = (*cmd)->dp_private;
	channel = priv->pr_channel;

	/* if we initialized the blocking state, clean it up too */
	if (priv->pr_block_init) {
		cv_destroy(&priv->pr_cv);
		mutex_destroy(&priv->pr_mutex);
	}

	channel->ch_cb->cb_cmd_free(channel->ch_channel_private, cmd);
}

int
dcopy_cmd_post(dcopy_cmd_t cmd)
{
	dcopy_handle_t channel;
	int e;

	channel = cmd->dp_private->pr_channel;

	atomic_inc_64(&channel->ch_stat.cs_cmd_post.value.ui64);
	if (cmd->dp_cmd == DCOPY_CMD_COPY) {
		atomic_add_64(&channel->ch_stat.cs_bytes_xfer.value.ui64,
		    cmd->dp.copy.cc_size);
	}
	e = channel->ch_cb->cb_cmd_post(channel->ch_channel_private, cmd);
	if (e != DCOPY_SUCCESS) {
		return (e);
	}

	return (DCOPY_SUCCESS);
}

int
dcopy_cmd_poll(dcopy_cmd_t cmd, int flags)
{
	dcopy_handle_t channel;
	dcopy_cmd_priv_t priv;
	int e;

	priv = cmd->dp_private;
	channel = priv->pr_channel;

	/*
	 * if the caller is trying to block, they needed to post the
	 * command with DCOPY_CMD_INTR set.
	 */
	if ((flags & DCOPY_POLL_BLOCK) && !(cmd->dp_flags & DCOPY_CMD_INTR)) {
		return (DCOPY_FAILURE);
	}

	atomic_inc_64(&channel->ch_stat.cs_cmd_poll.value.ui64);

repoll:
	e = channel->ch_cb->cb_cmd_poll(channel->ch_channel_private, cmd);
	if (e == DCOPY_PENDING) {
		/*
		 * if the command is still active, and the blocking flag
		 * is set, wait for it to complete.
		 */
		if (flags & DCOPY_POLL_BLOCK) {
			/*
			 * if we haven't initialized the state, do it now. A
			 * command can be re-used, so it's possible it's
			 * already been initialized.
			 */
			if (!priv->pr_block_init) {
				priv->pr_block_init = B_TRUE;
				mutex_init(&priv->pr_mutex, NULL, MUTEX_DRIVER,
				    NULL);
				cv_init(&priv->pr_cv, NULL, CV_DRIVER, NULL);
			}

			/* push it on the list for blocking commands */
			priv->pr_wait = B_TRUE;
			dcopy_list_push(&channel->ch_poll_list, priv);

			mutex_enter(&priv->pr_mutex);
			/*
			 * it's possible we already cleared pr_wait before we
			 * grabbed the mutex, so re-check it before sleeping.
			 */
			while (priv->pr_wait) {
				cv_wait(&priv->pr_cv, &priv->pr_mutex);
			}
			mutex_exit(&priv->pr_mutex);

			/*
			 * the command has completed, go back and poll so we
			 * pick up the final status.
			 */
			goto repoll;
		}
	}

	return (e);
}
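
/*
 * Example (sketch, not in the original source): a consumer posting a single
 * copy and blocking for completion. The channel handle "chan" and "size" are
 * assumed to come from the caller; cc_size is used above, while the remaining
 * dp.copy fields are left to <sys/dcopy.h>.
 *
 *	dcopy_cmd_t cmd;
 *
 *	if (dcopy_cmd_alloc(chan, DCOPY_SLEEP, &cmd) != DCOPY_SUCCESS)
 *		return (DCOPY_FAILURE);
 *
 *	cmd->dp_cmd = DCOPY_CMD_COPY;
 *	cmd->dp_flags = DCOPY_CMD_INTR;	// required for DCOPY_POLL_BLOCK
 *	cmd->dp.copy.cc_size = size;
 *	// fill in the remaining dp.copy fields (source/destination) here
 *
 *	if (dcopy_cmd_post(cmd) == DCOPY_SUCCESS)
 *		(void) dcopy_cmd_poll(cmd, DCOPY_POLL_BLOCK);
 *
 *	dcopy_cmd_free(&cmd);
 */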

/* *** END OF EXTERNAL INTERFACE *** */

static int
dcopy_list_init(dcopy_list_t *list, size_t node_size, offset_t link_offset)
{
	mutex_init(&list->dl_mutex, NULL, MUTEX_DRIVER, NULL);
	list_create(&list->dl_list, node_size, link_offset);
	list->dl_cnt = 0;

	return (DCOPY_SUCCESS);
}

static void
dcopy_list_fini(dcopy_list_t *list)
{
	list_destroy(&list->dl_list);
	mutex_destroy(&list->dl_mutex);
}

static void
dcopy_list_push(dcopy_list_t *list, void *list_node)
{
	mutex_enter(&list->dl_mutex);
	list_insert_tail(&list->dl_list, list_node);
	list->dl_cnt++;
	mutex_exit(&list->dl_mutex);
}

static void *
dcopy_list_pop(dcopy_list_t *list)
{
	list_node_t *list_node;

	mutex_enter(&list->dl_mutex);
	list_node = list_head(&list->dl_list);
	if (list_node == NULL) {
		mutex_exit(&list->dl_mutex);
		return (NULL);
	}
	list->dl_cnt--;
	list_remove(&list->dl_list, list_node);
	mutex_exit(&list->dl_mutex);

	return (list_node);
}


/* *** DEVICE INTERFACE *** */

/*
 * dcopy_device_register()
 */
int
dcopy_device_register(void *device_private, dcopy_device_info_t *info,
    dcopy_device_handle_t *handle)
{
	struct dcopy_channel_s *channel;
	struct dcopy_device_s *device;
	int e;
	int i;

	/* initialize the per device state */
	device = kmem_zalloc(sizeof (*device), KM_SLEEP);
	device->dc_device_private = device_private;
	device->dc_info = *info;
	device->dc_removing_cnt = 0;
	device->dc_cb = info->di_cb;

	/*
	 * we have a per device channel list so we can remove a device in the
	 * future.
	 */
	e = dcopy_list_init(&device->dc_devchan_list,
	    sizeof (struct dcopy_channel_s),
	    offsetof(struct dcopy_channel_s, ch_devchan_list_node));
	if (e != DCOPY_SUCCESS) {
		goto registerfail_devchan;
	}

	/*
	 * allocate state for each channel, allocate the channel, and then add
	 * the device's DMA channels to the device's channel list.
	 */
	for (i = 0; i < info->di_num_dma; i++) {
		channel = kmem_zalloc(sizeof (*channel), KM_SLEEP);
		channel->ch_device = device;
		channel->ch_removing = B_FALSE;
		channel->ch_ref_cnt = 0;
		channel->ch_cb = info->di_cb;

		e = info->di_cb->cb_channel_alloc(device_private, channel,
		    DCOPY_SLEEP, dcopy_channel_size, &channel->ch_info,
		    &channel->ch_channel_private);
		if (e != DCOPY_SUCCESS) {
			kmem_free(channel, sizeof (*channel));
			goto registerfail_alloc;
		}

		e = dcopy_stats_init(channel);
		if (e != DCOPY_SUCCESS) {
			info->di_cb->cb_channel_free(
			    &channel->ch_channel_private);
			kmem_free(channel, sizeof (*channel));
			goto registerfail_alloc;
		}

		e = dcopy_list_init(&channel->ch_poll_list,
		    sizeof (struct dcopy_cmd_priv_s),
		    offsetof(struct dcopy_cmd_priv_s, pr_poll_list_node));
		if (e != DCOPY_SUCCESS) {
			dcopy_stats_fini(channel);
			info->di_cb->cb_channel_free(
			    &channel->ch_channel_private);
			kmem_free(channel, sizeof (*channel));
			goto registerfail_alloc;
		}

		dcopy_list_push(&device->dc_devchan_list, channel);
	}

	/* add the device to the device list */
	dcopy_list_push(&dcopy_statep->d_device_list, device);

	/*
	 * add the device's dma channels to the global channel list (where
	 * dcopy_alloc() allocations come from)
	 */
	mutex_enter(&dcopy_statep->d_globalchan_list.dl_mutex);
	mutex_enter(&dcopy_statep->d_device_list.dl_mutex);
	channel = list_head(&device->dc_devchan_list.dl_list);
	while (channel != NULL) {
		list_insert_tail(&dcopy_statep->d_globalchan_list.dl_list,
		    channel);
		dcopy_statep->d_globalchan_list.dl_cnt++;
		channel = list_next(&device->dc_devchan_list.dl_list, channel);
	}
	mutex_exit(&dcopy_statep->d_device_list.dl_mutex);
	mutex_exit(&dcopy_statep->d_globalchan_list.dl_mutex);

	*handle = device;

	/* last call-back into kernel for dcopy KAPI enabled */
	uioa_dcopy_enable();

	return (DCOPY_SUCCESS);

registerfail_alloc:
	channel = list_head(&device->dc_devchan_list.dl_list);
	while (channel != NULL) {
		/* remove from the list */
		channel = dcopy_list_pop(&device->dc_devchan_list);
		ASSERT(channel != NULL);

		dcopy_list_fini(&channel->ch_poll_list);
		dcopy_stats_fini(channel);
		info->di_cb->cb_channel_free(&channel->ch_channel_private);
		kmem_free(channel, sizeof (*channel));

		channel = list_head(&device->dc_devchan_list.dl_list);
	}

	dcopy_list_fini(&device->dc_devchan_list);
registerfail_devchan:
	kmem_free(device, sizeof (*device));

	return (DCOPY_FAILURE);
}

/*
 * dcopy_device_unregister()
 */
int
dcopy_device_unregister(dcopy_device_handle_t *handle)
{
	struct dcopy_channel_s *channel;
	dcopy_device_handle_t device;
	boolean_t device_busy;

	/* first call-back into kernel for dcopy KAPI disable */
	uioa_dcopy_disable();

	device = *handle;
	device_busy = B_FALSE;

	/*
	 * remove the device's dma channels from the global channel list (where
	 * dcopy_alloc() allocations come from)
	 */
	mutex_enter(&dcopy_statep->d_globalchan_list.dl_mutex);
	mutex_enter(&device->dc_devchan_list.dl_mutex);
	channel = list_head(&device->dc_devchan_list.dl_list);
	while (channel != NULL) {
		/*
		 * if the channel has outstanding allocs, mark it as having
		 * to be removed and increment the number of channels which
		 * need to be removed in the device state too.
		 */
		if (channel->ch_ref_cnt != 0) {
			channel->ch_removing = B_TRUE;
			device_busy = B_TRUE;
			device->dc_removing_cnt++;
		}
		dcopy_statep->d_globalchan_list.dl_cnt--;
		list_remove(&dcopy_statep->d_globalchan_list.dl_list, channel);
		channel = list_next(&device->dc_devchan_list.dl_list, channel);
	}
	mutex_exit(&device->dc_devchan_list.dl_mutex);
	mutex_exit(&dcopy_statep->d_globalchan_list.dl_mutex);

	/*
	 * if there are channels which still need to be removed, we will clean
	 * up the device state after they are freed up.
	 */
	if (device_busy) {
		return (DCOPY_PENDING);
	}

	dcopy_device_cleanup(device, B_FALSE);

	return (DCOPY_SUCCESS);
}

/*
 * dcopy_device_cleanup()
 */
static void
dcopy_device_cleanup(dcopy_device_handle_t device, boolean_t do_callback)
{
	struct dcopy_channel_s *channel;

	/*
	 * remove all the channels in the device list, free them, and clean up
	 * the state.
	 */
	mutex_enter(&dcopy_statep->d_device_list.dl_mutex);
	channel = list_head(&device->dc_devchan_list.dl_list);
	while (channel != NULL) {
		device->dc_devchan_list.dl_cnt--;
		list_remove(&device->dc_devchan_list.dl_list, channel);
		dcopy_list_fini(&channel->ch_poll_list);
		dcopy_stats_fini(channel);
		channel->ch_cb->cb_channel_free(&channel->ch_channel_private);
		kmem_free(channel, sizeof (*channel));
		channel = list_head(&device->dc_devchan_list.dl_list);
	}

	/* remove it from the list of devices */
	list_remove(&dcopy_statep->d_device_list.dl_list, device);
	dcopy_statep->d_device_list.dl_cnt--;
	mutex_exit(&dcopy_statep->d_device_list.dl_mutex);

	/*
	 * notify the DMA device driver that the device is free to be
	 * detached.
	 */
	if (do_callback) {
		device->dc_cb->cb_unregister_complete(
		    device->dc_device_private, DCOPY_SUCCESS);
	}

	dcopy_list_fini(&device->dc_devchan_list);
	kmem_free(device, sizeof (*device));
}

/*
 * dcopy_device_channel_notify()
 */
void
dcopy_device_channel_notify(dcopy_handle_t handle, int status)
{
	struct dcopy_channel_s *channel;
	dcopy_list_t *poll_list;
	dcopy_cmd_priv_t priv;
	int e;

	ASSERT(status == DCOPY_COMPLETION);

	channel = handle;
	poll_list = &channel->ch_poll_list;

	/*
	 * when we get a completion notification from the device, go through
	 * all of the commands blocking on this channel and see if they have
	 * completed. Remove the command and wake up the blocked thread if they
	 * have. Once we hit a command which is still pending, we are done
	 * polling since commands in a channel complete in order.
	 */
	mutex_enter(&poll_list->dl_mutex);
	if (poll_list->dl_cnt != 0) {
		priv = list_head(&poll_list->dl_list);
		while (priv != NULL) {
			atomic_inc_64(&channel->
			    ch_stat.cs_notify_poll.value.ui64);
			e = channel->ch_cb->cb_cmd_poll(
			    channel->ch_channel_private,
			    priv->pr_cmd);
			if (e == DCOPY_PENDING) {
				atomic_inc_64(&channel->
				    ch_stat.cs_notify_pending.value.ui64);
				break;
			}

			poll_list->dl_cnt--;
			list_remove(&poll_list->dl_list, priv);

			mutex_enter(&priv->pr_mutex);
			priv->pr_wait = B_FALSE;
			cv_signal(&priv->pr_cv);
			mutex_exit(&priv->pr_mutex);

			priv = list_head(&poll_list->dl_list);
		}
	}

	mutex_exit(&poll_list->dl_mutex);
}

static int
dcopy_stats_init(dcopy_handle_t channel)
{
#define	CHANSTRSIZE	20
	char chanstr[CHANSTRSIZE];
	dcopy_stats_t *stats;
	int instance;
	char *name;

	stats = &channel->ch_stat;
	name = (char *)ddi_driver_name(channel->ch_device->dc_info.di_dip);
	instance = ddi_get_instance(channel->ch_device->dc_info.di_dip);

	(void) snprintf(chanstr, CHANSTRSIZE, "channel%d",
	    (uint32_t)channel->ch_info.qc_chan_num);

	channel->ch_kstat = kstat_create(name, instance, chanstr, "misc",
	    KSTAT_TYPE_NAMED, sizeof (dcopy_stats_t) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (channel->ch_kstat == NULL) {
		return (DCOPY_FAILURE);
	}
	channel->ch_kstat->ks_data = stats;

	kstat_named_init(&stats->cs_bytes_xfer, "bytes_xfer",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&stats->cs_cmd_alloc, "cmd_alloc",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&stats->cs_cmd_post, "cmd_post",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&stats->cs_cmd_poll, "cmd_poll",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&stats->cs_notify_poll, "notify_poll",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&stats->cs_notify_pending, "notify_pending",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&stats->cs_id, "id",
	    KSTAT_DATA_UINT64);
	kstat_named_init(&stats->cs_capabilities, "capabilities",
	    KSTAT_DATA_UINT64);

	kstat_install(channel->ch_kstat);

	channel->ch_stat.cs_id.value.ui64 = channel->ch_info.qc_id;
	channel->ch_stat.cs_capabilities.value.ui64 =
	    channel->ch_info.qc_capabilities;

	return (DCOPY_SUCCESS);
}

static void
dcopy_stats_fini(dcopy_handle_t channel)
{
	kstat_delete(channel->ch_kstat);
}

/* *** END OF DEVICE INTERFACE *** */