/*
 * usr/src/uts/sun4v/io/dr_io.c
 * (extracted from blob 7e6e0ef7c27991e0cd1c0d753d45590e95e9425f,
 * commit "kernel: remove unused utsname_set_machine()", unleashed.git)
 */
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
28 * sun4v VIO DR Module
31 #include <sys/modctl.h>
32 #include <sys/sunddi.h>
33 #include <sys/sunndi.h>
34 #include <sys/note.h>
35 #include <sys/sysevent/dr.h>
36 #include <sys/hypervisor_api.h>
37 #include <sys/mach_descrip.h>
38 #include <sys/mdesc.h>
39 #include <sys/mdesc_impl.h>
40 #include <sys/ds.h>
41 #include <sys/drctl.h>
42 #include <sys/dr_util.h>
43 #include <sys/dr_io.h>
44 #include <sys/promif.h>
45 #include <sys/machsystm.h>
46 #include <sys/ethernet.h>
47 #include <sys/hotplug/pci/pcicfg.h>
50 static struct modlmisc modlmisc = {
51 &mod_miscops,
52 "sun4v VIO DR"
55 static struct modlinkage modlinkage = {
56 MODREV_1,
57 (void *)&modlmisc,
58 NULL
63 * VIO DS Interface
67 * Global DS Handle
69 static ds_svc_hdl_t ds_vio_handle;
72 * Supported DS Capability Versions
74 static ds_ver_t dr_vio_vers[] = { { 1, 0 } };
75 #define DR_VIO_NVERS (sizeof (dr_vio_vers) / sizeof (dr_vio_vers[0]))
78 * DS Capability Description
80 static ds_capability_t dr_vio_cap = {
81 DR_VIO_DS_ID, /* svc_id */
82 dr_vio_vers, /* vers */
83 DR_VIO_NVERS /* nvers */
87 * DS Callbacks
89 static void dr_vio_reg_handler(ds_cb_arg_t, ds_ver_t *, ds_svc_hdl_t);
90 static void dr_vio_unreg_handler(ds_cb_arg_t arg);
91 static void dr_vio_data_handler(ds_cb_arg_t arg, void *buf, size_t buflen);
94 * DS Client Ops Vector
96 static ds_clnt_ops_t dr_vio_ops = {
97 dr_vio_reg_handler, /* ds_reg_cb */
98 dr_vio_unreg_handler, /* ds_unreg_cb */
99 dr_vio_data_handler, /* ds_data_cb */
100 NULL /* cb_arg */
104 typedef struct {
105 char *name;
106 uint64_t devid;
107 dev_info_t *dip;
108 } dr_search_arg_t;
110 static int
111 dr_io_check_node(dev_info_t *dip, void *arg)
113 char *name;
114 uint64_t devid;
115 dr_search_arg_t *sarg = (dr_search_arg_t *)arg;
117 name = ddi_node_name(dip);
119 if (strcmp(name, sarg->name) != 0)
120 return (DDI_WALK_CONTINUE);
122 devid = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
123 "reg", -1);
125 DR_DBG_IO("%s: found devid=%ld, looking for %ld\n",
126 __func__, devid, sarg->devid);
128 if (devid == sarg->devid) {
129 DR_DBG_IO("%s: matched", __func__);
131 /* matching node must be returned held */
132 if (!e_ddi_branch_held(dip))
133 e_ddi_branch_hold(dip);
135 sarg->dip = dip;
136 return (DDI_WALK_TERMINATE);
139 return (DDI_WALK_CONTINUE);
143 * Walk the device tree to find the dip corresponding to the devid
144 * passed in. If present, the dip is returned held. The caller must
145 * release the hold on the dip once it is no longer required. If no
146 * matching node if found, NULL is returned.
148 static dev_info_t *
149 dr_io_find_node(char *name, uint64_t devid)
151 dr_search_arg_t arg;
153 DR_DBG_IO("dr_io_find_node...\n");
155 arg.name = name;
156 arg.devid = devid;
157 arg.dip = NULL;
159 ddi_walk_devs(ddi_root_node(), dr_io_check_node, &arg);
161 ASSERT((arg.dip == NULL) || (e_ddi_branch_held(arg.dip)));
163 return ((arg.dip) ? arg.dip : NULL);
167 * Look up a particular IO node in the MD. Returns the mde_cookie_t
168 * representing that IO node if present, and MDE_INVAL_ELEM_COOKIE otherwise.
169 * It is assumed the scratch array has already been allocated so that
170 * it can accommodate the worst case scenario, every node in the MD.
172 static mde_cookie_t
173 dr_io_find_node_md(md_t *mdp, char *name, uint64_t id, mde_cookie_t *listp)
175 int i;
176 int nnodes;
177 char *devnm;
178 uint64_t devid;
179 mde_cookie_t rootnode;
180 mde_cookie_t result = MDE_INVAL_ELEM_COOKIE;
182 DR_DBG_IO("%s: %s@%ld\n", __func__, name, id);
184 rootnode = md_root_node(mdp);
185 ASSERT(rootnode != MDE_INVAL_ELEM_COOKIE);
188 * Scan the DAG for all candidate nodes.
190 nnodes = md_scan_dag(mdp, rootnode, md_find_name(mdp, "virtual-device"),
191 md_find_name(mdp, "fwd"), listp);
193 if (nnodes < 0) {
194 DR_DBG_IO("%s: scan for "
195 "'virtual-device' nodes failed\n", __func__);
196 return (result);
199 DR_DBG_IO("%s: found %d nodes in the MD\n", __func__, nnodes);
202 * Find the node of interest
204 for (i = 0; i < nnodes; i++) {
206 if (md_get_prop_str(mdp, listp[i], "name", &devnm)) {
207 DR_DBG_IO("%s: missing 'name' property for"
208 " IO node %d\n", __func__, i);
209 return (DDI_WALK_ERROR);
212 if (strcmp(devnm, name) != 0)
213 continue;
215 if (md_get_prop_val(mdp, listp[i], "cfg-handle", &devid)) {
216 DR_DBG_IO("%s: missing 'cfg-handle' property for"
217 " IO node %d\n", __func__, i);
218 break;
221 if (devid == id) {
222 /* found a match */
223 DR_DBG_IO("%s: found IO node %s@%ld "
224 "in MD\n", __func__, name, id);
225 result = listp[i];
226 break;
230 if (result == MDE_INVAL_ELEM_COOKIE)
231 DR_DBG_IO("%s: IO node %ld not in MD\n", __func__, id);
233 return (result);
236 typedef struct {
237 md_t *mdp;
238 mde_cookie_t node;
239 dev_info_t *dip;
240 } cb_arg_t;
242 #define STR_ARR_LEN 5
244 static int
245 new_dev_node(dev_info_t *new_node, void *arg, uint_t flags)
247 _NOTE(ARGUNUSED(flags))
249 cb_arg_t *cba;
250 char *devnm, *devtype;
251 char *compat;
252 uint64_t devid;
253 int len = 0;
254 char *curr;
255 int i = 0;
256 char *str_arr[STR_ARR_LEN];
258 cba = (cb_arg_t *)arg;
261 * Add 'name' property
263 if (md_get_prop_str(cba->mdp, cba->node, "name", &devnm)) {
264 DR_DBG_IO("%s: failed to read 'name' prop from MD\n", __func__);
265 return (DDI_WALK_ERROR);
267 DR_DBG_IO("%s: device name is %s\n", __func__, devnm);
269 if (ndi_prop_update_string(DDI_DEV_T_NONE, new_node,
270 "name", devnm) != DDI_SUCCESS) {
271 DR_DBG_IO("%s: failed to create 'name' prop\n", __func__);
272 return (DDI_WALK_ERROR);
276 * Add 'compatible' property
278 if (md_get_prop_data(cba->mdp, cba->node, "compatible",
279 (uint8_t **)&compat, &len)) {
280 DR_DBG_IO("%s: failed to read "
281 "'compatible' prop from MD\n", __func__);
282 return (DDI_WALK_ERROR);
285 /* parse the MD string array */
286 curr = compat;
287 while (curr < (compat + len)) {
289 DR_DBG_IO("%s: adding '%s' to "
290 "'compatible' prop\n", __func__, curr);
292 str_arr[i++] = curr;
293 curr += strlen(curr) + 1;
295 if (i == STR_ARR_LEN) {
296 DR_DBG_CPU("exceeded str_arr len (%d)\n", STR_ARR_LEN);
297 break;
302 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, new_node,
303 "compatible", str_arr, i) != DDI_SUCCESS) {
304 DR_DBG_IO("%s: cannot create 'compatible' prop\n", __func__);
305 return (DDI_WALK_ERROR);
309 * Add 'device_type' property
311 if (md_get_prop_str(cba->mdp, cba->node, "device-type", &devtype)) {
312 DR_DBG_IO("%s: failed to read "
313 "'device-type' prop from MD\n", __func__);
314 return (DDI_WALK_ERROR);
316 if (ndi_prop_update_string(DDI_DEV_T_NONE, new_node,
317 "device_type", devtype) != DDI_SUCCESS) {
318 DR_DBG_IO("%s: failed to create "
319 "'device-type' prop\n", __func__);
320 return (DDI_WALK_ERROR);
323 DR_DBG_IO("%s: device type is %s\n", __func__, devtype);
326 * Add 'reg' (cfg-handle) property
328 if (md_get_prop_val(cba->mdp, cba->node, "cfg-handle", &devid)) {
329 DR_DBG_IO("%s: failed to read "
330 "'cfg-handle' prop from MD\n", __func__);
331 return (DDI_WALK_ERROR);
334 DR_DBG_IO("%s: new device is %s@%ld\n", __func__, devnm, devid);
336 if (ndi_prop_update_int(DDI_DEV_T_NONE, new_node, "reg", devid)
337 != DDI_SUCCESS) {
338 DR_DBG_IO("%s: failed to create 'reg' prop\n", __func__);
339 return (DDI_WALK_ERROR);
342 /* if vnet/vswitch, probe and add mac-address and mtu properties */
343 if (strcmp(devnm, "vsw") == 0 || strcmp(devnm, "network") == 0) {
345 int i, j;
346 uint64_t mtu, macaddr;
347 uchar_t maddr_arr[ETHERADDRL];
349 if (md_get_prop_val(cba->mdp, cba->node, "local-mac-address",
350 &macaddr)) {
351 DR_DBG_IO("%s: failed to read "
352 "'local-mac-address' prop from MD\n", __func__);
353 return (DDI_WALK_ERROR);
356 for (i = 0, j = (ETHERADDRL - 1); i < ETHERADDRL; i++, j--)
357 maddr_arr[j] = (macaddr >> (i * 8)) & 0xff;
359 if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, new_node,
360 "local-mac-address", maddr_arr, ETHERADDRL)
361 != DDI_SUCCESS) {
362 DR_DBG_IO("%s: failed to create "
363 "'local-mac-address' prop\n", __func__);
364 return (DDI_WALK_ERROR);
367 if (md_get_prop_val(cba->mdp, cba->node, "mtu", &mtu)) {
368 DR_DBG_IO("%s: failed to read "
369 "'mtu' prop from MD\n", __func__);
370 return (DDI_WALK_ERROR);
373 if (ndi_prop_update_int64(DDI_DEV_T_NONE, new_node, "mtu",
374 mtu) != DDI_SUCCESS) {
375 DR_DBG_IO("%s: failed to "
376 "create 'mtu' prop\n", __func__);
377 return (DDI_WALK_ERROR);
380 DR_DBG_IO("%s: Added properties for %s@%ld, "
381 "mac=%ld, mtu=%ld\n", __func__, devnm, devid, macaddr, mtu);
384 cba->dip = new_node;
386 return (DDI_WALK_TERMINATE);
390 * Find the parent node of the argument virtual device node in
391 * the MD. For virtual devices, the parent is always
392 * "channel-devices", so scan the MD using the "back" arcs
393 * looking for a node with that name.
395 static mde_cookie_t
396 dr_vio_find_parent_md(md_t *mdp, mde_cookie_t node)
398 int max_nodes;
399 int num_nodes;
400 int listsz;
401 mde_cookie_t *listp;
402 mde_cookie_t pnode = MDE_INVAL_ELEM_COOKIE;
404 max_nodes = md_node_count(mdp);
405 listsz = max_nodes * sizeof (mde_cookie_t);
406 listp = kmem_zalloc(listsz, KM_SLEEP);
407 DR_DBG_KMEM("%s: alloc addr %p size %d\n",
408 __func__, (void *)listp, listsz);
410 num_nodes = md_scan_dag(mdp, node,
411 md_find_name(mdp, "channel-devices"),
412 md_find_name(mdp, "back"), listp);
414 ASSERT(num_nodes == 1);
416 if (num_nodes == 1)
417 pnode = listp[0];
419 DR_DBG_KMEM("%s: free addr %p size %d\n",
420 __func__, (void *)listp, listsz);
421 kmem_free(listp, listsz);
423 return (pnode);
426 static int
427 dr_io_configure(dr_vio_req_t *req, dr_vio_res_t *res)
429 int rv = ENXIO;
430 int listsz;
431 int nnodes;
432 uint64_t devid = req->dev_id;
433 uint64_t pdevid;
434 char *name = req->name;
435 char *pname;
436 md_t *mdp = NULL;
437 mde_cookie_t *listp = NULL;
438 mde_cookie_t node;
439 mde_cookie_t pnode;
440 dev_info_t *pdip = NULL;
441 dev_info_t *dip;
442 devi_branch_t br;
443 cb_arg_t cba;
444 int drctl_cmd;
445 int drctl_flags = 0;
446 drctl_rsrc_t *drctl_req;
447 size_t drctl_req_len;
448 drctl_rsrc_t *drctl_rsrc = NULL;
449 drctl_cookie_t drctl_res_ck;
450 char *p;
451 drctl_resp_t *drctl_resp;
452 size_t drctl_resp_len = 0;
454 res->result = DR_VIO_RES_FAILURE;
456 if ((dip = dr_io_find_node(name, devid)) != NULL) {
457 DR_DBG_IO("%s: %s@%ld already configured\n",
458 __func__, name, devid);
460 /* Return success if resources is already there. */
461 res->result = DR_VIO_RES_OK;
462 res->status = DR_VIO_STAT_CONFIGURED;
463 e_ddi_branch_rele(dip);
464 return (0);
467 /* Assume we fail to find the node to be added. */
468 res->status = DR_VIO_STAT_NOT_PRESENT;
470 if ((mdp = md_get_handle()) == NULL) {
471 DR_DBG_IO("%s: unable to initialize MD\n", __func__);
472 return (ENXIO);
475 nnodes = md_node_count(mdp);
476 ASSERT(nnodes > 0);
478 listsz = nnodes * sizeof (mde_cookie_t);
479 listp = kmem_zalloc(listsz, KM_SLEEP);
480 DR_DBG_KMEM("%s: alloc addr %p size %d\n",
481 __func__, (void *)listp, listsz);
484 * Get the MD device node.
486 node = dr_io_find_node_md(mdp, name, devid, listp);
488 if (node == MDE_INVAL_ELEM_COOKIE) {
489 DR_DBG_IO("%s: scan for %s name node failed\n", __func__, name);
490 res->result = DR_VIO_RES_NOT_IN_MD;
491 goto done;
495 * Get the MD parent node.
497 pnode = dr_vio_find_parent_md(mdp, node);
498 if (pnode == MDE_INVAL_ELEM_COOKIE) {
499 DR_DBG_IO("%s: failed to find MD parent of %lx\n",
500 __func__, pnode);
501 goto done;
504 if (md_get_prop_str(mdp, pnode, "name", &pname)) {
505 DR_DBG_IO("%s: failed to read "
506 "'name' for pnode %lx from MD\n", __func__, pnode);
507 goto done;
510 if (md_get_prop_val(mdp, pnode, "cfg-handle", &pdevid)) {
511 DR_DBG_IO("%s: failed to read 'cfg-handle' "
512 "for pnode '%s' from MD\n", __func__, pname);
513 goto done;
516 DR_DBG_IO("%s: parent device %s@%lx\n", __func__, pname, pdevid);
519 * Get the devinfo parent node.
521 if ((pdip = dr_io_find_node(pname, pdevid)) == NULL) {
522 DR_DBG_IO("%s: parent device %s@%ld not found\n",
523 __func__, pname, pdevid);
524 goto done;
527 drctl_req_len = sizeof (drctl_rsrc_t) + MAXPATHLEN;
528 drctl_req = kmem_zalloc(drctl_req_len, KM_SLEEP);
529 DR_DBG_KMEM("%s: alloc addr %p size %ld\n",
530 __func__, (void *)drctl_req, drctl_req_len);
531 drctl_req->status = DRCTL_STATUS_INIT;
533 drctl_cmd = DRCTL_IO_CONFIG_REQUEST;
536 * Construct the path of the device as it will be if it
537 * is successfully added.
539 p = drctl_req->res_dev_path;
540 (void) sprintf(p, "/devices");
541 (void) ddi_pathname(pdip, p + strlen(p));
542 (void) sprintf(p + strlen(p), "/%s@%ld", name, devid);
543 DR_DBG_IO("%s: devpath=%s\n", __func__, drctl_req->res_dev_path);
545 rv = drctl_config_init(drctl_cmd, drctl_flags, drctl_req,
546 1, &drctl_resp, &drctl_resp_len, &drctl_res_ck);
548 ASSERT((drctl_resp != NULL) && (drctl_resp_len != 0));
550 drctl_rsrc = drctl_resp->resp_resources;
552 if (rv != 0) {
553 DR_DBG_IO("%s: drctl_config_init failed: %d\n", __func__, rv);
555 ASSERT(drctl_resp->resp_type == DRCTL_RESP_ERR);
557 (void) strlcpy(res->reason,
558 drctl_resp->resp_err_msg, DR_VIO_MAXREASONLEN);
560 DR_DBG_IO("%s: %s\n", __func__, res->reason);
562 goto done;
566 ASSERT(drctl_resp->resp_type == DRCTL_RESP_OK);
568 if (drctl_rsrc->status == DRCTL_STATUS_DENY) {
570 res->result = DR_VIO_RES_BLOCKED;
572 DR_DBG_IO("%s: drctl_config_init denied\n", __func__);
573 p = (char *)drctl_rsrc + drctl_rsrc->offset;
575 (void) strlcpy(res->reason, p, DR_VIO_MAXREASONLEN);
577 DR_DBG_IO("%s: %s\n", __func__, res->reason);
579 drctl_req->status = DRCTL_STATUS_CONFIG_FAILURE;
581 rv = EPERM;
582 } else {
583 cba.mdp = mdp;
584 cba.node = node;
586 br.arg = (void *)&cba;
587 br.type = DEVI_BRANCH_SID;
588 br.create.sid_branch_create = new_dev_node;
589 br.devi_branch_callback = NULL;
591 rv = e_ddi_branch_create(pdip,
592 &br, NULL, DEVI_BRANCH_CONFIGURE);
594 drctl_req->status = (rv == 0) ?
595 DRCTL_STATUS_CONFIG_SUCCESS : DRCTL_STATUS_CONFIG_FAILURE;
597 DR_DBG_IO("%s: %s@%ld = %d\n", __func__, name, devid, rv);
600 if (drctl_config_fini(&drctl_res_ck, drctl_req, 1) != 0)
601 DR_DBG_IO("%s: drctl_config_fini returned: %d\n", __func__, rv);
603 done:
604 if (listp) {
605 DR_DBG_KMEM("%s: free addr %p size %d\n",
606 __func__, (void *)listp, listsz);
607 kmem_free(listp, listsz);
610 if (mdp)
611 (void) md_fini_handle(mdp);
613 if (pdip)
614 e_ddi_branch_rele(pdip);
616 DR_DBG_KMEM("%s: free addr %p size %ld\n",
617 __func__, (void *)drctl_req, drctl_req_len);
618 kmem_free(drctl_req, drctl_req_len);
620 if (drctl_resp) {
621 DR_DBG_KMEM("%s: free addr %p size %ld\n",
622 __func__, (void *)drctl_resp, drctl_resp_len);
623 kmem_free(drctl_resp, drctl_resp_len);
626 if (rv == 0) {
627 res->result = DR_VIO_RES_OK;
628 res->status = DR_VIO_STAT_CONFIGURED;
630 /* notify interested parties about the operation */
631 dr_generate_event(DR_TYPE_VIO, SE_HINT_INSERT);
632 } else {
633 res->status = DR_VIO_STAT_UNCONFIGURED;
636 return (rv);
639 static int
640 dr_io_unconfigure(dr_vio_req_t *req, dr_vio_res_t *res)
642 int rv;
643 char *name = req->name;
644 char *p;
645 uint64_t devid = req->dev_id;
646 dev_info_t *dip;
647 dev_info_t *fdip = NULL;
648 int drctl_cmd;
649 int drctl_flags = 0;
650 drctl_rsrc_t *drctl_req;
651 size_t drctl_req_len;
652 drctl_rsrc_t *drctl_rsrc = NULL;
653 drctl_cookie_t drctl_res_ck;
654 drctl_resp_t *drctl_resp;
655 size_t drctl_resp_len;
657 if ((dip = dr_io_find_node(name, devid)) == NULL) {
658 DR_DBG_IO("%s: %s@%ld already unconfigured\n",
659 __func__, name, devid);
660 res->result = DR_VIO_RES_OK;
661 res->status = DR_VIO_STAT_NOT_PRESENT;
662 return (0);
665 res->result = DR_VIO_RES_FAILURE;
667 ASSERT(e_ddi_branch_held(dip));
669 /* Assume we fail to unconfigure the resource. */
670 res->status = DR_VIO_STAT_CONFIGURED;
672 drctl_req_len = sizeof (drctl_rsrc_t) + MAXPATHLEN;
673 drctl_req = kmem_zalloc(drctl_req_len, KM_SLEEP);
674 DR_DBG_KMEM("%s: alloc addr %p size %ld\n",
675 __func__, (void *)drctl_req, drctl_req_len);
676 drctl_req->status = DRCTL_STATUS_INIT;
678 drctl_cmd = DRCTL_IO_UNCONFIG_REQUEST;
680 if (req->msg_type == DR_VIO_FORCE_UNCONFIG)
681 drctl_flags = DRCTL_FLAG_FORCE;
683 p = drctl_req->res_dev_path;
684 (void) sprintf(p, "/devices");
685 (void) ddi_pathname(dip, p + strlen(p));
686 DR_DBG_IO("%s: devpath=%s\n", __func__, drctl_req->res_dev_path);
688 rv = drctl_config_init(drctl_cmd, drctl_flags, drctl_req,
689 1, &drctl_resp, &drctl_resp_len, &drctl_res_ck);
691 ASSERT((drctl_resp != NULL) && (drctl_resp_len != 0));
693 drctl_rsrc = drctl_resp->resp_resources;
695 if (rv != 0) {
697 DR_DBG_IO("%s: drctl_config_init failed: %d\n", __func__, rv);
699 ASSERT(drctl_resp->resp_type == DRCTL_RESP_ERR);
701 (void) strlcpy(res->reason,
702 drctl_resp->resp_err_msg, DR_VIO_MAXREASONLEN);
704 DR_DBG_IO("%s: %s\n", __func__, res->reason);
706 goto done;
709 if (drctl_rsrc->status == DRCTL_STATUS_DENY) {
710 res->result = DR_VIO_RES_BLOCKED;
712 DR_DBG_IO("%s: drctl_config_init denied\n", __func__);
713 p = (char *)drctl_rsrc + drctl_rsrc->offset;
715 (void) strlcpy(res->reason, p, DR_VIO_MAXREASONLEN);
717 DR_DBG_IO("%s: %s\n", __func__, res->reason);
719 drctl_req->status = DRCTL_STATUS_CONFIG_FAILURE;
721 rv = EPERM;
722 } else if (rv = e_ddi_branch_destroy(dip, &fdip, 0)) {
723 char *path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
725 DR_DBG_KMEM("%s: alloc addr %p size %d\n",
726 __func__, (void *)path, MAXPATHLEN);
728 * If non-NULL, fdip is held and must be released.
730 if (fdip != NULL) {
731 (void) ddi_pathname(fdip, path);
732 ddi_release_devi(fdip);
733 } else {
734 (void) ddi_pathname(dip, path);
737 DR_DBG_IO("%s: node removal failed: %s (%p)",
738 __func__, path, (fdip) ? (void *)fdip : (void *)dip);
740 drctl_req->status = DRCTL_STATUS_CONFIG_FAILURE;
742 DR_DBG_KMEM("%s: free addr %p size %d\n",
743 __func__, (void *)path, MAXPATHLEN);
744 kmem_free(path, MAXPATHLEN);
745 } else {
746 drctl_req->status = DRCTL_STATUS_CONFIG_SUCCESS;
749 if (drctl_config_fini(&drctl_res_ck, drctl_req, 1) != 0)
750 DR_DBG_IO("%s: drctl_config_fini returned: %d\n", __func__, rv);
752 DR_DBG_IO("%s: (%s@%ld) = %d\n", __func__, name, devid, rv);
754 if (rv == 0) {
755 res->result = DR_VIO_RES_OK;
756 res->status = DR_VIO_STAT_UNCONFIGURED;
758 /* Notify interested parties about the operation. */
759 dr_generate_event(DR_TYPE_VIO, SE_HINT_REMOVE);
761 done:
762 DR_DBG_KMEM("%s: free addr %p size %ld\n",
763 __func__, (void *)drctl_req, drctl_req_len);
764 kmem_free(drctl_req, drctl_req_len);
766 if (drctl_resp) {
767 DR_DBG_KMEM("%s: free addr %p size %ld\n",
768 __func__, (void *)drctl_resp, drctl_resp_len);
769 kmem_free(drctl_resp, drctl_resp_len);
772 return (rv);
775 static void
776 dr_vio_data_handler(ds_cb_arg_t arg, void *buf, size_t buflen)
778 _NOTE(ARGUNUSED(arg))
780 size_t res_len;
781 dr_vio_res_t *res;
782 dr_vio_req_t *req;
785 * Allocate a response buffer, because we always want to
786 * send back a response message.
788 res_len = sizeof (dr_vio_res_t) + DR_VIO_MAXREASONLEN;
789 res = kmem_zalloc(res_len, KM_SLEEP);
790 DR_DBG_KMEM("%s: alloc addr %p size %ld\n",
791 __func__, (void *)res, res_len);
792 res->result = DR_VIO_RES_FAILURE;
795 * Sanity check the message
797 if (buf == NULL) {
798 DR_DBG_IO("empty message: expected at least %ld bytes\n",
799 sizeof (dr_vio_req_t));
800 goto done;
802 if (buflen < sizeof (dr_vio_req_t)) {
803 DR_DBG_IO("incoming message short: expected at least %ld "
804 "bytes, received %ld\n", sizeof (dr_vio_req_t), buflen);
805 goto done;
808 DR_DBG_TRANS("incoming request:\n");
809 DR_DBG_DUMP_MSG(buf, buflen);
811 req = buf;
812 switch (req->msg_type) {
813 case DR_VIO_CONFIGURE:
814 (void) dr_io_configure(req, res);
815 break;
816 case DR_VIO_FORCE_UNCONFIG:
817 case DR_VIO_UNCONFIGURE:
818 (void) dr_io_unconfigure(req, res);
819 break;
820 default:
821 cmn_err(CE_NOTE, "bad msg_type %d\n", req->msg_type);
822 break;
824 done:
825 res->req_num = (req) ? req->req_num : 0;
827 DR_DBG_TRANS("outgoing response:\n");
828 DR_DBG_DUMP_MSG(res, res_len);
830 /* send back the response */
831 if (ds_cap_send(ds_vio_handle, res, res_len) != 0)
832 DR_DBG_IO("ds_send failed\n");
834 if (res) {
835 DR_DBG_KMEM("%s: free addr %p size %ld\n",
836 __func__, (void *)res, res_len);
837 kmem_free(res, res_len);
841 static void
842 dr_vio_reg_handler(ds_cb_arg_t arg, ds_ver_t *ver, ds_svc_hdl_t hdl)
844 DR_DBG_IO("vio_reg_handler: arg=0x%p, ver=%d.%d, hdl=0x%lx\n",
845 arg, ver->major, ver->minor, hdl);
847 ds_vio_handle = hdl;
850 static void
851 dr_vio_unreg_handler(ds_cb_arg_t arg)
853 DR_DBG_IO("vio_unreg_handler: arg=0x%p\n", arg);
855 ds_vio_handle = DS_INVALID_HDL;
858 static int
859 dr_io_init(void)
861 int rv;
863 if ((rv = ds_cap_init(&dr_vio_cap, &dr_vio_ops)) != 0) {
864 cmn_err(CE_NOTE, "ds_cap_init vio failed: %d", rv);
865 return (-1);
868 return (0);
871 static int
872 dr_io_fini(void)
874 int rv;
876 if ((rv = ds_cap_fini(&dr_vio_cap)) != 0) {
877 cmn_err(CE_NOTE, "ds_cap_fini vio failed: %d", rv);
878 return (-1);
881 return (0);
885 _init(void)
887 int status;
889 /* check that IO DR is enabled */
890 if (dr_is_disabled(DR_TYPE_VIO)) {
891 cmn_err(CE_CONT, "!VIO DR is disabled\n");
892 return (-1);
895 if ((status = dr_io_init()) != 0) {
896 cmn_err(CE_NOTE, "VIO DR initialization failed");
897 return (status);
900 if ((status = mod_install(&modlinkage)) != 0) {
901 (void) dr_io_fini();
904 return (status);
908 _info(struct modinfo *modinfop)
910 return (mod_info(&modlinkage, modinfop));
913 int dr_io_allow_unload = 0;
916 _fini(void)
918 int status;
920 if (dr_io_allow_unload == 0)
921 return (EBUSY);
923 if ((status = mod_remove(&modlinkage)) == 0) {
924 (void) dr_io_fini();
927 return (status);