/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
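
/*
 * Overview: /dev/physmem lets a suitably privileged process (see the
 * secpolicy_resource() and secpolicy_lock_memory() checks in physmem_open()
 * below) map specific physical pages into its own address space via three
 * ioctls: PHYSMEM_SETUP reserves a VA range backed by a physmem vnode,
 * PHYSMEM_MAP captures a physical page and hashes it into that vnode, and
 * PHYSMEM_DESTROY unmaps the range again.
 *
 * An illustrative userland sketch (not part of this file; the ioctl
 * handlers below are the authoritative reference for the parameters):
 *
 *	int fd = open("/dev/physmem", O_RDWR);
 *	struct physmem_setup_param psp;
 *
 *	psp.req_paddr = pa;		(PAGESIZE-aligned physical address)
 *	psp.len = len;			(PAGESIZE-aligned length)
 *	psp.user_va = 0;		(let the driver pick the VA)
 *	ioctl(fd, PHYSMEM_SETUP, &psp);	(fills in psp.user_va, psp.cookie)
 *
 *	struct physmem_map_param pmp;
 *
 *	pmp.req_paddr = pa;
 *	pmp.flags = 0;
 *	ioctl(fd, PHYSMEM_MAP, &pmp);	(page is now mapped at pmp.ret_va)
 *
 *	ioctl(fd, PHYSMEM_DESTROY, &psp.cookie);
 */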
#include <sys/types.h>
#include <sys/modctl.h>
#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/devops.h>
#include <sys/stat.h>
#include <sys/file.h>
#include <sys/cred.h>
#include <sys/policy.h>
#include <sys/errno.h>
#include <vm/seg_dev.h>
#include <vm/seg_vn.h>
#include <vm/page.h>
#include <sys/fs/swapnode.h>
#include <sys/sysmacros.h>
#include <sys/fcntl.h>
#include <sys/vmsystm.h>
#include <sys/physmem.h>
#include <sys/vfs_opreg.h>
static dev_info_t *physmem_dip = NULL;
/*
 * Linked list element hanging off physmem_proc_hash below, which holds all
 * the information for a given segment which has been setup for this process.
 * This is a simple linked list as we are assuming that for a given process
 * the setup ioctl will only be called a handful of times.  If this assumption
 * changes in the future, a quicker to traverse data structure should be used.
 */
struct physmem_hash {
	struct physmem_hash *ph_next;
	uint64_t ph_base_pa;
	caddr_t ph_base_va;
	size_t ph_seg_len;
	struct vnode *ph_vnode;
};
/*
 * Hash of all of the processes which have setup mappings with the driver with
 * pointers to per process data.
 */
struct physmem_proc_hash {
	struct proc *pph_proc;
	struct physmem_hash *pph_hash;
	struct physmem_proc_hash *pph_next;
};
/* Needs to be a power of two for simple hash algorithm */
#define	PPH_SIZE	(8)
struct physmem_proc_hash *pph[PPH_SIZE];
/*
 * Lock which protects the pph hash above.  To add an element (either a new
 * process or a new segment) the WRITE lock must be held.  To traverse the
 * list, only a READ lock is needed.
 */
krwlock_t pph_rwlock;
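
/*
 * The hash below keys on the proc pointer after shifting off its low-order
 * bits, which vary little between kmem-allocated proc structures; masking
 * with PPH_SIZE - 1 is a valid hash only because PPH_SIZE is a power of two.
 */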
#define	PHYSMEM_HASH(procp) ((int)((((uintptr_t)procp) >> 8) & (PPH_SIZE - 1)))
/*
 * Need to keep a reference count of how many processes have the driver
 * open to prevent it from disappearing.
 */
uint64_t physmem_vnodecnt;
kmutex_t physmem_mutex;		/* protects physmem_vnodecnt */
static int physmem_getpage(struct vnode *vp, offset_t off, size_t len,
    uint_t *protp, page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr,
    enum seg_rw rw, struct cred *cr, caller_context_t *ct);

static int physmem_addmap(struct vnode *vp, offset_t off, struct as *as,
    caddr_t addr, size_t len, uchar_t prot, uchar_t maxprot, uint_t flags,
    struct cred *cred, caller_context_t *ct);

static int physmem_delmap(struct vnode *vp, offset_t off, struct as *as,
    caddr_t addr, size_t len, uint_t prot, uint_t maxprot, uint_t flags,
    struct cred *cred, caller_context_t *ct);

static void physmem_inactive(vnode_t *vp, cred_t *crp, caller_context_t *ct);
const fs_operation_def_t physmem_vnodeops_template[] = {
	VOPNAME_GETPAGE,	{ .vop_getpage = physmem_getpage },
	VOPNAME_ADDMAP,		{ .vop_addmap = physmem_addmap },
	VOPNAME_DELMAP,		{ .vop_delmap = physmem_delmap },
	VOPNAME_INACTIVE,	{ .vop_inactive = physmem_inactive },
	NULL,			NULL
};

vnodeops_t *physmem_vnodeops = NULL;
/*
 * Removes the current process from the hash if the process has no more
 * physmem segments active.
 */
void
physmem_remove_hash_proc()
{
	int index;
	struct physmem_proc_hash **walker;
	struct physmem_proc_hash *victim = NULL;

	index = PHYSMEM_HASH(curproc);
	rw_enter(&pph_rwlock, RW_WRITER);
	walker = &pph[index];
	while (*walker != NULL) {
		if ((*walker)->pph_proc == curproc &&
		    (*walker)->pph_hash == NULL) {
			victim = *walker;
			*walker = victim->pph_next;
			break;
		}
		walker = &((*walker)->pph_next);
	}
	rw_exit(&pph_rwlock);
	if (victim != NULL)
		kmem_free(victim, sizeof (struct physmem_proc_hash));
}
/*
 * Add a new entry to the hash for the given process to cache the
 * address ranges that it is working on.  If this is the first hash
 * item to be added for this process, we will create the head pointer
 * for this process.
 * Returns 0 on success, ERANGE when the physical address is already in the
 * hash.
 */
int
physmem_add_hash(struct physmem_hash *php)
{
	int index;
	struct physmem_proc_hash *iterator;
	struct physmem_proc_hash *newp = NULL;
	struct physmem_hash *temp;
	int ret = 0;

	index = PHYSMEM_HASH(curproc);

insert:
	rw_enter(&pph_rwlock, RW_WRITER);
	iterator = pph[index];
	while (iterator != NULL) {
		if (iterator->pph_proc == curproc) {
			/*
			 * check to make sure a single process does not try to
			 * map the same region twice.
			 */
			for (temp = iterator->pph_hash; temp != NULL;
			    temp = temp->ph_next) {
				if ((php->ph_base_pa >= temp->ph_base_pa &&
				    php->ph_base_pa < temp->ph_base_pa +
				    temp->ph_seg_len) ||
				    (temp->ph_base_pa >= php->ph_base_pa &&
				    temp->ph_base_pa < php->ph_base_pa +
				    php->ph_seg_len)) {
					ret = ERANGE;
					break;
				}
			}
			if (ret == 0) {
				php->ph_next = iterator->pph_hash;
				iterator->pph_hash = php;
			}
			rw_exit(&pph_rwlock);
			/* Need to check for two threads in sync */
			if (newp != NULL)
				kmem_free(newp, sizeof (*newp));
			return (ret);
		}
		iterator = iterator->pph_next;
	}

	if (newp != NULL) {
		newp->pph_proc = curproc;
		newp->pph_next = pph[index];
		newp->pph_hash = php;
		pph[index] = newp;
		rw_exit(&pph_rwlock);
		return (0);
	}

	rw_exit(&pph_rwlock);
	/* Dropped the lock so we could use KM_SLEEP */
	newp = kmem_zalloc(sizeof (struct physmem_proc_hash), KM_SLEEP);
	goto insert;
}
/*
 * Will return the pointer to the physmem_hash struct if the setup routine
 * has previously been called for this memory.
 * Returns NULL on failure.
 */
struct physmem_hash *
physmem_get_hash(uint64_t req_paddr, size_t len, proc_t *procp)
{
	int index;
	struct physmem_proc_hash *proc_hp;
	struct physmem_hash *php;

	ASSERT(rw_lock_held(&pph_rwlock));

	index = PHYSMEM_HASH(procp);
	proc_hp = pph[index];
	while (proc_hp != NULL) {
		if (proc_hp->pph_proc == procp) {
			php = proc_hp->pph_hash;
			while (php != NULL) {
				if ((req_paddr >= php->ph_base_pa) &&
				    (req_paddr + len <=
				    php->ph_base_pa + php->ph_seg_len)) {
					return (php);
				}
				php = php->ph_next;
			}
		}
		proc_hp = proc_hp->pph_next;
	}
	return (NULL);
}
/*
 * Returns 1 if the given cookie names a physmem_hash struct belonging to
 * the current process, 0 otherwise.
 */
int
physmem_validate_cookie(uint64_t p_cookie)
{
	int index;
	struct physmem_proc_hash *proc_hp;
	struct physmem_hash *php;

	ASSERT(rw_lock_held(&pph_rwlock));

	index = PHYSMEM_HASH(curproc);
	proc_hp = pph[index];
	while (proc_hp != NULL) {
		if (proc_hp->pph_proc == curproc) {
			php = proc_hp->pph_hash;
			while (php != NULL) {
				if ((uint64_t)(uintptr_t)php == p_cookie) {
					return (1);
				}
				php = php->ph_next;
			}
		}
		proc_hp = proc_hp->pph_next;
	}
	return (0);
}
/*
 * Remove the given vnode from the pph hash.  If it exists in the hash, the
 * process still has to be around, as the vnode is obviously still around;
 * since it's a physmem vnode, it must be in the hash.
 * If it is not in the hash, that must mean that the setup ioctl failed.
 * Return 0 in this instance, 1 if it is in the hash.
 */
int
physmem_remove_vnode_hash(vnode_t *vp)
{
	int index;
	struct physmem_proc_hash *proc_hp;
	struct physmem_hash **phpp;
	struct physmem_hash *victim;

	index = PHYSMEM_HASH(curproc);
	/* synchronize with the map routine */
	rw_enter(&pph_rwlock, RW_WRITER);
	proc_hp = pph[index];
	while (proc_hp != NULL) {
		if (proc_hp->pph_proc == curproc) {
			phpp = &proc_hp->pph_hash;
			while (*phpp != NULL) {
				if ((*phpp)->ph_vnode == vp) {
					victim = *phpp;
					*phpp = victim->ph_next;

					rw_exit(&pph_rwlock);
					kmem_free(victim, sizeof (*victim));
					return (1);
				}
				phpp = &(*phpp)->ph_next;
			}
		}
		proc_hp = proc_hp->pph_next;
	}
	rw_exit(&pph_rwlock);
	return (0);
}
int
physmem_setup_vnops()
{
	int error;
	char *name = "physmem";

	if (physmem_vnodeops != NULL)
		cmn_err(CE_PANIC, "physmem vnodeops already set\n");

	error = vn_make_ops(name, physmem_vnodeops_template,
	    &physmem_vnodeops);
	if (error != 0) {
		cmn_err(CE_WARN, "physmem_setup_vnops: bad vnode ops template");
	}
	return (error);
}
/*
 * The guts of the PHYSMEM_SETUP ioctl.
 * Create a segment in the address space with the specified parameters.
 * If pspp->user_va is NULL, as_gap will be used to find an appropriate VA.
 * We do not do bounds checking on the requested physical addresses; if they
 * do not exist in the system, they will not be mappable.
 * Returns 0 on success with the following error codes on failure:
 *	ENOMEM - The VA range requested was already mapped if pspp->user_va is
 *		 non-NULL or the system was unable to find enough VA space for
 *		 the desired length if user_va was NULL.
 *	EINVAL - The requested PA, VA, or length was not PAGESIZE aligned.
 */
static int
physmem_setup_addrs(struct physmem_setup_param *pspp)
{
	struct as *as = curproc->p_as;
	struct segvn_crargs vn_a;
	int ret = 0;
	uint64_t base_pa;
	size_t len;
	caddr_t uvaddr;
	struct vnode *vp;
	struct physmem_hash *php;

	ASSERT(pspp != NULL);
	base_pa = pspp->req_paddr;
	len = pspp->len;
	uvaddr = (caddr_t)(uintptr_t)pspp->user_va;

	/* Sanity checking */
	if (!IS_P2ALIGNED(base_pa, PAGESIZE))
		return (EINVAL);
	if (!IS_P2ALIGNED(len, PAGESIZE))
		return (EINVAL);
	if (uvaddr != NULL && !IS_P2ALIGNED(uvaddr, PAGESIZE))
		return (EINVAL);

	php = kmem_zalloc(sizeof (struct physmem_hash), KM_SLEEP);

	/* Need to bump vnode count so that the driver can not be unloaded */
	mutex_enter(&physmem_mutex);
	physmem_vnodecnt++;
	mutex_exit(&physmem_mutex);

	vp = vn_alloc(KM_SLEEP);
	ASSERT(vp != NULL);	/* SLEEP can't return NULL */
	vn_setops(vp, physmem_vnodeops);

	php->ph_vnode = vp;

	vn_a.vp = vp;
	vn_a.offset = (u_offset_t)base_pa;
	vn_a.type = MAP_SHARED;
	vn_a.prot = PROT_ALL;
	vn_a.maxprot = PROT_ALL;
	vn_a.flags = 0;
	vn_a.cred = NULL;
	vn_a.amp = NULL;
	vn_a.szc = 0;
	vn_a.lgrp_mem_policy_flags = 0;

	as_rangelock(as);
	if (uvaddr != NULL) {
		if (as_gap(as, len, &uvaddr, &len, AH_LO, NULL) == -1) {
			ret = ENOMEM;
fail:
			as_rangeunlock(as);
			vn_free(vp);
			kmem_free(php, sizeof (*php));
			mutex_enter(&physmem_mutex);
			physmem_vnodecnt--;
			mutex_exit(&physmem_mutex);
			return (ret);
		}
	} else {
		/* We pick the address for the user */
		map_addr(&uvaddr, len, 0, 1, 0);
		if (uvaddr == NULL) {
			ret = ENOMEM;
			goto fail;
		}
	}

	ret = as_map(as, uvaddr, len, segvn_create, &vn_a);

	if (ret == 0) {
		as_rangeunlock(as);
		php->ph_base_pa = base_pa;
		php->ph_base_va = uvaddr;
		php->ph_seg_len = len;
		pspp->user_va = (uint64_t)(uintptr_t)uvaddr;
		pspp->cookie = (uint64_t)(uintptr_t)php;
		ret = physmem_add_hash(php);
		if (ret == 0)
			return (0);

		/* Note that the call to as_unmap will free the vnode */
		(void) as_unmap(as, uvaddr, len);
		kmem_free(php, sizeof (*php));
		return (ret);
	}

	goto fail;
}
/*
 * The guts of the PHYSMEM_MAP ioctl.
 * Map the given PA to the appropriate VA if the PHYSMEM_SETUP ioctl has
 * already been called for this PA range.
 * Returns 0 on success with the following error codes on failure:
 *	EPERM - The requested page is long term locked, and thus repeated
 *		requests to allocate this page will likely fail.
 *	EAGAIN - The requested page could not be allocated, but it is believed
 *		 that future attempts could succeed.
 *	ENOMEM - There was not enough free memory in the system to safely
 *		 map the requested page.
 *	EINVAL - The requested paddr was not PAGESIZE aligned or the
 *		 PHYSMEM_SETUP ioctl was not called for this page.
 *	ENOENT - The requested page was inside the kernel cage, and the
 *		 PHYSMEM_CAGE flag was not set.
 *	EBUSY - The requested page is retired and the PHYSMEM_RETIRED flag
 *		was not set.
 */
static int
physmem_map_addrs(struct physmem_map_param *pmpp)
{
	caddr_t uvaddr;
	page_t *pp;
	uint64_t req_paddr;
	struct vnode *vp;
	int ret = 0;
	struct physmem_hash *php;
	uint_t flags = 0;

	ASSERT(pmpp != NULL);
	req_paddr = pmpp->req_paddr;

	if (!IS_P2ALIGNED(req_paddr, PAGESIZE))
		return (EINVAL);
	/* Find the vnode for this map request */
	rw_enter(&pph_rwlock, RW_READER);
	php = physmem_get_hash(req_paddr, PAGESIZE, curproc);
	if (php == NULL) {
		rw_exit(&pph_rwlock);
		return (EINVAL);
	}
	vp = php->ph_vnode;
	uvaddr = php->ph_base_va + (req_paddr - php->ph_base_pa);
	rw_exit(&pph_rwlock);

	pp = page_numtopp_nolock(btop((size_t)req_paddr));
	if (pp == NULL) {
		pmpp->ret_va = 0;
		return (EPERM);
	}

	/*
	 * Check to see if page already mapped correctly.  This can happen
	 * when we failed to capture a page previously and it was captured
	 * asynchronously for us.  Return success in this case.
	 */
	if (pp->p_vnode == vp) {
		ASSERT(pp->p_offset == (u_offset_t)req_paddr);
		pmpp->ret_va = (uint64_t)(uintptr_t)uvaddr;
		return (0);
	}

	/*
	 * physmem should be responsible for checking for cage
	 * and retired pages.
	 */
	if (pmpp->flags & PHYSMEM_CAGE)
		flags = CAPTURE_GET_CAGE;
	if (pmpp->flags & PHYSMEM_RETIRED)
		flags |= CAPTURE_GET_RETIRED;

	ret = page_trycapture(pp, 0, flags | CAPTURE_PHYSMEM, curproc);

	if (ret != 0) {
		pmpp->ret_va = 0;
		return (ret);
	} else {
		pmpp->ret_va = (uint64_t)(uintptr_t)uvaddr;
		return (0);
	}
}
/*
 * Map the given page into the process's address space if possible.
 * We actually only hash the page in on the correct vnode as the page
 * will be mapped via segvn_pagefault.
 * returns 0 on success
 * returns 1 if there is no need to map this page anymore (process exited)
 * returns -1 if we failed to map the page.
 */
static int
map_page_proc(page_t *pp, void *arg, uint_t flags)
{
	struct vnode *vp;
	proc_t *procp = (proc_t *)arg;
	int ret;
	u_offset_t paddr = (u_offset_t)ptob(pp->p_pagenum);
	struct physmem_hash *php;

	/*
	 * Check against availrmem to make sure that we're not low on memory.
	 * We check again here as ASYNC requests do not do this check elsewhere.
	 * We return 1 as we don't want the page to have the PR_CAPTURE bit
	 * set or be on the page capture hash.
	 */
	if (swapfs_minfree > availrmem + 1) {
		page_free(pp, 1);
		return (1);
	}

	/*
	 * If this is an asynchronous request for the current process,
	 * we can not map the page as it's possible that we are also in the
	 * process of unmapping the page which could result in a deadlock
	 * with the as lock.
	 */
	if ((flags & CAPTURE_ASYNC) && (curproc == procp)) {
		page_free(pp, 1);
		return (-1);
	}

	/* only return zeroed out pages */
	pagezero(pp, 0, PAGESIZE);

	rw_enter(&pph_rwlock, RW_READER);
	php = physmem_get_hash(paddr, PAGESIZE, procp);
	if (php == NULL) {
		rw_exit(&pph_rwlock);
		/*
		 * Free the page as there is no longer a valid outstanding
		 * request for this page.
		 */
		page_free(pp, 1);
		return (1);
	}

	vp = php->ph_vnode;

	/*
	 * We need to protect against a possible deadlock here where we own
	 * the vnode page hash mutex and want to acquire it again as there
	 * are locations in the code, where we unlock a page while holding
	 * the mutex which can lead to the page being captured and eventually
	 * end up here.
	 */
	if (mutex_owned(page_vnode_mutex(vp))) {
		rw_exit(&pph_rwlock);
		page_free(pp, 1);
		return (-1);
	}

	ret = page_hashin(pp, vp, paddr, NULL);
	rw_exit(&pph_rwlock);
	if (ret == 0) {
		page_free(pp, 1);
		return (-1);
	}

	page_downgrade(pp);

	mutex_enter(&freemem_lock);
	availrmem--;
	mutex_exit(&freemem_lock);

	return (0);
}
/*
 * The guts of the PHYSMEM_DESTROY ioctl.
 * The cookie passed in will provide all of the information needed to
 * free up the address space and physical memory associated with the
 * corresponding PHYSMEM_SETUP ioctl.
 * Returns 0 on success with the following error codes on failure:
 *	EINVAL - The cookie supplied is not valid.
 */
static int
physmem_destroy_addrs(uint64_t p_cookie)
{
	struct as *as = curproc->p_as;
	size_t len;
	caddr_t uvaddr;

	rw_enter(&pph_rwlock, RW_READER);
	if (physmem_validate_cookie(p_cookie) == 0) {
		rw_exit(&pph_rwlock);
		return (EINVAL);
	}

	len = ((struct physmem_hash *)(uintptr_t)p_cookie)->ph_seg_len;
	uvaddr = ((struct physmem_hash *)(uintptr_t)p_cookie)->ph_base_va;
	rw_exit(&pph_rwlock);

	(void) as_unmap(as, uvaddr, len);

	return (0);
}
/*
 * If the page has been hashed into the physmem vnode, then just look it up
 * and return it via pl, otherwise return ENOMEM as the map ioctl has not
 * succeeded on the given page.
 */
/*ARGSUSED*/
static int
physmem_getpage(struct vnode *vp, offset_t off, size_t len, uint_t *protp,
    page_t *pl[], size_t plsz, struct seg *seg, caddr_t addr, enum seg_rw rw,
    struct cred *cr, caller_context_t *ct)
{
	page_t *pp;

	ASSERT(len == PAGESIZE);
	ASSERT(AS_READ_HELD(seg->s_as));

	/*
	 * If the page is in the hash, then we successfully claimed this
	 * page earlier, so return it to the caller.
	 */
	pp = page_lookup(vp, off, SE_SHARED);
	if (pp != NULL) {
		pl[0] = pp;
		pl[1] = NULL;
		*protp = PROT_ALL;
		return (0);
	}
	return (ENOMEM);
}
/*
 * We can not allow a process mapping /dev/physmem pages to fork as there can
 * only be a single mapping to a /dev/physmem page at a given time.  Thus, the
 * return of EINVAL when we are not working on our own address space.
 * Otherwise we return zero as this function is required for normal operation.
 */
/*ARGSUSED*/
static int
physmem_addmap(struct vnode *vp, offset_t off, struct as *as,
    caddr_t addr, size_t len, uchar_t prot, uchar_t maxprot, uint_t flags,
    struct cred *cred, caller_context_t *ct)
{
	if (curproc->p_as != as) {
		return (EINVAL);
	}
	return (0);
}
/* Will always get called for removing a whole segment. */
/*ARGSUSED*/
static int
physmem_delmap(struct vnode *vp, offset_t off, struct as *as,
    caddr_t addr, size_t len, uint_t prot, uint_t maxprot, uint_t flags,
    struct cred *cred, caller_context_t *ct)
{
	/*
	 * Release our hold on the vnode so that the final VN_RELE will
	 * call physmem_inactive to clean things up.
	 */
	VN_RELE(vp);

	return (0);
}
/*
 * Clean up all the pages belonging to this vnode and then free it.
 */
/*ARGSUSED*/
static void
physmem_inactive(vnode_t *vp, cred_t *crp, caller_context_t *ct)
{
	page_t *pp;

	/*
	 * Remove the vnode from the hash now, to prevent asynchronous
	 * attempts to map into this vnode.  This avoids a deadlock
	 * where two threads try to get into this logic at the same
	 * time and try to map the pages they are destroying into the
	 * other's address space.
	 * If it's not in the hash, just free it.
	 */
	if (physmem_remove_vnode_hash(vp) == 0) {
		ASSERT(vp->v_pages == NULL);
		vn_free(vp);
		physmem_remove_hash_proc();
		mutex_enter(&physmem_mutex);
		physmem_vnodecnt--;
		mutex_exit(&physmem_mutex);
		return;
	}

	/*
	 * At this point in time, no other logic can be adding or removing
	 * pages from the vnode, otherwise the v_pages list could be
	 * inaccurate.
	 */

	while ((pp = vp->v_pages) != NULL) {
		page_t *rpp;

		if (page_tryupgrade(pp)) {
			/*
			 * set lckcnt for page_destroy to do availrmem
			 * accounting
			 */
			pp->p_lckcnt = 1;
			page_destroy(pp, 0);
		} else {
			/* failure to lock should be transient */
			rpp = page_lookup(vp, ptob(pp->p_pagenum), SE_SHARED);
			if (rpp != pp)
				continue;
			page_unlock(rpp);
		}
	}

	vn_free(vp);
	physmem_remove_hash_proc();
	mutex_enter(&physmem_mutex);
	physmem_vnodecnt--;
	mutex_exit(&physmem_mutex);
}
/*ARGSUSED*/
static int
physmem_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *credp,
    int *rvalp)
{
	int ret;

	switch (cmd) {
	case PHYSMEM_SETUP:
		{
			struct physmem_setup_param psp;

			if (ddi_copyin((void *)arg, &psp,
			    sizeof (struct physmem_setup_param), 0))
				return (EFAULT);

			ret = physmem_setup_addrs(&psp);

			if (ddi_copyout(&psp, (void *)arg, sizeof (psp), 0))
				return (EFAULT);
		}
		break;
	case PHYSMEM_MAP:
		{
			struct physmem_map_param pmp;

			if (ddi_copyin((void *)arg, &pmp,
			    sizeof (struct physmem_map_param), 0))
				return (EFAULT);

			ret = physmem_map_addrs(&pmp);

			if (ddi_copyout(&pmp, (void *)arg, sizeof (pmp), 0))
				return (EFAULT);
		}
		break;
	case PHYSMEM_DESTROY:
		{
			uint64_t cookie;

			if (ddi_copyin((void *)arg, &cookie,
			    sizeof (uint64_t), 0))
				return (EFAULT);

			ret = physmem_destroy_addrs(cookie);
		}
		break;
	default:
		return (ENOTSUP);
	}

	return (ret);
}
/*ARGSUSED*/
static int
physmem_open(dev_t *devp, int flag, int otyp, cred_t *credp)
{
	int ret;
	static int msg_printed = 0;

	if ((flag & (FWRITE | FREAD)) != (FWRITE | FREAD)) {
		return (EINVAL);
	}

	/* need to make sure we have the right privileges */
	if ((ret = secpolicy_resource(credp)) != 0)
		return (ret);
	if ((ret = secpolicy_lock_memory(credp)) != 0)
		return (ret);

	if (msg_printed == 0) {
		cmn_err(CE_NOTE, "!driver has been opened. This driver may "
		    "take out long term locks on pages which may impact "
		    "dynamic reconfiguration events");
		msg_printed = 1;
	}

	return (0);
}
/*ARGSUSED*/
static int
physmem_close(dev_t dev, int flag, int otyp, cred_t *credp)
{
	return (0);
}
/*ARGSUSED*/
static int
physmem_getinfo(dev_info_t *dip, ddi_info_cmd_t infocmd,
    void *arg, void **resultp)
{
	switch (infocmd) {
	case DDI_INFO_DEVT2DEVINFO:
		*resultp = physmem_dip;
		return (DDI_SUCCESS);

	case DDI_INFO_DEVT2INSTANCE:
		*resultp = (void *)(ulong_t)getminor((dev_t)arg);
		return (DDI_SUCCESS);

	default:
		return (DDI_FAILURE);
	}
}
static int
physmem_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	int i;

	if (cmd == DDI_RESUME) {
		return (DDI_SUCCESS);
	}

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	if (ddi_create_minor_node(dip, ddi_get_name(dip), S_IFCHR,
	    ddi_get_instance(dip), DDI_PSEUDO, 0) != DDI_SUCCESS)
		return (DDI_FAILURE);

	physmem_dip = dip;

	/* Initialize driver specific data */
	if (physmem_setup_vnops()) {
		ddi_remove_minor_node(dip, ddi_get_name(dip));
		return (DDI_FAILURE);
	}

	for (i = 0; i < PPH_SIZE; i++)
		pph[i] = NULL;

	page_capture_register_callback(PC_PHYSMEM, 10000,
	    map_page_proc);

	return (DDI_SUCCESS);
}
static int
physmem_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	int ret = DDI_SUCCESS;

	if (cmd == DDI_SUSPEND) {
		return (DDI_SUCCESS);
	}

	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	ASSERT(physmem_dip == dip);

	mutex_enter(&physmem_mutex);
	if (physmem_vnodecnt == 0) {
		if (physmem_vnodeops != NULL) {
			vn_freevnodeops(physmem_vnodeops);
			physmem_vnodeops = NULL;
			page_capture_unregister_callback(PC_PHYSMEM);
		}
	} else {
		ret = EBUSY;
	}
	mutex_exit(&physmem_mutex);
	if (ret == DDI_SUCCESS)
		ddi_remove_minor_node(dip, ddi_get_name(dip));

	return (ret);
}
static struct cb_ops physmem_cb_ops = {
	physmem_open,	/* open */
	physmem_close,	/* close */
	nodev,		/* strategy */
	nodev,		/* print */
	nodev,		/* dump */
	nodev,		/* read */
	nodev,		/* write */
	physmem_ioctl,	/* ioctl */
	nodev,		/* devmap */
	nodev,		/* mmap */
	nodev,		/* segmap */
	nochpoll,	/* chpoll */
	ddi_prop_op,	/* prop_op */
	NULL,		/* cb_str */
	D_NEW | D_MP | D_DEVMAP,
	CB_REV,
	NULL,		/* async read */
	NULL		/* async write */
};
static struct dev_ops physmem_ops = {
	DEVO_REV,		/* devo_rev */
	0,			/* refcnt */
	physmem_getinfo,	/* get_dev_info */
	nulldev,		/* identify */
	nulldev,		/* probe */
	physmem_attach,		/* attach */
	physmem_detach,		/* detach */
	nodev,			/* reset */
	&physmem_cb_ops,	/* driver operations */
	NULL,			/* bus operations */
	NULL,			/* power */
	ddi_quiesce_not_needed,	/* quiesce */
};

static struct modldrv modldrv = {
	&mod_driverops,
	"physmem driver",
	&physmem_ops
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};

int
_init(void)
{
	return (mod_install(&modlinkage));
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

int
_fini(void)
{
	return (mod_remove(&modlinkage));
}