6514 AS_* lock macros simplification
[illumos-gate.git] usr/src/uts/sfmmu/vm/xhat_sfmmu.c
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident "%Z%%M% %I% %E% SMI"

#include <sys/types.h>
#include <sys/cmn_err.h>
#include <sys/mman.h>
#include <vm/hat_sfmmu.h>
#include <vm/xhat.h>
#include <vm/xhat_sfmmu.h>
#include <vm/page.h>
#include <vm/as.h>
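
/*
 * Overview (added for clarity): an XHAT provider is an external HAT
 * that attaches its own translations to the regular sfmmu page
 * mapping lists.  Each provider owns an xblk_cache_t holding a kmem
 * cache plus a freelist of xhat_hme_blk structures guarded by
 * xblkcache->lock.  The routines below allocate and free those
 * blocks and link/unlink their embedded sf_hment into a page's
 * p_mapping list.
 */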

/*
 * Allocates a block that includes both struct xhat and
 * provider-specific data.
 */
struct xhat_hme_blk *
xhat_alloc_xhatblk(struct xhat *xhat)
{
        struct xhat_hme_blk *xblk;
        xblk_cache_t *xblkcache = xhat->xhat_provider->xblkcache;

        mutex_enter(&xblkcache->lock);
        if (xblkcache->free_blks) {
                xblk = (struct xhat_hme_blk *)
                    sfmmu_hmetohblk(xblkcache->free_blks);

                /*
                 * Since we are always walking the list in the
                 * forward direction, we don't update prev pointers.
                 */
                xblkcache->free_blks = xblk->xblk_hme[0].hme_next;
                mutex_exit(&xblkcache->lock);
        } else {
                mutex_exit(&xblkcache->lock);
                xblk = kmem_cache_alloc(xblkcache->cache, KM_SLEEP);
        }

        return (xblk);
}
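
/*
 * Note (added): both paths above drop xblkcache->lock before any
 * potentially blocking work, so kmem_cache_alloc() is never called
 * with the freelist lock held.  Since the allocation uses KM_SLEEP it
 * blocks rather than fails, so callers' NULL checks are defensive.
 */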

/*
 * Return the block to the free_blks pool.  The memory will
 * be freed in the reclaim routine.
 */
void
xhat_free_xhatblk(struct xhat_hme_blk *xblk)
{
        xblk_cache_t *xblkcache = xblk->xhat_hme_blk_hat->
            xhat_provider->xblkcache;

        mutex_enter(&xblkcache->lock);
        xblk->xblk_hme[0].hme_next = xblkcache->free_blks;
        xblkcache->free_blks = &xblk->xblk_hme[0];
        mutex_exit(&xblkcache->lock);
}
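
/*
 * Note (added): the freelist is threaded through xblk_hme[0].hme_next,
 * reusing the sf_hment linkage that otherwise chains the block into a
 * page's p_mapping list.  A block is unlinked from p_mapping before it
 * is freed (see xhat_delete_xhatblk()), so the overload is safe.
 */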

/*
 * Run by the kmem reaper thread.  Also called when
 * the provider unregisters.
 */
void
xhat_xblkcache_reclaim(void *arg)
{
        xhat_provider_t *provider = (xhat_provider_t *)arg;
        struct sf_hment *sfhme;
        struct xhat_hme_blk *xblk;
        xblk_cache_t *xblkcache;

        if (provider == NULL)
                cmn_err(CE_PANIC, "xhat_xblkcache_reclaim() is passed NULL");

        xblkcache = provider->xblkcache;

        while (xblkcache->free_blks != NULL) {
                /*
                 * Detach the free blocks onto a private list
                 * and clear the free_blks pointer.
                 */
                mutex_enter(&xblkcache->lock);
                sfhme = xblkcache->free_blks;
                xblkcache->free_blks = NULL;
                mutex_exit(&xblkcache->lock);

                while (sfhme != NULL) {
                        xblk = (struct xhat_hme_blk *)sfmmu_hmetohblk(sfhme);
                        ASSERT(xblk->xhat_hme_blk_misc.xhat_bit == 1);
                        sfhme = sfhme->hme_next;
                        kmem_cache_free(xblkcache->cache, xblk);
                }
        }
}
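
/*
 * Note (added): the reclaim loop detaches the whole freelist while
 * holding the lock, then frees the detached blocks with the lock
 * dropped; the outer while loop re-checks free_blks to pick up any
 * blocks that xhat_free_xhatblk() queued concurrently.
 */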

/*
 * Insert the xhat block (or, more precisely, its sf_hment)
 * into the page's p_mapping list.
 */
pfn_t
xhat_insert_xhatblk(page_t *pp, struct xhat *xhat, void **blk)
{
        kmutex_t *pml;
        pfn_t pfn;
        struct xhat_hme_blk *xblk;

        xblk = xhat_alloc_xhatblk(xhat);
        if (xblk == NULL)
                return (0);

        /* Add a "user" to the XHAT */
        xhat_hat_hold(xhat);

        xblk->xhat_hme_blk_hat = xhat;
        xblk->xhat_hme_blk_misc.xhat_bit = 1;

        pml = sfmmu_mlist_enter(pp);

        /* Insert at the head of the p_mapping list */
        xblk->xblk_hme[0].hme_prev = NULL;
        xblk->xblk_hme[0].hme_next = pp->p_mapping;
        xblk->xblk_hme[0].hme_page = pp;

        /* Only one tte per xhat_hme_blk, at least for now */
        xblk->xblk_hme[0].hme_tte.tte_hmenum = 0;

        if (pp->p_mapping) {
                ((struct sf_hment *)(pp->p_mapping))->hme_prev =
                    &(xblk->xblk_hme[0]);
                ASSERT(pp->p_share > 0);
        } else {
                /* EMPTY */
                ASSERT(pp->p_share == 0);
        }
        pp->p_mapping = &(xblk->xblk_hme[0]);

        /*
         * Update the number of mappings.
         */
        pp->p_share++;
        pfn = pp->p_pagenum;

        sfmmu_mlist_exit(pml);

        *blk = XBLK2PROVBLK(xblk);

        return (pfn);
}
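
/*
 * Note (added): sfmmu_mlist_enter() serializes all updates to the
 * page's p_mapping list and p_share count, so the head insertion,
 * share-count increment, and pfn snapshot above happen atomically
 * with respect to other mappers and to hat_pageunload().
 */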

/*
 * mlist_locked indicates whether the mapping list is already locked.
 * If the provider did not lock it itself, the only time it is locked
 * in the HAT layer is in hat_pageunload().
 */
int
xhat_delete_xhatblk(void *blk, int mlist_locked)
{
        struct xhat_hme_blk *xblk = PROVBLK2XBLK(blk);
        page_t *pp = xblk->xblk_hme[0].hme_page;
        kmutex_t *pml;

        ASSERT(pp != NULL);
        ASSERT(pp->p_share > 0);

        if (!mlist_locked)
                pml = sfmmu_mlist_enter(pp);
        else
                ASSERT(sfmmu_mlist_held(pp));

        pp->p_share--;

        if (xblk->xblk_hme[0].hme_prev) {
                ASSERT(pp->p_mapping != &(xblk->xblk_hme[0]));
                ASSERT(xblk->xblk_hme[0].hme_prev->hme_page == pp);
                xblk->xblk_hme[0].hme_prev->hme_next =
                    xblk->xblk_hme[0].hme_next;
        } else {
                ASSERT(pp->p_mapping == &(xblk->xblk_hme[0]));
                pp->p_mapping = xblk->xblk_hme[0].hme_next;
                ASSERT((pp->p_mapping == NULL) ?
                    (pp->p_share == 0) : 1);
        }

        if (xblk->xblk_hme[0].hme_next) {
                ASSERT(xblk->xblk_hme[0].hme_next->hme_page == pp);
                xblk->xblk_hme[0].hme_next->hme_prev =
                    xblk->xblk_hme[0].hme_prev;
        }

        if (!mlist_locked)
                sfmmu_mlist_exit(pml);

        xhat_hat_rele(xblk->xhat_hme_blk_hat);
        xhat_free_xhatblk(xblk);

        return (0);
}
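
/*
 * Usage sketch (added; the flow is inferred from this file, not a
 * documented contract): a registered XHAT provider typically maps a
 * page by calling xhat_insert_xhatblk(pp, xhat, &provblk), programs
 * its own MMU with the returned pfn, and later undoes the mapping
 * with xhat_delete_xhatblk(provblk, 0).  Passing mlist_locked == 1 is
 * only valid when the caller already holds the page's mapping-list
 * lock, as hat_pageunload() does.
 */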