/* $OpenBSD: monitor_mm.c,v 1.16 2009/06/22 05:39:28 dtucker Exp $ */
/*
 * Copyright 2002 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "includes.h"

#include <sys/types.h>
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#include <sys/param.h>
#include "openbsd-compat/sys-tree.h"

#include <errno.h>
#include <string.h>

#include "xmalloc.h"
#include "ssh.h"
#include "log.h"
#include "monitor_mm.h"
static int
mm_compare(struct mm_share *a, struct mm_share *b)
{
	long diff = (char *)a->address - (char *)b->address;

	if (diff == 0)
		return (0);
	else if (diff < 0)
		return (-1);
	else
		return (1);
}

RB_GENERATE(mmtree, mm_share, next, mm_compare)
static struct mm_share *
mm_make_entry(struct mm_master *mm, struct mmtree *head,
    void *address, size_t size)
{
	struct mm_share *tmp, *tmp2;

	if (mm->mmalloc == NULL)
		tmp = xmalloc(sizeof(struct mm_share));
	else
		tmp = mm_xmalloc(mm->mmalloc, sizeof(struct mm_share));
	tmp->address = address;
	tmp->size = size;

	tmp2 = RB_INSERT(mmtree, head, tmp);
	if (tmp2 != NULL)
		fatal("mm_make_entry(%p): double address %p->%p(%lu)",
		    mm, tmp2, address, (u_long)size);

	return (tmp);
}
/* Creates a shared memory area of a certain size */

struct mm_master *
mm_create(struct mm_master *mmalloc, size_t size)
{
	void *address;
	struct mm_master *mm;

	if (mmalloc == NULL)
		mm = xmalloc(sizeof(struct mm_master));
	else
		mm = mm_xmalloc(mmalloc, sizeof(struct mm_master));

	/*
	 * If the memory map has a mm_master it can be completely
	 * shared including authentication between the child
	 * and the client.
	 */
	mm->mmalloc = mmalloc;

	address = xmmap(size);
	if (address == (void *)MAP_FAILED)
		fatal("mmap(%lu): %s", (u_long)size, strerror(errno));

	mm->address = address;
	mm->size = size;

	RB_INIT(&mm->rb_free);
	RB_INIT(&mm->rb_allocated);

	mm_make_entry(mm, &mm->rb_free, address, size);

	return (mm);
}
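
/*
 * A minimal usage sketch (illustrative, not part of the original file):
 * create a map, allocate from it, release, and tear it down.  The calls
 * and their semantics are exactly those defined in this file; buf is
 * zero-filled because mm_xmalloc() memset()s the returned region.
 *
 *	struct mm_master *mm = mm_create(NULL, 65536);
 *	char *buf = mm_xmalloc(mm, 128);
 *	mm_free(mm, buf);
 *	mm_destroy(mm);
 */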
/* Frees either the allocated or the free list */

static void
mm_freelist(struct mm_master *mmalloc, struct mmtree *head)
{
	struct mm_share *mms, *next;

	for (mms = RB_ROOT(head); mms; mms = next) {
		next = RB_NEXT(mmtree, head, mms);
		RB_REMOVE(mmtree, head, mms);
		if (mmalloc == NULL)
			xfree(mms);
		else
			mm_free(mmalloc, mms);
	}
}
/* Destroys a memory mapped area */

void
mm_destroy(struct mm_master *mm)
{
	mm_freelist(mm->mmalloc, &mm->rb_free);
	mm_freelist(mm->mmalloc, &mm->rb_allocated);

#ifdef HAVE_MMAP
	if (munmap(mm->address, mm->size) == -1)
		fatal("munmap(%p, %lu): %s", mm->address, (u_long)mm->size,
		    strerror(errno));
#else
	fatal("%s: UsePrivilegeSeparation=yes and Compression=yes not supported",
	    __func__);
#endif
	if (mm->mmalloc == NULL)
		xfree(mm);
	else
		mm_free(mm->mmalloc, mm);
}
void *
mm_xmalloc(struct mm_master *mm, size_t size)
{
	void *address;

	address = mm_malloc(mm, size);
	if (address == NULL)
		fatal("%s: mm_malloc(%lu)", __func__, (u_long)size);
	memset(address, 0, size);
	return (address);
}
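
/*
 * Requests are rounded up to a multiple of MM_MINSIZE and satisfied
 * first-fit from the free tree (RB_FOREACH walks it in address order).
 * The rounding below computes
 * ((size + MM_MINSIZE - 1) / MM_MINSIZE) * MM_MINSIZE; e.g. with
 * MM_MINSIZE == 4, a request for 5 bytes becomes (8 / 4) * 4 = 8.
 */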
/* Allocates data from a memory mapped area */

void *
mm_malloc(struct mm_master *mm, size_t size)
{
	struct mm_share *mms, *tmp;

	if (size == 0)
		fatal("mm_malloc: try to allocate 0 space");
	if (size > SIZE_T_MAX - MM_MINSIZE + 1)
		fatal("mm_malloc: size too big");

	size = ((size + (MM_MINSIZE - 1)) / MM_MINSIZE) * MM_MINSIZE;

	RB_FOREACH(mms, mmtree, &mm->rb_free) {
		if (mms->size >= size)
			break;
	}

	if (mms == NULL)
		return (NULL);

	/* Debug */
	memset(mms->address, 0xd0, size);

	tmp = mm_make_entry(mm, &mm->rb_allocated, mms->address, size);

	/* Does not change order in RB tree */
	mms->size -= size;
	mms->address = (u_char *)mms->address + size;

	if (mms->size == 0) {
		RB_REMOVE(mmtree, &mm->rb_free, mms);
		if (mm->mmalloc == NULL)
			xfree(mms);
		else
			mm_free(mm->mmalloc, mms);
	}

	return (tmp->address);
}
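
/*
 * Freeing moves the range back to the free tree and then coalesces it
 * with its in-order predecessor and successor whenever the ranges are
 * adjacent, so the free tree always holds maximal contiguous runs.
 */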
/* Frees memory in a memory mapped area */

void
mm_free(struct mm_master *mm, void *address)
{
	struct mm_share *mms, *prev, tmp;

	tmp.address = address;
	mms = RB_FIND(mmtree, &mm->rb_allocated, &tmp);
	if (mms == NULL)
		fatal("mm_free(%p): can not find %p", mm, address);

	/* Debug */
	memset(mms->address, 0xd0, mms->size);

	/* Remove from allocated list and insert in free list */
	RB_REMOVE(mmtree, &mm->rb_allocated, mms);
	if (RB_INSERT(mmtree, &mm->rb_free, mms) != NULL)
		fatal("mm_free(%p): double address %p", mm, address);

	/* Find previous entry */
	prev = mms;
	if (RB_LEFT(prev, next)) {
		prev = RB_LEFT(prev, next);
		while (RB_RIGHT(prev, next))
			prev = RB_RIGHT(prev, next);
	} else {
		if (RB_PARENT(prev, next) &&
		    (prev == RB_RIGHT(RB_PARENT(prev, next), next)))
			prev = RB_PARENT(prev, next);
		else {
			while (RB_PARENT(prev, next) &&
			    (prev == RB_LEFT(RB_PARENT(prev, next), next)))
				prev = RB_PARENT(prev, next);
			prev = RB_PARENT(prev, next);
		}
	}

	/* Check if range does not overlap */
	if (prev != NULL && MM_ADDRESS_END(prev) > address)
		fatal("mm_free: memory corruption: %p(%lu) > %p",
		    prev->address, (u_long)prev->size, address);

	/* See if we can merge backwards */
	if (prev != NULL && MM_ADDRESS_END(prev) == address) {
		prev->size += mms->size;
		RB_REMOVE(mmtree, &mm->rb_free, mms);
		if (mm->mmalloc == NULL)
			xfree(mms);
		else
			mm_free(mm->mmalloc, mms);
	} else
		prev = mms;

	if (prev == NULL)
		return;

	/* Check if we can merge forwards */
	mms = RB_NEXT(mmtree, &mm->rb_free, prev);
	if (mms == NULL)
		return;

	if (MM_ADDRESS_END(prev) > mms->address)
		fatal("mm_free: memory corruption: %p < %p(%lu)",
		    mms->address, prev->address, (u_long)prev->size);
	if (MM_ADDRESS_END(prev) != mms->address)
		return;

	prev->size += mms->size;
	RB_REMOVE(mmtree, &mm->rb_free, mms);

	if (mm->mmalloc == NULL)
		xfree(mms);
	else
		mm_free(mm->mmalloc, mms);
}
static void
mm_sync_list(struct mmtree *oldtree, struct mmtree *newtree,
    struct mm_master *mm, struct mm_master *mmold)
{
	struct mm_master *mmalloc = mm->mmalloc;
	struct mm_share *mms, *new;

	/* Sync free list */
	RB_FOREACH(mms, mmtree, oldtree) {
		/* Check the values */
		mm_memvalid(mmold, mms, sizeof(struct mm_share));
		mm_memvalid(mm, mms->address, mms->size);

		new = mm_xmalloc(mmalloc, sizeof(struct mm_share));
		memcpy(new, mms, sizeof(struct mm_share));
		RB_INSERT(mmtree, newtree, new);
	}
}
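
/*
 * Re-homes the bookkeeping of *pmm: a fresh allocator map is created,
 * the master structure and both trees are copied into it, the old
 * allocator is destroyed, and the caller's pointers are switched over
 * to the new pair.
 */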
void
mm_share_sync(struct mm_master **pmm, struct mm_master **pmmalloc)
{
	struct mm_master *mm;
	struct mm_master *mmalloc;
	struct mm_master *mmold;
	struct mmtree rb_free, rb_allocated;

	debug3("%s: Share sync", __func__);

	mm = *pmm;
	mmold = mm->mmalloc;
	mm_memvalid(mmold, mm, sizeof(*mm));

	mmalloc = mm_create(NULL, mm->size);
	mm = mm_xmalloc(mmalloc, sizeof(struct mm_master));
	memcpy(mm, *pmm, sizeof(struct mm_master));
	mm->mmalloc = mmalloc;

	rb_free = mm->rb_free;
	rb_allocated = mm->rb_allocated;

	RB_INIT(&mm->rb_free);
	RB_INIT(&mm->rb_allocated);

	mm_sync_list(&rb_free, &mm->rb_free, mm, mmold);
	mm_sync_list(&rb_allocated, &mm->rb_allocated, mm, mmold);

	mm_destroy(mmold);

	*pmm = mm;
	*pmmalloc = mmalloc;

	debug3("%s: Share sync end", __func__);
}
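
/* Checks that [address, address + size) lies within the mapped area */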
void
mm_memvalid(struct mm_master *mm, void *address, size_t size)
{
	void *end = (u_char *)address + size;

	if (address < mm->address)
		fatal("mm_memvalid: address too small: %p", address);
	if (end < address)
		fatal("mm_memvalid: end < address: %p < %p", end, address);
	if (end > (void *)((u_char *)mm->address + mm->size))
		fatal("mm_memvalid: address too large: %p", address);
}