/* $OpenBSD: monitor_mm.c,v 1.19 2014/01/04 17:50:55 tedu Exp $ */
/*
 * Copyright 2002 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "includes.h"

#include <sys/types.h>
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#include <sys/param.h>
#include "openbsd-compat/sys-tree.h"

#include <errno.h>
#include <stdarg.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>

#include "xmalloc.h"
#include "ssh.h"
#include "log.h"
#include "monitor_mm.h"

static int
mm_compare(struct mm_share *a, struct mm_share *b)
{
	ptrdiff_t diff = (char *)a->address - (char *)b->address;

	if (diff == 0)
		return (0);
	else if (diff < 0)
		return (-1);
	else
		return (1);
}

RB_GENERATE(mmtree, mm_share, next, mm_compare)
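
/*
 * Note: RB_GENERATE() (from sys-tree.h) expands to the red-black tree
 * routines for "mmtree" nodes of type struct mm_share, linked through
 * the "next" field and ordered by mm_compare(), i.e. by start address.
 * The RB_INSERT/RB_FIND/RB_FOREACH macros used below dispatch to these
 * generated routines.
 */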

static struct mm_share *
mm_make_entry(struct mm_master *mm, struct mmtree *head,
    void *address, size_t size)
{
	struct mm_share *tmp, *tmp2;

	if (mm->mmalloc == NULL)
		tmp = xcalloc(1, sizeof(struct mm_share));
	else
		tmp = mm_xmalloc(mm->mmalloc, sizeof(struct mm_share));
	tmp->address = address;
	tmp->size = size;

	tmp2 = RB_INSERT(mmtree, head, tmp);
	if (tmp2 != NULL)
		fatal("mm_make_entry(%p): double address %p->%p(%zu)",
		    mm, tmp2, address, size);

	return (tmp);
}

/* Creates a shared memory area of a certain size */

struct mm_master *
mm_create(struct mm_master *mmalloc, size_t size)
{
	void *address;
	struct mm_master *mm;

	if (mmalloc == NULL)
		mm = xcalloc(1, sizeof(struct mm_master));
	else
		mm = mm_xmalloc(mmalloc, sizeof(struct mm_master));

	/*
	 * If the memory map has a mm_master it can be completely
	 * shared including authentication between the child
	 * and the client.
	 */
	mm->mmalloc = mmalloc;

	address = xmmap(size);
	if (address == (void *)MAP_FAILED)
		fatal("mmap(%zu): %s", size, strerror(errno));

	mm->address = address;
	mm->size = size;

	RB_INIT(&mm->rb_free);
	RB_INIT(&mm->rb_allocated);

	mm_make_entry(mm, &mm->rb_free, address, size);

	return (mm);
}
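
/*
 * Usage sketch (illustrative only, not from this file): callers create a
 * master and carve zero-filled chunks out of it, each rounded up to an
 * MM_MINSIZE multiple:
 *
 *	struct mm_master *mm = mm_create(NULL, 65536);
 *	void *p = mm_xmalloc(mm, 100);
 *	mm_free(mm, p);
 *	mm_destroy(mm);
 */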

/* Frees either the allocated or the free list */

static void
mm_freelist(struct mm_master *mmalloc, struct mmtree *head)
{
	struct mm_share *mms, *next;

	for (mms = RB_ROOT(head); mms; mms = next) {
		next = RB_NEXT(mmtree, head, mms);
		RB_REMOVE(mmtree, head, mms);
		if (mmalloc == NULL)
			free(mms);
		else
			mm_free(mmalloc, mms);
	}
}

/* Destroys a memory mapped area */

void
mm_destroy(struct mm_master *mm)
{
	mm_freelist(mm->mmalloc, &mm->rb_free);
	mm_freelist(mm->mmalloc, &mm->rb_allocated);

#ifdef HAVE_MMAP
	if (munmap(mm->address, mm->size) == -1)
		fatal("munmap(%p, %zu): %s", mm->address, mm->size,
		    strerror(errno));
#else
	fatal("%s: UsePrivilegeSeparation=yes and Compression=yes not supported",
	    __func__);
#endif
	if (mm->mmalloc == NULL)
		free(mm);
	else
		mm_free(mm->mmalloc, mm);
}

void *
mm_xmalloc(struct mm_master *mm, size_t size)
{
	void *address;

	address = mm_malloc(mm, size);
	if (address == NULL)
		fatal("%s: mm_malloc(%zu)", __func__, size);
	memset(address, 0, size);
	return (address);
}

/* Allocates data from a memory mapped area */

void *
mm_malloc(struct mm_master *mm, size_t size)
{
	struct mm_share *mms, *tmp;

	if (size == 0)
		fatal("mm_malloc: try to allocate 0 space");
	if (size > SIZE_T_MAX - MM_MINSIZE + 1)
		fatal("mm_malloc: size too big");

	size = ((size + (MM_MINSIZE - 1)) / MM_MINSIZE) * MM_MINSIZE;

	RB_FOREACH(mms, mmtree, &mm->rb_free) {
		if (mms->size >= size)
			break;
	}

	if (mms == NULL)
		return (NULL);

	/* Debug */
	memset(mms->address, 0xd0, size);

	tmp = mm_make_entry(mm, &mm->rb_allocated, mms->address, size);

	/* Does not change order in RB tree */
	mms->size -= size;
	mms->address = (char *)mms->address + size;

	if (mms->size == 0) {
		RB_REMOVE(mmtree, &mm->rb_free, mms);
		if (mm->mmalloc == NULL)
			free(mms);
		else
			mm_free(mm->mmalloc, mms);
	}

	return (tmp->address);
}
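
/*
 * Rounding example (illustrative): if MM_MINSIZE were 16, a 20-byte
 * request would round to ((20 + 15) / 16) * 16 = 32 bytes, so every
 * chunk is a multiple of the minimum allocation unit; the SIZE_T_MAX
 * check above ensures the rounding itself cannot overflow.
 */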

/* Frees memory in a memory mapped area */

void
mm_free(struct mm_master *mm, void *address)
{
	struct mm_share *mms, *prev, tmp;

	tmp.address = address;
	mms = RB_FIND(mmtree, &mm->rb_allocated, &tmp);
	if (mms == NULL)
		fatal("mm_free(%p): can not find %p", mm, address);

	/* Debug */
	memset(mms->address, 0xd0, mms->size);

	/* Remove from allocated list and insert in free list */
	RB_REMOVE(mmtree, &mm->rb_allocated, mms);
	if (RB_INSERT(mmtree, &mm->rb_free, mms) != NULL)
		fatal("mm_free(%p): double address %p", mm, address);

	/* Find previous entry: the in-order predecessor of mms */
	prev = mms;
	if (RB_LEFT(prev, next)) {
		prev = RB_LEFT(prev, next);
		while (RB_RIGHT(prev, next))
			prev = RB_RIGHT(prev, next);
	} else {
		if (RB_PARENT(prev, next) &&
		    (prev == RB_RIGHT(RB_PARENT(prev, next), next)))
			prev = RB_PARENT(prev, next);
		else {
			while (RB_PARENT(prev, next) &&
			    (prev == RB_LEFT(RB_PARENT(prev, next), next)))
				prev = RB_PARENT(prev, next);
			prev = RB_PARENT(prev, next);
		}
	}

	/* Check if range does not overlap */
	if (prev != NULL && MM_ADDRESS_END(prev) > address)
		fatal("mm_free: memory corruption: %p(%zu) > %p",
		    prev->address, prev->size, address);

	/* See if we can merge backwards */
	if (prev != NULL && MM_ADDRESS_END(prev) == address) {
		prev->size += mms->size;
		RB_REMOVE(mmtree, &mm->rb_free, mms);
		if (mm->mmalloc == NULL)
			free(mms);
		else
			mm_free(mm->mmalloc, mms);
	} else
		prev = mms;

	if (prev == NULL)
		return;

	/* Check if we can merge forwards */
	mms = RB_NEXT(mmtree, &mm->rb_free, prev);
	if (mms == NULL)
		return;

	if (MM_ADDRESS_END(prev) > mms->address)
		fatal("mm_free: memory corruption: %p < %p(%zu)",
		    mms->address, prev->address, prev->size);
	if (MM_ADDRESS_END(prev) != mms->address)
		return;

	prev->size += mms->size;
	RB_REMOVE(mmtree, &mm->rb_free, mms);

	if (mm->mmalloc == NULL)
		free(mms);
	else
		mm_free(mm->mmalloc, mms);
}
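
/*
 * Note (descriptive): mm_free() coalesces the freed chunk with its
 * address-adjacent predecessor and successor in the free tree, so
 * freeing neighbouring allocations in any order collapses them back
 * into a single free region.
 */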

static void
mm_sync_list(struct mmtree *oldtree, struct mmtree *newtree,
    struct mm_master *mm, struct mm_master *mmold)
{
	struct mm_master *mmalloc = mm->mmalloc;
	struct mm_share *mms, *new;

	/* Sync memory */
	RB_FOREACH(mms, mmtree, oldtree) {
		/* Check the values */
		mm_memvalid(mmold, mms, sizeof(struct mm_share));
		mm_memvalid(mm, mms->address, mms->size);

		new = mm_xmalloc(mmalloc, sizeof(struct mm_share));
		memcpy(new, mms, sizeof(struct mm_share));
		RB_INSERT(mmtree, newtree, new);
	}
}
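
/*
 * Rebuilds the bookkeeping of *pmm inside a fresh master: a new
 * unmanaged mm_master is created, the mm_master structure and both share
 * trees are copied into it, the old allocation master is destroyed, and
 * *pmm / *pmmalloc are pointed at the new copies.  The mapped data
 * region itself is left in place.
 */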
void
mm_share_sync(struct mm_master **pmm, struct mm_master **pmmalloc)
{
	struct mm_master *mm;
	struct mm_master *mmalloc;
	struct mm_master *mmold;
	struct mmtree rb_free, rb_allocated;

	debug3("%s: Share sync", __func__);

	mm = *pmm;
	mmold = mm->mmalloc;
	mm_memvalid(mmold, mm, sizeof(*mm));

	mmalloc = mm_create(NULL, mm->size);
	mm = mm_xmalloc(mmalloc, sizeof(struct mm_master));
	memcpy(mm, *pmm, sizeof(struct mm_master));
	mm->mmalloc = mmalloc;

	rb_free = mm->rb_free;
	rb_allocated = mm->rb_allocated;

	RB_INIT(&mm->rb_free);
	RB_INIT(&mm->rb_allocated);

	mm_sync_list(&rb_free, &mm->rb_free, mm, mmold);
	mm_sync_list(&rb_allocated, &mm->rb_allocated, mm, mmold);

	mm_destroy(mmold);

	*pmm = mm;
	*pmmalloc = mmalloc;

	debug3("%s: Share sync end", __func__);
}

void
mm_memvalid(struct mm_master *mm, void *address, size_t size)
{
	void *end = (char *)address + size;

	if (address < mm->address)
		fatal("mm_memvalid: address too small: %p", address);
	if (end < address)
		fatal("mm_memvalid: end < address: %p < %p", end, address);
	if (end > MM_ADDRESS_END(mm))
		fatal("mm_memvalid: address too large: %p", address);
}
);