crypto/openssh/monitor_mm.c
/* $OpenBSD: monitor_mm.c,v 1.16 2009/06/22 05:39:28 dtucker Exp $ */
/*
 * Copyright 2002 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "includes.h"

#include <sys/types.h>
#ifdef HAVE_SYS_MMAN_H
#include <sys/mman.h>
#endif
#include <sys/param.h>
#include "openbsd-compat/sys-tree.h"

#include <errno.h>
#include <stdarg.h>
#include <string.h>

#include "xmalloc.h"
#include "ssh.h"
#include "log.h"
#include "monitor_mm.h"

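/*
 * Compares two mm_share entries by start address; comparison
 * function for the red-black trees of free and allocated regions.
 */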
static int
mm_compare(struct mm_share *a, struct mm_share *b)
{
	long diff = (char *)a->address - (char *)b->address;

	if (diff == 0)
		return (0);
	else if (diff < 0)
		return (-1);
	else
		return (1);
}

RB_GENERATE(mmtree, mm_share, next, mm_compare)

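/*
 * Allocates a tree node (from the heap or from the shared allocator)
 * describing the region [address, address + size) and inserts it into
 * the given tree.  A duplicate start address is a fatal error.
 */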
static struct mm_share *
mm_make_entry(struct mm_master *mm, struct mmtree *head,
    void *address, size_t size)
{
	struct mm_share *tmp, *tmp2;

	if (mm->mmalloc == NULL)
		tmp = xmalloc(sizeof(struct mm_share));
	else
		tmp = mm_xmalloc(mm->mmalloc, sizeof(struct mm_share));
	tmp->address = address;
	tmp->size = size;

	tmp2 = RB_INSERT(mmtree, head, tmp);
	if (tmp2 != NULL)
		fatal("mm_make_entry(%p): double address %p->%p(%lu)",
		    mm, tmp2, address, (u_long)size);

	return (tmp);
}

/* Creates a shared memory area of a certain size */

struct mm_master *
mm_create(struct mm_master *mmalloc, size_t size)
{
	void *address;
	struct mm_master *mm;

	if (mmalloc == NULL)
		mm = xmalloc(sizeof(struct mm_master));
	else
		mm = mm_xmalloc(mmalloc, sizeof(struct mm_master));

	/*
	 * If the memory map has a mm_master it can be completely
	 * shared including authentication between the child
	 * and the client.
	 */
	mm->mmalloc = mmalloc;

	address = xmmap(size);
	if (address == (void *)MAP_FAILED)
		fatal("mmap(%lu): %s", (u_long)size, strerror(errno));

	mm->address = address;
	mm->size = size;

	RB_INIT(&mm->rb_free);
	RB_INIT(&mm->rb_allocated);

	mm_make_entry(mm, &mm->rb_free, address, size);

	return (mm);
}

/* Frees either the allocated or the free list */

static void
mm_freelist(struct mm_master *mmalloc, struct mmtree *head)
{
	struct mm_share *mms, *next;

	for (mms = RB_ROOT(head); mms; mms = next) {
		next = RB_NEXT(mmtree, head, mms);
		RB_REMOVE(mmtree, head, mms);
		if (mmalloc == NULL)
			xfree(mms);
		else
			mm_free(mmalloc, mms);
	}
}

/* Destroys a memory mapped area */

void
mm_destroy(struct mm_master *mm)
{
	mm_freelist(mm->mmalloc, &mm->rb_free);
	mm_freelist(mm->mmalloc, &mm->rb_allocated);

#ifdef HAVE_MMAP
	if (munmap(mm->address, mm->size) == -1)
		fatal("munmap(%p, %lu): %s", mm->address, (u_long)mm->size,
		    strerror(errno));
#else
	fatal("%s: UsePrivilegeSeparation=yes and Compression=yes not supported",
	    __func__);
#endif
	if (mm->mmalloc == NULL)
		xfree(mm);
	else
		mm_free(mm->mmalloc, mm);
}

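/* Like mm_malloc(), but a failed allocation is fatal */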
void *
mm_xmalloc(struct mm_master *mm, size_t size)
{
	void *address;

	address = mm_malloc(mm, size);
	if (address == NULL)
		fatal("%s: mm_malloc(%lu)", __func__, (u_long)size);
	return (address);
}

/* Allocates data from a memory mapped area */

void *
mm_malloc(struct mm_master *mm, size_t size)
{
	struct mm_share *mms, *tmp;

	if (size == 0)
		fatal("mm_malloc: try to allocate 0 space");
	if (size > SIZE_T_MAX - MM_MINSIZE + 1)
		fatal("mm_malloc: size too big");

	size = ((size + (MM_MINSIZE - 1)) / MM_MINSIZE) * MM_MINSIZE;

	RB_FOREACH(mms, mmtree, &mm->rb_free) {
		if (mms->size >= size)
			break;
	}

	if (mms == NULL)
		return (NULL);

	/* Debug */
	memset(mms->address, 0xd0, size);

	tmp = mm_make_entry(mm, &mm->rb_allocated, mms->address, size);

	/* Does not change order in RB tree */
	mms->size -= size;
	mms->address = (u_char *)mms->address + size;

	if (mms->size == 0) {
		RB_REMOVE(mmtree, &mm->rb_free, mms);
		if (mm->mmalloc == NULL)
			xfree(mms);
		else
			mm_free(mm->mmalloc, mms);
	}

	return (tmp->address);
}

/* Frees memory in a memory mapped area */

void
mm_free(struct mm_master *mm, void *address)
{
	struct mm_share *mms, *prev, tmp;

	tmp.address = address;
	mms = RB_FIND(mmtree, &mm->rb_allocated, &tmp);
	if (mms == NULL)
		fatal("mm_free(%p): can not find %p", mm, address);

	/* Debug */
	memset(mms->address, 0xd0, mms->size);

	/* Remove from allocated list and insert in free list */
	RB_REMOVE(mmtree, &mm->rb_allocated, mms);
	if (RB_INSERT(mmtree, &mm->rb_free, mms) != NULL)
		fatal("mm_free(%p): double address %p", mm, address);

	/* Find previous entry */
	prev = mms;
	if (RB_LEFT(prev, next)) {
		prev = RB_LEFT(prev, next);
		while (RB_RIGHT(prev, next))
			prev = RB_RIGHT(prev, next);
	} else {
		if (RB_PARENT(prev, next) &&
		    (prev == RB_RIGHT(RB_PARENT(prev, next), next)))
			prev = RB_PARENT(prev, next);
		else {
			while (RB_PARENT(prev, next) &&
			    (prev == RB_LEFT(RB_PARENT(prev, next), next)))
				prev = RB_PARENT(prev, next);
			prev = RB_PARENT(prev, next);
		}
	}

	/* Check if range does not overlap */
	if (prev != NULL && MM_ADDRESS_END(prev) > address)
		fatal("mm_free: memory corruption: %p(%lu) > %p",
		    prev->address, (u_long)prev->size, address);

	/* See if we can merge backwards */
	if (prev != NULL && MM_ADDRESS_END(prev) == address) {
		prev->size += mms->size;
		RB_REMOVE(mmtree, &mm->rb_free, mms);
		if (mm->mmalloc == NULL)
			xfree(mms);
		else
			mm_free(mm->mmalloc, mms);
	} else
		prev = mms;

	if (prev == NULL)
		return;

	/* Check if we can merge forwards */
	mms = RB_NEXT(mmtree, &mm->rb_free, prev);
	if (mms == NULL)
		return;

	if (MM_ADDRESS_END(prev) > mms->address)
		fatal("mm_free: memory corruption: %p < %p(%lu)",
		    mms->address, prev->address, (u_long)prev->size);
	if (MM_ADDRESS_END(prev) != mms->address)
		return;

	prev->size += mms->size;
	RB_REMOVE(mmtree, &mm->rb_free, mms);

	if (mm->mmalloc == NULL)
		xfree(mms);
	else
		mm_free(mm->mmalloc, mms);
}

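/*
 * Copies every entry of an old share tree into a tree allocated from
 * the new bookkeeping area, validating each node and the region it
 * describes first.
 */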
static void
mm_sync_list(struct mmtree *oldtree, struct mmtree *newtree,
    struct mm_master *mm, struct mm_master *mmold)
{
	struct mm_master *mmalloc = mm->mmalloc;
	struct mm_share *mms, *new;

	/* Sync free list */
	RB_FOREACH(mms, mmtree, oldtree) {
		/* Check the values */
		mm_memvalid(mmold, mms, sizeof(struct mm_share));
		mm_memvalid(mm, mms->address, mms->size);

		new = mm_xmalloc(mmalloc, sizeof(struct mm_share));
		memcpy(new, mms, sizeof(struct mm_share));
		RB_INSERT(mmtree, newtree, new);
	}
}

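/*
 * Rebuilds the bookkeeping for a memory map inside a newly created
 * shared area, so that both trees live in memory visible to parent
 * and child, then destroys the old backing allocator.
 */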
void
mm_share_sync(struct mm_master **pmm, struct mm_master **pmmalloc)
{
	struct mm_master *mm;
	struct mm_master *mmalloc;
	struct mm_master *mmold;
	struct mmtree rb_free, rb_allocated;

	debug3("%s: Share sync", __func__);

	mm = *pmm;
	mmold = mm->mmalloc;
	mm_memvalid(mmold, mm, sizeof(*mm));

	mmalloc = mm_create(NULL, mm->size);
	mm = mm_xmalloc(mmalloc, sizeof(struct mm_master));
	memcpy(mm, *pmm, sizeof(struct mm_master));
	mm->mmalloc = mmalloc;

	rb_free = mm->rb_free;
	rb_allocated = mm->rb_allocated;

	RB_INIT(&mm->rb_free);
	RB_INIT(&mm->rb_allocated);

	mm_sync_list(&rb_free, &mm->rb_free, mm, mmold);
	mm_sync_list(&rb_allocated, &mm->rb_allocated, mm, mmold);

	mm_destroy(mmold);

	*pmm = mm;
	*pmmalloc = mmalloc;

	debug3("%s: Share sync end", __func__);
}

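/* Checks that [address, address + size) lies within the mapped area */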
void
mm_memvalid(struct mm_master *mm, void *address, size_t size)
{
	void *end = (u_char *)address + size;

	if (address < mm->address)
		fatal("mm_memvalid: address too small: %p", address);
	if (end < address)
		fatal("mm_memvalid: end < address: %p < %p", end, address);
	if (end > (void *)((u_char *)mm->address + mm->size))
		fatal("mm_memvalid: address too large: %p", address);
}
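
/*
 * Illustrative only, not part of the original file: a minimal sketch of
 * how a caller might use this allocator, assuming it knows the size of
 * the shared area it needs.  The name "share_size" is made up for the
 * example.
 *
 *	struct mm_master *mm, *mm_sub;
 *	size_t share_size = 65536;
 *	void *p;
 *
 *	mm = mm_create(NULL, share_size);	 bookkeeping on the heap
 *	mm_sub = mm_create(mm, share_size);	 bookkeeping lives inside mm
 *	p = mm_xmalloc(mm_sub, 128);		 carve 128 bytes from the area
 *	mm_free(mm_sub, p);
 *	mm_destroy(mm_sub);
 *	mm_destroy(mm);
 */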