From 85d25bcf562abac1b78f0828f44e3cc7e67e93fa Mon Sep 17 00:00:00 2001
From: Matthew Dillon
Date: Thu, 23 Apr 2009 11:54:47 -0700
Subject: [PATCH] Fix VM panic. Add required overflow check for MAP_STACK and
 MAP_FIXED mmaps

Certain mmap() calls were not properly checking that (addr + size) does
not overflow, causing a kernel panic.

Reported-by: Alex Hornung
---
 sys/kern/kern_exec.c |  2 +-
 sys/vm/vm_map.c      | 40 ++++++++++++++++++++++++++++++++--------
 sys/vm/vm_map.h      |  3 ++-
 sys/vm/vm_mmap.c     | 19 +++++++++++++++++--
 4 files changed, 52 insertions(+), 12 deletions(-)

diff --git a/sys/kern/kern_exec.c b/sys/kern/kern_exec.c
index 56e54ec14e..a25bb57246 100644
--- a/sys/kern/kern_exec.c
+++ b/sys/kern/kern_exec.c
@@ -729,7 +729,7 @@ exec_new_vmspace(struct image_params *imgp, struct vmspace *vmcopy)
 
 	/* Allocate a new stack */
 	error = vm_map_stack(&vmspace->vm_map, stack_addr, (vm_size_t)maxssiz,
-			     VM_PROT_ALL, VM_PROT_ALL, 0);
+			     FALSE, VM_PROT_ALL, VM_PROT_ALL, 0);
 	if (error)
 		return (error);
 
diff --git a/sys/vm/vm_map.c b/sys/vm/vm_map.c
index f166a1e646..1507336a85 100644
--- a/sys/vm/vm_map.c
+++ b/sys/vm/vm_map.c
@@ -2993,16 +2993,23 @@ vmspace_fork(struct vmspace *vm1)
 
 int
 vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
-	      vm_prot_t prot, vm_prot_t max, int cow)
+	      boolean_t fitit, vm_prot_t prot, vm_prot_t max, int cow)
 {
-	vm_map_entry_t prev_entry;
-	vm_map_entry_t new_stack_entry;
-	vm_size_t init_ssize;
-	int rv;
+	vm_map_entry_t	prev_entry;
+	vm_map_entry_t	new_stack_entry;
+	vm_size_t	init_ssize;
+	int		rv;
 	int count;
+	vm_offset_t tmpaddr;
 
-	if (VM_MIN_USER_ADDRESS > 0 && addrbos < VM_MIN_USER_ADDRESS)
+	/*
+	 * XXX cleanup cruft
+	 */
+	if (VM_MIN_USER_ADDRESS > 0 &&
+	    fitit == FALSE &&
+	    addrbos < VM_MIN_USER_ADDRESS) {
 		return (KERN_NO_SPACE);
+	}
 
 	if (max_ssize < sgrowsiz)
 		init_ssize = max_ssize;
@@ -3012,6 +3019,18 @@ vm_map_stack (vm_map_t map, vm_offset_t addrbos, vm_size_t max_ssize,
 	count = vm_map_entry_reserve(MAP_RESERVE_COUNT);
 	vm_map_lock(map);
 
+	/*
+	 * Find space for the mapping
+	 */
+	if (fitit) {
+		if (vm_map_findspace(map, addrbos, max_ssize, 1, &tmpaddr)) {
+			vm_map_unlock(map);
+			vm_map_entry_release(count);
+			return (KERN_NO_SPACE);
+		}
+		addrbos = tmpaddr;
+	}
+
 	/* If addr is already mapped, no go */
 	if (vm_map_lookup_entry(map, addrbos, &prev_entry)) {
 		vm_map_unlock(map);
@@ -3019,6 +3038,8 @@
 		return (KERN_NO_SPACE);
 	}
 
+#if 0
+	/* XXX already handled by kern_mmap() */
 	/* If we would blow our VMEM resource limit, no go */
 	if (map->size + init_ssize >
 	    curproc->p_rlimit[RLIMIT_VMEM].rlim_cur) {
@@ -3026,8 +3047,10 @@
 		vm_map_unlock(map);
 		vm_map_entry_release(count);
 		return (KERN_NO_SPACE);
 	}
+#endif
-	/* If we can't accomodate max_ssize in the current mapping,
+	/*
+	 * If we can't accomodate max_ssize in the current mapping,
 	 * no go. However, we need to be aware that subsequent user
 	 * mappings might map into the space we have reserved for
 	 * stack, and currently this space is not protected.
@@ -3042,7 +3065,8 @@
 		return (KERN_NO_SPACE);
 	}
 
-	/* We initially map a stack of only init_ssize. We will
+	/*
+	 * We initially map a stack of only init_ssize. We will
 	 * grow as needed later. Since this is to be a grow
 	 * down stack, we map at the top of the range.
 	 *
diff --git a/sys/vm/vm_map.h b/sys/vm/vm_map.h
index 4990221706..577dce3123 100644
--- a/sys/vm/vm_map.h
+++ b/sys/vm/vm_map.h
@@ -467,7 +467,8 @@ int vm_map_madvise (vm_map_t, vm_offset_t, vm_offset_t, int, off_t);
 void vm_map_simplify_entry (vm_map_t, vm_map_entry_t, int *);
 void vm_init2 (void);
 int vm_uiomove (vm_map_t, vm_object_t, off_t, int, vm_offset_t, int *);
-int vm_map_stack (vm_map_t, vm_offset_t, vm_size_t, vm_prot_t, vm_prot_t, int);
+int vm_map_stack (vm_map_t, vm_offset_t, vm_size_t, boolean_t,
+		  vm_prot_t, vm_prot_t, int);
 int vm_map_growstack (struct proc *p, vm_offset_t addr);
 int vmspace_swap_count (struct vmspace *vmspace);
 int vmspace_anonymous_count (struct vmspace *vmspace);
diff --git a/sys/vm/vm_mmap.c b/sys/vm/vm_mmap.c
index d2b93c3d16..5d6c4672dc 100644
--- a/sys/vm/vm_mmap.c
+++ b/sys/vm/vm_mmap.c
@@ -984,6 +984,8 @@ vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
 {
 	boolean_t fitit;
 	vm_object_t object;
+	vm_offset_t eaddr;
+	vm_size_t esize;
 	struct vnode *vp;
 	struct thread *td = curthread;
 	struct proc *p;
@@ -999,10 +1001,16 @@ vm_mmap(vm_map_t map, vm_offset_t *addr, vm_size_t size, vm_prot_t prot,
 
 	/*
 	 * XXX messy code, fixme
+	 *
+	 * NOTE: Overflow checks require discrete statements or GCC4
+	 *	 will optimize it out.
 	 */
 	if ((p = curproc) != NULL && map == &p->p_vmspace->vm_map) {
-		if (map->size + size > p->p_rlimit[RLIMIT_VMEM].rlim_cur)
+		esize = map->size + size;
+		if (esize < map->size ||
+		    esize > p->p_rlimit[RLIMIT_VMEM].rlim_cur) {
 			return(ENOMEM);
+		}
 	}
 
 	/*
@@ -1012,6 +1020,9 @@
 	 * operations (such as in exec) and non-aligned offsets will
 	 * cause pmap inconsistencies...so we want to be sure to
 	 * disallow this in all cases.
+	 *
+	 * NOTE: Overflow checks require discrete statements or GCC4
+	 *	 will optimize it out.
 	 */
 	if (foff & PAGE_MASK)
 		return (EINVAL);
@@ -1022,6 +1033,9 @@
 	} else {
 		if (*addr != trunc_page(*addr))
 			return (EINVAL);
+		eaddr = *addr + size;
+		if (eaddr < *addr)
+			return (EINVAL);
 		fitit = FALSE;
 		vm_map_remove(map, *addr, *addr + size);
 	}
@@ -1103,7 +1117,8 @@
 	 * page tables will default to storing the page table at offset 0.
 	 */
 	if (flags & MAP_STACK) {
-		rv = vm_map_stack (map, *addr, size, prot, maxprot, docow);
+		rv = vm_map_stack(map, *addr, size, fitit,
+				  prot, maxprot, docow);
 	} else if (flags & MAP_VPAGETABLE) {
 		rv = vm_map_find(map, object, foff, addr, size, fitit,
 				 VM_MAPTYPE_VPAGETABLE, prot, maxprot, docow);
-- 
2.11.4.GIT
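
The idiom behind both new checks is the same: compute the end of the range
in a separate statement, then compare the result against the start. Because
unsigned arithmetic wraps, eaddr < addr holds exactly when addr + size
overflowed. What follows is a minimal standalone sketch of that idiom in
userland C, not kernel code; range_overflows and vmem_limit_exceeded are
illustrative names, not functions from this patch, and the typedefs are
userland stand-ins for the kernel types.

    #include <stdint.h>
    #include <stdio.h>

    typedef uintptr_t vm_offset_t;      /* stand-in for the kernel typedef */
    typedef uintptr_t vm_size_t;        /* likewise */

    /*
     * MAP_FIXED-style check: does [addr, addr + size) wrap past the top
     * of the address space?  Unsigned overflow is well defined in C (it
     * wraps), so eaddr < addr exactly when the addition wrapped.  The
     * addition and the comparison are kept as discrete statements,
     * mirroring the patch's NOTE about GCC4 optimizing combined forms out.
     */
    static int
    range_overflows(vm_offset_t addr, vm_size_t size)
    {
            vm_offset_t eaddr;

            eaddr = addr + size;        /* may wrap; that is the point */
            return (eaddr < addr);      /* 1: reject, as with EINVAL */
    }

    /*
     * RLIMIT_VMEM-style check: adding size to the current map size must
     * neither wrap nor exceed the resource limit.
     */
    static int
    vmem_limit_exceeded(vm_size_t map_size, vm_size_t size, vm_size_t limit)
    {
            vm_size_t esize;

            esize = map_size + size;    /* may wrap */
            return (esize < map_size || esize > limit); /* 1: reject, ENOMEM */
    }

    int
    main(void)
    {
            /* size pushes a fixed mapping past the top of the space: reject */
            printf("%d\n", range_overflows((vm_offset_t)-4096, 8192));     /* 1 */
            /* ordinary mapping: accept */
            printf("%d\n", range_overflows(0x1000, 0x2000));               /* 0 */
            /* huge size wraps map->size + size: reject */
            printf("%d\n", vmem_limit_exceeded(100, (vm_size_t)-1, 1000)); /* 1 */
            return (0);
    }

Note that vm_map_stack() itself no longer needs the RLIMIT_VMEM test (it is
disabled under #if 0 above) because kern_mmap()/vm_mmap() now performs the
overflow-safe version of the same check before the map is touched; the new
fitit argument likewise lets MAP_STACK mappings without a fixed address go
through vm_map_findspace() instead of trusting a caller-supplied hint.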