glibc.git: sysdeps/mach/hurd/mmap.c
/* Copyright (C) 1994, 1995, 1996, 1997, 1999 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Library General Public License as
   published by the Free Software Foundation; either version 2 of the
   License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Library General Public License for more details.

   You should have received a copy of the GNU Library General Public
   License along with the GNU C Library; see the file COPYING.LIB.  If not,
   write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330,
   Boston, MA 02111-1307, USA.  */
#include <sys/types.h>
#include <sys/mman.h>
#include <errno.h>
#include <hurd.h>
#include <hurd/fd.h>
/* Map addresses starting near ADDR and extending for LEN bytes from
   OFFSET into the file FD describes, according to PROT and FLAGS.  If ADDR
   is nonzero, it is the desired mapping address.  If the MAP_FIXED bit is
   set in FLAGS, the mapping will be at ADDR exactly (which must be
   page-aligned); otherwise the system chooses a convenient nearby address.
   The return value is the actual mapping address chosen or (__ptr_t) -1
   for errors (in which case `errno' is set).  A successful `mmap' call
   deallocates any previous mapping for the affected region.  */
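/* On the Hurd this is implemented with Mach VM primitives: anonymous
   mappings are created with `vm_allocate', while file mappings obtain a
   memory object port from the file's server via `io_map' and pass it to
   `vm_map'.  */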
__ptr_t
__mmap (__ptr_t addr, size_t len, int prot, int flags, int fd, off_t offset)
{
  error_t err;
  vm_prot_t vmprot;
  memory_object_t memobj;
  vm_address_t mapaddr;
  vm_size_t pageoff;

  mapaddr = (vm_address_t) addr;
  if ((flags & (MAP_TYPE|MAP_INHERIT)) == MAP_ANON
      && prot == (PROT_READ|PROT_WRITE)) /* cf VM_PROT_DEFAULT */
    {
      /* vm_allocate has (a little) less overhead in the kernel too.  */
      err = __vm_allocate (__mach_task_self (), &mapaddr, len,
                           !(flags & MAP_FIXED));

      if (err == KERN_NO_SPACE && (flags & MAP_FIXED))
        {
          /* XXX this is not atomic as it is in unix! */
          /* The region is already allocated; deallocate it first.  */
          err = __vm_deallocate (__mach_task_self (), mapaddr, len);
          if (!err)
            err = __vm_allocate (__mach_task_self (), &mapaddr, len, 0);
        }

      return err ? (__ptr_t) (long int) __hurd_fail (err) : (__ptr_t) mapaddr;
    }
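  /* Mach maps whole pages at page-aligned object offsets, so split the
     byte OFFSET into a page-aligned part for the kernel and the
     within-page remainder PAGEOFF, added back to the address returned to
     the caller.  */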
  pageoff = offset & (vm_page_size - 1);
  offset &= ~(vm_page_size - 1);
  if (flags & MAP_FIXED)
    {
      /* A specific address is requested.  It need not be page-aligned;
         it just needs to be congruent with the object offset.  */
      if ((mapaddr & (vm_page_size - 1)) != pageoff)
        return (__ptr_t) (long int) __hurd_fail (EINVAL);
      else
        /* We will add back PAGEOFF after mapping.  */
        mapaddr -= pageoff;
    }
  vmprot = VM_PROT_NONE;
  if (prot & PROT_READ)
    vmprot |= VM_PROT_READ;
  if (prot & PROT_WRITE)
    vmprot |= VM_PROT_WRITE;
  if (prot & PROT_EXEC)
    vmprot |= VM_PROT_EXECUTE;
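  /* Select the memory object to map: none for anonymous memory, or the
     object port(s) returned by the file's server through `io_map'.  */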
  switch (flags & MAP_TYPE)
    {
    default:
      return (__ptr_t) (long int) __hurd_fail (EINVAL);

    case MAP_ANON:
      memobj = MACH_PORT_NULL;
      break;

    case MAP_FILE:
    case 0:			/* Allow, e.g., just MAP_SHARED.  */
      {
        mach_port_t robj, wobj;
        if ((err = HURD_DPORT_USE (fd, __io_map (port, &robj, &wobj))))
          return (__ptr_t) (long int) __hurd_dfail (fd, err);
        switch (prot & (PROT_READ|PROT_WRITE))
          {
          case PROT_READ:
            memobj = robj;
            if (wobj != MACH_PORT_NULL)
              __mach_port_deallocate (__mach_task_self (), wobj);
            break;
          case PROT_WRITE:
            memobj = wobj;
            if (robj != MACH_PORT_NULL)
              __mach_port_deallocate (__mach_task_self (), robj);
            break;
          case PROT_READ|PROT_WRITE:
            if (robj == wobj)
              {
                memobj = wobj;
                /* Remove extra reference.  */
                __mach_port_deallocate (__mach_task_self (), memobj);
              }
            else if (wobj == MACH_PORT_NULL && /* Not writable by mapping.  */
                     !(flags & MAP_SHARED))
              /* The file can only be mapped for reading.  Since we are
                 making a private mapping, we will never try to write the
                 object anyway, so we don't care.  */
              memobj = robj;
            else
              {
                __mach_port_deallocate (__mach_task_self (), wobj);
                return (__ptr_t) (long int) __hurd_fail (EACCES);
              }
            break;
          }
        break;
        /* XXX handle MAP_NOEXTEND */
      }
    }

  /* XXX handle MAP_INHERIT */
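  /* The `anywhere' argument is clear only for MAP_FIXED; the `copy'
     argument requests a copy-on-write (private) mapping unless MAP_SHARED
     was given; shared mappings are also inherited shared across fork.  */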
  err = __vm_map (__mach_task_self (),
                  &mapaddr, (vm_size_t) len, (vm_address_t) 0,
                  ! (flags & MAP_FIXED),
                  memobj, (vm_offset_t) offset,
                  ! (flags & MAP_SHARED),
                  vmprot, VM_PROT_ALL,
                  (flags & MAP_SHARED) ? VM_INHERIT_SHARE : VM_INHERIT_COPY);
  if (err == KERN_NO_SPACE && (flags & MAP_FIXED))
    {
      /* XXX this is not atomic as it is in unix! */
      /* The region is already allocated; deallocate it first.  */
      err = __vm_deallocate (__mach_task_self (), mapaddr, len);
      if (! err)
        err = __vm_map (__mach_task_self (),
                        &mapaddr, (vm_size_t) len, (vm_address_t) 0,
                        0, memobj, (vm_offset_t) offset,
                        ! (flags & MAP_SHARED),
                        vmprot, VM_PROT_ALL,
                        (flags & MAP_SHARED) ? VM_INHERIT_SHARE
                        : VM_INHERIT_COPY);
    }
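  /* We are done with the memory object port from `io_map'; the kernel
     holds its own reference for the mapping, so release ours.  */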
  if (memobj != MACH_PORT_NULL)
    __mach_port_deallocate (__mach_task_self (), memobj);
  if (err)
    return (__ptr_t) (long int) __hurd_fail (err);

  /* Adjust the mapping address for the offset-within-page.  */
  mapaddr += pageoff;

  return (__ptr_t) mapaddr;
}
weak_alias (__mmap, mmap)
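/* Usage sketch (illustrative only; FD is assumed to be an open file
   descriptor): through the `mmap' alias above, a caller might map the
   first page of a file read-only and shared:

     void *p = mmap (0, vm_page_size, PROT_READ, MAP_FILE|MAP_SHARED, fd, 0);
     if (p == (void *) -1)
       perror ("mmap");
*/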