// lib/fd.c (mit-jos) — file-descriptor layer.
// Imported from git blob f7671f89788f12a585870a5977d96025cefe6f03,
// commit: "eliminate pipe race."
1 #include <inc/lib.h>
3 #define debug 0
5 // Maximum number of file descriptors a program may hold open concurrently
6 #define MAXFD 32
7 // Bottom of file data area
8 #define FILEBASE 0xD0000000
9 // Bottom of file descriptor area
10 #define FDTABLE (FILEBASE - PTSIZE)
12 // Return the 'struct Fd*' for file descriptor index i
13 #define INDEX2FD(i) ((struct Fd*) (FDTABLE + (i)*PGSIZE))
14 // Return the file data pointer for file descriptor index i
15 #define INDEX2DATA(i) ((char*) (FILEBASE + (i)*PTSIZE))
18 /********************************
19 * FILE DESCRIPTOR MANIPULATORS *
20 * *
21 ********************************/
// Return a pointer to the start of the data region that backs
// file descriptor 'fd' (one PTSIZE window per descriptor slot).
char*
fd2data(struct Fd *fd)
{
	int idx = fd2num(fd);
	return INDEX2DATA(idx);
}
29 int
30 fd2num(struct Fd *fd)
32 return ((uintptr_t) fd - FDTABLE) / PGSIZE;
35 // Finds the smallest i from 0 to MAXFD-1 that doesn't have
36 // its fd page mapped.
37 // Sets *fd_store to the corresponding fd page virtual address.
39 // fd_alloc does NOT actually allocate an fd page.
40 // It is up to the caller to allocate the page somehow.
41 // This means that if someone calls fd_alloc twice in a row
42 // without allocating the first page we return, we'll return the same
43 // page the second time.
45 // Hint: Use INDEX2FD.
47 // Returns 0 on success, < 0 on error. Errors are:
//	-E_MAX_OPEN: no more file descriptors
49 // On error, *fd_store is set to 0.
50 int
51 fd_alloc(struct Fd **fd_store)
53 struct Fd *fd;
54 int i;
56 for (i = 0; i < MAXFD; i++) {
57 fd = INDEX2FD(i);
58 if (pageref(fd) == 0) {
59 *fd_store = fd;
60 return 0;
63 *fd_store = 0;
64 return -E_MAX_OPEN;
67 // Check that fdnum is in range and mapped.
68 // If it is, set *fd_store to the fd page virtual address.
70 // Returns 0 on success (the page is in range and mapped), < 0 on error.
71 // Errors are:
72 // -E_INVAL: fdnum was either not in range or not mapped.
73 int
74 fd_lookup(int fdnum, struct Fd **fd_store)
76 struct Fd *fd;
78 if (fdnum >= 0 && fdnum < MAXFD) {
79 fd = INDEX2FD(fdnum);
80 if (pageref(fd) > 0) {
81 *fd_store = fd;
82 return 0;
85 *fd_store = 0;
86 return -E_INVAL;
89 // Frees file descriptor 'fd' by closing the corresponding file
90 // and unmapping the file descriptor page.
91 // If 'must_exist' is 0, then fd can be a closed or nonexistent file
92 // descriptor; the function will return 0 and have no other effect.
93 // If 'must_exist' is 1, then fd_close returns -E_INVAL when passed a
94 // closed or nonexistent file descriptor.
95 // Returns 0 on success, < 0 on error.
96 int
97 fd_close(struct Fd *fd, bool must_exist)
99 struct Fd *fd2;
100 struct Dev *dev;
101 int r;
102 if ((r = fd_lookup(fd2num(fd), &fd2)) < 0
103 || fd != fd2)
104 return (must_exist ? r : 0);
105 if ((r = dev_lookup(fd->fd_dev_id, &dev)) >= 0)
106 r = (*dev->dev_close)(fd);
107 // Make sure fd is unmapped. Might be a no-op if
108 // (*dev->dev_close)(fd) already unmapped it.
109 (void) sys_page_unmap(0, fd);
110 return r;
114 /******************
115 * FILE FUNCTIONS *
117 ******************/
119 static struct Dev *devtab[] =
121 &devfile,
122 &devpipe,
123 &devcons,
128 dev_lookup(int dev_id, struct Dev **dev)
130 int i;
131 for (i = 0; devtab[i]; i++)
132 if (devtab[i]->dev_id == dev_id) {
133 *dev = devtab[i];
134 return 0;
136 cprintf("[%08x] unknown device type %d\n", env->env_id, dev_id);
137 *dev = 0;
138 return -E_INVAL;
// Close file descriptor number 'fdnum'.
// Returns 0 on success, < 0 on error.
int
close(int fdnum)
{
	struct Fd *fd;
	int r;

	r = fd_lookup(fdnum, &fd);
	if (r < 0)
		return r;
	return fd_close(fd, 1);
}
153 void
154 close_all(void)
156 int i;
157 for (i = 0; i < MAXFD; i++)
158 close(i);
161 // Make file descriptor 'newfdnum' a duplicate of file descriptor 'oldfdnum'.
162 // For instance, writing onto either file descriptor will affect the
163 // file and the file offset of the other.
164 // Closes any previously open file descriptor at 'newfdnum'.
165 // This is implemented using virtual memory tricks (of course!).
int
dup(int oldfdnum, int newfdnum)
{
	int i, r;
	char *ova, *nva;
	pte_t pte;
	struct Fd *oldfd, *newfd;

	// The source descriptor must exist.
	if ((r = fd_lookup(oldfdnum, &oldfd)) < 0)
		return r;
	// Free the target slot first; ignore the result — it may
	// simply not have been open.
	close(newfdnum);

	newfd = INDEX2FD(newfdnum);
	ova = fd2data(oldfd);
	nva = fd2data(newfd);

	// Share the old fd's data pages with the new slot by mapping the
	// same physical pages at the new data window (COW-free aliasing).
	// Skip entirely if the old data region's page table isn't present.
	if (vpd[PDX(ova)]) {
		for (i = 0; i < PTSIZE; i += PGSIZE) {
			pte = vpt[VPN(ova + i)];
			if (pte&PTE_P) {
				// should be no error here -- pd is already allocated
				if ((r = sys_page_map(0, ova + i, 0, nva + i, pte & PTE_USER)) < 0)
					goto err;
			}
		}
	}
	// Finally alias the fd page itself, preserving its user permission
	// bits.  Doing this last keeps the new slot invisible until the
	// data pages are in place.
	if ((r = sys_page_map(0, oldfd, 0, newfd, vpt[VPN(oldfd)] & PTE_USER)) < 0)
		goto err;

	return newfdnum;

err:
	// Roll back: unmap the new fd page and any data pages we mapped.
	sys_page_unmap(0, newfd);
	for (i = 0; i < PTSIZE; i += PGSIZE)
		sys_page_unmap(0, nva + i);
	return r;
}
205 ssize_t
206 read(int fdnum, void *buf, size_t n)
208 int r;
209 struct Dev *dev;
210 struct Fd *fd;
212 if ((r = fd_lookup(fdnum, &fd)) < 0
213 || (r = dev_lookup(fd->fd_dev_id, &dev)) < 0)
214 return r;
215 if ((fd->fd_omode & O_ACCMODE) == O_WRONLY) {
216 cprintf("[%08x] read %d -- bad mode\n", env->env_id, fdnum);
217 return -E_INVAL;
219 r = (*dev->dev_read)(fd, buf, n, fd->fd_offset);
220 if (r >= 0)
221 fd->fd_offset += r;
222 return r;
225 ssize_t
226 readn(int fdnum, void *buf, size_t n)
228 int m, tot;
230 for (tot = 0; tot < n; tot += m) {
231 m = read(fdnum, (char*)buf + tot, n - tot);
232 if (m < 0)
233 return m;
234 if (m == 0)
235 break;
237 return tot;
240 ssize_t
241 write(int fdnum, const void *buf, size_t n)
243 int r;
244 struct Dev *dev;
245 struct Fd *fd;
247 if ((r = fd_lookup(fdnum, &fd)) < 0
248 || (r = dev_lookup(fd->fd_dev_id, &dev)) < 0)
249 return r;
250 if ((fd->fd_omode & O_ACCMODE) == O_RDONLY) {
251 cprintf("[%08x] write %d -- bad mode\n", env->env_id, fdnum);
252 return -E_INVAL;
254 if (debug)
255 cprintf("write %d %p %d via dev %s\n",
256 fdnum, buf, n, dev->dev_name);
257 r = (*dev->dev_write)(fd, buf, n, fd->fd_offset);
258 if (r > 0)
259 fd->fd_offset += r;
260 return r;
264 seek(int fdnum, off_t offset)
266 int r;
267 struct Fd *fd;
269 if ((r = fd_lookup(fdnum, &fd)) < 0)
270 return r;
271 fd->fd_offset = offset;
272 return 0;
276 ftruncate(int fdnum, off_t newsize)
278 int r;
279 struct Dev *dev;
280 struct Fd *fd;
281 if ((r = fd_lookup(fdnum, &fd)) < 0
282 || (r = dev_lookup(fd->fd_dev_id, &dev)) < 0)
283 return r;
284 if ((fd->fd_omode & O_ACCMODE) == O_RDONLY) {
285 cprintf("[%08x] ftruncate %d -- bad mode\n",
286 env->env_id, fdnum);
287 return -E_INVAL;
289 return (*dev->dev_trunc)(fd, newsize);
293 fstat(int fdnum, struct Stat *stat)
295 int r;
296 struct Dev *dev;
297 struct Fd *fd;
299 if ((r = fd_lookup(fdnum, &fd)) < 0
300 || (r = dev_lookup(fd->fd_dev_id, &dev)) < 0)
301 return r;
302 stat->st_name[0] = 0;
303 stat->st_size = 0;
304 stat->st_isdir = 0;
305 stat->st_dev = dev;
306 return (*dev->dev_stat)(fd, stat);
310 stat(const char *path, struct Stat *stat)
312 int fd, r;
314 if ((fd = open(path, O_RDONLY)) < 0)
315 return fd;
316 r = fstat(fd, stat);
317 close(fd);
318 return r;