open/close implemented.
[mit-jos.git] / lib / fd.c
blob 481a306298591f2ee27f9978479c97f3266f5014
#include <inc/lib.h>

#define debug		0

// Maximum number of file descriptors a program may hold open concurrently
#define MAXFD		32
// Bottom of file data area
#define FILEBASE	0xD0000000
// Bottom of file descriptor area
#define FDTABLE		(FILEBASE - PTSIZE)

// Return the 'struct Fd*' for file descriptor index i
#define INDEX2FD(i)	((struct Fd*) (FDTABLE + (i)*PGSIZE))
// Return the file data pointer for file descriptor index i
#define INDEX2DATA(i)	((char*) (FILEBASE + (i)*PTSIZE))
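// Illustrative layout (a sketch, assuming JOS's usual PGSIZE = 4096 and
// PTSIZE = 4 MB from inc/mmu.h):
//	INDEX2FD(0) == 0xCFC00000	INDEX2DATA(0) == 0xD0000000
//	INDEX2FD(1) == 0xCFC01000	INDEX2DATA(1) == 0xD0400000
// i.e. each descriptor gets one page of 'struct Fd' state just below
// FILEBASE, plus one 4 MB window of mapped file data at or above FILEBASE.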
/********************************
 * FILE DESCRIPTOR MANIPULATORS *
 *                              *
 ********************************/
char*
fd2data(struct Fd *fd)
{
	return INDEX2DATA(fd2num(fd));
}

int
fd2num(struct Fd *fd)
{
	return ((uintptr_t) fd - FDTABLE) / PGSIZE;
}
// Finds the smallest i from 0 to MAXFD-1 that doesn't have
// its fd page mapped.
// Sets *fd_store to the corresponding fd page virtual address.
//
// fd_alloc does NOT actually allocate an fd page.
// It is up to the caller to allocate the page somehow.
// This means that if someone calls fd_alloc twice in a row
// without allocating the first page we return, we'll return the same
// page the second time.
//
// Hint: Use INDEX2FD.
//
// Returns 0 on success, < 0 on error.  Errors are:
//	-E_MAX_OPEN: no more file descriptors
// On error, *fd_store is set to 0.
int
fd_alloc(struct Fd **fd_store)
{
	struct Fd *fd;
	int i;

	for (i = 0; i < MAXFD; i++) {
		fd = INDEX2FD(i);
		if (pageref(fd) == 0) {
			*fd_store = fd;
			return 0;
		}
	}
	*fd_store = 0;
	return -E_MAX_OPEN;
}
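// A minimal sketch of the intended calling pattern (not part of fd.c):
// a device's open routine reserves a slot with fd_alloc() and then makes
// the slot "used" itself by mapping a page at that address.  The exact
// permission bits below are an assumption; see the real device code for
// the authoritative flags.
#if 0
	struct Fd *fd;
	int r;

	if ((r = fd_alloc(&fd)) < 0)
		return r;				// -E_MAX_OPEN: table full
	if ((r = sys_page_alloc(0, fd, PTE_P | PTE_U | PTE_W)) < 0)
		return r;				// mapping the page claims the slot
	// ... fill in fd->fd_dev_id, fd->fd_omode, etc., then return fd2num(fd)
#endif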
// Check that fdnum is in range and mapped.
// If it is, set *fd_store to the fd page virtual address.
//
// Returns 0 on success (the page is in range and mapped), < 0 on error.
// Errors are:
//	-E_INVAL: fdnum was either not in range or not mapped.
int
fd_lookup(int fdnum, struct Fd **fd_store)
{
	struct Fd *fd;

	if (fdnum >= 0 && fdnum < MAXFD) {
		fd = INDEX2FD(fdnum);
		if (pageref(fd) > 0) {
			*fd_store = fd;
			return 0;
		}
	}
	*fd_store = 0;
	return -E_INVAL;
}
// Frees file descriptor 'fd' by closing the corresponding file
// and unmapping the file descriptor page.
// If 'must_exist' is 0, then fd can be a closed or nonexistent file
// descriptor; the function will return 0 and have no other effect.
// If 'must_exist' is 1, then fd_close returns -E_INVAL when passed a
// closed or nonexistent file descriptor.
// Returns 0 on success, < 0 on error.
int
fd_close(struct Fd *fd, bool must_exist)
{
	struct Fd *fd2;
	struct Dev *dev;
	int r;

	if ((r = fd_lookup(fd2num(fd), &fd2)) < 0
	    || fd != fd2)
		return (must_exist ? r : 0);
	if ((r = dev_lookup(fd->fd_dev_id, &dev)) >= 0)
		r = (*dev->dev_close)(fd);
	// Make sure fd is unmapped.  Might be a no-op if
	// (*dev->dev_close)(fd) already unmapped it.
	(void) sys_page_unmap(0, fd);
	return r;
}
/******************
 * FILE FUNCTIONS *
 ******************/
static struct Dev *devtab[] =
{
	&devfile,
	0
};

int
dev_lookup(int dev_id, struct Dev **dev)
{
	int i;

	for (i = 0; devtab[i]; i++)
		if (devtab[i]->dev_id == dev_id) {
			*dev = devtab[i];
			return 0;
		}
	cprintf("[%08x] unknown device type %d\n", env->env_id, dev_id);
	*dev = 0;
	return -E_INVAL;
}
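// The dispatch above assumes each device exports a 'struct Dev' of methods.
// The real declaration lives in inc/fd.h; the sketch below only lists the
// fields this file actually uses, so it may be incomplete or differ in detail.
#if 0
struct Dev {
	int	dev_id;					// matched against fd->fd_dev_id
	const char *dev_name;				// used in debug output
	ssize_t	(*dev_read)(struct Fd *fd, void *buf, size_t n, off_t offset);
	ssize_t	(*dev_write)(struct Fd *fd, const void *buf, size_t n, off_t offset);
	int	(*dev_close)(struct Fd *fd);
	int	(*dev_stat)(struct Fd *fd, struct Stat *stat);
	int	(*dev_trunc)(struct Fd *fd, off_t newsize);
};
#endif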
int
close(int fdnum)
{
	struct Fd *fd;
	int r;

	if ((r = fd_lookup(fdnum, &fd)) < 0)
		return r;
	else
		return fd_close(fd, 1);
}
void
close_all(void)
{
	int i;

	for (i = 0; i < MAXFD; i++)
		close(i);
}
// Make file descriptor 'newfdnum' a duplicate of file descriptor 'oldfdnum'.
// For instance, writing onto either file descriptor will affect the
// file and the file offset of the other.
// Closes any previously open file descriptor at 'newfdnum'.
// This is implemented using virtual memory tricks (of course!).
int
dup(int oldfdnum, int newfdnum)
{
	int i, r;
	char *ova, *nva;
	pte_t pte;
	struct Fd *oldfd, *newfd;

	if ((r = fd_lookup(oldfdnum, &oldfd)) < 0)
		return r;
	close(newfdnum);

	newfd = INDEX2FD(newfdnum);
	ova = fd2data(oldfd);
	nva = fd2data(newfd);

	if ((r = sys_page_map(0, oldfd, 0, newfd, vpt[VPN(oldfd)] & PTE_USER)) < 0)
		goto err;
	if (vpd[PDX(ova)]) {
		for (i = 0; i < PTSIZE; i += PGSIZE) {
			pte = vpt[VPN(ova + i)];
			if (pte & PTE_P) {
				// should be no error here -- pd is already allocated
				if ((r = sys_page_map(0, ova + i, 0, nva + i, pte & PTE_USER)) < 0)
					goto err;
			}
		}
	}

	return newfdnum;

err:
	sys_page_unmap(0, newfd);
	for (i = 0; i < PTSIZE; i += PGSIZE)
		sys_page_unmap(0, nva + i);
	return r;
}
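// Example use of dup() (a sketch, not part of fd.c): after duplicating,
// both descriptors share the same 'struct Fd' page, so I/O through one
// advances the offset seen by the other.  'motd' is just an example name.
#if 0
	int fd, r;

	if ((fd = open("motd", O_RDONLY)) < 0)
		panic("open: %e", fd);
	if ((r = dup(fd, 0)) < 0)		// make fd 0 an alias of 'fd'
		panic("dup: %e", r);
	// read(0, ...) and read(fd, ...) now share one file offset
#endif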
ssize_t
read(int fdnum, void *buf, size_t n)
{
	int r;
	struct Dev *dev;
	struct Fd *fd;

	if ((r = fd_lookup(fdnum, &fd)) < 0
	    || (r = dev_lookup(fd->fd_dev_id, &dev)) < 0)
		return r;
	if ((fd->fd_omode & O_ACCMODE) == O_WRONLY) {
		cprintf("[%08x] read %d -- bad mode\n", env->env_id, fdnum);
		return -E_INVAL;
	}
	r = (*dev->dev_read)(fd, buf, n, fd->fd_offset);
	if (r >= 0)
		fd->fd_offset += r;
	return r;
}
ssize_t
readn(int fdnum, void *buf, size_t n)
{
	int m, tot;

	for (tot = 0; tot < n; tot += m) {
		m = read(fdnum, (char*)buf + tot, n - tot);
		if (m < 0)
			return m;
		if (m == 0)
			break;
	}
	return tot;
}
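// readn() wraps read() in a loop: read() may legally return fewer bytes
// than requested, while readn() keeps going until 'n' bytes arrive, the
// device reports end-of-file, or an error occurs.  A minimal sketch of the
// difference (illustrative only; assumes 'fdnum' is an open descriptor):
#if 0
	char buf[512];
	int r;

	r = read(fdnum, buf, sizeof buf);	// may return fewer than 512 bytes
	r = readn(fdnum, buf, sizeof buf);	// short only at EOF or on error
#endif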
ssize_t
write(int fdnum, const void *buf, size_t n)
{
	int r;
	struct Dev *dev;
	struct Fd *fd;

	if ((r = fd_lookup(fdnum, &fd)) < 0
	    || (r = dev_lookup(fd->fd_dev_id, &dev)) < 0)
		return r;
	if ((fd->fd_omode & O_ACCMODE) == O_RDONLY) {
		cprintf("[%08x] write %d -- bad mode\n", env->env_id, fdnum);
		return -E_INVAL;
	}
	if (debug)
		cprintf("write %d %p %d via dev %s\n",
			fdnum, buf, n, dev->dev_name);
	r = (*dev->dev_write)(fd, buf, n, fd->fd_offset);
	if (r > 0)
		fd->fd_offset += r;
	return r;
}
int
seek(int fdnum, off_t offset)
{
	int r;
	struct Fd *fd;

	if ((r = fd_lookup(fdnum, &fd)) < 0)
		return r;
	fd->fd_offset = offset;
	return 0;
}
int
ftruncate(int fdnum, off_t newsize)
{
	int r;
	struct Dev *dev;
	struct Fd *fd;

	if ((r = fd_lookup(fdnum, &fd)) < 0
	    || (r = dev_lookup(fd->fd_dev_id, &dev)) < 0)
		return r;
	if ((fd->fd_omode & O_ACCMODE) == O_RDONLY) {
		cprintf("[%08x] ftruncate %d -- bad mode\n",
			env->env_id, fdnum);
		return -E_INVAL;
	}
	return (*dev->dev_trunc)(fd, newsize);
}
int
fstat(int fdnum, struct Stat *stat)
{
	int r;
	struct Dev *dev;
	struct Fd *fd;

	if ((r = fd_lookup(fdnum, &fd)) < 0
	    || (r = dev_lookup(fd->fd_dev_id, &dev)) < 0)
		return r;
	stat->st_name[0] = 0;
	stat->st_size = 0;
	stat->st_isdir = 0;
	stat->st_dev = dev;
	return (*dev->dev_stat)(fd, stat);
}
int
stat(const char *path, struct Stat *stat)
{
	int fd, r;

	if ((fd = open(path, O_RDONLY)) < 0)
		return fd;
	r = fstat(fd, stat);
	close(fd);
	return r;
}
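// End-to-end sketch of the user-level API this file provides (illustrative
// only; 'hello.txt' and the buffer size are made-up values):
#if 0
	char buf[128];
	struct Stat st;
	int fd, r;

	if ((fd = open("hello.txt", O_RDONLY)) < 0)
		panic("open: %e", fd);
	if ((r = fstat(fd, &st)) < 0)
		panic("fstat: %e", r);
	cprintf("%s is %d bytes\n", st.st_name, st.st_size);
	if ((r = readn(fd, buf, sizeof buf)) < 0)
		panic("read: %e", r);
	close(fd);
#endif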