// Maximum number of file descriptors a program may hold open concurrently
// NOTE(review): the #define itself was lost in this copy; 32 is the
// conventional JOS value -- confirm against the original source.
#define MAXFD		32

// Bottom of file data area
#define FILEBASE	0xD0000000
// Bottom of file descriptor area (one PTSIZE region below FILEBASE;
// each descriptor occupies one page starting here)
#define FDTABLE		(FILEBASE - PTSIZE)

// Return the 'struct Fd*' for file descriptor index i
#define INDEX2FD(i)	((struct Fd*) (FDTABLE + (i)*PGSIZE))
// Return the file data pointer for file descriptor index i
// (each descriptor owns a PTSIZE-sized data region above FILEBASE)
#define INDEX2DATA(i)	((char*) (FILEBASE + (i)*PTSIZE))
/********************************
 * FILE DESCRIPTOR MANIPULATORS *
 ********************************/
// Return the file data page address for file descriptor 'fd'.
// (Return type is char* -- INDEX2DATA casts to char*.)
char*
fd2data(struct Fd *fd)
{
	return INDEX2DATA(fd2num(fd));
}
32 return ((uintptr_t) fd
- FDTABLE
) / PGSIZE
;
35 // Finds the smallest i from 0 to MAXFD-1 that doesn't have
36 // its fd page mapped.
37 // Sets *fd_store to the corresponding fd page virtual address.
39 // fd_alloc does NOT actually allocate an fd page.
40 // It is up to the caller to allocate the page somehow.
41 // This means that if someone calls fd_alloc twice in a row
42 // without allocating the first page we return, we'll return the same
43 // page the second time.
45 // Hint: Use INDEX2FD.
47 // Returns 0 on success, < 0 on error. Errors are:
48 // -E_MAX_FD: no more file descriptors
49 // On error, *fd_store is set to 0.
51 fd_alloc(struct Fd
**fd_store
)
56 for (i
= 0; i
< MAXFD
; i
++) {
58 if (pageref(fd
) == 0) {
67 // Check that fdnum is in range and mapped.
68 // If it is, set *fd_store to the fd page virtual address.
70 // Returns 0 on success (the page is in range and mapped), < 0 on error.
72 // -E_INVAL: fdnum was either not in range or not mapped.
74 fd_lookup(int fdnum
, struct Fd
**fd_store
)
78 if (fdnum
>= 0 && fdnum
< MAXFD
) {
80 if (pageref(fd
) > 0) {
89 // Frees file descriptor 'fd' by closing the corresponding file
90 // and unmapping the file descriptor page.
91 // If 'must_exist' is 0, then fd can be a closed or nonexistent file
92 // descriptor; the function will return 0 and have no other effect.
93 // If 'must_exist' is 1, then fd_close returns -E_INVAL when passed a
94 // closed or nonexistent file descriptor.
95 // Returns 0 on success, < 0 on error.
97 fd_close(struct Fd
*fd
, bool must_exist
)
102 if ((r
= fd_lookup(fd2num(fd
), &fd2
)) < 0
104 return (must_exist
? r
: 0);
105 if ((r
= dev_lookup(fd
->fd_dev_id
, &dev
)) >= 0)
106 r
= (*dev
->dev_close
)(fd
);
107 // Make sure fd is unmapped. Might be a no-op if
108 // (*dev->dev_close)(fd) already unmapped it.
109 (void) sys_page_unmap(0, fd
);
// Table of device drivers known to the fd layer; dev_lookup scans it
// until a null entry.
// NOTE(review): the initializer list for this array (original lines
// 120-126) is missing from this copy and must be restored from the
// original source -- it should list &dev... entries terminated by 0.
static struct Dev
*devtab
[] =
128 dev_lookup(int dev_id
, struct Dev
**dev
)
131 for (i
= 0; devtab
[i
]; i
++)
132 if (devtab
[i
]->dev_id
== dev_id
) {
136 cprintf("[%08x] unknown device type %d\n", env
->env_id
, dev_id
);
// Close file descriptor 'fdnum', which must currently be open
// (fd_close is called with must_exist == 1).
// Returns 0 on success, < 0 on error.
// NOTE(review): function header reconstructed; the lookup and the
// fd_close(fd, 1) call are original.
int
close(int fdnum)
{
	struct Fd *fd;
	int r;

	if ((r = fd_lookup(fdnum, &fd)) < 0)
		return r;
	return fd_close(fd, 1);
}
157 for (i
= 0; i
< MAXFD
; i
++)
161 // Make file descriptor 'newfdnum' a duplicate of file descriptor 'oldfdnum'.
162 // For instance, writing onto either file descriptor will affect the
163 // file and the file offset of the other.
164 // Closes any previously open file descriptor at 'newfdnum'.
165 // This is implemented using virtual memory tricks (of course!).
167 dup(int oldfdnum
, int newfdnum
)
172 struct Fd
*oldfd
, *newfd
;
174 if ((r
= fd_lookup(oldfdnum
, &oldfd
)) < 0)
178 newfd
= INDEX2FD(newfdnum
);
179 ova
= fd2data(oldfd
);
180 nva
= fd2data(newfd
);
183 for (i
= 0; i
< PTSIZE
; i
+= PGSIZE
) {
184 pte
= vpt
[VPN(ova
+ i
)];
186 // should be no error here -- pd is already allocated
187 if ((r
= sys_page_map(0, ova
+ i
, 0, nva
+ i
, pte
& PTE_USER
)) < 0)
193 if ((r
= sys_page_map(0, oldfd
, 0, newfd
, vpt
[VPN(oldfd
)] & PTE_USER
)) < 0)
199 sys_page_unmap(0, newfd
);
200 for (i
= 0; i
< PTSIZE
; i
+= PGSIZE
)
201 sys_page_unmap(0, nva
+ i
);
206 read(int fdnum
, void *buf
, size_t n
)
212 if ((r
= fd_lookup(fdnum
, &fd
)) < 0
213 || (r
= dev_lookup(fd
->fd_dev_id
, &dev
)) < 0)
215 if ((fd
->fd_omode
& O_ACCMODE
) == O_WRONLY
) {
216 cprintf("[%08x] read %d -- bad mode\n", env
->env_id
, fdnum
);
219 r
= (*dev
->dev_read
)(fd
, buf
, n
, fd
->fd_offset
);
226 readn(int fdnum
, void *buf
, size_t n
)
230 for (tot
= 0; tot
< n
; tot
+= m
) {
231 m
= read(fdnum
, (char*)buf
+ tot
, n
- tot
);
241 write(int fdnum
, const void *buf
, size_t n
)
247 if ((r
= fd_lookup(fdnum
, &fd
)) < 0
248 || (r
= dev_lookup(fd
->fd_dev_id
, &dev
)) < 0)
250 if ((fd
->fd_omode
& O_ACCMODE
) == O_RDONLY
) {
251 cprintf("[%08x] write %d -- bad mode\n", env
->env_id
, fdnum
);
255 cprintf("write %d %p %d via dev %s\n",
256 fdnum
, buf
, n
, dev
->dev_name
);
257 r
= (*dev
->dev_write
)(fd
, buf
, n
, fd
->fd_offset
);
264 seek(int fdnum
, off_t offset
)
269 if ((r
= fd_lookup(fdnum
, &fd
)) < 0)
271 fd
->fd_offset
= offset
;
276 ftruncate(int fdnum
, off_t newsize
)
281 if ((r
= fd_lookup(fdnum
, &fd
)) < 0
282 || (r
= dev_lookup(fd
->fd_dev_id
, &dev
)) < 0)
284 if ((fd
->fd_omode
& O_ACCMODE
) == O_RDONLY
) {
285 cprintf("[%08x] ftruncate %d -- bad mode\n",
289 return (*dev
->dev_trunc
)(fd
, newsize
);
293 fstat(int fdnum
, struct Stat
*stat
)
299 if ((r
= fd_lookup(fdnum
, &fd
)) < 0
300 || (r
= dev_lookup(fd
->fd_dev_id
, &dev
)) < 0)
302 stat
->st_name
[0] = 0;
306 return (*dev
->dev_stat
)(fd
, stat
);
310 stat(const char *path
, struct Stat
*stat
)
314 if ((fd
= open(path
, O_RDONLY
)) < 0)