// Maximum number of file descriptors a program may hold open concurrently
// NOTE(review): the define itself is not visible in this chunk; 32 is the
// upstream JOS value -- confirm it matches this file.
#define MAXFD		32
// Bottom of file data area
#define FILEBASE	0xD0000000
// Bottom of file descriptor area (one page per fd, one PTSIZE data
// region per fd, laid out just below FILEBASE)
#define FDTABLE		(FILEBASE - PTSIZE)

// Return the 'struct Fd*' for file descriptor index i
#define INDEX2FD(i)	((struct Fd*) (FDTABLE + (i)*PGSIZE))
// Return the file data pointer for file descriptor index i
#define INDEX2DATA(i)	((char*) (FILEBASE + (i)*PTSIZE))
18 /********************************
19 * FILE DESCRIPTOR MANIPULATORS *
21 ********************************/
// Return the start of the file-data region for descriptor page 'fd'.
char*
fd2data(struct Fd *fd)
{
	return INDEX2DATA(fd2num(fd));
}
32 return ((uintptr_t) fd
- FDTABLE
) / PGSIZE
;
35 // Finds the smallest i from 0 to MAXFD-1 that doesn't have
36 // its fd page mapped.
37 // Sets *fd_store to the corresponding fd page virtual address.
39 // fd_alloc does NOT actually allocate an fd page.
40 // It is up to the caller to allocate the page somehow.
41 // This means that if someone calls fd_alloc twice in a row
42 // without allocating the first page we return, we'll return the same
43 // page the second time.
45 // Hint: Use INDEX2FD.
47 // Returns 0 on success, < 0 on error. Errors are:
48 // -E_MAX_FD: no more file descriptors
49 // On error, *fd_store is set to 0.
51 fd_alloc(struct Fd
**fd_store
)
56 for (i
= 0; i
< MAXFD
; i
++) {
58 if (pageref(fd
) == 0) {
67 // Check that fdnum is in range and mapped.
68 // If it is, set *fd_store to the fd page virtual address.
70 // Returns 0 on success (the page is in range and mapped), < 0 on error.
72 // -E_INVAL: fdnum was either not in range or not mapped.
74 fd_lookup(int fdnum
, struct Fd
**fd_store
)
78 if (fdnum
>= 0 && fdnum
< MAXFD
) {
80 if (pageref(fd
) > 0) {
89 // Frees file descriptor 'fd' by closing the corresponding file
90 // and unmapping the file descriptor page.
91 // If 'must_exist' is 0, then fd can be a closed or nonexistent file
92 // descriptor; the function will return 0 and have no other effect.
93 // If 'must_exist' is 1, then fd_close returns -E_INVAL when passed a
94 // closed or nonexistent file descriptor.
95 // Returns 0 on success, < 0 on error.
97 fd_close(struct Fd
*fd
, bool must_exist
)
102 if ((r
= fd_lookup(fd2num(fd
), &fd2
)) < 0
104 return (must_exist
? r
: 0);
105 if ((r
= dev_lookup(fd
->fd_dev_id
, &dev
)) >= 0)
106 r
= (*dev
->dev_close
)(fd
);
107 // Make sure fd is unmapped. Might be a no-op if
108 // (*dev->dev_close)(fd) already unmapped it.
109 (void) sys_page_unmap(0, fd
);
119 static struct Dev
*devtab
[] =
126 dev_lookup(int dev_id
, struct Dev
**dev
)
129 for (i
= 0; devtab
[i
]; i
++)
130 if (devtab
[i
]->dev_id
== dev_id
) {
134 cprintf("[%08x] unknown device type %d\n", env
->env_id
, dev_id
);
// Close file descriptor index 'fdnum': look up its fd page and free it.
// Returns 0 on success, < 0 on error (including -E_INVAL for a
// descriptor that is not open).
int
close(int fdnum)
{
	struct Fd *fd;
	int r;

	if ((r = fd_lookup(fdnum, &fd)) < 0)
		return r;
	else
		return fd_close(fd, 1);
}
155 for (i
= 0; i
< MAXFD
; i
++)
159 // Make file descriptor 'newfdnum' a duplicate of file descriptor 'oldfdnum'.
160 // For instance, writing onto either file descriptor will affect the
161 // file and the file offset of the other.
162 // Closes any previously open file descriptor at 'newfdnum'.
163 // This is implemented using virtual memory tricks (of course!).
165 dup(int oldfdnum
, int newfdnum
)
170 struct Fd
*oldfd
, *newfd
;
172 if ((r
= fd_lookup(oldfdnum
, &oldfd
)) < 0)
176 newfd
= INDEX2FD(newfdnum
);
177 ova
= fd2data(oldfd
);
178 nva
= fd2data(newfd
);
180 if ((r
= sys_page_map(0, oldfd
, 0, newfd
, vpt
[VPN(oldfd
)] & PTE_USER
)) < 0)
183 for (i
= 0; i
< PTSIZE
; i
+= PGSIZE
) {
184 pte
= vpt
[VPN(ova
+ i
)];
186 // should be no error here -- pd is already allocated
187 if ((r
= sys_page_map(0, ova
+ i
, 0, nva
+ i
, pte
& PTE_USER
)) < 0)
196 sys_page_unmap(0, newfd
);
197 for (i
= 0; i
< PTSIZE
; i
+= PGSIZE
)
198 sys_page_unmap(0, nva
+ i
);
203 read(int fdnum
, void *buf
, size_t n
)
209 if ((r
= fd_lookup(fdnum
, &fd
)) < 0
210 || (r
= dev_lookup(fd
->fd_dev_id
, &dev
)) < 0)
212 if ((fd
->fd_omode
& O_ACCMODE
) == O_WRONLY
) {
213 cprintf("[%08x] read %d -- bad mode\n", env
->env_id
, fdnum
);
216 r
= (*dev
->dev_read
)(fd
, buf
, n
, fd
->fd_offset
);
223 readn(int fdnum
, void *buf
, size_t n
)
227 for (tot
= 0; tot
< n
; tot
+= m
) {
228 m
= read(fdnum
, (char*)buf
+ tot
, n
- tot
);
238 write(int fdnum
, const void *buf
, size_t n
)
244 if ((r
= fd_lookup(fdnum
, &fd
)) < 0
245 || (r
= dev_lookup(fd
->fd_dev_id
, &dev
)) < 0)
247 if ((fd
->fd_omode
& O_ACCMODE
) == O_RDONLY
) {
248 cprintf("[%08x] write %d -- bad mode\n", env
->env_id
, fdnum
);
252 cprintf("write %d %p %d via dev %s\n",
253 fdnum
, buf
, n
, dev
->dev_name
);
254 r
= (*dev
->dev_write
)(fd
, buf
, n
, fd
->fd_offset
);
261 seek(int fdnum
, off_t offset
)
266 if ((r
= fd_lookup(fdnum
, &fd
)) < 0)
268 fd
->fd_offset
= offset
;
273 ftruncate(int fdnum
, off_t newsize
)
278 if ((r
= fd_lookup(fdnum
, &fd
)) < 0
279 || (r
= dev_lookup(fd
->fd_dev_id
, &dev
)) < 0)
281 if ((fd
->fd_omode
& O_ACCMODE
) == O_RDONLY
) {
282 cprintf("[%08x] ftruncate %d -- bad mode\n",
286 return (*dev
->dev_trunc
)(fd
, newsize
);
290 fstat(int fdnum
, struct Stat
*stat
)
296 if ((r
= fd_lookup(fdnum
, &fd
)) < 0
297 || (r
= dev_lookup(fd
->fd_dev_id
, &dev
)) < 0)
299 stat
->st_name
[0] = 0;
303 return (*dev
->dev_stat
)(fd
, stat
);
307 stat(const char *path
, struct Stat
*stat
)
311 if ((fd
= open(path
, O_RDONLY
)) < 0)