Small cleanup of extensions code
[AROS.git] / compiler / clib / __stat.c
blob b05628cc7c4b636b03c681615113fc57f6788261
/*
    Copyright © 1995-2001, The AROS Development Team. All rights reserved.
    $Id$
*/

#include <dos/dos.h>
#include <proto/dos.h>
#include <proto/exec.h>

#include <string.h>
#include <errno.h>
#include <stdint.h>

#include "__time.h"
#include "__errno.h"
#include "__stat.h"

#include <sys/stat.h>
#include <aros/debug.h>

static mode_t __prot_a2u(ULONG protect);
static uid_t  __id_a2u(UWORD id);
static void   hashlittle2(const void *key, size_t length,
                          uint32_t *pc, uint32_t *pb);
int __stat(BPTR lock, struct stat *sb)
{
    struct FileInfoBlock *fib;
    UBYTE *buffer;
    int buffersize = 256;
    uint64_t hash;
    uint32_t pc = 1, pb = 1;        /* initial hash values */
    int fallback_to_defaults = 0;

    fib = AllocDosObject(DOS_FIB, NULL);

    if (!fib)
    {
        errno = IoErr2errno(IoErr());
        return -1;
    }

    if (!Examine(lock, fib))
    {
        if (IoErr() == ERROR_NOT_IMPLEMENTED)
        {
            fallback_to_defaults = 1;
        }
        else
        {
            errno = IoErr2errno(IoErr());
            FreeDosObject(DOS_FIB, fib);
            return -1;
        }
    }

    /* Get the full path of the stated filesystem object and use it to
       compute hash value */
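    /* NameFromLock() reports ERROR_LINE_TOO_LONG when the buffer is too small,
       so the loop below keeps doubling the buffer until the full path fits
       (or a different error aborts the call). */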
    do
    {
        if (!(buffer = AllocVec(buffersize, MEMF_ANY)))
        {
            errno = IoErr2errno(IoErr());
            FreeDosObject(DOS_FIB, fib);
            return -1;
        }

        if (NameFromLock(lock, buffer, buffersize))
            break;
        else if (IoErr() == ERROR_OBJECT_IN_USE || IoErr() == ERROR_NOT_IMPLEMENTED)
        {
            /* We can't retrieve the name because the lock is an exclusive lock
               or NameFromLock() is not implemented in this handler */
            buffer[0] = '\0';
            break;
        }
        else if (IoErr() != ERROR_LINE_TOO_LONG)
        {
            errno = IoErr2errno(IoErr());
            FreeDosObject(DOS_FIB, fib);
            FreeVec(buffer);
            return -1;
        }

        FreeVec(buffer);
        buffersize *= 2;
    } while (TRUE);
    hashlittle2(buffer, strlen((char *)buffer), &pc, &pb);
    hash = pc + (((uint64_t)pb) << 32);
    FreeVec(buffer);

    if (fallback_to_defaults)
    {
        /* Empty file, not protected, as old as it can be... */
        fib->fib_Size = 0;
        fib->fib_NumBlocks = 0;
        fib->fib_Date.ds_Days = 0;
        fib->fib_Date.ds_Minute = 0;
        fib->fib_Date.ds_Tick = 0;
        fib->fib_OwnerUID = 0;
        fib->fib_OwnerGID = 0;
        fib->fib_Protection = 0;
        /* Most probable value */
        fib->fib_DirEntryType = ST_PIPEFILE;
    }

    sb->st_dev = (dev_t)((struct FileHandle *)lock)->fh_Device;
    sb->st_ino = hash;      /* The hash value will be truncated if st_ino is
                               smaller than uint64_t, but that's ok */
    sb->st_size = (off_t)fib->fib_Size;
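    /* An AmigaOS DateStamp counts days, minutes and ticks since 1978-01-01.
       The conversion below turns it into seconds, applies the GMT offset (in
       minutes) and adds OFFSET_FROM_1970, presumably the number of seconds
       between 1970-01-01 and 1978-01-01, to obtain a Unix timestamp. */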
    sb->st_atime =
    sb->st_ctime =
    sb->st_mtime = (fib->fib_Date.ds_Days * 24*60 + fib->fib_Date.ds_Minute + __gmtoffset) * 60
                   + fib->fib_Date.ds_Tick / TICKS_PER_SECOND
                   + OFFSET_FROM_1970;
    sb->st_uid  = __id_a2u(fib->fib_OwnerUID);
    sb->st_gid  = __id_a2u(fib->fib_OwnerGID);
    sb->st_mode = __prot_a2u(fib->fib_Protection);

    {
        struct InfoData info;

        if (Info(lock, &info))
        {
            sb->st_blksize = info.id_BytesPerBlock;
        }
        else
        {
            /* The st_blksize is just a guideline anyway, so we set it
               to 1024 in case Info() didn't succeed */
            sb->st_blksize = 1024;
        }
    }
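    /* st_blocks is expressed in 512-byte units, so round the file size up to
       whole filesystem blocks and convert:
       ceil(size / blksize) * (blksize / 512). */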
    if (fib->fib_Size > 0 && sb->st_blksize > 0)
        sb->st_blocks =
            (1 + ((long)fib->fib_Size - 1) / sb->st_blksize) *
            (sb->st_blksize / 512);
    else
        sb->st_blocks = 0;

    switch (fib->fib_DirEntryType)
    {
        case ST_PIPEFILE:
            /* don't use S_IFIFO, we don't have a mkfifo() call ! */
            sb->st_mode |= S_IFCHR;
            break;

        case ST_ROOT:
        case ST_USERDIR:
        case ST_LINKDIR:
            sb->st_nlink = 1;
            sb->st_mode |= S_IFDIR;
            break;

        case ST_SOFTLINK:
            sb->st_nlink = 1;
            sb->st_mode |= S_IFLNK;
            break;

        case ST_FILE:
        case ST_LINKFILE:
        default:
            sb->st_nlink = 1;
            sb->st_mode |= S_IFREG;
    }

    FreeDosObject(DOS_FIB, fib);

    return 0;
}
static mode_t __prot_a2u(ULONG protect)
{
    mode_t uprot = 0000;

    if ((protect & FIBF_SCRIPT))
        uprot |= 0111;

    /* The following three flags are low-active! */
    if (!(protect & FIBF_EXECUTE))
        uprot |= 0100;
    if (!(protect & FIBF_WRITE))
        uprot |= 0200;
    if (!(protect & FIBF_READ))
        uprot |= 0400;

    if ((protect & FIBF_GRP_EXECUTE))
        uprot |= 0010;
    if ((protect & FIBF_GRP_WRITE))
        uprot |= 0020;
    if ((protect & FIBF_GRP_READ))
        uprot |= 0040;

    if ((protect & FIBF_OTR_EXECUTE))
        uprot |= 0001;
    if ((protect & FIBF_OTR_WRITE))
        uprot |= 0002;
    if ((protect & FIBF_OTR_READ))
        uprot |= 0004;

    return uprot;
}
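/* Map an AmigaOS owner ID to a POSIX uid/gid. A sketch of the intent, assuming
   the usual multiuser conventions: (UWORD)-1 is the Amiga super user and
   becomes root (0), 0 means "no owner" and becomes the traditional nobody
   value ((UWORD)-2), and the POSIX values that would otherwise collide are
   swapped the other way. */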
static uid_t __id_a2u(UWORD id)
{
    switch (id)
    {
        case (UWORD)-1:
            return 0;

        case (UWORD)-2:
            return (UWORD)-1;

        case 0:
            return (UWORD)-2;

        default:
            return id;
    }
}
/* The hash function code below is adapted from Bob Jenkins' public domain hash
   function, see http://burtleburtle.net/bob/hash/doobs.html for details */
#if AROS_BIG_ENDIAN
#  define HASH_LITTLE_ENDIAN 0
#else
#  define HASH_LITTLE_ENDIAN 1
#endif

#define hashsize(n) ((uint32_t)1<<(n))
#define hashmask(n) (hashsize(n)-1)
#define rot(x,k) (((x)<<(k)) | ((x)>>(32-(k))))

#define mix(a,b,c) \
{ \
    a -= c;  a ^= rot(c, 4);  c += b; \
    b -= a;  b ^= rot(a, 6);  a += c; \
    c -= b;  c ^= rot(b, 8);  b += a; \
    a -= c;  a ^= rot(c,16);  c += b; \
    b -= a;  b ^= rot(a,19);  a += c; \
    c -= b;  c ^= rot(b, 4);  b += a; \
}

#define final(a,b,c) \
{ \
    c ^= b; c -= rot(b,14); \
    a ^= c; a -= rot(c,11); \
    b ^= a; b -= rot(a,25); \
    c ^= b; c -= rot(b,16); \
    a ^= c; a -= rot(c,4);  \
    b ^= a; b -= rot(a,14); \
    c ^= b; c -= rot(b,24); \
}
/*
 * hashlittle2() -- hash a variable-length key into two 32-bit values
 * k      : the key (the unaligned variable-length array of bytes)
 * length : the length of the key, counting by bytes
 * pc     : IN: primary initval, OUT: primary hash
 * pb     : IN: secondary initval, OUT: secondary hash
 *
 * Returns two 32-bit hash values.  This is good enough for hash table
 * lookup with 2^^64 buckets, or if you want a second hash if you're not
 * happy with the first, or if you want a probably-unique 64-bit ID for
 * the key.  *pc is better mixed than *pb, so use *pc first.  If you want
 * a 64-bit value do something like "*pc + (((uint64_t)*pb)<<32)".
 */
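/* For example, __stat() above seeds both initvals with 1 and then combines the
   two results into a single 64-bit inode number:

       uint32_t pc = 1, pb = 1;
       hashlittle2(buffer, strlen((char *)buffer), &pc, &pb);
       hash = pc + (((uint64_t)pb) << 32);
*/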
static void hashlittle2(
    const void *key,        /* the key to hash */
    size_t      length,     /* length of the key */
    uint32_t   *pc,         /* IN: primary initval, OUT: primary hash */
    uint32_t   *pb)         /* IN: secondary initval, OUT: secondary hash */
{
    uint32_t a, b, c;                            /* internal state */
    union { const void *ptr; size_t i; } u;      /* needed for Mac Powerbook G4 */

    /* Set up the internal state */
    a = b = c = 0xdeadbeef + ((uint32_t)length) + *pc;
    c += *pb;

    u.ptr = key;
    if (HASH_LITTLE_ENDIAN && ((u.i & 0x3) == 0)) {
        const uint32_t *k = (const uint32_t *)key;      /* read 32-bit chunks */
        const uint8_t  *k8;

        /*------ all but last block: aligned reads and affect 32 bits of (a,b,c) */
        while (length > 12)
        {
            a += k[0];
            b += k[1];
            c += k[2];
            mix(a,b,c);
            length -= 12;
            k += 3;
        }
        /*----------------------------- handle the last (probably partial) block */
        /*
         * "k[2]&0xffffff" actually reads beyond the end of the string, but
         * then masks off the part it's not allowed to read.  Because the
         * string is aligned, the masked-off tail is in the same word as the
         * rest of the string.  Every machine with memory protection I've seen
         * does it on word boundaries, so is OK with this.  But VALGRIND will
         * still catch it and complain.  The masking trick does make the hash
         * noticeably faster for short strings (like English words).
         */
#ifndef VALGRIND
        switch (length)
        {
            case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
            case 11: c+=k[2]&0xffffff; b+=k[1]; a+=k[0]; break;
            case 10: c+=k[2]&0xffff; b+=k[1]; a+=k[0]; break;
            case 9 : c+=k[2]&0xff; b+=k[1]; a+=k[0]; break;
            case 8 : b+=k[1]; a+=k[0]; break;
            case 7 : b+=k[1]&0xffffff; a+=k[0]; break;
            case 6 : b+=k[1]&0xffff; a+=k[0]; break;
            case 5 : b+=k[1]&0xff; a+=k[0]; break;
            case 4 : a+=k[0]; break;
            case 3 : a+=k[0]&0xffffff; break;
            case 2 : a+=k[0]&0xffff; break;
            case 1 : a+=k[0]&0xff; break;
            case 0 : *pc=c; *pb=b; return;  /* zero length strings require no mixing */
        }

#else /* make valgrind happy */

        k8 = (const uint8_t *)k;
        switch (length)
        {
            case 12: c+=k[2]; b+=k[1]; a+=k[0]; break;
            case 11: c+=((uint32_t)k8[10])<<16;  /* fall through */
            case 10: c+=((uint32_t)k8[9])<<8;    /* fall through */
            case 9 : c+=k8[8];                   /* fall through */
            case 8 : b+=k[1]; a+=k[0]; break;
            case 7 : b+=((uint32_t)k8[6])<<16;   /* fall through */
            case 6 : b+=((uint32_t)k8[5])<<8;    /* fall through */
            case 5 : b+=k8[4];                   /* fall through */
            case 4 : a+=k[0]; break;
            case 3 : a+=((uint32_t)k8[2])<<16;   /* fall through */
            case 2 : a+=((uint32_t)k8[1])<<8;    /* fall through */
            case 1 : a+=k8[0]; break;
            case 0 : *pc=c; *pb=b; return;  /* zero length strings require no mixing */
        }

#endif /* !valgrind */
    } else if (HASH_LITTLE_ENDIAN && ((u.i & 0x1) == 0)) {
        const uint16_t *k = (const uint16_t *)key;      /* read 16-bit chunks */
        const uint8_t  *k8;

        /*--------------- all but last block: aligned reads and different mixing */
        while (length > 12)
        {
            a += k[0] + (((uint32_t)k[1])<<16);
            b += k[2] + (((uint32_t)k[3])<<16);
            c += k[4] + (((uint32_t)k[5])<<16);
            mix(a,b,c);
            length -= 12;
            k += 6;
        }

        /*----------------------------- handle the last (probably partial) block */
        k8 = (const uint8_t *)k;
        switch (length)
        {
            case 12: c+=k[4]+(((uint32_t)k[5])<<16);
                     b+=k[2]+(((uint32_t)k[3])<<16);
                     a+=k[0]+(((uint32_t)k[1])<<16);
                     break;
            case 11: c+=((uint32_t)k8[10])<<16;  /* fall through */
            case 10: c+=k[4];
                     b+=k[2]+(((uint32_t)k[3])<<16);
                     a+=k[0]+(((uint32_t)k[1])<<16);
                     break;
            case 9 : c+=k8[8];                   /* fall through */
            case 8 : b+=k[2]+(((uint32_t)k[3])<<16);
                     a+=k[0]+(((uint32_t)k[1])<<16);
                     break;
            case 7 : b+=((uint32_t)k8[6])<<16;   /* fall through */
            case 6 : b+=k[2];
                     a+=k[0]+(((uint32_t)k[1])<<16);
                     break;
            case 5 : b+=k8[4];                   /* fall through */
            case 4 : a+=k[0]+(((uint32_t)k[1])<<16);
                     break;
            case 3 : a+=((uint32_t)k8[2])<<16;   /* fall through */
            case 2 : a+=k[0];
                     break;
            case 1 : a+=k8[0];
                     break;
            case 0 : *pc=c; *pb=b; return;  /* zero length strings require no mixing */
        }
    } else {                    /* need to read the key one byte at a time */
        const uint8_t *k = (const uint8_t *)key;

        /*--------------- all but the last block: affect some 32 bits of (a,b,c) */
        while (length > 12)
        {
            a += k[0];
            a += ((uint32_t)k[1])<<8;
            a += ((uint32_t)k[2])<<16;
            a += ((uint32_t)k[3])<<24;
            b += k[4];
            b += ((uint32_t)k[5])<<8;
            b += ((uint32_t)k[6])<<16;
            b += ((uint32_t)k[7])<<24;
            c += k[8];
            c += ((uint32_t)k[9])<<8;
            c += ((uint32_t)k[10])<<16;
            c += ((uint32_t)k[11])<<24;
            mix(a,b,c);
            length -= 12;
            k += 12;
        }

        /*-------------------------------- last block: affect all 32 bits of (c) */
        switch (length)         /* all the case statements fall through */
        {
            case 12: c+=((uint32_t)k[11])<<24;
            case 11: c+=((uint32_t)k[10])<<16;
            case 10: c+=((uint32_t)k[9])<<8;
            case 9 : c+=k[8];
            case 8 : b+=((uint32_t)k[7])<<24;
            case 7 : b+=((uint32_t)k[6])<<16;
            case 6 : b+=((uint32_t)k[5])<<8;
            case 5 : b+=k[4];
            case 4 : a+=((uint32_t)k[3])<<24;
            case 3 : a+=((uint32_t)k[2])<<16;
            case 2 : a+=((uint32_t)k[1])<<8;
            case 1 : a+=k[0];
                     break;
            case 0 : *pc=c; *pb=b; return;  /* zero length strings require no mixing */
        }
    }

    final(a,b,c);
    *pc=c; *pb=b;
}