src/recompiler/cutils.c
/*
 * Simple C functions to supplement the C library
 *
 * Copyright (c) 2006 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu-common.h"
#include "host-utils.h"

#ifdef VBOX
# include "osdep.h"

/* Local ASCII-only replacement for toupper(). */
static inline int toupper(int ch)
{
    if ((unsigned int)(ch - 'a') < 26u)
        ch += 'A' - 'a';
    return ch;
}

/* Quick sort from OpenSolaris:
   http://src.opensolaris.org/source/raw/onnv/onnv-gate/usr/src/common/util/qsort.c */

/*
 * choose a median of 3 values
 *
 * note: cstyle specifically prohibits nested conditional operators
 * but this is the only way to do the median of 3 function in-line
 */
#define med3(a, b, c) (cmp((a), (b)) < 0)                               \
        ? ((cmp((b), (c)) < 0) ? (b) : (cmp((a), (c)) < 0) ? (c) : (a)) \
        : ((cmp((b), (c)) > 0) ? (b) : (cmp((a), (c)) > 0) ? (c) : (a))

#define THRESH_L    5   /* threshold for insertion sort */
#define THRESH_M3  20   /* threshold for median of 3 */
#define THRESH_M9  50   /* threshold for median of 9 */

typedef struct {
    char   *b_lim;
    size_t  nrec;
} stk_t;

/*
 * The following swap functions should not create a stack frame:
 * the SPARC call / return instruction will be executed,
 * but a save / restore will not be executed,
 * which means we won't do a window turn with the spill / fill overhead.
 * Verify this by examining the assembly code.
 */

/* ARGSUSED */
static void
swapp32(uint32_t *r1, uint32_t *r2, size_t cnt)
{
    uint32_t temp;

    temp = *r1;
    *r1++ = *r2;
    *r2++ = temp;
}

/* ARGSUSED */
static void
swapp64(uint64_t *r1, uint64_t *r2, size_t cnt)
{
    uint64_t temp;

    temp = *r1;
    *r1++ = *r2;
    *r2++ = temp;
}

static void
swapi(uint32_t *r1, uint32_t *r2, size_t cnt)
{
    uint32_t temp;

    /* word by word */
    while (cnt--) {
        temp = *r1;
        *r1++ = *r2;
        *r2++ = temp;
    }
}

static void
swapb(char *r1, char *r2, size_t cnt)
{
    char temp;

    /* character by character */
    while (cnt--) {
        temp = *r1;
        *r1++ = *r2;
        *r2++ = temp;
    }
}

/*
 * qsort() is a general purpose, in-place sorting routine using a
 * user provided call back function for comparisons.  This implementation
 * utilizes a ternary quicksort algorithm, and cuts over to an
 * insertion sort for partitions involving fewer than THRESH_L records.
 *
 * Potential User Errors
 *   There is no return value from qsort; this function has no method
 *   of alerting the user that a sort did not work or could not work.
 *   We do not print an error message or exit the process or thread.
 *   Even if we can detect an error, we CANNOT silently return without
 *   sorting the data; if we did so the user could never be sure the
 *   sort completed successfully.
 *   It is possible we could change the return value of sort from void
 *   to int and return success or some error codes, but this gets into
 *   standards and compatibility issues.
 *
 *   Examples of qsort parameter errors might be
 *   1) record size (rsiz) equal to 0
 *      qsort will loop and never return.
 *   2) record size (rsiz) less than 0
 *      rsiz is unsigned, so a negative value is insanely large
 *   3) number of records (nrec) is 0
 *      This is legal - qsort will return without examining any records
 *   4) number of records (nrec) is less than 0
 *      nrec is unsigned, so a negative value is insanely large.
 *   5) nrec * rsiz > memory allocation for sort array
 *      a segment violation may occur
 *      corruption of other memory may occur
 *   6) The base address of the sort array is invalid
 *      a segment violation may occur
 *      corruption of other memory may occur
 *   7) The user call back function is invalid
 *      we may get alignment errors or segment violations
 *      we may jump into never-never land
 *
 *   Some less obvious errors might be
 *   8) The user compare function is not comparing correctly
 *   9) The user compare function modifies the data records
 */
void
qemu_qsort(
    void *basep,
    size_t nrec,
    size_t rsiz,
    int (*cmp)(const void *, const void *))
{
    size_t i;                  /* temporary variable */

    /* variables used by swap */
    void (*swapf)(char *, char *, size_t);
    size_t loops;

    /* variables used by sort */
    stk_t stack[8 * sizeof (nrec) + 1];
    stk_t *sp;
    char *b_lim;               /* bottom limit */
    char *b_dup;               /* bottom duplicate */
    char *b_par;               /* bottom partition */
    char *t_lim;               /* top limit */
    char *t_dup;               /* top duplicate */
    char *t_par;               /* top partition */
    char *m1, *m2, *m3;        /* median pointers */
    uintptr_t d_bytelength;    /* byte length of duplicate records */
    int b_nrec;
    int t_nrec;
    int cv;                    /* results of compare (bottom / top) */

    /*
     * choose a swap function based on alignment and size
     *
     * The qsort function sorts an array of fixed length records.
     * We have very limited knowledge about the data record itself.
     * It may be that the data record is in the array we are sorting
     * or it may be that the array contains pointers or indexes to
     * the actual data record and all that we are sorting is the indexes.
     *
     * The following decision will choose an optimal swap function
     * based on the size and alignment of the data records
     *   swapp64 will swap 64 bit pointers
     *   swapp32 will swap 32 bit pointers
     *   swapi   will swap an array of 32 bit integers
     *   swapb   will swap an array of 8 bit characters
     *
     * swapi and swapb will also require the variable loops to be set
     * to control the length of the array being swapped
     */
    if ((((uintptr_t)basep & (sizeof (uint64_t) - 1)) == 0) &&
        (rsiz == sizeof (uint64_t))) {
        loops = 1;
        swapf = (void (*)(char *, char *, size_t))swapp64;
    } else if ((((uintptr_t)basep & (sizeof (uint32_t) - 1)) == 0) &&
        (rsiz == sizeof (uint32_t))) {
        loops = 1;
        swapf = (void (*)(char *, char *, size_t))swapp32;
    } else if ((((uintptr_t)basep & (sizeof (uint32_t) - 1)) == 0) &&
        ((rsiz & (sizeof (uint32_t) - 1)) == 0)) {
        loops = rsiz / sizeof (int);
        swapf = (void (*)(char *, char *, size_t))swapi;
    } else {
        loops = rsiz;
        swapf = swapb;
    }

    /*
     * qsort is a partitioning sort
     *
     * the stack is the bookkeeping mechanism to keep track of all
     * the partitions.
     *
     * each sort pass takes one partition and sorts it into two partitions.
     * at the top of the loop we simply take the partition on the top
     * of the stack and sort it. See the comments at the bottom
     * of the loop regarding which partitions to add in what order.
     *
     * initially put the whole partition on the stack
     */
    sp = stack;
    sp->b_lim = (char *)basep;
    sp->nrec = nrec;
    sp++;
    while (sp > stack) {
        sp--;
        b_lim = sp->b_lim;
        nrec = sp->nrec;

        /*
         * a linear insertion sort is faster than a qsort for
         * a very small number of records (THRESH_L)
         *
         * if number of records < threshold use linear insertion sort
         *
         * this also handles the special case where the partition
         * is 0 or 1 records in length.
         */
        if (nrec < THRESH_L) {
            /*
             * Linear insertion sort
             */
            t_par = b_lim;
            for (i = 1; i < nrec; i++) {
                t_par += rsiz;
                b_par = t_par;
                while (b_par > b_lim) {
                    b_par -= rsiz;
                    if ((*cmp)(b_par, b_par + rsiz) <= 0) {
                        break;
                    }
                    (*swapf)(b_par, b_par + rsiz, loops);
                }
            }

            /*
             * a linear insertion sort will put all records
             * in their final position and will not create
             * subpartitions.
             *
             * therefore when the insertion sort is complete
             * just go to the top of the loop and get the
             * next partition to sort.
             */
            continue;
        }

        /* quicksort */

        /*
         * choose a pivot record
         *
         * Ideally the pivot record will divide the partition
         * into two equal parts. however we have to balance the
         * work involved in selecting the pivot record with the
         * expected benefit.
         *
         * The choice of pivot record depends on the number of
         * records in the partition
         *
         * for small partitions (nrec < THRESH_M3)
         *   we just select the record in the middle of the partition
         *
         * if (nrec >= THRESH_M3 && nrec < THRESH_M9)
         *   we select three values and choose the median of 3
         *
         * if (nrec >= THRESH_M9)
         *   then we use an approximate median of 9
         *   9 records are selected and grouped in 3 groups of 3
         *   the median of each of these 3 groups is fed into another
         *   median of 3 decision.
         *
         * Each median of 3 decision is 2 or 3 compares,
         * so median of 9 costs between 8 and 12 compares.
         *
         * i is byte distance between two consecutive samples
         * m2 will point to the pivot record
         */
        if (nrec < THRESH_M3) {
            m2 = b_lim + (nrec / 2) * rsiz;
        } else if (nrec < THRESH_M9) {
            /* use median of 3 */
            i = ((nrec - 1) / 2) * rsiz;
            m2 = med3(b_lim, b_lim + i, b_lim + 2 * i);
        } else {
            /* approx median of 9 */
            i = ((nrec - 1) / 8) * rsiz;
            m1 = med3(b_lim, b_lim + i, b_lim + 2 * i);
            m2 = med3(b_lim + 3 * i, b_lim + 4 * i, b_lim + 5 * i);
            m3 = med3(b_lim + 6 * i, b_lim + 7 * i, b_lim + 8 * i);
            m2 = med3(m1, m2, m3);
        }

        /*
         * quick sort partitioning
         *
         * The partition limits are defined by bottom and top pointers
         * b_lim and t_lim.
         *
         * qsort uses a fairly standard method of moving the
         * partitioning pointers, b_par and t_par, to the middle of
         * the partition and exchanging records that are in the
         * wrong part of the partition.
         *
         * Two enhancements have been made to the basic algorithm.
         * One for handling duplicate records and one to minimize
         * the number of swaps.
         *
         * Two duplicate record pointers (b_dup and t_dup) are
         * initially set to b_lim and t_lim.  Each time a record
         * whose sort key value is equal to the pivot record is found
         * it will be swapped with the record pointed to by
         * b_dup or t_dup and the duplicate pointer will be
         * incremented toward the center.
         * When partitioning is complete, all the duplicate records
         * will have been collected at the upper and lower limits of
         * the partition and can easily be moved adjacent to the
         * pivot record.
         *
         * The second optimization is to minimize the number of swaps.
         * The pointer m2 points to the pivot record.
         * During partitioning, if m2 is ever equal to the partitioning
         * pointers, b_par or t_par, then b_par or t_par just moves
         * onto the next record without doing a compare.
         * If, as a result of duplicate record detection,
         * b_dup or t_dup is ever equal to m2,
         * then m2 is changed to point to the duplicate record and
         * b_dup or t_dup is incremented without swapping records.
         *
         * When partitioning is done, we may not have the same pivot
         * record that we started with, but we will have one with
         * an equal sort key.
         */
        b_dup = b_par = b_lim;
        t_dup = t_par = t_lim = b_lim + rsiz * (nrec - 1);
        for (;;) {

            /* move bottom pointer up */
            for (; b_par <= t_par; b_par += rsiz) {
                if (b_par == m2) {
                    continue;
                }
                cv = cmp(b_par, m2);
                if (cv > 0) {
                    break;
                }
                if (cv == 0) {
                    if (b_dup == m2) {
                        m2 = b_par;
                    } else if (b_dup != b_par) {
                        (*swapf)(b_dup, b_par, loops);
                    }
                    b_dup += rsiz;
                }
            }

            /* move top pointer down */
            for (; b_par < t_par; t_par -= rsiz) {
                if (t_par == m2) {
                    continue;
                }
                cv = cmp(t_par, m2);
                if (cv < 0) {
                    break;
                }
                if (cv == 0) {
                    if (t_dup == m2) {
                        m2 = t_par;
                    } else if (t_dup != t_par) {
                        (*swapf)(t_dup, t_par, loops);
                    }
                    t_dup -= rsiz;
                }
            }

            /* break if we are done partitioning */
            if (b_par >= t_par) {
                break;
            }

            /* exchange records at upper and lower break points */
            (*swapf)(b_par, t_par, loops);
            b_par += rsiz;
            t_par -= rsiz;
        }

        /*
         * partitioning is now complete
         *
         * there are two termination conditions from the partitioning
         * loop above: either b_par and t_par have crossed, or
         * they are equal.
         *
         * we need to swap the pivot record to its final position
         * m2 could be in either the upper or lower partitions
         * or it could already be in its final position
         *
         *   R[b_par] > R[m2]
         *   R[t_par] < R[m2]
         */
        if (t_par < b_par) {
            if (m2 < t_par) {
                (*swapf)(m2, t_par, loops);
                m2 = b_par = t_par;
            } else if (m2 > b_par) {
                (*swapf)(m2, b_par, loops);
                m2 = t_par = b_par;
            } else {
                b_par = t_par = m2;
            }
        } else {
            if (m2 < t_par) {
                t_par = b_par = t_par - rsiz;
            }
            if (m2 != b_par) {
                (*swapf)(m2, b_par, loops);
            }
            m2 = t_par;
        }

        /*
         * move bottom duplicates next to pivot
         * optimized to eliminate overlap
         */
        d_bytelength = b_dup - b_lim;
        if (b_par - b_dup < d_bytelength) {
            b_dup = b_lim + (b_par - b_dup);
        }
        while (b_dup > b_lim) {
            b_dup -= rsiz;
            b_par -= rsiz;
            (*swapf)(b_dup, b_par, loops);
        }
        b_par = m2 - d_bytelength;

        /*
         * move top duplicates next to pivot
         */
        d_bytelength = t_lim - t_dup;
        if (t_dup - t_par < d_bytelength) {
            t_dup = t_lim - (t_dup - t_par);
        }
        while (t_dup < t_lim) {
            t_dup += rsiz;
            t_par += rsiz;
            (*swapf)(t_dup, t_par, loops);
        }
        t_par = m2 + d_bytelength;

        /*
         * when a qsort pass completes there are three partitions
         *   1) the lower contains all records less than the pivot
         *   2) the upper contains all records greater than the pivot
         *   3) the pivot partition contains all records equal to the pivot
         *
         * all records in the pivot partition are in their final
         * position and do not need to be accounted for by the stack
         *
         * when adding partitions to the stack
         * it is important to add the largest partition first
         * to prevent stack overflow.
         *
         * calculate number of unsorted records in top and bottom
         * push resulting partitions on stack
         */
        b_nrec = (b_par - b_lim) / rsiz;
        t_nrec = (t_lim - t_par) / rsiz;
        if (b_nrec < t_nrec) {
            sp->b_lim = t_par + rsiz;
            sp->nrec = t_nrec;
            sp++;
            sp->b_lim = b_lim;
            sp->nrec = b_nrec;
            sp++;
        } else {
            sp->b_lim = b_lim;
            sp->nrec = b_nrec;
            sp++;
            sp->b_lim = t_par + rsiz;
            sp->nrec = t_nrec;
            sp++;
        }
    }
}
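
/*
 * Illustrative example (not part of the original source): sorting a plain
 * int array with qemu_qsort().  The comparator follows the usual qsort()
 * contract (negative / zero / positive); the helper name cmp_int is made
 * up for this sketch.
 *
 *     static int cmp_int(const void *a, const void *b)
 *     {
 *         int x = *(const int *)a, y = *(const int *)b;
 *         return (x > y) - (x < y);
 *     }
 *
 *     int v[] = { 3, 1, 2 };
 *     qemu_qsort(v, sizeof(v) / sizeof(v[0]), sizeof(v[0]), cmp_int);
 *     // v is now { 1, 2, 3 }; a 4-byte aligned 4-byte record uses swapp32
 */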

#endif /* VBOX */

/* strcpy with truncation: copies at most buf_size-1 characters and always
   NUL-terminates (no-op if buf_size <= 0). */
void pstrcpy(char *buf, int buf_size, const char *str)
{
    int c;
    char *q = buf;

    if (buf_size <= 0)
        return;

    for(;;) {
        c = *str++;
        if (c == 0 || q >= buf + buf_size - 1)
            break;
        *q++ = c;
    }
    *q = '\0';
}

/* strcat and truncate. */
char *pstrcat(char *buf, int buf_size, const char *s)
{
    int len;
    len = strlen(buf);
    if (len < buf_size)
        pstrcpy(buf + len, buf_size - len, s);
    return buf;
}
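
/*
 * Illustrative example (not in the original source): both helpers always
 * NUL-terminate and never write past buf_size bytes.
 *
 *     char name[8];
 *     pstrcpy(name, sizeof(name), "recompiler");   // name == "recompi"
 *     pstrcat(name, sizeof(name), ".c");           // unchanged, buffer full
 */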

/* Return 1 if str starts with val, else 0.  On a match, if ptr is non-NULL,
   *ptr is set to the first character after the prefix. */
int strstart(const char *str, const char *val, const char **ptr)
{
    const char *p, *q;
    p = str;
    q = val;
    while (*q != '\0') {
        if (*p != *q)
            return 0;
        p++;
        q++;
    }
    if (ptr)
        *ptr = p;
    return 1;
}

/* Case-insensitive variant of strstart(). */
int stristart(const char *str, const char *val, const char **ptr)
{
    const char *p, *q;
    p = str;
    q = val;
    while (*q != '\0') {
        if (qemu_toupper(*p) != qemu_toupper(*q))
            return 0;
        p++;
        q++;
    }
    if (ptr)
        *ptr = p;
    return 1;
}
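
/*
 * Illustrative example (not in the original source): strstart() is handy
 * for parsing "key=value" style option strings.
 *
 *     const char *tail;
 *     if (strstart("file=/tmp/disk.img", "file=", &tail)) {
 *         // tail now points at "/tmp/disk.img"
 *     }
 */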

/* XXX: use host strnlen if available ? */
int qemu_strnlen(const char *s, int max_len)
{
    int i;

    for(i = 0; i < max_len; i++) {
        if (s[i] == '\0') {
            break;
        }
    }
    return i;
}

#ifndef VBOX
/* Convert a broken-down UTC time to a time_t; a timezone-independent
   replacement for timegm(). */
time_t mktimegm(struct tm *tm)
{
    time_t t;
    int y = tm->tm_year + 1900, m = tm->tm_mon + 1, d = tm->tm_mday;
    if (m < 3) {
        m += 12;
        y--;
    }
    t = 86400 * (d + (153 * m - 457) / 5 + 365 * y + y / 4 - y / 100 +
                 y / 400 - 719469);
    t += 3600 * tm->tm_hour + 60 * tm->tm_min + tm->tm_sec;
    return t;
}
#endif /* !VBOX */
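
/*
 * Worked example (illustrative, not in the original source): for the Unix
 * epoch, tm_year = 70, tm_mon = 0, tm_mday = 1, the code above shifts the
 * date to m = 13, y = 1969 and the day count evaluates to
 *     1 + 306 + 718685 + 492 - 19 + 4 - 719469 = 0,
 * so mktimegm() returns 0 plus the time-of-day seconds, as expected.
 */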

int qemu_fls(int i)
{
    return 32 - clz32(i);
}
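
/*
 * Illustrative note (not in the original source): qemu_fls() returns the
 * 1-based position of the most significant set bit, e.g.
 *     qemu_fls(1)  == 1
 *     qemu_fls(16) == 5
 *     qemu_fls(0)  == 0   (clz32(0) returns 32)
 */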

#ifndef VBOX

/*
 * Make sure data goes on disk, but if possible do not bother to
 * write out the inode just for timestamp updates.
 *
 * Unfortunately even in 2009 many operating systems do not support
 * fdatasync and have to fall back to fsync.
 */
int qemu_fdatasync(int fd)
{
#ifdef CONFIG_FDATASYNC
    return fdatasync(fd);
#else
    return fsync(fd);
#endif
}

/* io vectors */

void qemu_iovec_init(QEMUIOVector *qiov, int alloc_hint)
{
    qiov->iov = qemu_malloc(alloc_hint * sizeof(struct iovec));
    qiov->niov = 0;
    qiov->nalloc = alloc_hint;
    qiov->size = 0;
}

void qemu_iovec_init_external(QEMUIOVector *qiov, struct iovec *iov, int niov)
{
    int i;

    qiov->iov = iov;
    qiov->niov = niov;
    qiov->nalloc = -1;
    qiov->size = 0;
    for (i = 0; i < niov; i++)
        qiov->size += iov[i].iov_len;
}

void qemu_iovec_add(QEMUIOVector *qiov, void *base, size_t len)
{
    assert(qiov->nalloc != -1);

    if (qiov->niov == qiov->nalloc) {
        qiov->nalloc = 2 * qiov->nalloc + 1;
        qiov->iov = qemu_realloc(qiov->iov, qiov->nalloc * sizeof(struct iovec));
    }
    qiov->iov[qiov->niov].iov_base = base;
    qiov->iov[qiov->niov].iov_len = len;
    qiov->size += len;
    ++qiov->niov;
}

/*
 * Copies iovecs from src to the end of dst until src is completely copied or
 * the total size of the copied iovecs reaches size. The size of the last
 * copied iovec is changed in order to fit the specified total size if it
 * isn't a perfect fit already.
 */
void qemu_iovec_concat(QEMUIOVector *dst, QEMUIOVector *src, size_t size)
{
    int i;
    size_t done;

    assert(dst->nalloc != -1);

    done = 0;
    for (i = 0; (i < src->niov) && (done != size); i++) {
        if (done + src->iov[i].iov_len > size) {
            qemu_iovec_add(dst, src->iov[i].iov_base, size - done);
            break;
        } else {
            qemu_iovec_add(dst, src->iov[i].iov_base, src->iov[i].iov_len);
        }
        done += src->iov[i].iov_len;
    }
}

void qemu_iovec_destroy(QEMUIOVector *qiov)
{
    assert(qiov->nalloc != -1);

    qemu_free(qiov->iov);
}

void qemu_iovec_reset(QEMUIOVector *qiov)
{
    assert(qiov->nalloc != -1);

    qiov->niov = 0;
    qiov->size = 0;
}

void qemu_iovec_to_buffer(QEMUIOVector *qiov, void *buf)
{
    uint8_t *p = (uint8_t *)buf;
    int i;

    for (i = 0; i < qiov->niov; ++i) {
        memcpy(p, qiov->iov[i].iov_base, qiov->iov[i].iov_len);
        p += qiov->iov[i].iov_len;
    }
}

void qemu_iovec_from_buffer(QEMUIOVector *qiov, const void *buf, size_t count)
{
    const uint8_t *p = (const uint8_t *)buf;
    size_t copy;
    int i;

    for (i = 0; i < qiov->niov && count; ++i) {
        copy = count;
        if (copy > qiov->iov[i].iov_len)
            copy = qiov->iov[i].iov_len;
        memcpy(qiov->iov[i].iov_base, p, copy);
        p += copy;
        count -= copy;
    }
}
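
/*
 * Illustrative example (not in the original source): building a scatter
 * list over two buffers and flattening it into one contiguous buffer.
 *
 *     QEMUIOVector qiov;
 *     uint8_t hdr[16], payload[512], flat[528];
 *
 *     qemu_iovec_init(&qiov, 2);
 *     qemu_iovec_add(&qiov, hdr, sizeof(hdr));
 *     qemu_iovec_add(&qiov, payload, sizeof(payload));
 *     // qiov.size == 528
 *     qemu_iovec_to_buffer(&qiov, flat);
 *     qemu_iovec_destroy(&qiov);
 */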

#ifndef _WIN32
/* Sets a specific flag */
int fcntl_setfl(int fd, int flag)
{
    int flags;

    flags = fcntl(fd, F_GETFL);
    if (flags == -1)
        return -errno;

    if (fcntl(fd, F_SETFL, flags | flag) == -1)
        return -errno;

    return 0;
}
#endif
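
/*
 * Illustrative example (not in the original source): switching a descriptor
 * to non-blocking mode; on failure the return value is a negative errno.
 *
 *     if (fcntl_setfl(fd, O_NONBLOCK) < 0) {
 *         // handle the error
 *     }
 */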

#endif /* !VBOX */