toolchain/uClibc/uClibc-0.9.28-mutex-cancel.patch
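
What follows is the patch as carried in buildroot for uClibc 0.9.28. As the file name suggests, it makes uClibc's internal locking cancellation-safe: the per-file LOCK/UNLOCK macro pairs (each wrapped in its own #ifdef __UCLIBC_HAS_THREADS__ block) are replaced by the shared __UCLIBC_MUTEX_* macros from <bits/uClibc_mutex.h>, and the locked functions are reshaped so that each of them unlocks in exactly one place. The header itself is not part of the patch, so the sketch below is only an assumption about what the macros amount to, not the real uClibc code:

#include <pthread.h>

/* Assumed expansions, for illustration only -- the real <bits/uClibc_mutex.h>
 * may differ.  The key idea: LOCK registers an unlock handler with the
 * cancellation machinery (deferring cancellation while it does so) and then
 * locks; UNLOCK pops the handler, runs it, and restores the cancellation
 * type.  A thread cancelled inside the critical section therefore cannot
 * terminate with the mutex still held. */
#define __UCLIBC_MUTEX(M)            pthread_mutex_t M
#define __UCLIBC_MUTEX_INIT(M, I)    pthread_mutex_t M = I
#define __UCLIBC_MUTEX_STATIC(M, I)  static pthread_mutex_t M = I
#define __UCLIBC_MUTEX_EXTERN(M)     extern pthread_mutex_t M

#define __UCLIBC_MUTEX_LOCK(M) \
        pthread_cleanup_push_defer_np((void (*)(void *))pthread_mutex_unlock, &(M)); \
        pthread_mutex_lock(&(M))

#define __UCLIBC_MUTEX_UNLOCK(M) \
        pthread_cleanup_pop_restore_np(1)    /* 1 => run the unlock handler */
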
1 diff --git a/include/printf.h b/include/printf.h
2 index 340b6cb..2dea58f 100644
3 --- a/include/printf.h
4 +++ b/include/printf.h
5 @@ -75,6 +75,7 @@ struct printf_info
6 unsigned int is_short:1; /* h flag. */
7 unsigned int is_long:1; /* l flag. */
8 unsigned int is_long_double:1;/* L flag. */
9 + unsigned int __padding:20;/* non-gnu -- total of 32 bits on 32bit arch */
11 #elif __BYTE_ORDER == __BIG_ENDIAN
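
The printf.h hunk above only adds an explicit padding bit-field so that struct printf_info's one-bit flags account for a whole 32-bit word on 32-bit targets instead of leaving the tail of the word implicit. A minimal illustration of the idea (a toy structure, not uClibc's):

/* Toy structure: with the explicit :20 pad the bit-fields are spelled out to
 * exactly 32 bits rather than left to whatever the compiler would pad. */
struct flags_demo {
        unsigned int is_short:1;        /* h flag */
        unsigned int is_long:1;         /* l flag */
        unsigned int is_long_double:1;  /* L flag */
        /* ...presumably nine more one-bit flags in the real struct printf_info,
         * giving 12 flag bits; 12 + 20 = 32 */
        unsigned int __padding:20;
};
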
13 diff --git a/include/pthread.h b/include/pthread.h
14 index 8c01172..cee112b 100644
15 --- a/include/pthread.h
16 +++ b/include/pthread.h
17 @@ -644,7 +644,8 @@ extern void _pthread_cleanup_pop (struct
18 /* Install a cleanup handler as pthread_cleanup_push does, but also
19 saves the current cancellation type and set it to deferred cancellation. */
21 -#ifdef __USE_GNU
22 +/* #ifdef __USE_GNU */
23 +#if defined(__USE_GNU) || defined(_LIBC)
24 # define pthread_cleanup_push_defer_np(routine,arg) \
25 { struct _pthread_cleanup_buffer _buffer; \
26 _pthread_cleanup_push_defer (&_buffer, (routine), (arg));
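
The pthread.h change above does not alter what pthread_cleanup_push_defer_np / pthread_cleanup_pop_restore_np do; it only makes them visible while libc itself is being built (_LIBC), not just under __USE_GNU, presumably so the new mutex macros can rely on them internally. For reference, a self-contained example of the pairing they provide (every name here is made up for the illustration):

#define _GNU_SOURCE             /* pthread_cleanup_push_defer_np is a GNU extension */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t db_lock = PTHREAD_MUTEX_INITIALIZER;
static FILE *dbf;               /* assume it was opened elsewhere */

static void unlock_db(void *arg)
{
        pthread_mutex_unlock((pthread_mutex_t *)arg);
}

static char *locked_gets(char *buf, int len)
{
        char *p;

        /* Saves the cancellation type, sets deferred cancellation, and pushes
         * the handler; opens a block that pop_restore_np will close. */
        pthread_cleanup_push_defer_np(unlock_db, &db_lock);
        pthread_mutex_lock(&db_lock);

        p = fgets(buf, len, dbf);   /* fgets() is a cancellation point */

        /* Runs the handler (argument 1), so the mutex is released, then pops
         * it and restores the saved cancellation type.  If the thread is
         * cancelled inside fgets(), the handler still runs and the lock is
         * not leaked. */
        pthread_cleanup_pop_restore_np(1);
        return p;
}
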
27 diff --git a/libc/inet/getnetent.c b/libc/inet/getnetent.c
28 index 181c5ad..659bf5d 100644
29 --- a/libc/inet/getnetent.c
30 +++ b/libc/inet/getnetent.c
31 @@ -22,18 +22,9 @@
32 #include <netdb.h>
33 #include <arpa/inet.h>
35 +#include <bits/uClibc_mutex.h>
37 -#ifdef __UCLIBC_HAS_THREADS__
38 -#include <pthread.h>
39 -static pthread_mutex_t mylock = PTHREAD_MUTEX_INITIALIZER;
40 -# define LOCK __pthread_mutex_lock(&mylock)
41 -# define UNLOCK __pthread_mutex_unlock(&mylock);
42 -#else
43 -# define LOCK
44 -# define UNLOCK
45 -#endif
48 +__UCLIBC_MUTEX_STATIC(mylock, PTHREAD_MUTEX_INITIALIZER);
50 #define MAXALIASES 35
51 static const char NETDB[] = _PATH_NETWORKS;
52 @@ -46,25 +37,25 @@ int _net_stayopen;
54 void setnetent(int f)
56 - LOCK;
57 + __UCLIBC_MUTEX_LOCK(mylock);
58 if (netf == NULL)
59 - netf = fopen(NETDB, "r" );
60 + netf = fopen(NETDB, "r" );
61 else
62 - rewind(netf);
63 + rewind(netf);
64 _net_stayopen |= f;
65 - UNLOCK;
66 + __UCLIBC_MUTEX_UNLOCK(mylock);
67 return;
70 void endnetent(void)
72 - LOCK;
73 + __UCLIBC_MUTEX_LOCK(mylock);
74 if (netf) {
75 - fclose(netf);
76 - netf = NULL;
77 + fclose(netf);
78 + netf = NULL;
80 _net_stayopen = 0;
81 - UNLOCK;
82 + __UCLIBC_MUTEX_UNLOCK(mylock);
85 static char * any(register char *cp, char *match)
86 @@ -72,10 +63,10 @@ static char * any(register char *cp, cha
87 register char *mp, c;
89 while ((c = *cp)) {
90 - for (mp = match; *mp; mp++)
91 - if (*mp == c)
92 - return (cp);
93 - cp++;
94 + for (mp = match; *mp; mp++)
95 + if (*mp == c)
96 + return (cp);
97 + cp++;
99 return ((char *)0);
101 @@ -84,59 +75,62 @@ struct netent * getnetent(void)
103 char *p;
104 register char *cp, **q;
105 + struct netent *rv = NULL;
107 - LOCK;
108 + __UCLIBC_MUTEX_LOCK(mylock);
109 if (netf == NULL && (netf = fopen(NETDB, "r" )) == NULL) {
110 - UNLOCK;
111 - return (NULL);
112 + goto DONE;
114 -again:
115 + again:
117 if (!line) {
118 - line = malloc(BUFSIZ + 1);
119 - if (!line)
120 - abort();
121 + line = malloc(BUFSIZ + 1);
122 + if (!line)
123 + abort();
126 p = fgets(line, BUFSIZ, netf);
127 if (p == NULL) {
128 - UNLOCK;
129 - return (NULL);
130 + goto DONE;
132 if (*p == '#')
133 - goto again;
134 + goto again;
135 cp = any(p, "#\n");
136 if (cp == NULL)
137 - goto again;
138 + goto again;
139 *cp = '\0';
140 net.n_name = p;
141 cp = any(p, " \t");
142 if (cp == NULL)
143 - goto again;
144 + goto again;
145 *cp++ = '\0';
146 while (*cp == ' ' || *cp == '\t')
147 - cp++;
148 + cp++;
149 p = any(cp, " \t");
150 if (p != NULL)
151 - *p++ = '\0';
152 + *p++ = '\0';
153 net.n_net = inet_network(cp);
154 net.n_addrtype = AF_INET;
155 q = net.n_aliases = net_aliases;
156 if (p != NULL)
157 - cp = p;
158 + cp = p;
159 while (cp && *cp) {
160 - if (*cp == ' ' || *cp == '\t') {
161 - cp++;
162 - continue;
164 - if (q < &net_aliases[MAXALIASES - 1])
165 - *q++ = cp;
166 - cp = any(cp, " \t");
167 - if (cp != NULL)
168 - *cp++ = '\0';
169 + if (*cp == ' ' || *cp == '\t') {
170 + cp++;
171 + continue;
173 + if (q < &net_aliases[MAXALIASES - 1])
174 + *q++ = cp;
175 + cp = any(cp, " \t");
176 + if (cp != NULL)
177 + *cp++ = '\0';
179 *q = NULL;
180 - UNLOCK;
181 - return (&net);
183 + rv = &net;
185 + DONE:
186 + __UCLIBC_MUTEX_UNLOCK(mylock);
187 + return rv;
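
The getnetent.c rewrite above is the template for the rest of the patch: a function takes the lock once, every failure path that used to do "UNLOCK; return ...;" instead sets a result variable and jumps to a DONE label, and the single __UCLIBC_MUTEX_UNLOCK sits just before the final return. That shape is more than style: if the macros are built on the cleanup-push/pop pair (as sketched near the top), they open and close a brace and so must appear exactly once each, lexically paired in the same block. A stripped-down version of the shape, written with plain pthread calls so it stands alone (open_db and the data are stand-ins, not uClibc functions):

#include <pthread.h>
#include <stddef.h>

static pthread_mutex_t mylock = PTHREAD_MUTEX_INITIALIZER;
static int net_entry;

static int open_db(void)        /* stand-in for fopen(_PATH_NETWORKS, "r") failing */
{
        return -1;
}

static int *lookup(void)
{
        int *rv = NULL;                     /* "not found" unless the body says otherwise */

        pthread_mutex_lock(&mylock);        /* __UCLIBC_MUTEX_LOCK(mylock) in the patch */
        if (open_db() < 0)
                goto DONE;                  /* no early return, no duplicate unlock */
        rv = &net_entry;
DONE:
        pthread_mutex_unlock(&mylock);      /* the one and only unlock point */
        return rv;
}
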
190 diff --git a/libc/inet/getproto.c b/libc/inet/getproto.c
191 index c9f35f1..3665d89 100644
192 --- a/libc/inet/getproto.c
193 +++ b/libc/inet/getproto.c
194 @@ -62,17 +62,9 @@
195 #include <string.h>
196 #include <errno.h>
198 -#ifdef __UCLIBC_HAS_THREADS__
199 -#include <pthread.h>
200 -static pthread_mutex_t mylock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
201 -# define LOCK __pthread_mutex_lock(&mylock)
202 -# define UNLOCK __pthread_mutex_unlock(&mylock);
203 -#else
204 -# define LOCK
205 -# define UNLOCK
206 -#endif
208 +#include <bits/uClibc_mutex.h>
210 +__UCLIBC_MUTEX_STATIC(mylock, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP);
212 #define MAXALIASES 35
213 #define SBUFSIZE (BUFSIZ + 1 + (sizeof(char *) * MAXALIASES))
214 @@ -85,109 +77,114 @@ static int proto_stayopen;
215 static void __initbuf(void)
217 if (!static_aliases) {
218 - static_aliases = malloc(SBUFSIZE);
219 - if (!static_aliases)
220 - abort();
221 + static_aliases = malloc(SBUFSIZE);
222 + if (!static_aliases)
223 + abort();
227 void setprotoent(int f)
229 - LOCK;
230 + __UCLIBC_MUTEX_LOCK(mylock);
231 if (protof == NULL)
232 - protof = fopen(_PATH_PROTOCOLS, "r" );
233 + protof = fopen(_PATH_PROTOCOLS, "r" );
234 else
235 - rewind(protof);
236 + rewind(protof);
237 proto_stayopen |= f;
238 - UNLOCK;
239 + __UCLIBC_MUTEX_UNLOCK(mylock);
242 void endprotoent(void)
244 - LOCK;
245 + __UCLIBC_MUTEX_LOCK(mylock);
246 if (protof) {
247 - fclose(protof);
248 - protof = NULL;
249 + fclose(protof);
250 + protof = NULL;
252 proto_stayopen = 0;
253 - UNLOCK;
254 + __UCLIBC_MUTEX_UNLOCK(mylock);
257 int getprotoent_r(struct protoent *result_buf,
258 - char *buf, size_t buflen,
259 - struct protoent **result)
260 + char *buf, size_t buflen,
261 + struct protoent **result)
263 char *p;
264 register char *cp, **q;
265 char **proto_aliases;
266 char *line;
267 + int rv;
269 *result = NULL;
271 if (buflen < sizeof(*proto_aliases)*MAXALIASES) {
272 - errno=ERANGE;
273 - return errno;
274 + errno=ERANGE;
275 + return errno;
277 - LOCK;
279 + __UCLIBC_MUTEX_LOCK(mylock);
280 proto_aliases=(char **)buf;
281 buf+=sizeof(*proto_aliases)*MAXALIASES;
282 buflen-=sizeof(*proto_aliases)*MAXALIASES;
284 if (buflen < BUFSIZ+1) {
285 - UNLOCK;
286 - errno=ERANGE;
287 - return errno;
288 + errno=rv=ERANGE;
289 + goto DONE;
291 line=buf;
292 buf+=BUFSIZ+1;
293 buflen-=BUFSIZ+1;
295 if (protof == NULL && (protof = fopen(_PATH_PROTOCOLS, "r" )) == NULL) {
296 - UNLOCK;
297 - return errno;
298 + rv=errno;
299 + goto DONE;
301 -again:
302 + again:
303 if ((p = fgets(line, BUFSIZ, protof)) == NULL) {
304 - UNLOCK;
305 - return TRY_AGAIN;
306 + rv=TRY_AGAIN;
307 + goto DONE;
310 if (*p == '#')
311 - goto again;
312 + goto again;
313 cp = strpbrk(p, "#\n");
314 if (cp == NULL)
315 - goto again;
316 + goto again;
317 *cp = '\0';
318 result_buf->p_name = p;
319 cp = strpbrk(p, " \t");
320 if (cp == NULL)
321 - goto again;
322 + goto again;
323 *cp++ = '\0';
324 while (*cp == ' ' || *cp == '\t')
325 - cp++;
326 + cp++;
327 p = strpbrk(cp, " \t");
328 if (p != NULL)
329 - *p++ = '\0';
330 + *p++ = '\0';
331 result_buf->p_proto = atoi(cp);
332 q = result_buf->p_aliases = proto_aliases;
333 if (p != NULL) {
334 - cp = p;
335 - while (cp && *cp) {
336 - if (*cp == ' ' || *cp == '\t') {
337 - cp++;
338 - continue;
340 - if (q < &proto_aliases[MAXALIASES - 1])
341 - *q++ = cp;
342 - cp = strpbrk(cp, " \t");
343 - if (cp != NULL)
344 - *cp++ = '\0';
346 + cp = p;
347 + while (cp && *cp) {
348 + if (*cp == ' ' || *cp == '\t') {
349 + cp++;
350 + continue;
352 + if (q < &proto_aliases[MAXALIASES - 1])
353 + *q++ = cp;
354 + cp = strpbrk(cp, " \t");
355 + if (cp != NULL)
356 + *cp++ = '\0';
359 *q = NULL;
360 *result=result_buf;
361 - UNLOCK;
362 - return 0;
364 + rv = 0;
366 + DONE:
367 + __UCLIBC_MUTEX_UNLOCK(mylock);
368 + return rv;
371 struct protoent * getprotoent(void)
372 @@ -201,26 +198,26 @@ struct protoent * getprotoent(void)
375 int getprotobyname_r(const char *name,
376 - struct protoent *result_buf,
377 - char *buf, size_t buflen,
378 - struct protoent **result)
379 + struct protoent *result_buf,
380 + char *buf, size_t buflen,
381 + struct protoent **result)
383 register char **cp;
384 int ret;
386 - LOCK;
387 + __UCLIBC_MUTEX_LOCK(mylock);
388 setprotoent(proto_stayopen);
389 while (!(ret=getprotoent_r(result_buf, buf, buflen, result))) {
390 - if (strcmp(result_buf->p_name, name) == 0)
391 - break;
392 - for (cp = result_buf->p_aliases; *cp != 0; cp++)
393 - if (strcmp(*cp, name) == 0)
394 - goto found;
395 + if (strcmp(result_buf->p_name, name) == 0)
396 + break;
397 + for (cp = result_buf->p_aliases; *cp != 0; cp++)
398 + if (strcmp(*cp, name) == 0)
399 + goto found;
401 -found:
402 + found:
403 if (!proto_stayopen)
404 - endprotoent();
405 - UNLOCK;
406 + endprotoent();
407 + __UCLIBC_MUTEX_UNLOCK(mylock);
408 return *result?0:ret;
411 @@ -236,20 +233,20 @@ struct protoent * getprotobyname(const c
414 int getprotobynumber_r (int proto_num,
415 - struct protoent *result_buf,
416 - char *buf, size_t buflen,
417 - struct protoent **result)
418 + struct protoent *result_buf,
419 + char *buf, size_t buflen,
420 + struct protoent **result)
422 int ret;
424 - LOCK;
425 + __UCLIBC_MUTEX_LOCK(mylock);
426 setprotoent(proto_stayopen);
427 while (!(ret=getprotoent_r(result_buf, buf, buflen, result)))
428 - if (result_buf->p_proto == proto_num)
429 - break;
430 + if (result_buf->p_proto == proto_num)
431 + break;
432 if (!proto_stayopen)
433 - endprotoent();
434 - UNLOCK;
435 + endprotoent();
436 + __UCLIBC_MUTEX_UNLOCK(mylock);
437 return *result?0:ret;
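
Unlike getnetent.c, getproto.c keeps the recursive initializer PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP (getservice.c below does the same), and the patch preserves that: getprotobyname_r() and getprotobynumber_r() take mylock and then call setprotoent(), getprotoent_r() and endprotoent(), which lock the very same mutex from the same thread. With a non-recursive mutex that second lock would deadlock. A minimal stand-alone illustration, with hypothetical functions standing in for the real ones:

#define _GNU_SOURCE             /* PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP */
#include <pthread.h>

static pthread_mutex_t mylock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;

static void set_ent(void)       /* plays the role of setprotoent() */
{
        pthread_mutex_lock(&mylock);    /* second lock by the same thread: allowed */
        /* ...open or rewind the database... */
        pthread_mutex_unlock(&mylock);
}

static void get_by_name(void)   /* plays the role of getprotobyname_r() */
{
        pthread_mutex_lock(&mylock);    /* first lock */
        set_ent();                      /* would deadlock if mylock were not recursive */
        pthread_mutex_unlock(&mylock);
}
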
440 diff --git a/libc/inet/getservice.c b/libc/inet/getservice.c
441 index cbe5c50..b666057 100644
442 --- a/libc/inet/getservice.c
443 +++ b/libc/inet/getservice.c
444 @@ -65,20 +65,9 @@
445 #include <arpa/inet.h>
446 #include <errno.h>
448 +#include <bits/uClibc_mutex.h>
451 -#ifdef __UCLIBC_HAS_THREADS__
452 -#include <pthread.h>
453 -static pthread_mutex_t mylock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
454 -# define LOCK __pthread_mutex_lock(&mylock)
455 -# define UNLOCK __pthread_mutex_unlock(&mylock);
456 -#else
457 -# define LOCK
458 -# define UNLOCK
459 -#endif
463 +__UCLIBC_MUTEX_STATIC(mylock, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP);
465 #define MAXALIASES 35
466 #define SBUFSIZE (BUFSIZ + 1 + (sizeof(char *) * MAXALIASES))
467 @@ -91,32 +80,32 @@ static int serv_stayopen;
468 static void __initbuf(void)
470 if (!servbuf) {
471 - servbuf = malloc(SBUFSIZE);
472 - if (!servbuf)
473 - abort();
474 + servbuf = malloc(SBUFSIZE);
475 + if (!servbuf)
476 + abort();
480 void setservent(int f)
482 - LOCK;
483 + __UCLIBC_MUTEX_LOCK(mylock);
484 if (servf == NULL)
485 - servf = fopen(_PATH_SERVICES, "r" );
486 + servf = fopen(_PATH_SERVICES, "r" );
487 else
488 - rewind(servf);
489 + rewind(servf);
490 serv_stayopen |= f;
491 - UNLOCK;
492 + __UCLIBC_MUTEX_UNLOCK(mylock);
495 void endservent(void)
497 - LOCK;
498 + __UCLIBC_MUTEX_LOCK(mylock);
499 if (servf) {
500 - fclose(servf);
501 - servf = NULL;
502 + fclose(servf);
503 + servf = NULL;
505 serv_stayopen = 0;
506 - UNLOCK;
507 + __UCLIBC_MUTEX_UNLOCK(mylock);
510 struct servent * getservent(void)
511 @@ -149,127 +138,129 @@ struct servent * getservbyport(int port,
514 int getservent_r(struct servent * result_buf,
515 - char * buf, size_t buflen,
516 - struct servent ** result)
517 + char * buf, size_t buflen,
518 + struct servent ** result)
520 char *p;
521 register char *cp, **q;
522 char **serv_aliases;
523 char *line;
524 + int rv;
526 *result=NULL;
528 if (buflen < sizeof(*serv_aliases)*MAXALIASES) {
529 - errno=ERANGE;
530 - return errno;
531 + errno=ERANGE;
532 + return errno;
534 - LOCK;
535 + __UCLIBC_MUTEX_LOCK(mylock);
536 serv_aliases=(char **)buf;
537 buf+=sizeof(*serv_aliases)*MAXALIASES;
538 buflen-=sizeof(*serv_aliases)*MAXALIASES;
540 if (buflen < BUFSIZ+1) {
541 - UNLOCK;
542 - errno=ERANGE;
543 - return errno;
544 + errno=rv=ERANGE;
545 + goto DONE;
547 line=buf;
548 buf+=BUFSIZ+1;
549 buflen-=BUFSIZ+1;
551 if (servf == NULL && (servf = fopen(_PATH_SERVICES, "r" )) == NULL) {
552 - UNLOCK;
553 - errno=EIO;
554 - return errno;
555 + errno=rv=EIO;
556 + goto DONE;
558 -again:
559 + again:
560 if ((p = fgets(line, BUFSIZ, servf)) == NULL) {
561 - UNLOCK;
562 - errno=EIO;
563 - return errno;
564 + errno=rv=EIO;
565 + goto DONE;
567 if (*p == '#')
568 - goto again;
569 + goto again;
570 cp = strpbrk(p, "#\n");
571 if (cp == NULL)
572 - goto again;
573 + goto again;
574 *cp = '\0';
575 result_buf->s_name = p;
576 p = strpbrk(p, " \t");
577 if (p == NULL)
578 - goto again;
579 + goto again;
580 *p++ = '\0';
581 while (*p == ' ' || *p == '\t')
582 - p++;
583 + p++;
584 cp = strpbrk(p, ",/");
585 if (cp == NULL)
586 - goto again;
587 + goto again;
588 *cp++ = '\0';
589 result_buf->s_port = htons((u_short)atoi(p));
590 result_buf->s_proto = cp;
591 q = result_buf->s_aliases = serv_aliases;
592 cp = strpbrk(cp, " \t");
593 if (cp != NULL)
594 - *cp++ = '\0';
595 + *cp++ = '\0';
596 while (cp && *cp) {
597 - if (*cp == ' ' || *cp == '\t') {
598 - cp++;
599 - continue;
601 - if (q < &serv_aliases[MAXALIASES - 1])
602 - *q++ = cp;
603 - cp = strpbrk(cp, " \t");
604 - if (cp != NULL)
605 - *cp++ = '\0';
606 + if (*cp == ' ' || *cp == '\t') {
607 + cp++;
608 + continue;
610 + if (q < &serv_aliases[MAXALIASES - 1])
611 + *q++ = cp;
612 + cp = strpbrk(cp, " \t");
613 + if (cp != NULL)
614 + *cp++ = '\0';
616 *q = NULL;
617 *result=result_buf;
618 - UNLOCK;
619 - return 0;
621 + rv = 0;
623 + DONE:
624 + __UCLIBC_MUTEX_UNLOCK(mylock);
625 + return rv;
628 int getservbyname_r(const char *name, const char *proto,
629 - struct servent * result_buf, char * buf, size_t buflen,
630 - struct servent ** result)
631 + struct servent * result_buf, char * buf, size_t buflen,
632 + struct servent ** result)
634 register char **cp;
635 int ret;
637 - LOCK;
638 + __UCLIBC_MUTEX_LOCK(mylock);
639 setservent(serv_stayopen);
640 while (!(ret=getservent_r(result_buf, buf, buflen, result))) {
641 - if (strcmp(name, result_buf->s_name) == 0)
642 - goto gotname;
643 - for (cp = result_buf->s_aliases; *cp; cp++)
644 - if (strcmp(name, *cp) == 0)
645 - goto gotname;
646 - continue;
647 -gotname:
648 - if (proto == 0 || strcmp(result_buf->s_proto, proto) == 0)
649 - break;
650 + if (strcmp(name, result_buf->s_name) == 0)
651 + goto gotname;
652 + for (cp = result_buf->s_aliases; *cp; cp++)
653 + if (strcmp(name, *cp) == 0)
654 + goto gotname;
655 + continue;
656 + gotname:
657 + if (proto == 0 || strcmp(result_buf->s_proto, proto) == 0)
658 + break;
660 if (!serv_stayopen)
661 - endservent();
662 - UNLOCK;
663 + endservent();
664 + __UCLIBC_MUTEX_UNLOCK(mylock);
665 return *result?0:ret;
668 int getservbyport_r(int port, const char *proto,
669 - struct servent * result_buf, char * buf,
670 - size_t buflen, struct servent ** result)
671 + struct servent * result_buf, char * buf,
672 + size_t buflen, struct servent ** result)
674 int ret;
676 - LOCK;
677 + __UCLIBC_MUTEX_LOCK(mylock);
678 setservent(serv_stayopen);
679 while (!(ret=getservent_r(result_buf, buf, buflen, result))) {
680 - if (result_buf->s_port != port)
681 - continue;
682 - if (proto == 0 || strcmp(result_buf->s_proto, proto) == 0)
683 - break;
684 + if (result_buf->s_port != port)
685 + continue;
686 + if (proto == 0 || strcmp(result_buf->s_proto, proto) == 0)
687 + break;
689 if (!serv_stayopen)
690 - endservent();
691 - UNLOCK;
692 + endservent();
693 + __UCLIBC_MUTEX_UNLOCK(mylock);
694 return *result?0:ret;
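
getservice.c above follows getproto.c exactly. resolv.c, which starts below, is the larger case: its BIGLOCK/BIGUNLOCK wrappers around the shared __resolv_lock are replaced the same way, with __UCLIBC_MUTEX_EXTERN(__resolv_lock) in the sections that merely use the lock and __UCLIBC_MUTEX_INIT(...) in the section that owns it. Note also that every deleted block of the form "#ifdef __UCLIBC_HAS_THREADS__ ... #else #define LOCK ... #endif" relies on the new header degrading gracefully when threads are disabled; the following is only a guess at how that side of <bits/uClibc_mutex.h> might look, not the real code:

#ifndef __UCLIBC_HAS_THREADS__
/* Plausible no-thread fallbacks (assumption): declarations collapse into a
 * harmless extern so the trailing ';' still parses, and lock/unlock become
 * no-ops, which is what lets each caller drop its own #ifdef around
 * LOCK/UNLOCK. */
# define __UCLIBC_MUTEX_STATIC(M, I)  extern void *__uclibc_mutex_unused_ ## M
# define __UCLIBC_MUTEX_INIT(M, I)    extern void *__uclibc_mutex_unused_ ## M
# define __UCLIBC_MUTEX_EXTERN(M)     extern void *__uclibc_mutex_unused_ ## M
# define __UCLIBC_MUTEX_LOCK(M)       ((void)0)
# define __UCLIBC_MUTEX_UNLOCK(M)     ((void)0)
#endif
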
696 diff --git a/libc/inet/resolv.c b/libc/inet/resolv.c
697 index 27b60ef..0f583ab 100644
698 --- a/libc/inet/resolv.c
699 +++ b/libc/inet/resolv.c
700 @@ -7,7 +7,7 @@
701 * modify it under the terms of the GNU Library General Public
702 * License as published by the Free Software Foundation; either
703 * version 2 of the License, or (at your option) any later version.
705 + */
708 * Portions Copyright (c) 1985, 1993
709 @@ -153,6 +153,11 @@
710 #include <sys/utsname.h>
711 #include <sys/un.h>
713 +#include <bits/uClibc_mutex.h>
715 +__UCLIBC_MUTEX_EXTERN(__resolv_lock);
718 #define MAX_RECURSE 5
719 #define REPLY_TIMEOUT 10
720 #define MAX_RETRIES 3
721 @@ -180,18 +185,6 @@ extern char * __nameserver[MAX_SERVERS];
722 extern int __searchdomains;
723 extern char * __searchdomain[MAX_SEARCH];
725 -#ifdef __UCLIBC_HAS_THREADS__
726 -#include <pthread.h>
727 -extern pthread_mutex_t __resolv_lock;
728 -# define BIGLOCK __pthread_mutex_lock(&__resolv_lock)
729 -# define BIGUNLOCK __pthread_mutex_unlock(&__resolv_lock);
730 -#else
731 -# define BIGLOCK
732 -# define BIGUNLOCK
733 -#endif
737 /* Structs */
738 struct resolv_header {
739 int id;
740 @@ -229,49 +222,49 @@ enum etc_hosts_action {
742 /* function prototypes */
743 extern int __get_hosts_byname_r(const char * name, int type,
744 - struct hostent * result_buf,
745 - char * buf, size_t buflen,
746 - struct hostent ** result,
747 - int * h_errnop);
748 + struct hostent * result_buf,
749 + char * buf, size_t buflen,
750 + struct hostent ** result,
751 + int * h_errnop);
752 extern int __get_hosts_byaddr_r(const char * addr, int len, int type,
753 - struct hostent * result_buf,
754 - char * buf, size_t buflen,
755 - struct hostent ** result,
756 - int * h_errnop);
757 + struct hostent * result_buf,
758 + char * buf, size_t buflen,
759 + struct hostent ** result,
760 + int * h_errnop);
761 extern void __open_etc_hosts(FILE **fp);
762 extern int __read_etc_hosts_r(FILE *fp, const char * name, int type,
763 - enum etc_hosts_action action,
764 - struct hostent * result_buf,
765 - char * buf, size_t buflen,
766 - struct hostent ** result,
767 - int * h_errnop);
768 + enum etc_hosts_action action,
769 + struct hostent * result_buf,
770 + char * buf, size_t buflen,
771 + struct hostent ** result,
772 + int * h_errnop);
773 extern int __dns_lookup(const char * name, int type, int nscount,
774 - char ** nsip, unsigned char ** outpacket, struct resolv_answer * a);
775 + char ** nsip, unsigned char ** outpacket, struct resolv_answer * a);
777 extern int __encode_dotted(const char * dotted, unsigned char * dest, int maxlen);
778 extern int __decode_dotted(const unsigned char * message, int offset,
779 - char * dest, int maxlen);
780 + char * dest, int maxlen);
781 extern int __length_dotted(const unsigned char * message, int offset);
782 extern int __encode_header(struct resolv_header * h, unsigned char * dest, int maxlen);
783 extern int __decode_header(unsigned char * data, struct resolv_header * h);
784 extern int __encode_question(struct resolv_question * q,
785 - unsigned char * dest, int maxlen);
786 + unsigned char * dest, int maxlen);
787 extern int __decode_question(unsigned char * message, int offset,
788 - struct resolv_question * q);
789 + struct resolv_question * q);
790 extern int __encode_answer(struct resolv_answer * a,
791 - unsigned char * dest, int maxlen);
792 + unsigned char * dest, int maxlen);
793 extern int __decode_answer(unsigned char * message, int offset,
794 - struct resolv_answer * a);
795 + struct resolv_answer * a);
796 extern int __length_question(unsigned char * message, int offset);
797 extern int __open_nameservers(void);
798 extern void __close_nameservers(void);
799 extern int __dn_expand(const u_char *, const u_char *, const u_char *,
800 - char *, int);
801 + char *, int);
802 extern int __ns_name_uncompress(const u_char *, const u_char *,
803 - const u_char *, char *, size_t);
804 + const u_char *, char *, size_t);
805 extern int __ns_name_ntop(const u_char *, char *, size_t);
806 extern int __ns_name_unpack(const u_char *, const u_char *, const u_char *,
807 - u_char *, size_t);
808 + u_char *, size_t);
811 #ifdef L_encodeh
812 @@ -361,7 +354,7 @@ int __encode_dotted(const char *dotted,
813 This routine understands compressed data. */
815 int __decode_dotted(const unsigned char *data, int offset,
816 - char *dest, int maxlen)
817 + char *dest, int maxlen)
819 int l;
820 int measure = 1;
821 @@ -435,7 +428,7 @@ int __length_dotted(const unsigned char
823 #ifdef L_encodeq
824 int __encode_question(struct resolv_question *q,
825 - unsigned char *dest, int maxlen)
826 + unsigned char *dest, int maxlen)
828 int i;
830 @@ -460,7 +453,7 @@ int __encode_question(struct resolv_ques
832 #ifdef L_decodeq
833 int __decode_question(unsigned char *message, int offset,
834 - struct resolv_question *q)
835 + struct resolv_question *q)
837 char temp[256];
838 int i;
839 @@ -525,7 +518,7 @@ int __encode_answer(struct resolv_answer
841 #ifdef L_decodea
842 int __decode_answer(unsigned char *message, int offset,
843 - struct resolv_answer *a)
844 + struct resolv_answer *a)
846 char temp[256];
847 int i;
848 @@ -557,11 +550,11 @@ int __decode_answer(unsigned char *messa
850 #ifdef L_encodep
851 int __encode_packet(struct resolv_header *h,
852 - struct resolv_question **q,
853 - struct resolv_answer **an,
854 - struct resolv_answer **ns,
855 - struct resolv_answer **ar,
856 - unsigned char *dest, int maxlen)
857 + struct resolv_question **q,
858 + struct resolv_answer **an,
859 + struct resolv_answer **ns,
860 + struct resolv_answer **ar,
861 + unsigned char *dest, int maxlen)
863 int i, total = 0;
864 int j;
865 @@ -621,7 +614,7 @@ int __decode_packet(unsigned char *data,
867 #ifdef L_formquery
868 int __form_query(int id, const char *name, int type, unsigned char *packet,
869 - int maxlen)
870 + int maxlen)
872 struct resolv_header h;
873 struct resolv_question q;
874 @@ -649,14 +642,7 @@ int __form_query(int id, const char *nam
876 #ifdef L_dnslookup
878 -#ifdef __UCLIBC_HAS_THREADS__
879 -static pthread_mutex_t mylock = PTHREAD_MUTEX_INITIALIZER;
880 -# define LOCK __pthread_mutex_lock(&mylock)
881 -# define UNLOCK __pthread_mutex_unlock(&mylock);
882 -#else
883 -# define LOCK
884 -# define UNLOCK
885 -#endif
886 +__UCLIBC_MUTEX_STATIC(mylock, PTHREAD_MUTEX_INITIALIZER);
888 /* Just for the record, having to lock __dns_lookup() just for these two globals
889 * is pretty lame. I think these two variables can probably be de-global-ized,
890 @@ -665,7 +651,7 @@ static pthread_mutex_t mylock = PTHREAD_
891 static int ns=0, id=1;
893 int __dns_lookup(const char *name, int type, int nscount, char **nsip,
894 - unsigned char **outpacket, struct resolv_answer *a)
895 + unsigned char **outpacket, struct resolv_answer *a)
897 int i, j, len, fd, pos, rc;
898 struct timeval tv;
899 @@ -693,10 +679,10 @@ int __dns_lookup(const char *name, int t
900 DPRINTF("Looking up type %d answer for '%s'\n", type, name);
902 /* Mess with globals while under lock */
903 - LOCK;
904 + __UCLIBC_MUTEX_LOCK(mylock);
905 local_ns = ns % nscount;
906 local_id = id;
907 - UNLOCK;
908 + __UCLIBC_MUTEX_UNLOCK(mylock);
910 while (retries < MAX_RETRIES) {
911 if (fd != -1)
912 @@ -722,13 +708,13 @@ int __dns_lookup(const char *name, int t
914 strncpy(lookup,name,MAXDNAME);
915 if (variant >= 0) {
916 - BIGLOCK;
917 - if (variant < __searchdomains) {
918 - strncat(lookup,".", MAXDNAME);
919 - strncat(lookup,__searchdomain[variant], MAXDNAME);
921 - BIGUNLOCK;
923 + __UCLIBC_MUTEX_LOCK(__resolv_lock);
924 + if (variant < __searchdomains) {
925 + strncat(lookup,".", MAXDNAME);
926 + strncat(lookup,__searchdomain[variant], MAXDNAME);
928 + __UCLIBC_MUTEX_UNLOCK(__resolv_lock);
930 DPRINTF("lookup name: %s\n", lookup);
931 q.dotted = (char *)lookup;
932 q.qtype = type;
933 @@ -750,7 +736,7 @@ int __dns_lookup(const char *name, int t
934 fd = socket(AF_INET, SOCK_DGRAM, IPPROTO_UDP);
935 #endif
936 if (fd < 0) {
937 - retries++;
938 + retries++;
939 continue;
942 @@ -772,11 +758,11 @@ int __dns_lookup(const char *name, int t
943 #endif
944 if (rc < 0) {
945 if (errno == ENETUNREACH) {
946 - /* routing error, presume not transient */
947 - goto tryall;
948 + /* routing error, presume not transient */
949 + goto tryall;
950 } else
951 - /* retry */
952 - retries++;
953 + /* retry */
954 + retries++;
955 continue;
958 @@ -838,55 +824,55 @@ int __dns_lookup(const char *name, int t
960 first_answer = 1;
961 for (j=0;j<h.ancount;j++,pos += i)
963 - i = __decode_answer(packet, pos, &ma);
965 + i = __decode_answer(packet, pos, &ma);
967 - if (i<0) {
968 - DPRINTF("failed decode %d\n", i);
969 - goto again;
971 + if (i<0) {
972 + DPRINTF("failed decode %d\n", i);
973 + goto again;
976 - if ( first_answer )
978 - ma.buf = a->buf;
979 - ma.buflen = a->buflen;
980 - ma.add_count = a->add_count;
981 - memcpy(a, &ma, sizeof(ma));
982 - if (a->atype != T_SIG && (0 == a->buf || (type != T_A && type != T_AAAA)))
984 - break;
986 - if (a->atype != type)
988 - free(a->dotted);
989 - continue;
991 - a->add_count = h.ancount - j - 1;
992 - if ((a->rdlength + sizeof(struct in_addr*)) * a->add_count > a->buflen)
994 - break;
996 - a->add_count = 0;
997 - first_answer = 0;
999 - else
1001 - free(ma.dotted);
1002 - if (ma.atype != type)
1004 - continue;
1006 - if (a->rdlength != ma.rdlength)
1008 - free(a->dotted);
1009 - DPRINTF("Answer address len(%u) differs from original(%u)\n",
1010 - ma.rdlength, a->rdlength);
1011 - goto again;
1012 + if ( first_answer )
1014 + ma.buf = a->buf;
1015 + ma.buflen = a->buflen;
1016 + ma.add_count = a->add_count;
1017 + memcpy(a, &ma, sizeof(ma));
1018 + if (a->atype != T_SIG && (0 == a->buf || (type != T_A && type != T_AAAA)))
1020 + break;
1022 + if (a->atype != type)
1024 + free(a->dotted);
1025 + continue;
1027 + a->add_count = h.ancount - j - 1;
1028 + if ((a->rdlength + sizeof(struct in_addr*)) * a->add_count > a->buflen)
1030 + break;
1032 + a->add_count = 0;
1033 + first_answer = 0;
1035 + else
1037 + free(ma.dotted);
1038 + if (ma.atype != type)
1040 + continue;
1042 + if (a->rdlength != ma.rdlength)
1044 + free(a->dotted);
1045 + DPRINTF("Answer address len(%u) differs from original(%u)\n",
1046 + ma.rdlength, a->rdlength);
1047 + goto again;
1049 + memcpy(a->buf + (a->add_count * ma.rdlength), ma.rdata, ma.rdlength);
1050 + ++a->add_count;
1053 - memcpy(a->buf + (a->add_count * ma.rdlength), ma.rdata, ma.rdlength);
1054 - ++a->add_count;
1058 DPRINTF("Answer name = |%s|\n", a->dotted);
1059 DPRINTF("Answer type = |%d|\n", a->atype);
1060 @@ -900,48 +886,48 @@ int __dns_lookup(const char *name, int t
1061 free(lookup);
1063 /* Mess with globals while under lock */
1064 - LOCK;
1065 + __UCLIBC_MUTEX_LOCK(mylock);
1066 ns = local_ns;
1067 id = local_id;
1068 - UNLOCK;
1069 + __UCLIBC_MUTEX_UNLOCK(mylock);
1071 return (len); /* success! */
1073 - tryall:
1074 + tryall:
1075 /* if there are other nameservers, give them a go,
1076 otherwise return with error */
1078 variant = -1;
1079 - local_ns = (local_ns + 1) % nscount;
1080 - if (local_ns == 0)
1081 - retries++;
1082 + local_ns = (local_ns + 1) % nscount;
1083 + if (local_ns == 0)
1084 + retries++;
1086 - continue;
1087 + continue;
1090 - again:
1091 + again:
1092 /* if there are searchdomains, try them or fallback as passed */
1094 int sdomains;
1095 - BIGLOCK;
1096 + __UCLIBC_MUTEX_LOCK(__resolv_lock);
1097 sdomains=__searchdomains;
1098 - BIGUNLOCK;
1099 + __UCLIBC_MUTEX_UNLOCK(__resolv_lock);
1101 if (variant < sdomains - 1) {
1102 - /* next search */
1103 - variant++;
1104 + /* next search */
1105 + variant++;
1106 } else {
1107 - /* next server, first search */
1108 - local_ns = (local_ns + 1) % nscount;
1109 - if (local_ns == 0)
1110 - retries++;
1111 + /* next server, first search */
1112 + local_ns = (local_ns + 1) % nscount;
1113 + if (local_ns == 0)
1114 + retries++;
1116 - variant = -1;
1117 + variant = -1;
1122 -fail:
1123 + fail:
1124 if (fd != -1)
1125 close(fd);
1126 if (lookup)
1127 @@ -951,10 +937,10 @@ fail:
1128 h_errno = NETDB_INTERNAL;
1129 /* Mess with globals while under lock */
1130 if (local_ns != -1) {
1131 - LOCK;
1132 + __UCLIBC_MUTEX_LOCK(mylock);
1133 ns = local_ns;
1134 id = local_id;
1135 - UNLOCK;
1136 + __UCLIBC_MUTEX_UNLOCK(mylock);
1138 return -1;
1140 @@ -966,9 +952,8 @@ int __nameservers;
1141 char * __nameserver[MAX_SERVERS];
1142 int __searchdomains;
1143 char * __searchdomain[MAX_SEARCH];
1144 -#ifdef __UCLIBC_HAS_THREADS__
1145 -pthread_mutex_t __resolv_lock = PTHREAD_MUTEX_INITIALIZER;
1146 -#endif
1148 +__UCLIBC_MUTEX_INIT(__resolv_lock, PTHREAD_MUTEX_INITIALIZER);
1151 * we currently read formats not quite the same as that on normal
1152 @@ -982,60 +967,63 @@ int __open_nameservers()
1153 #define RESOLV_ARGS 5
1154 char szBuffer[128], *p, *argv[RESOLV_ARGS];
1155 int argc;
1156 + int rv = 0;
1158 - BIGLOCK;
1159 + __UCLIBC_MUTEX_LOCK(__resolv_lock);
1160 if (__nameservers > 0) {
1161 - BIGUNLOCK;
1162 - return 0;
1163 + goto DONE;
1166 if ((fp = fopen("/etc/resolv.conf", "r")) ||
1167 - (fp = fopen("/etc/config/resolv.conf", "r")))
1170 - while (fgets(szBuffer, sizeof(szBuffer), fp) != NULL) {
1171 + (fp = fopen("/etc/config/resolv.conf", "r")))
1174 - for (p = szBuffer; *p && isspace(*p); p++)
1175 - /* skip white space */;
1176 - if (*p == '\0' || *p == '\n' || *p == '#') /* skip comments etc */
1177 - continue;
1178 - argc = 0;
1179 - while (*p && argc < RESOLV_ARGS) {
1180 - argv[argc++] = p;
1181 - while (*p && !isspace(*p) && *p != '\n')
1182 - p++;
1183 - while (*p && (isspace(*p) || *p == '\n')) /* remove spaces */
1184 - *p++ = '\0';
1186 + while (fgets(szBuffer, sizeof(szBuffer), fp) != NULL) {
1188 - if (strcmp(argv[0], "nameserver") == 0) {
1189 - for (i = 1; i < argc && __nameservers < MAX_SERVERS; i++) {
1190 - __nameserver[__nameservers++] = strdup(argv[i]);
1191 - DPRINTF("adding nameserver %s\n", argv[i]);
1192 + for (p = szBuffer; *p && isspace(*p); p++)
1193 + /* skip white space */;
1194 + if (*p == '\0' || *p == '\n' || *p == '#') /* skip comments etc */
1195 + continue;
1196 + argc = 0;
1197 + while (*p && argc < RESOLV_ARGS) {
1198 + argv[argc++] = p;
1199 + while (*p && !isspace(*p) && *p != '\n')
1200 + p++;
1201 + while (*p && (isspace(*p) || *p == '\n')) /* remove spaces */
1202 + *p++ = '\0';
1206 - /* domain and search are mutually exclusive, the last one wins */
1207 - if (strcmp(argv[0],"domain")==0 || strcmp(argv[0],"search")==0) {
1208 - while (__searchdomains > 0) {
1209 - free(__searchdomain[--__searchdomains]);
1210 - __searchdomain[__searchdomains] = NULL;
1211 + if (strcmp(argv[0], "nameserver") == 0) {
1212 + for (i = 1; i < argc && __nameservers < MAX_SERVERS; i++) {
1213 + __nameserver[__nameservers++] = strdup(argv[i]);
1214 + DPRINTF("adding nameserver %s\n", argv[i]);
1217 - for (i=1; i < argc && __searchdomains < MAX_SEARCH; i++) {
1218 - __searchdomain[__searchdomains++] = strdup(argv[i]);
1219 - DPRINTF("adding search %s\n", argv[i]);
1221 + /* domain and search are mutually exclusive, the last one wins */
1222 + if (strcmp(argv[0],"domain")==0 || strcmp(argv[0],"search")==0) {
1223 + while (__searchdomains > 0) {
1224 + free(__searchdomain[--__searchdomains]);
1225 + __searchdomain[__searchdomains] = NULL;
1227 + for (i=1; i < argc && __searchdomains < MAX_SEARCH; i++) {
1228 + __searchdomain[__searchdomains++] = strdup(argv[i]);
1229 + DPRINTF("adding search %s\n", argv[i]);
1233 + fclose(fp);
1234 + DPRINTF("nameservers = %d\n", __nameservers);
1235 + goto DONE;
1237 - fclose(fp);
1238 - DPRINTF("nameservers = %d\n", __nameservers);
1239 - BIGUNLOCK;
1240 - return 0;
1242 DPRINTF("failed to open %s\n", "resolv.conf");
1243 h_errno = NO_RECOVERY;
1244 - BIGUNLOCK;
1245 - return -1;
1247 + rv = -1;
1249 + DONE:
1250 + __UCLIBC_MUTEX_UNLOCK(__resolv_lock);
1251 + return rv;
1253 #endif
1255 @@ -1044,7 +1032,7 @@ int __open_nameservers()
1257 void __close_nameservers(void)
1259 - BIGLOCK;
1260 + __UCLIBC_MUTEX_LOCK(__resolv_lock);
1261 while (__nameservers > 0) {
1262 free(__nameserver[--__nameservers]);
1263 __nameserver[__nameservers] = NULL;
1264 @@ -1053,7 +1041,7 @@ void __close_nameservers(void)
1265 free(__searchdomain[--__searchdomains]);
1266 __searchdomain[__searchdomains] = NULL;
1268 - BIGUNLOCK;
1269 + __UCLIBC_MUTEX_UNLOCK(__resolv_lock);
1271 #endif
1273 @@ -1063,8 +1051,8 @@ struct hostent *gethostbyname(const char
1275 static struct hostent h;
1276 static char buf[sizeof(struct in_addr) +
1277 - sizeof(struct in_addr *)*2 +
1278 - sizeof(char *)*(ALIAS_DIM) + 384/*namebuffer*/ + 32/* margin */];
1279 + sizeof(struct in_addr *)*2 +
1280 + sizeof(char *)*(ALIAS_DIM) + 384/*namebuffer*/ + 32/* margin */];
1281 struct hostent *hp;
1283 gethostbyname_r(name, &h, buf, sizeof(buf), &hp, &h_errno);
1284 @@ -1082,8 +1070,8 @@ struct hostent *gethostbyname2(const cha
1285 #else /* __UCLIBC_HAS_IPV6__ */
1286 static struct hostent h;
1287 static char buf[sizeof(struct in6_addr) +
1288 - sizeof(struct in6_addr *)*2 +
1289 - sizeof(char *)*(ALIAS_DIM) + 384/*namebuffer*/ + 32/* margin */];
1290 + sizeof(struct in6_addr *)*2 +
1291 + sizeof(char *)*(ALIAS_DIM) + 384/*namebuffer*/ + 32/* margin */];
1292 struct hostent *hp;
1294 gethostbyname2_r(name, family, &h, buf, sizeof(buf), &hp, &h_errno);
1295 @@ -1119,7 +1107,7 @@ int res_init(void)
1296 /** rp->rhook = NULL; **/
1297 /** rp->_u._ext.nsinit = 0; **/
1299 - BIGLOCK;
1300 + __UCLIBC_MUTEX_LOCK(__resolv_lock);
1301 if(__searchdomains) {
1302 int i;
1303 for(i=0; i<__searchdomains; i++) {
1304 @@ -1139,7 +1127,7 @@ int res_init(void)
1307 rp->nscount = __nameservers;
1308 - BIGUNLOCK;
1309 + __UCLIBC_MUTEX_UNLOCK(__resolv_lock);
1311 return(0);
1313 @@ -1175,10 +1163,10 @@ int res_query(const char *dname, int cla
1315 memset((char *) &a, '\0', sizeof(a));
1317 - BIGLOCK;
1318 + __UCLIBC_MUTEX_LOCK(__resolv_lock);
1319 __nameserversXX=__nameservers;
1320 __nameserverXX=__nameserver;
1321 - BIGUNLOCK;
1322 + __UCLIBC_MUTEX_UNLOCK(__resolv_lock);
1323 i = __dns_lookup(dname, type, __nameserversXX, __nameserverXX, &packet, &a);
1325 if (i < 0) {
1326 @@ -1207,10 +1195,10 @@ int res_query(const char *dname, int cla
1327 * is detected. Error code, if any, is left in h_errno.
1329 int res_search(name, class, type, answer, anslen)
1330 - const char *name; /* domain name */
1331 - int class, type; /* class and type of query */
1332 - u_char *answer; /* buffer to put answer */
1333 - int anslen; /* size of answer */
1334 + const char *name; /* domain name */
1335 + int class, type; /* class and type of query */
1336 + u_char *answer; /* buffer to put answer */
1337 + int anslen; /* size of answer */
1339 const char *cp, * const *domain;
1340 HEADER *hp = (HEADER *)(void *)answer;
1341 @@ -1256,11 +1244,11 @@ int res_search(name, class, type, answer
1342 int done = 0;
1344 for (domain = (const char * const *)_res.dnsrch;
1345 - *domain && !done;
1346 - domain++) {
1347 + *domain && !done;
1348 + domain++) {
1350 ret = res_querydomain(name, *domain, class, type,
1351 - answer, anslen);
1352 + answer, anslen);
1353 if (ret > 0)
1354 return (ret);
1356 @@ -1283,22 +1271,22 @@ int res_search(name, class, type, answer
1359 switch (h_errno) {
1360 - case NO_DATA:
1361 - got_nodata++;
1362 - /* FALLTHROUGH */
1363 - case HOST_NOT_FOUND:
1364 - /* keep trying */
1365 - break;
1366 - case TRY_AGAIN:
1367 - if (hp->rcode == SERVFAIL) {
1368 - /* try next search element, if any */
1369 - got_servfail++;
1370 + case NO_DATA:
1371 + got_nodata++;
1372 + /* FALLTHROUGH */
1373 + case HOST_NOT_FOUND:
1374 + /* keep trying */
1375 break;
1377 - /* FALLTHROUGH */
1378 - default:
1379 - /* anything else implies that we're done */
1380 - done++;
1381 + case TRY_AGAIN:
1382 + if (hp->rcode == SERVFAIL) {
1383 + /* try next search element, if any */
1384 + got_servfail++;
1385 + break;
1387 + /* FALLTHROUGH */
1388 + default:
1389 + /* anything else implies that we're done */
1390 + done++;
1393 * if we got here for some reason other than DNSRCH,
1394 @@ -1342,10 +1330,10 @@ int res_search(name, class, type, answer
1395 * removing a trailing dot from name if domain is NULL.
1397 int res_querydomain(name, domain, class, type, answer, anslen)
1398 - const char *name, *domain;
1399 - int class, type; /* class and type of query */
1400 - u_char *answer; /* buffer to put answer */
1401 - int anslen; /* size of answer */
1402 + const char *name, *domain;
1403 + int class, type; /* class and type of query */
1404 + u_char *answer; /* buffer to put answer */
1405 + int anslen; /* size of answer */
1407 char nbuf[MAXDNAME];
1408 const char *longname = nbuf;
1409 @@ -1359,7 +1347,7 @@ int res_querydomain(name, domain, class,
1410 #ifdef DEBUG
1411 if (_res.options & RES_DEBUG)
1412 printf(";; res_querydomain(%s, %s, %d, %d)\n",
1413 - name, domain?domain:"<Nil>", class, type);
1414 + name, domain?domain:"<Nil>", class, type);
1415 #endif
1416 if (domain == NULL) {
1418 @@ -1400,11 +1388,11 @@ struct hostent *gethostbyaddr (const voi
1419 static struct hostent h;
1420 static char buf[
1421 #ifndef __UCLIBC_HAS_IPV6__
1422 - sizeof(struct in_addr) + sizeof(struct in_addr *)*2 +
1423 + sizeof(struct in_addr) + sizeof(struct in_addr *)*2 +
1424 #else
1425 - sizeof(struct in6_addr) + sizeof(struct in6_addr *)*2 +
1426 + sizeof(struct in6_addr) + sizeof(struct in6_addr *)*2 +
1427 #endif /* __UCLIBC_HAS_IPV6__ */
1428 - sizeof(char *)*(ALIAS_DIM) + 384/*namebuffer*/ + 32/* margin */];
1429 + sizeof(char *)*(ALIAS_DIM) + 384/*namebuffer*/ + 32/* margin */];
1430 struct hostent *hp;
1432 gethostbyaddr_r(addr, len, type, &h, buf, sizeof(buf), &hp, &h_errno);
1433 @@ -1425,11 +1413,11 @@ void __open_etc_hosts(FILE **fp)
1436 int __read_etc_hosts_r(FILE * fp, const char * name, int type,
1437 - enum etc_hosts_action action,
1438 - struct hostent * result_buf,
1439 - char * buf, size_t buflen,
1440 - struct hostent ** result,
1441 - int * h_errnop)
1442 + enum etc_hosts_action action,
1443 + struct hostent * result_buf,
1444 + char * buf, size_t buflen,
1445 + struct hostent ** result,
1446 + int * h_errnop)
1448 struct in_addr *in=NULL;
1449 struct in_addr **addr_list=NULL;
1450 @@ -1576,56 +1564,49 @@ int __read_etc_hosts_r(FILE * fp, const
1452 #ifdef L_gethostent
1454 -#ifdef __UCLIBC_HAS_THREADS__
1455 -static pthread_mutex_t mylock = PTHREAD_MUTEX_INITIALIZER;
1456 -# define LOCK __pthread_mutex_lock(&mylock)
1457 -# define UNLOCK __pthread_mutex_unlock(&mylock);
1458 -#else
1459 -# define LOCK
1460 -# define UNLOCK
1461 -#endif
1462 +__UCLIBC_MUTEX_STATIC(mylock, PTHREAD_MUTEX_INITIALIZER);
1464 static int __stay_open;
1465 static FILE * __gethostent_fp;
1467 void endhostent (void)
1469 - LOCK;
1470 + __UCLIBC_MUTEX_LOCK(mylock);
1471 __stay_open = 0;
1472 if (__gethostent_fp) {
1473 - fclose(__gethostent_fp);
1474 + fclose(__gethostent_fp);
1476 - UNLOCK;
1477 + __UCLIBC_MUTEX_UNLOCK(mylock);
1480 void sethostent (int stay_open)
1482 - LOCK;
1483 + __UCLIBC_MUTEX_LOCK(mylock);
1484 __stay_open = stay_open;
1485 - UNLOCK;
1486 + __UCLIBC_MUTEX_UNLOCK(mylock);
1489 int gethostent_r(struct hostent *result_buf, char *buf, size_t buflen,
1490 - struct hostent **result, int *h_errnop)
1491 + struct hostent **result, int *h_errnop)
1493 - int ret;
1494 + int ret = 0;
1496 - LOCK;
1497 + __UCLIBC_MUTEX_LOCK(mylock);
1498 if (__gethostent_fp == NULL) {
1499 - __open_etc_hosts(&__gethostent_fp);
1500 - if (__gethostent_fp == NULL) {
1501 - UNLOCK;
1502 - *result=NULL;
1503 - return 0;
1505 + __open_etc_hosts(&__gethostent_fp);
1506 + if (__gethostent_fp == NULL) {
1507 + *result=NULL;
1508 + goto DONE;
1512 ret = __read_etc_hosts_r(__gethostent_fp, NULL, AF_INET, GETHOSTENT,
1513 - result_buf, buf, buflen, result, h_errnop);
1514 + result_buf, buf, buflen, result, h_errnop);
1515 if (__stay_open==0) {
1516 - fclose(__gethostent_fp);
1517 + fclose(__gethostent_fp);
1519 - UNLOCK;
1520 + DONE:
1521 + __UCLIBC_MUTEX_UNLOCK(mylock);
1522 return(ret);
1525 @@ -1634,17 +1615,17 @@ struct hostent *gethostent (void)
1526 static struct hostent h;
1527 static char buf[
1528 #ifndef __UCLIBC_HAS_IPV6__
1529 - sizeof(struct in_addr) + sizeof(struct in_addr *)*2 +
1530 + sizeof(struct in_addr) + sizeof(struct in_addr *)*2 +
1531 #else
1532 - sizeof(struct in6_addr) + sizeof(struct in6_addr *)*2 +
1533 + sizeof(struct in6_addr) + sizeof(struct in6_addr *)*2 +
1534 #endif /* __UCLIBC_HAS_IPV6__ */
1535 - sizeof(char *)*(ALIAS_DIM) +
1536 - 80/*namebuffer*/ + 2/* margin */];
1537 + sizeof(char *)*(ALIAS_DIM) +
1538 + 80/*namebuffer*/ + 2/* margin */];
1539 struct hostent *host;
1541 - LOCK;
1542 + __UCLIBC_MUTEX_LOCK(mylock);
1543 gethostent_r(&h, buf, sizeof(buf), &host, &h_errno);
1544 - UNLOCK;
1545 + __UCLIBC_MUTEX_UNLOCK(mylock);
1546 return(host);
1548 #endif
1549 @@ -1652,23 +1633,23 @@ struct hostent *gethostent (void)
1550 #ifdef L_get_hosts_byname_r
1552 int __get_hosts_byname_r(const char * name, int type,
1553 - struct hostent * result_buf,
1554 - char * buf, size_t buflen,
1555 - struct hostent ** result,
1556 - int * h_errnop)
1557 + struct hostent * result_buf,
1558 + char * buf, size_t buflen,
1559 + struct hostent ** result,
1560 + int * h_errnop)
1562 return(__read_etc_hosts_r(NULL, name, type, GET_HOSTS_BYNAME,
1563 - result_buf, buf, buflen, result, h_errnop));
1564 + result_buf, buf, buflen, result, h_errnop));
1566 #endif
1568 #ifdef L_get_hosts_byaddr_r
1570 int __get_hosts_byaddr_r(const char * addr, int len, int type,
1571 - struct hostent * result_buf,
1572 - char * buf, size_t buflen,
1573 - struct hostent ** result,
1574 - int * h_errnop)
1575 + struct hostent * result_buf,
1576 + char * buf, size_t buflen,
1577 + struct hostent ** result,
1578 + int * h_errnop)
1580 #ifndef __UCLIBC_HAS_IPV6__
1581 char ipaddr[INET_ADDRSTRLEN];
1582 @@ -1677,24 +1658,24 @@ int __get_hosts_byaddr_r(const char * ad
1583 #endif /* __UCLIBC_HAS_IPV6__ */
1585 switch (type) {
1586 - case AF_INET:
1587 - if (len != sizeof(struct in_addr))
1588 - return 0;
1589 - break;
1590 + case AF_INET:
1591 + if (len != sizeof(struct in_addr))
1592 + return 0;
1593 + break;
1594 #ifdef __UCLIBC_HAS_IPV6__
1595 - case AF_INET6:
1596 - if (len != sizeof(struct in6_addr))
1597 - return 0;
1598 - break;
1599 + case AF_INET6:
1600 + if (len != sizeof(struct in6_addr))
1601 + return 0;
1602 + break;
1603 #endif /* __UCLIBC_HAS_IPV6__ */
1604 - default:
1605 - return 0;
1606 + default:
1607 + return 0;
1610 inet_ntop(type, addr, ipaddr, sizeof(ipaddr));
1612 return(__read_etc_hosts_r(NULL, ipaddr, type, GET_HOSTS_BYADDR,
1613 - result_buf, buf, buflen, result, h_errnop));
1614 + result_buf, buf, buflen, result, h_errnop));
1616 #endif
1618 @@ -1705,8 +1686,8 @@ int __get_hosts_byaddr_r(const char * ad
1619 #endif /* min */
1621 int getnameinfo (const struct sockaddr *sa, socklen_t addrlen, char *host,
1622 - socklen_t hostlen, char *serv, socklen_t servlen,
1623 - unsigned int flags)
1624 + socklen_t hostlen, char *serv, socklen_t servlen,
1625 + unsigned int flags)
1627 int serrno = errno;
1628 int ok = 0;
1629 @@ -1720,167 +1701,167 @@ int getnameinfo (const struct sockaddr *
1630 return EAI_FAMILY;
1632 switch (sa->sa_family) {
1633 - case AF_LOCAL:
1634 - break;
1635 - case AF_INET:
1636 - if (addrlen < sizeof (struct sockaddr_in))
1637 - return EAI_FAMILY;
1638 - break;
1639 + case AF_LOCAL:
1640 + break;
1641 + case AF_INET:
1642 + if (addrlen < sizeof (struct sockaddr_in))
1643 + return EAI_FAMILY;
1644 + break;
1645 #ifdef __UCLIBC_HAS_IPV6__
1646 - case AF_INET6:
1647 - if (addrlen < sizeof (struct sockaddr_in6))
1648 - return EAI_FAMILY;
1649 - break;
1650 + case AF_INET6:
1651 + if (addrlen < sizeof (struct sockaddr_in6))
1652 + return EAI_FAMILY;
1653 + break;
1654 #endif /* __UCLIBC_HAS_IPV6__ */
1655 - default:
1656 - return EAI_FAMILY;
1657 + default:
1658 + return EAI_FAMILY;
1661 if (host != NULL && hostlen > 0)
1662 switch (sa->sa_family) {
1663 - case AF_INET:
1664 + case AF_INET:
1665 #ifdef __UCLIBC_HAS_IPV6__
1666 - case AF_INET6:
1667 + case AF_INET6:
1668 #endif /* __UCLIBC_HAS_IPV6__ */
1669 - if (!(flags & NI_NUMERICHOST)) {
1670 + if (!(flags & NI_NUMERICHOST)) {
1671 #ifdef __UCLIBC_HAS_IPV6__
1672 - if (sa->sa_family == AF_INET6)
1673 - h = gethostbyaddr ((const void *)
1674 - &(((const struct sockaddr_in6 *) sa)->sin6_addr),
1675 - sizeof(struct in6_addr), AF_INET6);
1676 - else
1677 -#endif /* __UCLIBC_HAS_IPV6__ */
1678 - h = gethostbyaddr ((const void *) &(((const struct sockaddr_in *)sa)->sin_addr),
1679 - sizeof(struct in_addr), AF_INET);
1681 - if (h) {
1682 - char *c;
1683 - if ((flags & NI_NOFQDN)
1684 - && (getdomainname (domain, sizeof(domain)) == 0)
1685 - && (c = strstr (h->h_name, domain))
1686 - && (c != h->h_name) && (*(--c) == '.')) {
1687 - strncpy (host, h->h_name,
1688 - min(hostlen, (size_t) (c - h->h_name)));
1689 - host[min(hostlen - 1, (size_t) (c - h->h_name))] = '\0';
1690 - ok = 1;
1691 - } else {
1692 - strncpy (host, h->h_name, hostlen);
1693 - ok = 1;
1694 + if (sa->sa_family == AF_INET6)
1695 + h = gethostbyaddr ((const void *)
1696 + &(((const struct sockaddr_in6 *) sa)->sin6_addr),
1697 + sizeof(struct in6_addr), AF_INET6);
1698 + else
1699 +#endif /* __UCLIBC_HAS_IPV6__ */
1700 + h = gethostbyaddr ((const void *) &(((const struct sockaddr_in *)sa)->sin_addr),
1701 + sizeof(struct in_addr), AF_INET);
1703 + if (h) {
1704 + char *c;
1705 + if ((flags & NI_NOFQDN)
1706 + && (getdomainname (domain, sizeof(domain)) == 0)
1707 + && (c = strstr (h->h_name, domain))
1708 + && (c != h->h_name) && (*(--c) == '.')) {
1709 + strncpy (host, h->h_name,
1710 + min(hostlen, (size_t) (c - h->h_name)));
1711 + host[min(hostlen - 1, (size_t) (c - h->h_name))] = '\0';
1712 + ok = 1;
1713 + } else {
1714 + strncpy (host, h->h_name, hostlen);
1715 + ok = 1;
1722 - if (!ok) {
1723 - if (flags & NI_NAMEREQD) {
1724 - errno = serrno;
1725 - return EAI_NONAME;
1726 - } else {
1727 - const char *c;
1728 + if (!ok) {
1729 + if (flags & NI_NAMEREQD) {
1730 + errno = serrno;
1731 + return EAI_NONAME;
1732 + } else {
1733 + const char *c;
1734 #ifdef __UCLIBC_HAS_IPV6__
1735 - if (sa->sa_family == AF_INET6) {
1736 - const struct sockaddr_in6 *sin6p;
1737 + if (sa->sa_family == AF_INET6) {
1738 + const struct sockaddr_in6 *sin6p;
1740 - sin6p = (const struct sockaddr_in6 *) sa;
1741 + sin6p = (const struct sockaddr_in6 *) sa;
1743 - c = inet_ntop (AF_INET6,
1744 - (const void *) &sin6p->sin6_addr, host, hostlen);
1745 + c = inet_ntop (AF_INET6,
1746 + (const void *) &sin6p->sin6_addr, host, hostlen);
1747 #if 0
1748 - /* Does scope id need to be supported? */
1749 - uint32_t scopeid;
1750 - scopeid = sin6p->sin6_scope_id;
1751 - if (scopeid != 0) {
1752 - /* Buffer is >= IFNAMSIZ+1. */
1753 - char scopebuf[IFNAMSIZ + 1];
1754 - char *scopeptr;
1755 - int ni_numericscope = 0;
1756 - size_t real_hostlen = __strnlen (host, hostlen);
1757 - size_t scopelen = 0;
1759 - scopebuf[0] = SCOPE_DELIMITER;
1760 - scopebuf[1] = '\0';
1761 - scopeptr = &scopebuf[1];
1763 - if (IN6_IS_ADDR_LINKLOCAL (&sin6p->sin6_addr)
1764 - || IN6_IS_ADDR_MC_LINKLOCAL (&sin6p->sin6_addr)) {
1765 - if (if_indextoname (scopeid, scopeptr) == NULL)
1766 + /* Does scope id need to be supported? */
1767 + uint32_t scopeid;
1768 + scopeid = sin6p->sin6_scope_id;
1769 + if (scopeid != 0) {
1770 + /* Buffer is >= IFNAMSIZ+1. */
1771 + char scopebuf[IFNAMSIZ + 1];
1772 + char *scopeptr;
1773 + int ni_numericscope = 0;
1774 + size_t real_hostlen = __strnlen (host, hostlen);
1775 + size_t scopelen = 0;
1777 + scopebuf[0] = SCOPE_DELIMITER;
1778 + scopebuf[1] = '\0';
1779 + scopeptr = &scopebuf[1];
1781 + if (IN6_IS_ADDR_LINKLOCAL (&sin6p->sin6_addr)
1782 + || IN6_IS_ADDR_MC_LINKLOCAL (&sin6p->sin6_addr)) {
1783 + if (if_indextoname (scopeid, scopeptr) == NULL)
1784 + ++ni_numericscope;
1785 + else
1786 + scopelen = strlen (scopebuf);
1787 + } else {
1788 ++ni_numericscope;
1789 - else
1790 - scopelen = strlen (scopebuf);
1791 - } else {
1792 - ++ni_numericscope;
1796 - if (ni_numericscope)
1797 - scopelen = 1 + snprintf (scopeptr,
1798 - (scopebuf
1799 - + sizeof scopebuf
1800 - - scopeptr),
1801 - "%u", scopeid);
1803 - if (real_hostlen + scopelen + 1 > hostlen)
1804 - return EAI_SYSTEM;
1805 - memcpy (host + real_hostlen, scopebuf, scopelen + 1);
1807 + if (ni_numericscope)
1808 + scopelen = 1 + snprintf (scopeptr,
1809 + (scopebuf
1810 + + sizeof scopebuf
1811 + - scopeptr),
1812 + "%u", scopeid);
1814 + if (real_hostlen + scopelen + 1 > hostlen)
1815 + return EAI_SYSTEM;
1816 + memcpy (host + real_hostlen, scopebuf, scopelen + 1);
1818 #endif
1819 - } else
1820 + } else
1821 #endif /* __UCLIBC_HAS_IPV6__ */
1822 - c = inet_ntop (AF_INET, (const void *)
1823 - &(((const struct sockaddr_in *) sa)->sin_addr),
1824 - host, hostlen);
1826 - if (c == NULL) {
1827 - errno = serrno;
1828 - return EAI_SYSTEM;
1829 + c = inet_ntop (AF_INET, (const void *)
1830 + &(((const struct sockaddr_in *) sa)->sin_addr),
1831 + host, hostlen);
1833 + if (c == NULL) {
1834 + errno = serrno;
1835 + return EAI_SYSTEM;
1838 + ok = 1;
1840 - ok = 1;
1842 - break;
1844 - case AF_LOCAL:
1845 - if (!(flags & NI_NUMERICHOST)) {
1846 - struct utsname utsname;
1847 + break;
1849 - if (!uname (&utsname)) {
1850 - strncpy (host, utsname.nodename, hostlen);
1851 - break;
1852 + case AF_LOCAL:
1853 + if (!(flags & NI_NUMERICHOST)) {
1854 + struct utsname utsname;
1856 + if (!uname (&utsname)) {
1857 + strncpy (host, utsname.nodename, hostlen);
1858 + break;
1859 + };
1861 - };
1863 - if (flags & NI_NAMEREQD) {
1864 - errno = serrno;
1865 - return EAI_NONAME;
1867 + if (flags & NI_NAMEREQD) {
1868 + errno = serrno;
1869 + return EAI_NONAME;
1872 - strncpy (host, "localhost", hostlen);
1873 - break;
1874 + strncpy (host, "localhost", hostlen);
1875 + break;
1877 - default:
1878 - return EAI_FAMILY;
1880 + default:
1881 + return EAI_FAMILY;
1884 if (serv && (servlen > 0)) {
1885 switch (sa->sa_family) {
1886 - case AF_INET:
1887 + case AF_INET:
1888 #ifdef __UCLIBC_HAS_IPV6__
1889 - case AF_INET6:
1890 + case AF_INET6:
1891 #endif /* __UCLIBC_HAS_IPV6__ */
1892 - if (!(flags & NI_NUMERICSERV)) {
1893 - struct servent *s;
1894 - s = getservbyport (((const struct sockaddr_in *) sa)->sin_port,
1895 - ((flags & NI_DGRAM) ? "udp" : "tcp"));
1896 - if (s) {
1897 - strncpy (serv, s->s_name, servlen);
1898 - break;
1899 + if (!(flags & NI_NUMERICSERV)) {
1900 + struct servent *s;
1901 + s = getservbyport (((const struct sockaddr_in *) sa)->sin_port,
1902 + ((flags & NI_DGRAM) ? "udp" : "tcp"));
1903 + if (s) {
1904 + strncpy (serv, s->s_name, servlen);
1905 + break;
1909 - snprintf (serv, servlen, "%d",
1910 - ntohs (((const struct sockaddr_in *) sa)->sin_port));
1911 - break;
1912 + snprintf (serv, servlen, "%d",
1913 + ntohs (((const struct sockaddr_in *) sa)->sin_port));
1914 + break;
1916 - case AF_LOCAL:
1917 - strncpy (serv, ((const struct sockaddr_un *) sa)->sun_path, servlen);
1918 - break;
1919 + case AF_LOCAL:
1920 + strncpy (serv, ((const struct sockaddr_un *) sa)->sun_path, servlen);
1921 + break;
1924 if (host && (hostlen > 0))
1925 @@ -1896,10 +1877,10 @@ int getnameinfo (const struct sockaddr *
1926 #ifdef L_gethostbyname_r
1928 int gethostbyname_r(const char * name,
1929 - struct hostent * result_buf,
1930 - char * buf, size_t buflen,
1931 - struct hostent ** result,
1932 - int * h_errnop)
1933 + struct hostent * result_buf,
1934 + char * buf, size_t buflen,
1935 + struct hostent ** result,
1936 + int * h_errnop)
1938 struct in_addr *in;
1939 struct in_addr **addr_list;
1940 @@ -1921,7 +1902,7 @@ int gethostbyname_r(const char * name,
1941 __set_errno(0); /* to check for missing /etc/hosts. */
1943 if ((i=__get_hosts_byname_r(name, AF_INET, result_buf,
1944 - buf, buflen, result, h_errnop))==0)
1945 + buf, buflen, result, h_errnop))==0)
1946 return i;
1947 switch (*h_errnop) {
1948 case HOST_NOT_FOUND:
1949 @@ -1983,60 +1964,60 @@ int gethostbyname_r(const char * name,
1951 for (;;) {
1953 - BIGLOCK;
1954 + __UCLIBC_MUTEX_LOCK(__resolv_lock);
1955 __nameserversXX=__nameservers;
1956 __nameserverXX=__nameserver;
1957 - BIGUNLOCK;
1958 + __UCLIBC_MUTEX_UNLOCK(__resolv_lock);
1959 a.buf = buf;
1960 a.buflen = buflen;
1961 a.add_count = 0;
1962 i = __dns_lookup(name, T_A, __nameserversXX, __nameserverXX, &packet, &a);
1964 if (i < 0) {
1965 - *h_errnop = HOST_NOT_FOUND;
1966 - DPRINTF("__dns_lookup\n");
1967 - return TRY_AGAIN;
1968 + *h_errnop = HOST_NOT_FOUND;
1969 + DPRINTF("__dns_lookup\n");
1970 + return TRY_AGAIN;
1973 if ((a.rdlength + sizeof(struct in_addr*)) * a.add_count + 256 > buflen)
1975 - free(a.dotted);
1976 - free(packet);
1977 - *h_errnop = NETDB_INTERNAL;
1978 - DPRINTF("buffer too small for all addresses\n");
1979 - return ERANGE;
1982 + free(a.dotted);
1983 + free(packet);
1984 + *h_errnop = NETDB_INTERNAL;
1985 + DPRINTF("buffer too small for all addresses\n");
1986 + return ERANGE;
1988 else if(a.add_count > 0)
1990 - memmove(buf - sizeof(struct in_addr*)*2, buf, a.add_count * a.rdlength);
1991 - addr_list = (struct in_addr**)(buf + a.add_count * a.rdlength);
1992 - addr_list[0] = in;
1993 - for (i = a.add_count-1; i>=0; --i)
1994 - addr_list[i+1] = (struct in_addr*)(buf - sizeof(struct in_addr*)*2 + a.rdlength * i);
1995 - addr_list[a.add_count + 1] = 0;
1996 - buflen -= (((char*)&(addr_list[a.add_count + 2])) - buf);
1997 - buf = (char*)&addr_list[a.add_count + 2];
2000 + memmove(buf - sizeof(struct in_addr*)*2, buf, a.add_count * a.rdlength);
2001 + addr_list = (struct in_addr**)(buf + a.add_count * a.rdlength);
2002 + addr_list[0] = in;
2003 + for (i = a.add_count-1; i>=0; --i)
2004 + addr_list[i+1] = (struct in_addr*)(buf - sizeof(struct in_addr*)*2 + a.rdlength * i);
2005 + addr_list[a.add_count + 1] = 0;
2006 + buflen -= (((char*)&(addr_list[a.add_count + 2])) - buf);
2007 + buf = (char*)&addr_list[a.add_count + 2];
2010 strncpy(buf, a.dotted, buflen);
2011 free(a.dotted);
2013 if (a.atype == T_A) { /* ADDRESS */
2014 - memcpy(in, a.rdata, sizeof(*in));
2015 - result_buf->h_name = buf;
2016 - result_buf->h_addrtype = AF_INET;
2017 - result_buf->h_length = sizeof(*in);
2018 - result_buf->h_addr_list = (char **) addr_list;
2019 + memcpy(in, a.rdata, sizeof(*in));
2020 + result_buf->h_name = buf;
2021 + result_buf->h_addrtype = AF_INET;
2022 + result_buf->h_length = sizeof(*in);
2023 + result_buf->h_addr_list = (char **) addr_list;
2024 #ifdef __UCLIBC_MJN3_ONLY__
2025 #warning TODO -- generate the full list
2026 #endif
2027 - result_buf->h_aliases = alias; /* TODO: generate the full list */
2028 - free(packet);
2029 - break;
2030 + result_buf->h_aliases = alias; /* TODO: generate the full list */
2031 + free(packet);
2032 + break;
2033 } else {
2034 - free(packet);
2035 - *h_errnop=HOST_NOT_FOUND;
2036 - return TRY_AGAIN;
2037 + free(packet);
2038 + *h_errnop=HOST_NOT_FOUND;
2039 + return TRY_AGAIN;
2043 @@ -2049,14 +2030,14 @@ int gethostbyname_r(const char * name,
2044 #ifdef L_gethostbyname2_r
2046 int gethostbyname2_r(const char *name, int family,
2047 - struct hostent * result_buf,
2048 - char * buf, size_t buflen,
2049 - struct hostent ** result,
2050 - int * h_errnop)
2051 + struct hostent * result_buf,
2052 + char * buf, size_t buflen,
2053 + struct hostent ** result,
2054 + int * h_errnop)
2056 #ifndef __UCLIBC_HAS_IPV6__
2057 return family == (AF_INET)? gethostbyname_r(name, result_buf,
2058 - buf, buflen, result, h_errnop) : HOST_NOT_FOUND;
2059 + buf, buflen, result, h_errnop) : HOST_NOT_FOUND;
2060 #else /* __UCLIBC_HAS_IPV6__ */
2061 struct in6_addr *in;
2062 struct in6_addr **addr_list;
2063 @@ -2084,7 +2065,7 @@ int gethostbyname2_r(const char *name, i
2064 __set_errno(0); /* to check for missing /etc/hosts. */
2066 if ((i=__get_hosts_byname_r(name, AF_INET, result_buf,
2067 - buf, buflen, result, h_errnop))==0)
2068 + buf, buflen, result, h_errnop))==0)
2069 return i;
2070 switch (*h_errnop) {
2071 case HOST_NOT_FOUND:
2072 @@ -2137,10 +2118,10 @@ int gethostbyname2_r(const char *name, i
2073 memset((char *) &a, '\0', sizeof(a));
2075 for (;;) {
2076 - BIGLOCK;
2077 - __nameserversXX=__nameservers;
2078 - __nameserverXX=__nameserver;
2079 - BIGUNLOCK;
2080 + __UCLIBC_MUTEX_LOCK(__resolv_lock);
2081 + __nameserversXX=__nameservers;
2082 + __nameserverXX=__nameserver;
2083 + __UCLIBC_MUTEX_UNLOCK(__resolv_lock);
2085 i = __dns_lookup(buf, T_AAAA, __nameserversXX, __nameserverXX, &packet, &a);
2087 @@ -2190,10 +2171,10 @@ int gethostbyname2_r(const char *name, i
2089 #ifdef L_gethostbyaddr_r
2090 int gethostbyaddr_r (const void *addr, socklen_t len, int type,
2091 - struct hostent * result_buf,
2092 - char * buf, size_t buflen,
2093 - struct hostent ** result,
2094 - int * h_errnop)
2095 + struct hostent * result_buf,
2096 + char * buf, size_t buflen,
2097 + struct hostent ** result,
2098 + int * h_errnop)
2101 struct in_addr *in;
2102 @@ -2234,7 +2215,7 @@ int gethostbyaddr_r (const void *addr, s
2104 /* do /etc/hosts first */
2105 if ((i=__get_hosts_byaddr_r(addr, len, type, result_buf,
2106 - buf, buflen, result, h_errnop))==0)
2107 + buf, buflen, result, h_errnop))==0)
2108 return i;
2109 switch (*h_errnop) {
2110 case HOST_NOT_FOUND:
2111 @@ -2294,7 +2275,7 @@ int gethostbyaddr_r (const void *addr, s
2112 addr_list[0] = in;
2114 sprintf(buf, "%u.%u.%u.%u.in-addr.arpa",
2115 - tmp_addr[3], tmp_addr[2], tmp_addr[1], tmp_addr[0]);
2116 + tmp_addr[3], tmp_addr[2], tmp_addr[1], tmp_addr[0]);
2117 #ifdef __UCLIBC_HAS_IPV6__
2118 } else {
2119 memcpy(in6->s6_addr, addr, len);
2120 @@ -2304,7 +2285,7 @@ int gethostbyaddr_r (const void *addr, s
2122 for (i = len - 1; i >= 0; i--) {
2123 qp += sprintf(qp, "%x.%x.", in6->s6_addr[i] & 0xf,
2124 - (in6->s6_addr[i] >> 4) & 0xf);
2125 + (in6->s6_addr[i] >> 4) & 0xf);
2127 strcpy(qp, "ip6.int");
2128 #endif /* __UCLIBC_HAS_IPV6__ */
2129 @@ -2314,10 +2295,10 @@ int gethostbyaddr_r (const void *addr, s
2131 for (;;) {
2133 - BIGLOCK;
2134 - __nameserversXX=__nameservers;
2135 - __nameserverXX=__nameserver;
2136 - BIGUNLOCK;
2137 + __UCLIBC_MUTEX_LOCK(__resolv_lock);
2138 + __nameserversXX=__nameservers;
2139 + __nameserverXX=__nameserver;
2140 + __UCLIBC_MUTEX_UNLOCK(__resolv_lock);
2141 i = __dns_lookup(buf, T_PTR, __nameserversXX, __nameserverXX, &packet, &a);
2143 if (i < 0) {
2144 @@ -2381,7 +2362,7 @@ int gethostbyaddr_r (const void *addr, s
2145 * Return size of compressed name or -1 if there was an error.
2147 int __dn_expand(const u_char *msg, const u_char *eom, const u_char *src,
2148 - char *dst, int dstsiz)
2149 + char *dst, int dstsiz)
2151 int n = ns_name_uncompress(msg, eom, src, dst, (size_t)dstsiz);
2153 @@ -2401,7 +2382,7 @@ int __dn_expand(const u_char *msg, const
2155 static int printable(int ch)
2157 - return (ch > 0x20 && ch < 0x7f);
2158 + return (ch > 0x20 && ch < 0x7f);
2162 @@ -2413,18 +2394,18 @@ static int printable(int ch)
2164 static int special(int ch)
2166 - switch (ch) {
2167 + switch (ch) {
2168 case 0x22: /* '"' */
2169 case 0x2E: /* '.' */
2170 case 0x3B: /* ';' */
2171 case 0x5C: /* '\\' */
2172 - /* Special modifiers in zone files. */
2173 + /* Special modifiers in zone files. */
2174 case 0x40: /* '@' */
2175 case 0x24: /* '$' */
2176 - return (1);
2177 + return (1);
2178 default:
2179 - return (0);
2181 + return (0);
2186 @@ -2436,7 +2417,7 @@ static int special(int ch)
2187 * Root domain returns as "." not "".
2189 int __ns_name_uncompress(const u_char *msg, const u_char *eom,
2190 - const u_char *src, char *dst, size_t dstsiz)
2191 + const u_char *src, char *dst, size_t dstsiz)
2193 u_char tmp[NS_MAXCDNAME];
2194 int n;
2195 @@ -2525,7 +2506,7 @@ int __ns_name_ntop(const u_char *src, ch
2196 return (-1);
2198 *dn++ = '\0';
2199 - return (dn - dst);
2200 + return (dn - dst);
2204 @@ -2535,7 +2516,7 @@ int __ns_name_ntop(const u_char *src, ch
2205 * -1 if it fails, or consumed octets if it succeeds.
2207 int __ns_name_unpack(const u_char *msg, const u_char *eom, const u_char *src,
2208 - u_char *dst, size_t dstsiz)
2209 + u_char *dst, size_t dstsiz)
2211 const u_char *srcp, *dstlim;
2212 u_char *dstp;
2213 @@ -2554,46 +2535,46 @@ int __ns_name_unpack(const u_char *msg,
2214 while ((n = *srcp++) != 0) {
2215 /* Check for indirection. */
2216 switch (n & NS_CMPRSFLGS) {
2217 - case 0:
2218 - /* Limit checks. */
2219 - if (dstp + n + 1 >= dstlim || srcp + n >= eom) {
2220 - __set_errno (EMSGSIZE);
2221 - return (-1);
2223 - checked += n + 1;
2224 - *dstp++ = n;
2225 - memcpy(dstp, srcp, n);
2226 - dstp += n;
2227 - srcp += n;
2228 - break;
2229 + case 0:
2230 + /* Limit checks. */
2231 + if (dstp + n + 1 >= dstlim || srcp + n >= eom) {
2232 + __set_errno (EMSGSIZE);
2233 + return (-1);
2235 + checked += n + 1;
2236 + *dstp++ = n;
2237 + memcpy(dstp, srcp, n);
2238 + dstp += n;
2239 + srcp += n;
2240 + break;
2242 - case NS_CMPRSFLGS:
2243 - if (srcp >= eom) {
2244 - __set_errno (EMSGSIZE);
2245 - return (-1);
2247 - if (len < 0)
2248 - len = srcp - src + 1;
2249 - srcp = msg + (((n & 0x3f) << 8) | (*srcp & 0xff));
2250 - if (srcp < msg || srcp >= eom) { /* Out of range. */
2251 - __set_errno (EMSGSIZE);
2252 - return (-1);
2254 - checked += 2;
2255 - /*
2256 - * Check for loops in the compressed name;
2257 - * if we've looked at the whole message,
2258 - * there must be a loop.
2259 - */
2260 - if (checked >= eom - msg) {
2261 - __set_errno (EMSGSIZE);
2262 - return (-1);
2264 - break;
2265 + case NS_CMPRSFLGS:
2266 + if (srcp >= eom) {
2267 + __set_errno (EMSGSIZE);
2268 + return (-1);
2270 + if (len < 0)
2271 + len = srcp - src + 1;
2272 + srcp = msg + (((n & 0x3f) << 8) | (*srcp & 0xff));
2273 + if (srcp < msg || srcp >= eom) { /* Out of range. */
2274 + __set_errno (EMSGSIZE);
2275 + return (-1);
2277 + checked += 2;
2278 + /*
2279 + * Check for loops in the compressed name;
2280 + * if we've looked at the whole message,
2281 + * there must be a loop.
2282 + */
2283 + if (checked >= eom - msg) {
2284 + __set_errno (EMSGSIZE);
2285 + return (-1);
2287 + break;
2289 - default:
2290 - __set_errno (EMSGSIZE);
2291 - return (-1); /* flag error */
2292 + default:
2293 + __set_errno (EMSGSIZE);
2294 + return (-1); /* flag error */
2297 *dstp = '\0';
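Note on the resolver hunks above: the BIGLOCK/BIGUNLOCK pairs become __UCLIBC_MUTEX_LOCK/__UCLIBC_MUTEX_UNLOCK on __resolv_lock, but the shape of the code stays the same: copy the shared nameserver state into locals while holding the lock, then run the slow DNS query without it. Below is a minimal standalone sketch of that pattern using plain pthreads instead of uClibc's internal macros; all names in it are illustrative only.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t resolv_lock = PTHREAD_MUTEX_INITIALIZER;
static int nameservers = 1;                     /* shared, guarded by resolv_lock */
static const char *nameserver[8] = { "10.0.0.1" };

/* Stand-in for __dns_lookup(); only reports which servers it would try. */
static int do_query(const char *name, int count, const char **list)
{
	int i;

	for (i = 0; i < count; i++)
		printf("query %s via %s\n", name, list[i]);
	return count ? 0 : -1;
}

int lookup(const char *name)
{
	int ns_count;
	const char **ns_list;

	/* Snapshot the shared configuration under the lock... */
	pthread_mutex_lock(&resolv_lock);
	ns_count = nameservers;
	ns_list = nameserver;
	pthread_mutex_unlock(&resolv_lock);

	/* ...then do the slow network round trip without holding it. */
	return do_query(name, ns_count, ns_list);
}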
2298 diff --git a/libc/inet/rpc/create_xid.c b/libc/inet/rpc/create_xid.c
2299 index cbb961e..c86cbb4 100644
2300 --- a/libc/inet/rpc/create_xid.c
2301 +++ b/libc/inet/rpc/create_xid.c
2302 @@ -27,15 +27,7 @@
2304 /* The RPC code is not threadsafe, but new code should be threadsafe. */
2306 -#ifdef __UCLIBC_HAS_THREADS__
2307 -#include <pthread.h>
2308 -static pthread_mutex_t createxid_lock = PTHREAD_MUTEX_INITIALIZER;
2309 -# define LOCK __pthread_mutex_lock(&createxid_lock)
2310 -# define UNLOCK __pthread_mutex_unlock(&createxid_lock);
2311 -#else
2312 -# define LOCK
2313 -# define UNLOCK
2314 -#endif
2315 +__UCLIBC_MUTEX_STATIC(mylock, PTHREAD_MUTEX_INITIALIZER);
2317 static int is_initialized;
2318 static struct drand48_data __rpc_lrand48_data;
2319 @@ -43,22 +35,22 @@ static struct drand48_data __rpc_lrand48
2320 unsigned long
2321 _create_xid (void)
2323 - unsigned long res;
2324 + unsigned long res;
2326 - LOCK;
2327 + __UCLIBC_MUTEX_LOCK(mylock);
2329 - if (!is_initialized)
2331 - struct timeval now;
2332 + if (!is_initialized)
2334 + struct timeval now;
2336 - gettimeofday (&now, (struct timezone *) 0);
2337 - srand48_r (now.tv_sec ^ now.tv_usec, &__rpc_lrand48_data);
2338 - is_initialized = 1;
2340 + gettimeofday (&now, (struct timezone *) 0);
2341 + srand48_r (now.tv_sec ^ now.tv_usec, &__rpc_lrand48_data);
2342 + is_initialized = 1;
2345 - lrand48_r (&__rpc_lrand48_data, &res);
2346 + lrand48_r (&__rpc_lrand48_data, &res);
2348 - UNLOCK;
2349 + __UCLIBC_MUTEX_UNLOCK(mylock);
2351 - return res;
2352 + return res;
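As in create_xid.c above, each converted file drops its private LOCK/UNLOCK boilerplate in favour of the shared helpers from <bits/uClibc_mutex.h>. The real definitions live in that header and, with threads and cancellation in play, also wrap the critical section in deferred-cancel cleanup handling; the sketch below only approximates them with bare pthreads to show how the converted call sites read.

/* Approximation of the <bits/uClibc_mutex.h> helpers; the real macros
 * additionally handle thread cancellation. Build with -lpthread. */
#include <pthread.h>

#define __UCLIBC_MUTEX_STATIC(M, I)  static pthread_mutex_t M = I
#define __UCLIBC_MUTEX_LOCK(M)       pthread_mutex_lock(&(M))
#define __UCLIBC_MUTEX_UNLOCK(M)     pthread_mutex_unlock(&(M))

__UCLIBC_MUTEX_STATIC(mylock, PTHREAD_MUTEX_INITIALIZER);

static int counter;

int bump(void)
{
	int v;

	__UCLIBC_MUTEX_LOCK(mylock);
	v = ++counter;               /* critical section */
	__UCLIBC_MUTEX_UNLOCK(mylock);
	return v;
}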
2354 diff --git a/libc/misc/dirent/closedir.c b/libc/misc/dirent/closedir.c
2355 index 068e2d3..56adb23 100644
2356 --- a/libc/misc/dirent/closedir.c
2357 +++ b/libc/misc/dirent/closedir.c
2358 @@ -4,7 +4,6 @@
2359 #include <unistd.h>
2360 #include "dirstream.h"
2363 int closedir(DIR * dir)
2365 int fd;
2366 @@ -19,14 +18,10 @@ int closedir(DIR * dir)
2367 __set_errno(EBADF);
2368 return -1;
2370 -#ifdef __UCLIBC_HAS_THREADS__
2371 - __pthread_mutex_lock(&(dir->dd_lock));
2372 -#endif
2373 + __UCLIBC_MUTEX_LOCK(dir->dd_lock);
2374 fd = dir->dd_fd;
2375 dir->dd_fd = -1;
2376 -#ifdef __UCLIBC_HAS_THREADS__
2377 - __pthread_mutex_unlock(&(dir->dd_lock));
2378 -#endif
2379 + __UCLIBC_MUTEX_UNLOCK(dir->dd_lock);
2380 free(dir->dd_buf);
2381 free(dir);
2382 return close(fd);
2383 diff --git a/libc/misc/dirent/dirstream.h b/libc/misc/dirent/dirstream.h
2384 index 2dd0264..bd721c5 100644
2385 --- a/libc/misc/dirent/dirstream.h
2386 +++ b/libc/misc/dirent/dirstream.h
2387 @@ -26,9 +26,8 @@ Cambridge, MA 02139, USA. */
2389 #include <features.h>
2390 #include <sys/types.h>
2391 -#ifdef __UCLIBC_HAS_THREADS__
2392 -#include <pthread.h>
2393 -#endif
2395 +#include <bits/uClibc_mutex.h>
2397 /* For now, syscall readdir () only supports one entry at a time. It
2398 * will be changed in the future.
2399 @@ -63,11 +62,7 @@ struct __dirstream {
2400 size_t dd_max;
2402 /* lock */
2403 -#ifdef __UCLIBC_HAS_THREADS__
2404 - pthread_mutex_t dd_lock;
2405 -#else
2406 - void *dd_lock;
2407 -#endif
2408 + __UCLIBC_MUTEX(dd_lock);
2409 }; /* stream data from opendir() */
2412 diff --git a/libc/misc/dirent/readdir.c b/libc/misc/dirent/readdir.c
2413 index 1f196e1..c55317a 100644
2414 --- a/libc/misc/dirent/readdir.c
2415 +++ b/libc/misc/dirent/readdir.c
2416 @@ -5,7 +5,6 @@
2417 #include <dirent.h>
2418 #include "dirstream.h"
2421 struct dirent *readdir(DIR * dir)
2423 ssize_t bytes;
2424 @@ -16,9 +15,7 @@ struct dirent *readdir(DIR * dir)
2425 return NULL;
2428 -#ifdef __UCLIBC_HAS_THREADS__
2429 - __pthread_mutex_lock(&(dir->dd_lock));
2430 -#endif
2431 + __UCLIBC_MUTEX_LOCK(dir->dd_lock);
2433 do {
2434 if (dir->dd_size <= dir->dd_nextloc) {
2435 @@ -44,8 +41,6 @@ struct dirent *readdir(DIR * dir)
2436 } while (de->d_ino == 0);
2438 all_done:
2439 -#ifdef __UCLIBC_HAS_THREADS__
2440 - __pthread_mutex_unlock(&(dir->dd_lock));
2441 -#endif
2442 + __UCLIBC_MUTEX_UNLOCK(dir->dd_lock);
2443 return de;
2445 diff --git a/libc/misc/dirent/readdir64.c b/libc/misc/dirent/readdir64.c
2446 index f798c6f..6da3b0d 100644
2447 --- a/libc/misc/dirent/readdir64.c
2448 +++ b/libc/misc/dirent/readdir64.c
2449 @@ -20,7 +20,6 @@
2450 #include <dirent.h>
2451 #include "dirstream.h"
2454 struct dirent64 *readdir64(DIR * dir)
2456 ssize_t bytes;
2457 @@ -31,9 +30,7 @@ struct dirent64 *readdir64(DIR * dir)
2458 return NULL;
2461 -#ifdef __UCLIBC_HAS_THREADS__
2462 - __pthread_mutex_lock(&(dir->dd_lock));
2463 -#endif
2464 + __UCLIBC_MUTEX_LOCK(dir->dd_lock);
2466 do {
2467 if (dir->dd_size <= dir->dd_nextloc) {
2468 @@ -59,9 +56,7 @@ struct dirent64 *readdir64(DIR * dir)
2469 } while (de->d_ino == 0);
2471 all_done:
2472 -#ifdef __UCLIBC_HAS_THREADS__
2473 - __pthread_mutex_unlock(&(dir->dd_lock));
2474 -#endif
2475 + __UCLIBC_MUTEX_UNLOCK(dir->dd_lock);
2477 return de;
2479 diff --git a/libc/misc/dirent/readdir64_r.c b/libc/misc/dirent/readdir64_r.c
2480 index da3564e..cc96eff 100644
2481 --- a/libc/misc/dirent/readdir64_r.c
2482 +++ b/libc/misc/dirent/readdir64_r.c
2483 @@ -19,7 +19,6 @@
2484 #include <dirent.h>
2485 #include "dirstream.h"
2488 int readdir64_r(DIR *dir, struct dirent64 *entry, struct dirent64 **result)
2490 int ret;
2491 @@ -32,21 +31,19 @@ int readdir64_r(DIR *dir, struct dirent6
2493 de = NULL;
2495 -#ifdef __UCLIBC_HAS_THREADS__
2496 - __pthread_mutex_lock(&(dir->dd_lock));
2497 -#endif
2498 + __UCLIBC_MUTEX_LOCK(dir->dd_lock);
2500 do {
2501 if (dir->dd_size <= dir->dd_nextloc) {
2502 - /* read dir->dd_max bytes of directory entries. */
2503 - bytes = __getdents64(dir->dd_fd, dir->dd_buf, dir->dd_max);
2504 - if (bytes <= 0) {
2505 - *result = NULL;
2506 - ret = errno;
2507 - goto all_done;
2509 - dir->dd_size = bytes;
2510 - dir->dd_nextloc = 0;
2511 + /* read dir->dd_max bytes of directory entries. */
2512 + bytes = __getdents64(dir->dd_fd, dir->dd_buf, dir->dd_max);
2513 + if (bytes <= 0) {
2514 + *result = NULL;
2515 + ret = errno;
2516 + goto all_done;
2518 + dir->dd_size = bytes;
2519 + dir->dd_nextloc = 0;
2522 de = (struct dirent64 *) (((char *) dir->dd_buf) + dir->dd_nextloc);
2523 @@ -66,12 +63,10 @@ int readdir64_r(DIR *dir, struct dirent6
2525 ret = 0;
2527 -all_done:
2528 + all_done:
2530 -#ifdef __UCLIBC_HAS_THREADS__
2531 - __pthread_mutex_unlock(&(dir->dd_lock));
2532 -#endif
2533 - return((de != NULL)? 0 : ret);
2534 + __UCLIBC_MUTEX_UNLOCK(dir->dd_lock);
2535 + return((de != NULL)? 0 : ret);
2537 #endif /* __UCLIBC_HAS_LFS__ */
2539 diff --git a/libc/misc/dirent/readdir_r.c b/libc/misc/dirent/readdir_r.c
2540 index 245dcbd..aeccdd8 100644
2541 --- a/libc/misc/dirent/readdir_r.c
2542 +++ b/libc/misc/dirent/readdir_r.c
2543 @@ -5,7 +5,6 @@
2544 #include <dirent.h>
2545 #include "dirstream.h"
2548 int readdir_r(DIR *dir, struct dirent *entry, struct dirent **result)
2550 int ret;
2551 @@ -18,21 +17,19 @@ int readdir_r(DIR *dir, struct dirent *e
2553 de = NULL;
2555 -#ifdef __UCLIBC_HAS_THREADS__
2556 - __pthread_mutex_lock(&(dir->dd_lock));
2557 -#endif
2558 + __UCLIBC_MUTEX_LOCK(dir->dd_lock);
2560 do {
2561 if (dir->dd_size <= dir->dd_nextloc) {
2562 - /* read dir->dd_max bytes of directory entries. */
2563 - bytes = __getdents(dir->dd_fd, dir->dd_buf, dir->dd_max);
2564 - if (bytes <= 0) {
2565 - *result = NULL;
2566 - ret = errno;
2567 - goto all_done;
2569 - dir->dd_size = bytes;
2570 - dir->dd_nextloc = 0;
2571 + /* read dir->dd_max bytes of directory entries. */
2572 + bytes = __getdents(dir->dd_fd, dir->dd_buf, dir->dd_max);
2573 + if (bytes <= 0) {
2574 + *result = NULL;
2575 + ret = errno;
2576 + goto all_done;
2578 + dir->dd_size = bytes;
2579 + dir->dd_nextloc = 0;
2582 de = (struct dirent *) (((char *) dir->dd_buf) + dir->dd_nextloc);
2583 @@ -52,10 +49,8 @@ int readdir_r(DIR *dir, struct dirent *e
2585 ret = 0;
2587 -all_done:
2588 + all_done:
2590 -#ifdef __UCLIBC_HAS_THREADS__
2591 - __pthread_mutex_unlock(&(dir->dd_lock));
2592 -#endif
2593 - return((de != NULL)? 0 : ret);
2594 + __UCLIBC_MUTEX_UNLOCK(dir->dd_lock);
2595 + return((de != NULL)? 0 : ret);
2597 diff --git a/libc/misc/dirent/rewinddir.c b/libc/misc/dirent/rewinddir.c
2598 index 60ef71d..fe8fc2a 100644
2599 --- a/libc/misc/dirent/rewinddir.c
2600 +++ b/libc/misc/dirent/rewinddir.c
2601 @@ -3,7 +3,6 @@
2602 #include <unistd.h>
2603 #include "dirstream.h"
2606 /* rewinddir() just does an lseek(fd,0,0) - see close for comments */
2607 void rewinddir(DIR * dir)
2609 @@ -11,12 +10,8 @@ void rewinddir(DIR * dir)
2610 __set_errno(EBADF);
2611 return;
2613 -#ifdef __UCLIBC_HAS_THREADS__
2614 - __pthread_mutex_lock(&(dir->dd_lock));
2615 -#endif
2616 + __UCLIBC_MUTEX_LOCK(dir->dd_lock);
2617 lseek(dir->dd_fd, 0, SEEK_SET);
2618 dir->dd_nextoff = dir->dd_nextloc = dir->dd_size = 0;
2619 -#ifdef __UCLIBC_HAS_THREADS__
2620 - __pthread_mutex_unlock(&(dir->dd_lock));
2621 -#endif
2622 + __UCLIBC_MUTEX_UNLOCK(dir->dd_lock);
2624 diff --git a/libc/misc/dirent/seekdir.c b/libc/misc/dirent/seekdir.c
2625 index 139f1e1..6d6f5f0 100644
2626 --- a/libc/misc/dirent/seekdir.c
2627 +++ b/libc/misc/dirent/seekdir.c
2628 @@ -3,19 +3,14 @@
2629 #include <unistd.h>
2630 #include "dirstream.h"
2633 void seekdir(DIR * dir, long int offset)
2635 if (!dir) {
2636 __set_errno(EBADF);
2637 return;
2639 -#ifdef __UCLIBC_HAS_THREADS__
2640 - __pthread_mutex_lock(&(dir->dd_lock));
2641 -#endif
2642 + __UCLIBC_MUTEX_LOCK(dir->dd_lock);
2643 dir->dd_nextoff = lseek(dir->dd_fd, offset, SEEK_SET);
2644 dir->dd_size = dir->dd_nextloc = 0;
2645 -#ifdef __UCLIBC_HAS_THREADS__
2646 - __pthread_mutex_unlock(&(dir->dd_lock));
2647 -#endif
2648 + __UCLIBC_MUTEX_UNLOCK(dir->dd_lock);
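The dirent conversions above also change dd_lock itself: instead of an #ifdef __UCLIBC_HAS_THREADS__ choice between pthread_mutex_t and void *, dirstream.h now declares the member with __UCLIBC_MUTEX(dd_lock), and the callers use the member form of the lock macros. Roughly, with the same bare-pthreads approximation and a made-up struct for illustration:

#include <pthread.h>

/* Approximation of the member-mutex helpers; the real ones are in
 * <bits/uClibc_mutex.h> and also cooperate with cancellation. */
#define __UCLIBC_MUTEX(M)        pthread_mutex_t M
#define __UCLIBC_MUTEX_LOCK(M)   pthread_mutex_lock(&(M))
#define __UCLIBC_MUTEX_UNLOCK(M) pthread_mutex_unlock(&(M))

struct stream {                          /* stand-in for struct __dirstream */
	int fd;
	long pos;
	__UCLIBC_MUTEX(lock);            /* one lock per open stream */
};

void stream_init(struct stream *s, int fd)
{
	s->fd = fd;
	s->pos = 0;
	pthread_mutex_init(&s->lock, NULL);  /* opendir() sets up dd_lock similarly */
}

long stream_tell(struct stream *s)
{
	long pos;

	__UCLIBC_MUTEX_LOCK(s->lock);    /* expands to pthread_mutex_lock(&(s->lock)) */
	pos = s->pos;
	__UCLIBC_MUTEX_UNLOCK(s->lock);
	return pos;
}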
2650 diff --git a/libc/misc/mntent/mntent.c b/libc/misc/mntent/mntent.c
2651 index d98a687..af6d848 100644
2652 --- a/libc/misc/mntent/mntent.c
2653 +++ b/libc/misc/mntent/mntent.c
2654 @@ -3,15 +3,9 @@
2655 #include <string.h>
2656 #include <mntent.h>
2658 -#ifdef __UCLIBC_HAS_THREADS__
2659 -#include <pthread.h>
2660 -static pthread_mutex_t mylock = PTHREAD_MUTEX_INITIALIZER;
2661 -# define LOCK __pthread_mutex_lock(&mylock)
2662 -# define UNLOCK __pthread_mutex_unlock(&mylock);
2663 -#else
2664 -# define LOCK
2665 -# define UNLOCK
2666 -#endif
2667 +#include <bits/uClibc_mutex.h>
2669 +__UCLIBC_MUTEX_STATIC(mylock, PTHREAD_MUTEX_INITIALIZER);
2671 /* Reentrant version of getmntent. */
2672 struct mntent *getmntent_r (FILE *filep,
2673 @@ -67,7 +61,7 @@ struct mntent *getmntent(FILE * filep)
2674 struct mntent *tmp;
2675 static char *buff = NULL;
2676 static struct mntent mnt;
2677 - LOCK;
2678 + __UCLIBC_MUTEX_LOCK(mylock);
2680 if (!buff) {
2681 buff = malloc(BUFSIZ);
2682 @@ -76,7 +70,7 @@ struct mntent *getmntent(FILE * filep)
2685 tmp = getmntent_r(filep, &mnt, buff, BUFSIZ);
2686 - UNLOCK;
2687 + __UCLIBC_MUTEX_UNLOCK(mylock);
2688 return(tmp);
2691 diff --git a/libc/misc/pthread/weaks.c b/libc/misc/pthread/weaks.c
2692 index 89c2611..c27bd10 100644
2693 --- a/libc/misc/pthread/weaks.c
2694 +++ b/libc/misc/pthread/weaks.c
2695 @@ -21,6 +21,7 @@
2696 #include <limits.h>
2697 #include <stdlib.h>
2699 +static void __pthread_return_void __P ((void));
2700 static int __pthread_return_0 __P ((void));
2701 static int __pthread_return_1 __P ((void));
2703 @@ -104,8 +105,17 @@ weak_alias (__pthread_return_0, __pthrea
2704 weak_alias (__pthread_return_0, __pthread_mutex_trylock)
2705 weak_alias (__pthread_return_0, __pthread_mutex_unlock)
2707 +weak_alias (__pthread_return_void, _pthread_cleanup_push_defer)
2708 +weak_alias (__pthread_return_void, _pthread_cleanup_pop_restore)
2710 /**********************************************************************/
2712 +static void
2713 +__pthread_return_void (void)
2715 + return;
2718 static int
2719 __pthread_return_0 (void)
2721 diff --git a/libc/misc/syslog/syslog.c b/libc/misc/syslog/syslog.c
2722 index 2b478e1..9e9ddbf 100644
2723 --- a/libc/misc/syslog/syslog.c
2724 +++ b/libc/misc/syslog/syslog.c
2725 @@ -80,17 +80,9 @@
2726 #include <ctype.h>
2727 #include <signal.h>
2729 +#include <bits/uClibc_mutex.h>
2731 -#ifdef __UCLIBC_HAS_THREADS__
2732 -#include <pthread.h>
2733 -static pthread_mutex_t mylock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
2734 -# define LOCK __pthread_mutex_lock(&mylock)
2735 -# define UNLOCK __pthread_mutex_unlock(&mylock);
2736 -#else
2737 -# define LOCK
2738 -# define UNLOCK
2739 -#endif
2741 +__UCLIBC_MUTEX_STATIC(mylock, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP);
2743 static int LogFile = -1; /* fd for log */
2744 static int connected; /* have done connect */
2745 @@ -110,26 +102,26 @@ int setlogmask(int pmask);
2746 static void
2747 closelog_intern(int to_default)
2749 - LOCK;
2750 + __UCLIBC_MUTEX_LOCK(mylock);
2751 if (LogFile != -1) {
2752 (void) close(LogFile);
2754 LogFile = -1;
2755 connected = 0;
2756 if (to_default)
2758 - LogStat = 0;
2759 - LogTag = "syslog";
2760 - LogFacility = LOG_USER;
2761 - LogMask = 0xff;
2763 - UNLOCK;
2765 + LogStat = 0;
2766 + LogTag = "syslog";
2767 + LogFacility = LOG_USER;
2768 + LogMask = 0xff;
2770 + __UCLIBC_MUTEX_UNLOCK(mylock);
2773 static void
2774 sigpipe_handler (int sig)
2776 - closelog_intern (0);
2777 + closelog_intern (0);
2781 @@ -165,7 +157,7 @@ vsyslog( int pri, const char *fmt, va_li
2783 saved_errno = errno;
2785 - LOCK;
2786 + __UCLIBC_MUTEX_LOCK(mylock);
2788 /* See if we should just throw out this message. */
2789 if (!(LogMask & LOG_MASK(LOG_PRI(pri))) || (pri &~ (LOG_PRIMASK|LOG_FACMASK)))
2790 @@ -208,7 +200,7 @@ vsyslog( int pri, const char *fmt, va_li
2791 if (p >= end || p < head_end) { /* Returned -1 in case of error... */
2792 static const char truncate_msg[12] = "[truncated] ";
2793 memmove(head_end + sizeof(truncate_msg), head_end,
2794 - end - head_end - sizeof(truncate_msg));
2795 + end - head_end - sizeof(truncate_msg));
2796 memcpy(head_end, truncate_msg, sizeof(truncate_msg));
2797 if (p < head_end) {
2798 while (p < end && *p) {
2799 @@ -261,11 +253,11 @@ vsyslog( int pri, const char *fmt, va_li
2800 (void)close(fd);
2803 -getout:
2804 - UNLOCK;
2805 + getout:
2806 + __UCLIBC_MUTEX_UNLOCK(mylock);
2807 if (sigpipe == 0)
2808 sigaction (SIGPIPE, &oldaction,
2809 - (struct sigaction *) NULL);
2810 + (struct sigaction *) NULL);
2814 @@ -276,48 +268,48 @@ openlog( const char *ident, int logstat,
2816 int logType = SOCK_DGRAM;
2818 - LOCK;
2819 + __UCLIBC_MUTEX_LOCK(mylock);
2821 if (ident != NULL)
2822 - LogTag = ident;
2823 + LogTag = ident;
2824 LogStat = logstat;
2825 if (logfac != 0 && (logfac &~ LOG_FACMASK) == 0)
2826 - LogFacility = logfac;
2827 + LogFacility = logfac;
2828 if (LogFile == -1) {
2829 - SyslogAddr.sa_family = AF_UNIX;
2830 - (void)strncpy(SyslogAddr.sa_data, _PATH_LOG,
2831 - sizeof(SyslogAddr.sa_data));
2832 -retry:
2833 - if (LogStat & LOG_NDELAY) {
2834 - if ((LogFile = socket(AF_UNIX, logType, 0)) == -1){
2835 - UNLOCK;
2836 - return;
2838 - /* fcntl(LogFile, F_SETFD, 1); */
2840 + SyslogAddr.sa_family = AF_UNIX;
2841 + (void)strncpy(SyslogAddr.sa_data, _PATH_LOG,
2842 + sizeof(SyslogAddr.sa_data));
2843 + retry:
2844 + if (LogStat & LOG_NDELAY) {
2845 + if ((LogFile = socket(AF_UNIX, logType, 0)) == -1){
2846 + goto DONE;
2848 + /* fcntl(LogFile, F_SETFD, 1); */
2852 if (LogFile != -1 && !connected) {
2853 - if (connect(LogFile, &SyslogAddr, sizeof(SyslogAddr) -
2854 - sizeof(SyslogAddr.sa_data) + strlen(SyslogAddr.sa_data)) != -1)
2856 - connected = 1;
2857 - } else if (logType == SOCK_DGRAM) {
2858 - logType = SOCK_STREAM;
2859 - if (LogFile != -1) {
2860 - close(LogFile);
2861 - LogFile = -1;
2863 - goto retry;
2864 - } else {
2865 - if (LogFile != -1) {
2866 - close(LogFile);
2867 - LogFile = -1;
2870 + if (connect(LogFile, &SyslogAddr, sizeof(SyslogAddr) -
2871 + sizeof(SyslogAddr.sa_data) + strlen(SyslogAddr.sa_data)) != -1)
2873 + connected = 1;
2874 + } else if (logType == SOCK_DGRAM) {
2875 + logType = SOCK_STREAM;
2876 + if (LogFile != -1) {
2877 + close(LogFile);
2878 + LogFile = -1;
2880 + goto retry;
2881 + } else {
2882 + if (LogFile != -1) {
2883 + close(LogFile);
2884 + LogFile = -1;
2889 - UNLOCK;
2890 + DONE:
2891 + __UCLIBC_MUTEX_UNLOCK(mylock);
2895 @@ -335,10 +327,10 @@ int setlogmask(int pmask)
2896 int omask;
2898 omask = LogMask;
2899 - LOCK;
2900 + __UCLIBC_MUTEX_LOCK(mylock);
2901 if (pmask != 0)
2902 - LogMask = pmask;
2903 - UNLOCK;
2904 + LogMask = pmask;
2905 + __UCLIBC_MUTEX_UNLOCK(mylock);
2906 return (omask);
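A second recurring change, visible in openlog() above and in several other files in this patch, is that early "UNLOCK; return;" exits become "goto DONE;" so each function unlocks in exactly one place. That is more than style: as the patch name suggests, the new lock macro is expected to pair the pthread lock with a deferred-cancellation cleanup push that the matching unlock macro pops, so the two have to appear once each in the same lexical scope, and every exit path has to funnel through the single unlock. A small standalone illustration of the shape, with plain pthreads and an invented helper:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mylock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical example, not from uClibc: read one line of a file
 * while holding the lock, with a single unlock point at DONE. */
int read_first_line(const char *path, char *buf, int len)
{
	int rv = -1;                 /* assume failure */
	FILE *f;

	pthread_mutex_lock(&mylock);
	f = fopen(path, "r");
	if (f == NULL)
		goto DONE;           /* no early return past the lock */
	if (fgets(buf, len, f) != NULL)
		rv = 0;
	fclose(f);
 DONE:
	pthread_mutex_unlock(&mylock);
	return rv;
}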
2909 diff --git a/libc/misc/time/time.c b/libc/misc/time/time.c
2910 index f43bb8a..6165a52 100644
2911 --- a/libc/misc/time/time.c
2912 +++ b/libc/misc/time/time.c
2913 @@ -143,6 +143,8 @@
2914 #include <locale.h>
2915 #include <bits/uClibc_uintmaxtostr.h>
2917 +#include <bits/uClibc_mutex.h>
2919 #ifdef __UCLIBC_HAS_XLOCALE__
2920 #include <xlocale.h>
2921 #endif
2922 @@ -191,21 +193,7 @@ typedef struct {
2923 char tzname[TZNAME_MAX+1];
2924 } rule_struct;
2926 -#ifdef __UCLIBC_HAS_THREADS__
2928 -#include <pthread.h>
2930 -extern pthread_mutex_t _time_tzlock;
2932 -#define TZLOCK __pthread_mutex_lock(&_time_tzlock)
2933 -#define TZUNLOCK __pthread_mutex_unlock(&_time_tzlock)
2935 -#else
2937 -#define TZLOCK ((void) 0)
2938 -#define TZUNLOCK ((void) 0)
2940 -#endif
2941 +__UCLIBC_MUTEX_EXTERN(_time_tzlock);
2943 extern rule_struct _time_tzinfo[2];
2945 @@ -542,13 +530,13 @@ struct tm *localtime(const time_t *timer
2946 struct tm *localtime_r(register const time_t *__restrict timer,
2947 register struct tm *__restrict result)
2949 - TZLOCK;
2950 + __UCLIBC_MUTEX_LOCK(_time_tzlock);
2952 tzset();
2954 __time_localtime_tzi(timer, result, _time_tzinfo);
2956 - TZUNLOCK;
2957 + __UCLIBC_MUTEX_UNLOCK(_time_tzlock);
2959 return result;
2961 @@ -1037,7 +1025,7 @@ size_t __XL(strftime)(char *__restrict s
2962 goto LOOP;
2965 - o = spec + 26; /* set to "????" */
2966 + o = ((const char *) spec) + 26; /* set to "????" */
2967 if ((code & MASK_SPEC) == CALC_SPEC) {
2969 if (*p == 's') {
2970 @@ -1073,17 +1061,15 @@ size_t __XL(strftime)(char *__restrict s
2972 #ifdef __UCLIBC_HAS_TM_EXTENSIONS__
2974 -#define RSP_TZUNLOCK ((void) 0)
2975 #define RSP_TZNAME timeptr->tm_zone
2976 #define RSP_GMT_OFFSET (-timeptr->tm_gmtoff)
2978 #else
2980 -#define RSP_TZUNLOCK TZUNLOCK
2981 #define RSP_TZNAME rsp->tzname
2982 #define RSP_GMT_OFFSET rsp->gmt_offset
2984 - TZLOCK;
2985 + __UCLIBC_MUTEX_LOCK(_time_tzlock);
2987 rsp = _time_tzinfo;
2988 if (timeptr->tm_isdst > 0) {
2989 @@ -1114,15 +1100,17 @@ size_t __XL(strftime)(char *__restrict s
2991 #endif
2992 o_count = SIZE_MAX;
2993 - RSP_TZUNLOCK;
2994 +/* RSP_TZUNLOCK; */
2995 +#ifdef __UCLIBC_HAS_TM_EXTENSIONS__
2996 goto OUTPUT;
2997 +#endif
2998 } else { /* z */
2999 *s = '+';
3000 if ((tzo = -RSP_GMT_OFFSET) < 0) {
3001 tzo = -tzo;
3002 *s = '-';
3004 - RSP_TZUNLOCK;
3005 +/* RSP_TZUNLOCK; */
3006 ++s;
3007 --count;
3009 @@ -1131,7 +1119,13 @@ size_t __XL(strftime)(char *__restrict s
3011 i = 16 + 6; /* 0-fill, width = 4 */
3014 +#ifdef __UCLIBC_HAS_TM_EXTENSIONS__
3015 +#else
3016 + __UCLIBC_MUTEX_UNLOCK(_time_tzlock);
3017 + if (*p == 'Z') {
3018 + goto OUTPUT;
3020 +#endif
3021 } else {
3022 /* TODO: don't need year for U, W */
3023 for (i=0 ; i < 3 ; i++) {
3024 @@ -1664,9 +1658,7 @@ int daylight = 0;
3025 long timezone = 0;
3026 char *tzname[2] = { (char *) UTC, (char *) (UTC-1) };
3028 -#ifdef __UCLIBC_HAS_THREADS__
3029 -pthread_mutex_t _time_tzlock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
3030 -#endif
3031 +__UCLIBC_MUTEX_INIT(_time_tzlock, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP);
3033 rule_struct _time_tzinfo[2];
3035 @@ -1796,7 +1788,7 @@ void tzset(void)
3036 static char oldval[TZ_BUFLEN]; /* BSS-zero'd. */
3037 #endif /* __UCLIBC_HAS_TZ_CACHING__ */
3039 - TZLOCK;
3040 + __UCLIBC_MUTEX_LOCK(_time_tzlock);
3042 e = getenv(TZ); /* TZ env var always takes precedence. */
3044 @@ -1962,10 +1954,10 @@ void tzset(void)
3045 daylight = !!_time_tzinfo[1].tzname[0];
3046 timezone = _time_tzinfo[0].gmt_offset;
3048 -#if defined(__UCLIBC_HAS_TZ_FILE__)
3049 +#if defined(__UCLIBC_HAS_TZ_FILE__) || defined(__UCLIBC_HAS_TZ_CACHING__)
3050 FAST_DONE:
3051 #endif
3052 - TZUNLOCK;
3053 + __UCLIBC_MUTEX_UNLOCK(_time_tzlock);
3056 #endif
3057 @@ -2167,13 +2159,13 @@ time_t _time_mktime(struct tm *timeptr,
3059 time_t t;
3061 - TZLOCK;
3062 + __UCLIBC_MUTEX_LOCK(_time_tzlock);
3064 tzset();
3066 t = _time_mktime_tzi(timeptr, store_on_success, _time_tzinfo);
3068 - TZUNLOCK;
3069 + __UCLIBC_MUTEX_UNLOCK(_time_tzlock);
3071 return t;
3073 diff --git a/libc/misc/ttyent/getttyent.c b/libc/misc/ttyent/getttyent.c
3074 index 6e2fbd2..c85c73a 100644
3075 --- a/libc/misc/ttyent/getttyent.c
3076 +++ b/libc/misc/ttyent/getttyent.c
3077 @@ -35,9 +35,6 @@
3078 #include <ctype.h>
3079 #include <string.h>
3080 #include <stdlib.h>
3081 -#ifdef __UCLIBC_HAS_THREADS__
3082 -#include <pthread.h>
3083 -#endif
3085 static char zapchar;
3086 static FILE *tf;
3087 @@ -50,8 +47,8 @@ struct ttyent * getttynam(const char *tt
3089 setttyent();
3090 while ((t = getttyent()))
3091 - if (!strcmp(tty, t->ty_name))
3092 - break;
3093 + if (!strcmp(tty, t->ty_name))
3094 + break;
3095 endttyent();
3096 return (t);
3098 @@ -67,27 +64,27 @@ static char * skip(register char *p)
3099 register int c, q;
3101 for (q = 0, t = p; (c = *p) != '\0'; p++) {
3102 - if (c == '"') {
3103 - q ^= QUOTED; /* obscure, but nice */
3104 - continue;
3106 - if (q == QUOTED && *p == '\\' && *(p+1) == '"')
3107 - p++;
3108 - *t++ = *p;
3109 - if (q == QUOTED)
3110 - continue;
3111 - if (c == '#') {
3112 - zapchar = c;
3113 - *p = 0;
3114 - break;
3116 - if (c == '\t' || c == ' ' || c == '\n') {
3117 - zapchar = c;
3118 - *p++ = 0;
3119 - while ((c = *p) == '\t' || c == ' ' || c == '\n')
3120 - p++;
3121 - break;
3123 + if (c == '"') {
3124 + q ^= QUOTED; /* obscure, but nice */
3125 + continue;
3127 + if (q == QUOTED && *p == '\\' && *(p+1) == '"')
3128 + p++;
3129 + *t++ = *p;
3130 + if (q == QUOTED)
3131 + continue;
3132 + if (c == '#') {
3133 + zapchar = c;
3134 + *p = 0;
3135 + break;
3137 + if (c == '\t' || c == ' ' || c == '\n') {
3138 + zapchar = c;
3139 + *p++ = 0;
3140 + while ((c = *p) == '\t' || c == ' ' || c == '\n')
3141 + p++;
3142 + break;
3145 *--t = '\0';
3146 return (p);
3147 @@ -104,46 +101,46 @@ struct ttyent * getttyent(void)
3148 register int c;
3149 register char *p;
3150 static char *line = NULL;
3151 + struct ttyent *retval = NULL;
3153 if (!tf && !setttyent())
3154 - return (NULL);
3155 + return (NULL);
3157 if (!line) {
3158 - line = malloc(BUFSIZ);
3159 + line = malloc(BUFSIZ);
3160 if (!line)
3161 abort();
3164 - __STDIO_ALWAYS_THREADLOCK(tf);
3165 + __STDIO_ALWAYS_THREADLOCK(tf);
3167 for (;;) {
3168 - if (!fgets_unlocked(p = line, BUFSIZ, tf)) {
3169 - __STDIO_ALWAYS_THREADUNLOCK(tf);
3170 - return (NULL);
3172 - /* skip lines that are too big */
3173 - if (!index(p, '\n')) {
3174 - while ((c = getc_unlocked(tf)) != '\n' && c != EOF)
3176 - continue;
3178 - while (isspace(*p))
3179 - ++p;
3180 - if (*p && *p != '#')
3181 - break;
3182 + if (!fgets_unlocked(p = line, BUFSIZ, tf)) {
3183 + goto DONE;
3185 + /* skip lines that are too big */
3186 + if (!index(p, '\n')) {
3187 + while ((c = getc_unlocked(tf)) != '\n' && c != EOF)
3189 + continue;
3191 + while (isspace(*p))
3192 + ++p;
3193 + if (*p && *p != '#')
3194 + break;
3197 zapchar = 0;
3198 tty.ty_name = p;
3199 p = skip(p);
3200 if (!*(tty.ty_getty = p))
3201 - tty.ty_getty = tty.ty_type = NULL;
3202 + tty.ty_getty = tty.ty_type = NULL;
3203 else {
3204 - p = skip(p);
3205 - if (!*(tty.ty_type = p))
3206 - tty.ty_type = NULL;
3207 - else
3208 - p = skip(p);
3209 + p = skip(p);
3210 + if (!*(tty.ty_type = p))
3211 + tty.ty_type = NULL;
3212 + else
3213 + p = skip(p);
3215 tty.ty_status = 0;
3216 tty.ty_window = NULL;
3217 @@ -151,43 +148,45 @@ struct ttyent * getttyent(void)
3218 #define scmp(e) !strncmp(p, e, sizeof(e) - 1) && isspace(p[sizeof(e) - 1])
3219 #define vcmp(e) !strncmp(p, e, sizeof(e) - 1) && p[sizeof(e) - 1] == '='
3220 for (; *p; p = skip(p)) {
3221 - if (scmp(_TTYS_OFF))
3222 - tty.ty_status &= ~TTY_ON;
3223 - else if (scmp(_TTYS_ON))
3224 - tty.ty_status |= TTY_ON;
3225 - else if (scmp(_TTYS_SECURE))
3226 - tty.ty_status |= TTY_SECURE;
3227 - else if (vcmp(_TTYS_WINDOW))
3228 - tty.ty_window = value(p);
3229 - else
3230 - break;
3231 + if (scmp(_TTYS_OFF))
3232 + tty.ty_status &= ~TTY_ON;
3233 + else if (scmp(_TTYS_ON))
3234 + tty.ty_status |= TTY_ON;
3235 + else if (scmp(_TTYS_SECURE))
3236 + tty.ty_status |= TTY_SECURE;
3237 + else if (vcmp(_TTYS_WINDOW))
3238 + tty.ty_window = value(p);
3239 + else
3240 + break;
3242 - /* We can release the lock only here since `zapchar' is global. */
3243 - __STDIO_ALWAYS_THREADUNLOCK(tf);
3245 if (zapchar == '#' || *p == '#')
3246 - while ((c = *++p) == ' ' || c == '\t')
3248 + while ((c = *++p) == ' ' || c == '\t')
3250 tty.ty_comment = p;
3251 if (*p == 0)
3252 - tty.ty_comment = 0;
3253 + tty.ty_comment = 0;
3254 if ((p = index(p, '\n')))
3255 - *p = '\0';
3256 - return (&tty);
3257 + *p = '\0';
3258 + retval = &tty;
3260 + DONE:
3261 + __STDIO_ALWAYS_THREADUNLOCK(tf);
3262 + return retval;
3265 int setttyent(void)
3268 if (tf) {
3269 - rewind(tf);
3270 - return (1);
3271 + rewind(tf);
3272 + return (1);
3273 } else if ((tf = fopen(_PATH_TTYS, "r"))) {
3274 - /* We do the locking ourselves. */
3275 + /* We do the locking ourselves. */
3276 #ifdef __UCLIBC_HAS_THREADS__
3277 - __fsetlocking (tf, FSETLOCKING_BYCALLER);
3278 + __fsetlocking (tf, FSETLOCKING_BYCALLER);
3279 #endif
3280 - return (1);
3281 + return (1);
3283 return (0);
3285 @@ -197,9 +196,9 @@ int endttyent(void)
3286 int rval;
3288 if (tf) {
3289 - rval = !(fclose(tf) == EOF);
3290 - tf = NULL;
3291 - return (rval);
3292 + rval = !(fclose(tf) == EOF);
3293 + tf = NULL;
3294 + return (rval);
3296 return (1);
3298 diff --git a/libc/misc/utmp/utent.c b/libc/misc/utmp/utent.c
3299 index c1d8d6f..0fc6df4 100644
3300 --- a/libc/misc/utmp/utent.c
3301 +++ b/libc/misc/utmp/utent.c
3302 @@ -20,19 +20,9 @@
3303 #include <string.h>
3304 #include <utmp.h>
3306 +#include <bits/uClibc_mutex.h>
3309 -#ifdef __UCLIBC_HAS_THREADS__
3310 -#include <pthread.h>
3311 -static pthread_mutex_t utmplock = PTHREAD_MUTEX_INITIALIZER;
3312 -# define LOCK __pthread_mutex_lock(&utmplock)
3313 -# define UNLOCK __pthread_mutex_unlock(&utmplock)
3314 -#else
3315 -# define LOCK
3316 -# define UNLOCK
3317 -#endif
3320 +__UCLIBC_MUTEX_STATIC(utmplock, PTHREAD_MUTEX_INITIALIZER);
3322 /* Some global crap */
3323 static int static_fd = -1;
3324 @@ -46,19 +36,19 @@ static struct utmp *__getutent(int utmp_
3327 if (utmp_fd == -1) {
3328 - setutent();
3329 + setutent();
3331 if (utmp_fd == -1) {
3332 - return NULL;
3333 + return NULL;
3336 - LOCK;
3337 + __UCLIBC_MUTEX_LOCK(utmplock);
3338 if (read(utmp_fd, (char *) &static_utmp, sizeof(struct utmp)) != sizeof(struct utmp))
3340 - return NULL;
3343 + return NULL;
3346 - UNLOCK;
3347 + __UCLIBC_MUTEX_UNLOCK(utmplock);
3348 return &static_utmp;
3351 @@ -66,39 +56,39 @@ void setutent(void)
3353 int ret;
3355 - LOCK;
3356 + __UCLIBC_MUTEX_LOCK(utmplock);
3357 if (static_fd == -1) {
3358 - if ((static_fd = open(static_ut_name, O_RDWR)) < 0) {
3359 - if ((static_fd = open(static_ut_name, O_RDONLY)) < 0) {
3360 - goto bummer;
3363 - /* Make sure the file will be closed on exec() */
3364 - ret = fcntl(static_fd, F_GETFD, 0);
3365 - if (ret >= 0) {
3366 - ret = fcntl(static_fd, F_GETFD, 0);
3368 - if (ret < 0) {
3369 -bummer:
3370 - UNLOCK;
3371 - static_fd = -1;
3372 - close(static_fd);
3373 - return;
3375 + if ((static_fd = open(static_ut_name, O_RDWR)) < 0) {
3376 + if ((static_fd = open(static_ut_name, O_RDONLY)) < 0) {
3377 + goto bummer;
3380 + /* Make sure the file will be closed on exec() */
3381 + ret = fcntl(static_fd, F_GETFD, 0);
3382 + if (ret >= 0) {
3383 + ret = fcntl(static_fd, F_GETFD, 0);
3385 + if (ret < 0) {
3386 + bummer:
3387 + close(static_fd);
3388 + static_fd = -1;
3389 + goto DONE;
3392 lseek(static_fd, 0, SEEK_SET);
3393 - UNLOCK;
3394 + DONE:
3395 + __UCLIBC_MUTEX_UNLOCK(utmplock);
3396 return;
3399 void endutent(void)
3401 - LOCK;
3402 + __UCLIBC_MUTEX_LOCK(utmplock);
3403 if (static_fd != -1) {
3404 - close(static_fd);
3405 + close(static_fd);
3407 static_fd = -1;
3408 - UNLOCK;
3409 + __UCLIBC_MUTEX_UNLOCK(utmplock);
3412 /* Locking is done in __getutent */
3413 @@ -113,22 +103,22 @@ struct utmp *getutid (const struct utmp
3414 struct utmp *lutmp;
3416 while ((lutmp = __getutent(static_fd)) != NULL) {
3417 - if ( (utmp_entry->ut_type == RUN_LVL ||
3418 - utmp_entry->ut_type == BOOT_TIME ||
3419 - utmp_entry->ut_type == NEW_TIME ||
3420 - utmp_entry->ut_type == OLD_TIME) &&
3421 - lutmp->ut_type == utmp_entry->ut_type)
3423 - return lutmp;
3425 - if ( (utmp_entry->ut_type == INIT_PROCESS ||
3426 - utmp_entry->ut_type == DEAD_PROCESS ||
3427 - utmp_entry->ut_type == LOGIN_PROCESS ||
3428 - utmp_entry->ut_type == USER_PROCESS) &&
3429 - !strncmp(lutmp->ut_id, utmp_entry->ut_id, sizeof(lutmp->ut_id)))
3431 - return lutmp;
3433 + if ( (utmp_entry->ut_type == RUN_LVL ||
3434 + utmp_entry->ut_type == BOOT_TIME ||
3435 + utmp_entry->ut_type == NEW_TIME ||
3436 + utmp_entry->ut_type == OLD_TIME) &&
3437 + lutmp->ut_type == utmp_entry->ut_type)
3439 + return lutmp;
3441 + if ( (utmp_entry->ut_type == INIT_PROCESS ||
3442 + utmp_entry->ut_type == DEAD_PROCESS ||
3443 + utmp_entry->ut_type == LOGIN_PROCESS ||
3444 + utmp_entry->ut_type == USER_PROCESS) &&
3445 + !strncmp(lutmp->ut_id, utmp_entry->ut_id, sizeof(lutmp->ut_id)))
3447 + return lutmp;
3451 return NULL;
3452 @@ -140,11 +130,11 @@ struct utmp *getutline(const struct utmp
3453 struct utmp *lutmp;
3455 while ((lutmp = __getutent(static_fd)) != NULL) {
3456 - if ((lutmp->ut_type == USER_PROCESS || lutmp->ut_type == LOGIN_PROCESS) &&
3457 - !strcmp(lutmp->ut_line, utmp_entry->ut_line))
3459 - return lutmp;
3461 + if ((lutmp->ut_type == USER_PROCESS || lutmp->ut_type == LOGIN_PROCESS) &&
3462 + !strcmp(lutmp->ut_line, utmp_entry->ut_line))
3464 + return lutmp;
3468 return NULL;
3469 @@ -152,42 +142,42 @@ struct utmp *getutline(const struct utmp
3471 struct utmp *pututline (const struct utmp *utmp_entry)
3473 - LOCK;
3474 + __UCLIBC_MUTEX_LOCK(utmplock);
3475 /* Ignore the return value. That way, if they've already positioned
3476 the file pointer where they want it, everything will work out. */
3477 lseek(static_fd, (off_t) - sizeof(struct utmp), SEEK_CUR);
3479 if (getutid(utmp_entry) != NULL) {
3480 - lseek(static_fd, (off_t) - sizeof(struct utmp), SEEK_CUR);
3481 - if (write(static_fd, utmp_entry, sizeof(struct utmp)) != sizeof(struct utmp))
3482 - return NULL;
3483 + lseek(static_fd, (off_t) - sizeof(struct utmp), SEEK_CUR);
3484 + if (write(static_fd, utmp_entry, sizeof(struct utmp)) != sizeof(struct utmp))
3485 + return NULL;
3486 } else {
3487 - lseek(static_fd, (off_t) 0, SEEK_END);
3488 - if (write(static_fd, utmp_entry, sizeof(struct utmp)) != sizeof(struct utmp))
3489 - return NULL;
3490 + lseek(static_fd, (off_t) 0, SEEK_END);
3491 + if (write(static_fd, utmp_entry, sizeof(struct utmp)) != sizeof(struct utmp))
3492 + return NULL;
3495 - UNLOCK;
3496 + __UCLIBC_MUTEX_UNLOCK(utmplock);
3497 return (struct utmp *)utmp_entry;
3500 int utmpname (const char *new_ut_name)
3502 - LOCK;
3503 + __UCLIBC_MUTEX_LOCK(utmplock);
3504 if (new_ut_name != NULL) {
3505 - if (static_ut_name != default_file_name)
3506 - free((char *)static_ut_name);
3507 - static_ut_name = strdup(new_ut_name);
3508 - if (static_ut_name == NULL) {
3509 - /* We should probably whine about out-of-memory
3510 - * errors here... Instead just reset to the default */
3511 - static_ut_name = default_file_name;
3513 + if (static_ut_name != default_file_name)
3514 + free((char *)static_ut_name);
3515 + static_ut_name = strdup(new_ut_name);
3516 + if (static_ut_name == NULL) {
3517 + /* We should probably whine about out-of-memory
3518 + * errors here... Instead just reset to the default */
3519 + static_ut_name = default_file_name;
3523 if (static_fd != -1)
3524 - close(static_fd);
3525 - UNLOCK;
3526 + close(static_fd);
3527 + __UCLIBC_MUTEX_UNLOCK(utmplock);
3528 return 0;
3531 diff --git a/libc/misc/wchar/wstdio.c b/libc/misc/wchar/wstdio.c
3532 index b49494f..408c57a 100644
3533 --- a/libc/misc/wchar/wstdio.c
3534 +++ b/libc/misc/wchar/wstdio.c
3535 @@ -82,9 +82,6 @@ strong_alias(NAME,NAME##_unlocked) \
3536 void NAME PARAMS
3537 #endif
3539 -#define __STDIO_THREADLOCK_OPENLIST
3540 -#define __STDIO_THREADUNLOCK_OPENLIST
3542 #else /* __UCLIBC_HAS_THREADS__ */
3544 #include <pthread.h>
3545 @@ -112,15 +109,6 @@ void NAME PARAMS \
3547 void NAME##_unlocked PARAMS
3549 -#define __STDIO_THREADLOCK_OPENLIST \
3550 - __pthread_mutex_lock(&_stdio_openlist_lock)
3552 -#define __STDIO_THREADUNLOCK_OPENLIST \
3553 - __pthread_mutex_unlock(&_stdio_openlist_lock)
3555 -#define __STDIO_THREADTRYLOCK_OPENLIST \
3556 - __pthread_mutex_trylock(&_stdio_openlist_lock)
3558 #endif /* __UCLIBC_HAS_THREADS__ */
3560 #ifndef __STDIO_BUFFERS
3561 diff --git a/libc/pwd_grp/lckpwdf.c b/libc/pwd_grp/lckpwdf.c
3562 index 6b9c251..063fed4 100644
3563 --- a/libc/pwd_grp/lckpwdf.c
3564 +++ b/libc/pwd_grp/lckpwdf.c
3565 @@ -27,15 +27,9 @@
3566 #include <sys/file.h>
3567 #include <paths.h>
3569 -#ifdef __UCLIBC_HAS_THREADS__
3570 -#include <pthread.h>
3571 -static pthread_mutex_t mylock = PTHREAD_MUTEX_INITIALIZER;
3572 -# define LOCK __pthread_mutex_lock(&mylock)
3573 -# define UNLOCK __pthread_mutex_unlock(&mylock);
3574 -#else
3575 -# define LOCK
3576 -# define UNLOCK
3577 -#endif
3578 +#include <bits/uClibc_mutex.h>
3580 +__UCLIBC_MUTEX_STATIC(mylock, PTHREAD_MUTEX_INITIALIZER);
3582 /* How long to wait for getting the lock before returning with an
3583 error. */
3584 @@ -57,18 +51,18 @@ int lckpwdf (void)
3585 struct sigaction new_act; /* New signal action. */
3586 struct flock fl; /* Information struct for locking. */
3587 int result;
3588 + int rv = -1;
3590 if (lock_fd != -1)
3591 /* Still locked by own process. */
3592 return -1;
3594 - LOCK;
3595 + __UCLIBC_MUTEX_LOCK(mylock);
3597 lock_fd = open (_PATH_PASSWD, O_WRONLY);
3598 if (lock_fd == -1) {
3599 /* Cannot create lock file. */
3600 - UNLOCK;
3601 - return -1;
3602 + goto DONE;
3605 /* Make sure file gets correctly closed when process finished. */
3606 @@ -77,16 +71,14 @@ int lckpwdf (void)
3607 /* Cannot get file flags. */
3608 close(lock_fd);
3609 lock_fd = -1;
3610 - UNLOCK;
3611 - return -1;
3612 + goto DONE;
3614 flags |= FD_CLOEXEC; /* Close on exit. */
3615 if (fcntl (lock_fd, F_SETFD, flags) < 0) {
3616 /* Cannot set new flags. */
3617 close(lock_fd);
3618 lock_fd = -1;
3619 - UNLOCK;
3620 - return -1;
3621 + goto DONE;
3624 /* Now we have to get exclusive write access. Since multiple
3625 @@ -107,8 +99,7 @@ int lckpwdf (void)
3626 /* Cannot install signal handler. */
3627 close(lock_fd);
3628 lock_fd = -1;
3629 - UNLOCK;
3630 - return -1;
3631 + goto DONE;
3634 /* Now make sure the alarm signal is not blocked. */
3635 @@ -118,8 +109,7 @@ int lckpwdf (void)
3636 sigaction (SIGALRM, &saved_act, NULL);
3637 close(lock_fd);
3638 lock_fd = -1;
3639 - UNLOCK;
3640 - return -1;
3641 + goto DONE;
3644 /* Start timer. If we cannot get the lock in the specified time we
3645 @@ -146,12 +136,14 @@ int lckpwdf (void)
3646 if (result < 0) {
3647 close(lock_fd);
3648 lock_fd = -1;
3649 - UNLOCK;
3650 - return -1;
3651 + goto DONE;
3654 - UNLOCK;
3655 - return 0;
3656 + rv = 0;
3658 + DONE:
3659 + __UCLIBC_MUTEX_UNLOCK(mylock);
3660 + return rv;
3664 @@ -164,11 +156,11 @@ int ulckpwdf (void)
3665 result = -1;
3667 else {
3668 - LOCK;
3669 + __UCLIBC_MUTEX_LOCK(mylock);
3670 result = close (lock_fd);
3671 /* Mark descriptor as unused. */
3672 lock_fd = -1;
3673 - UNLOCK;
3674 + __UCLIBC_MUTEX_UNLOCK(mylock);
3677 return result;
3678 diff --git a/libc/pwd_grp/pwd_grp.c b/libc/pwd_grp/pwd_grp.c
3679 index 91c0d83..a302c7c 100644
3680 --- a/libc/pwd_grp/pwd_grp.c
3681 +++ b/libc/pwd_grp/pwd_grp.c
3682 @@ -42,9 +42,8 @@
3683 #include <pwd.h>
3684 #include <grp.h>
3685 #include <shadow.h>
3686 -#ifdef __UCLIBC_HAS_THREADS__
3687 -#include <pthread.h>
3688 -#endif
3690 +#include <bits/uClibc_mutex.h>
3692 /**********************************************************************/
3693 /* Sizes for staticly allocated buffers. */
3694 @@ -445,34 +444,27 @@ int getpw(uid_t uid, char *buf)
3695 /**********************************************************************/
3696 #ifdef L_getpwent_r
3698 -#ifdef __UCLIBC_HAS_THREADS__
3699 -static pthread_mutex_t mylock = PTHREAD_MUTEX_INITIALIZER;
3700 -# define LOCK __pthread_mutex_lock(&mylock)
3701 -# define UNLOCK __pthread_mutex_unlock(&mylock);
3702 -#else
3703 -# define LOCK ((void) 0)
3704 -# define UNLOCK ((void) 0)
3705 -#endif
3706 +__UCLIBC_MUTEX_STATIC(mylock, PTHREAD_MUTEX_INITIALIZER);
3708 static FILE *pwf /*= NULL*/;
3710 void setpwent(void)
3712 - LOCK;
3713 + __UCLIBC_MUTEX_LOCK(mylock);
3714 if (pwf) {
3715 rewind(pwf);
3717 - UNLOCK;
3718 + __UCLIBC_MUTEX_UNLOCK(mylock);
3721 void endpwent(void)
3723 - LOCK;
3724 + __UCLIBC_MUTEX_LOCK(mylock);
3725 if (pwf) {
3726 fclose(pwf);
3727 pwf = NULL;
3729 - UNLOCK;
3730 + __UCLIBC_MUTEX_UNLOCK(mylock);
3734 @@ -482,7 +474,7 @@ int getpwent_r(struct passwd *__restrict
3736 int rv;
3738 - LOCK;
3739 + __UCLIBC_MUTEX_LOCK(mylock);
3741 *result = NULL; /* In case of error... */
3743 @@ -500,7 +492,7 @@ int getpwent_r(struct passwd *__restrict
3746 ERR:
3747 - UNLOCK;
3748 + __UCLIBC_MUTEX_UNLOCK(mylock);
3750 return rv;
3752 @@ -509,34 +501,27 @@ int getpwent_r(struct passwd *__restrict
3753 /**********************************************************************/
3754 #ifdef L_getgrent_r
3756 -#ifdef __UCLIBC_HAS_THREADS__
3757 -static pthread_mutex_t mylock = PTHREAD_MUTEX_INITIALIZER;
3758 -# define LOCK __pthread_mutex_lock(&mylock)
3759 -# define UNLOCK __pthread_mutex_unlock(&mylock);
3760 -#else
3761 -# define LOCK ((void) 0)
3762 -# define UNLOCK ((void) 0)
3763 -#endif
3764 +__UCLIBC_MUTEX_STATIC(mylock, PTHREAD_MUTEX_INITIALIZER);
3766 static FILE *grf /*= NULL*/;
3768 void setgrent(void)
3770 - LOCK;
3771 + __UCLIBC_MUTEX_LOCK(mylock);
3772 if (grf) {
3773 rewind(grf);
3775 - UNLOCK;
3776 + __UCLIBC_MUTEX_UNLOCK(mylock);
3779 void endgrent(void)
3781 - LOCK;
3782 + __UCLIBC_MUTEX_LOCK(mylock);
3783 if (grf) {
3784 fclose(grf);
3785 grf = NULL;
3787 - UNLOCK;
3788 + __UCLIBC_MUTEX_UNLOCK(mylock);
3791 int getgrent_r(struct group *__restrict resultbuf,
3792 @@ -545,7 +530,7 @@ int getgrent_r(struct group *__restrict
3794 int rv;
3796 - LOCK;
3797 + __UCLIBC_MUTEX_LOCK(mylock);
3799 *result = NULL; /* In case of error... */
3801 @@ -563,7 +548,7 @@ int getgrent_r(struct group *__restrict
3804 ERR:
3805 - UNLOCK;
3806 + __UCLIBC_MUTEX_UNLOCK(mylock);
3808 return rv;
3810 @@ -572,34 +557,27 @@ int getgrent_r(struct group *__restrict
3811 /**********************************************************************/
3812 #ifdef L_getspent_r
3814 -#ifdef __UCLIBC_HAS_THREADS__
3815 -static pthread_mutex_t mylock = PTHREAD_MUTEX_INITIALIZER;
3816 -# define LOCK __pthread_mutex_lock(&mylock)
3817 -# define UNLOCK __pthread_mutex_unlock(&mylock);
3818 -#else
3819 -# define LOCK ((void) 0)
3820 -# define UNLOCK ((void) 0)
3821 -#endif
3822 +__UCLIBC_MUTEX_STATIC(mylock, PTHREAD_MUTEX_INITIALIZER);
3824 static FILE *spf /*= NULL*/;
3826 void setspent(void)
3828 - LOCK;
3829 + __UCLIBC_MUTEX_LOCK(mylock);
3830 if (spf) {
3831 rewind(spf);
3833 - UNLOCK;
3834 + __UCLIBC_MUTEX_UNLOCK(mylock);
3837 void endspent(void)
3839 - LOCK;
3840 + __UCLIBC_MUTEX_LOCK(mylock);
3841 if (spf) {
3842 fclose(spf);
3843 spf = NULL;
3845 - UNLOCK;
3846 + __UCLIBC_MUTEX_UNLOCK(mylock);
3849 int getspent_r(struct spwd *resultbuf, char *buffer,
3850 @@ -607,7 +585,7 @@ int getspent_r(struct spwd *resultbuf, c
3852 int rv;
3854 - LOCK;
3855 + __UCLIBC_MUTEX_LOCK(mylock);
3857 *result = NULL; /* In case of error... */
3859 @@ -625,7 +603,7 @@ int getspent_r(struct spwd *resultbuf, c
3862 ERR:
3863 - UNLOCK;
3864 + __UCLIBC_MUTEX_UNLOCK(mylock);
3866 return rv;
3868 diff --git a/libc/stdio/_READ.c b/libc/stdio/_READ.c
3869 index 7d3c38c..fe1bc91 100644
3870 --- a/libc/stdio/_READ.c
3871 +++ b/libc/stdio/_READ.c
3872 @@ -41,7 +41,7 @@ size_t __stdio_READ(register FILE *strea
3873 #warning EINTR?
3874 #endif
3875 /* RETRY: */
3876 - if ((rv = __READ(stream, buf, bufsize)) <= 0) {
3877 + if ((rv = __READ(stream, (char *) buf, bufsize)) <= 0) {
3878 if (rv == 0) {
3879 __STDIO_STREAM_SET_EOF(stream);
3880 } else {
3881 diff --git a/libc/stdio/_WRITE.c b/libc/stdio/_WRITE.c
3882 index d300d39..4131eb7 100644
3883 --- a/libc/stdio/_WRITE.c
3884 +++ b/libc/stdio/_WRITE.c
3885 @@ -47,7 +47,7 @@ size_t __stdio_WRITE(register FILE *stre
3886 return bufsize;
3888 stodo = (todo <= SSIZE_MAX) ? todo : SSIZE_MAX;
3889 - if ((rv = __WRITE(stream, buf, stodo)) >= 0) {
3890 + if ((rv = __WRITE(stream, (char *) buf, stodo)) >= 0) {
3891 #ifdef __UCLIBC_MJN3_ONLY__
3892 #warning TODO: Make custom stream write return check optional.
3893 #endif
3894 diff --git a/libc/stdio/_fopen.c b/libc/stdio/_fopen.c
3895 index f7f5bb6..4984f11 100644
3896 --- a/libc/stdio/_fopen.c
3897 +++ b/libc/stdio/_fopen.c
3898 @@ -194,10 +194,23 @@ FILE *_stdio_fopen(intptr_t fname_or_mod
3899 #endif
3901 #ifdef __STDIO_HAS_OPENLIST
3902 - __STDIO_THREADLOCK_OPENLIST;
3903 - stream->__nextopen = _stdio_openlist; /* New files are inserted at */
3904 - _stdio_openlist = stream; /* the head of the list. */
3905 - __STDIO_THREADUNLOCK_OPENLIST;
3906 +#if defined(__UCLIBC_HAS_THREADS__) && defined(__STDIO_BUFFERS)
3907 + if (!(stream->__modeflags & __FLAG_FREEFILE))
3909 + /* A freopen call so the file was never removed from the list. */
3911 + else
3912 +#endif
3914 + /* We have to lock the del mutex in case another thread wants to fclose()
3915 + * the last file. */
3916 + __STDIO_THREADLOCK_OPENLIST_DEL;
3917 + __STDIO_THREADLOCK_OPENLIST_ADD;
3918 + stream->__nextopen = _stdio_openlist; /* New files are inserted at */
3919 + _stdio_openlist = stream; /* the head of the list. */
3920 + __STDIO_THREADUNLOCK_OPENLIST_ADD;
3921 + __STDIO_THREADUNLOCK_OPENLIST_DEL;
3923 #endif
3925 __STDIO_STREAM_VALIDATE(stream);
3926 diff --git a/libc/stdio/_stdio.c b/libc/stdio/_stdio.c
3927 index 4aae3c4..9cfe02c 100644
3928 --- a/libc/stdio/_stdio.c
3929 +++ b/libc/stdio/_stdio.c
3930 @@ -151,8 +151,12 @@ FILE *__stdout = _stdio_streams + 1; /*
3931 FILE *_stdio_openlist = _stdio_streams;
3933 # ifdef __UCLIBC_HAS_THREADS__
3934 -pthread_mutex_t _stdio_openlist_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
3935 -int _stdio_openlist_delflag = 0;
3936 +__UCLIBC_MUTEX_INIT(_stdio_openlist_add_lock, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP);
3937 +#ifdef __STDIO_BUFFERS
3938 +__UCLIBC_MUTEX_INIT(_stdio_openlist_del_lock, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP);
3939 +volatile int _stdio_openlist_use_count = 0;
3940 +int _stdio_openlist_del_count = 0;
3941 +#endif
3942 # endif
3944 #endif
3945 @@ -162,10 +166,10 @@ int _stdio_openlist_delflag = 0;
3946 /* 2 if threading not initialized and 0 otherwise; */
3947 int _stdio_user_locking = 2;
3949 -void __stdio_init_mutex(pthread_mutex_t *m)
3950 +void __stdio_init_mutex(__UCLIBC_MUTEX_TYPE *m)
3952 - static const pthread_mutex_t __stdio_mutex_initializer
3953 - = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
3954 + const __UCLIBC_MUTEX_STATIC(__stdio_mutex_initializer,
3955 + PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP);
3957 memcpy(m, &__stdio_mutex_initializer, sizeof(__stdio_mutex_initializer));
3959 @@ -184,7 +188,11 @@ void _stdio_term(void)
3960 * locked, then I suppose there is a chance that a pointer in the
3961 * chain might be corrupt due to a partial store.
3963 - __stdio_init_mutex(&_stdio_openlist_lock);
3964 + __stdio_init_mutex(&_stdio_openlist_add_lock);
3965 +#warning check
3966 +#ifdef __STDIO_BUFFERS
3967 + __stdio_init_mutex(&_stdio_openlist_del_lock);
3968 +#endif
3970 /* Next we need to worry about the streams themselves. If a stream
3971 * is currently locked, then it may be in an invalid state. So we
3972 @@ -192,7 +200,7 @@ void _stdio_term(void)
3973 * Then we reinitialize the locks.
3975 for (ptr = _stdio_openlist ; ptr ; ptr = ptr->__nextopen ) {
3976 - if (__STDIO_ALWAYS_THREADTRYLOCK(ptr)) {
3977 + if (__STDIO_ALWAYS_THREADTRYLOCK_CANCEL_UNSAFE(ptr)) {
3978 /* The stream is already locked, so we don't want to touch it.
3979 * However, if we have custom streams, we can't just close it
3980 * or leave it locked since a custom stream may be stacked
3981 @@ -258,10 +266,6 @@ void _stdio_init(void)
3982 #error Assumption violated about __MASK_READING and __FLAG_UNGOT
3983 #endif
3985 -#ifdef __UCLIBC_HAS_THREADS__
3986 -#include <pthread.h>
3987 -#endif
3989 #ifndef NDEBUG
3991 void _stdio_validate_FILE(const FILE *stream)
3992 diff --git a/libc/stdio/_stdio.h b/libc/stdio/_stdio.h
3993 index e3c2c58..decf57d 100644
3994 --- a/libc/stdio/_stdio.h
3995 +++ b/libc/stdio/_stdio.h
3996 @@ -22,23 +22,57 @@
3997 #include <wchar.h>
3998 #endif
4000 -#ifdef __UCLIBC_HAS_THREADS__
4001 -#include <pthread.h>
4002 +#include <bits/uClibc_mutex.h>
4004 -#define __STDIO_THREADLOCK_OPENLIST \
4005 - __pthread_mutex_lock(&_stdio_openlist_lock)
4006 +#define __STDIO_THREADLOCK_OPENLIST_ADD \
4007 + __UCLIBC_MUTEX_LOCK(_stdio_openlist_add_lock)
4009 -#define __STDIO_THREADUNLOCK_OPENLIST \
4010 - __pthread_mutex_unlock(&_stdio_openlist_lock)
4011 +#define __STDIO_THREADUNLOCK_OPENLIST_ADD \
4012 + __UCLIBC_MUTEX_UNLOCK(_stdio_openlist_add_lock)
4014 -#define __STDIO_THREADTRYLOCK_OPENLIST \
4015 - __pthread_mutex_trylock(&_stdio_openlist_lock)
4016 +#ifdef __STDIO_BUFFERS
4018 -#else
4019 +#define __STDIO_THREADLOCK_OPENLIST_DEL \
4020 + __UCLIBC_MUTEX_LOCK(_stdio_openlist_del_lock)
4022 +#define __STDIO_THREADUNLOCK_OPENLIST_DEL \
4023 + __UCLIBC_MUTEX_UNLOCK(_stdio_openlist_del_lock)
4025 -#define __STDIO_THREADLOCK_OPENLIST ((void)0)
4026 -#define __STDIO_THREADUNLOCK_OPENLIST ((void)0)
4027 +#define __STDIO_OPENLIST_INC_USE \
4028 +do { \
4029 + __STDIO_THREADLOCK_OPENLIST_DEL; \
4030 + ++_stdio_openlist_use_count; \
4031 + __STDIO_THREADUNLOCK_OPENLIST_DEL; \
4032 +} while (0)
4034 +extern void _stdio_openlist_dec_use(void);
4036 +#define __STDIO_OPENLIST_DEC_USE \
4037 + _stdio_openlist_dec_use()
4039 +#define __STDIO_OPENLIST_INC_DEL_CNT \
4040 +do { \
4041 + __STDIO_THREADLOCK_OPENLIST_DEL; \
4042 + ++_stdio_openlist_del_count; \
4043 + __STDIO_THREADUNLOCK_OPENLIST_DEL; \
4044 +} while (0)
4046 +#define __STDIO_OPENLIST_DEC_DEL_CNT \
4047 +do { \
4048 + __STDIO_THREADLOCK_OPENLIST_DEL; \
4049 + --_stdio_openlist_del_count; \
4050 + __STDIO_THREADUNLOCK_OPENLIST_DEL; \
4051 +} while (0)
4053 +#endif /* __STDIO_BUFFERS */
4055 +#ifndef __STDIO_THREADLOCK_OPENLIST_DEL
4056 +#define __STDIO_THREADLOCK_OPENLIST_DEL ((void)0)
4057 +#define __STDIO_THREADUNLOCK_OPENLIST_DEL ((void)0)
4058 +#define __STDIO_OPENLIST_INC_USE ((void)0)
4059 +#define __STDIO_OPENLIST_DEC_USE ((void)0)
4060 +#define __STDIO_OPENLIST_INC_DEL_CNT ((void)0)
4061 +#define __STDIO_OPENLIST_DEC_DEL_CNT ((void)0)
4062 #endif
4064 #define __UNDEFINED_OR_NONPORTABLE ((void)0)
4065 diff --git a/libc/stdio/fclose.c b/libc/stdio/fclose.c
4066 index 4df2e42..dfababc 100644
4067 --- a/libc/stdio/fclose.c
4068 +++ b/libc/stdio/fclose.c
4069 @@ -12,30 +12,34 @@ int fclose(register FILE *stream)
4070 int rv = 0;
4071 __STDIO_AUTO_THREADLOCK_VAR;
4073 - /* First, remove the file from the open file list. */
4074 -#ifdef __STDIO_HAS_OPENLIST
4076 - register FILE *ptr;
4078 - __STDIO_THREADLOCK_OPENLIST;
4079 - if ((ptr = _stdio_openlist) == stream) {
4080 - _stdio_openlist = stream->__nextopen;
4081 - } else {
4082 - while (ptr) {
4083 - if (ptr->__nextopen == stream) {
4084 - ptr->__nextopen = stream->__nextopen;
4085 - break;
4087 - ptr = ptr->__nextopen;
4090 - __STDIO_THREADUNLOCK_OPENLIST;
4092 - if (!ptr) { /* Did not find stream in the open file list! */
4093 - return EOF;
4096 -#endif
4097 +#warning dead code... but may want to simply check and not remove
4098 +/* #ifdef __STDIO_HAS_OPENLIST */
4099 +/* #if !defined(__UCLIBC_HAS_THREADS__) || !defined(__STDIO_BUFFERS) */
4100 +/* /\* First, remove the file from the open file list. *\/ */
4101 +/* { */
4102 +/* register FILE *ptr; */
4104 +/* __STDIO_THREADLOCK_OPENLIST; */
4105 +/* if ((ptr = _stdio_openlist) == stream) { */
4106 +/* #warning does a mod!!! */
4107 +/* _stdio_openlist = stream->__nextopen; */
4108 +/* } else { */
4109 +/* while (ptr) { */
4110 +/* if (ptr->__nextopen == stream) { */
4111 +/* ptr->__nextopen = stream->__nextopen; */
4112 +/* break; */
4113 +/* } */
4114 +/* ptr = ptr->__nextopen; */
4115 +/* } */
4116 +/* } */
4117 +/* __STDIO_THREADUNLOCK_OPENLIST; */
4119 +/* if (!ptr) { /\* Did not find stream in the open file list! *\/ */
4120 +/* return EOF; */
4121 +/* } */
4122 +/* } */
4123 +/* #endif */
4124 +/* #endif */
4126 __STDIO_AUTO_THREADLOCK(stream);
4128 @@ -80,7 +84,15 @@ int fclose(register FILE *stream)
4129 __STDIO_AUTO_THREADUNLOCK(stream);
4131 __STDIO_STREAM_FREE_BUFFER(stream);
4132 +#warning... inefficient - locks and unlocks twice and walks whole list
4133 +#if defined(__UCLIBC_HAS_THREADS__) && defined(__STDIO_BUFFERS)
4134 + /* inefficient - locks/unlocks twice and walks whole list */
4135 + __STDIO_OPENLIST_INC_USE;
4136 + __STDIO_OPENLIST_INC_DEL_CNT;
4137 + __STDIO_OPENLIST_DEC_USE; /* This will free the file if necessary. */
4138 +#else
4139 __STDIO_STREAM_FREE_FILE(stream);
4140 +#endif
4142 return rv;
4144 diff --git a/libc/stdio/fcloseall.c b/libc/stdio/fcloseall.c
4145 index dbb6000..f62281a 100644
4146 --- a/libc/stdio/fcloseall.c
4147 +++ b/libc/stdio/fcloseall.c
4148 @@ -19,14 +19,34 @@ int fcloseall (void)
4149 #ifdef __STDIO_HAS_OPENLIST
4151 int retval = 0;
4152 + FILE *f;
4154 - __STDIO_THREADLOCK_OPENLIST;
4155 - while (_stdio_openlist) {
4156 - if (fclose(_stdio_openlist)) {
4157 +#warning remove dead code
4158 +/* __STDIO_THREADLOCK_OPENLIST; */
4159 +/* while (_stdio_openlist) { */
4160 +/* if (fclose(_stdio_openlist)) { */
4161 +/* retval = EOF; */
4162 +/* } */
4163 +/* } */
4164 +/* __STDIO_THREADUNLOCK_OPENLIST; */
4166 + __STDIO_OPENLIST_INC_USE;
4168 +#warning should probably have a get_head() operation
4169 + __STDIO_THREADLOCK_OPENLIST_ADD;
4170 + f = _stdio_openlist;
4171 + __STDIO_THREADUNLOCK_OPENLIST_ADD;
4173 + while (f) {
4174 +#warning should probably have a get_next() operation
4175 + FILE *n = f->__nextopen;
4176 + if (fclose(f)) {
4177 retval = EOF;
4179 + f = n;
4181 - __STDIO_THREADUNLOCK_OPENLIST;
4183 + __STDIO_OPENLIST_DEC_USE;
4185 return retval;
4187 diff --git a/libc/stdio/fflush.c b/libc/stdio/fflush.c
4188 index 6baa0ec..66b65cd 100644
4189 --- a/libc/stdio/fflush.c
4190 +++ b/libc/stdio/fflush.c
4191 @@ -20,23 +20,50 @@ weak_alias(__fflush_unlocked,fflush_unlo
4192 weak_alias(__fflush_unlocked,fflush);
4193 #endif
4195 -#ifdef __UCLIBC_HAS_THREADS__
4196 /* Even if the stream is set to user-locking, we still need to lock
4197 * when all (lbf) writing streams are flushed. */
4198 -#define MY_STDIO_THREADLOCK(STREAM) \
4199 - if (_stdio_user_locking != 2) { \
4200 - __STDIO_ALWAYS_THREADLOCK(STREAM); \
4203 -#define MY_STDIO_THREADUNLOCK(STREAM) \
4204 - if (_stdio_user_locking != 2) { \
4205 - __STDIO_ALWAYS_THREADUNLOCK(STREAM); \
4207 -#else
4208 -#define MY_STDIO_THREADLOCK(STREAM) ((void)0)
4209 -#define MY_STDIO_THREADUNLOCK(STREAM) ((void)0)
4210 -#endif
4211 +#define __MY_STDIO_THREADLOCK(__stream) \
4212 + __UCLIBC_MUTEX_CONDITIONAL_LOCK((__stream)->__lock, \
4213 + (_stdio_user_locking != 2))
4215 +#define __MY_STDIO_THREADUNLOCK(__stream) \
4216 + __UCLIBC_MUTEX_CONDITIONAL_UNLOCK((__stream)->__lock, \
4217 + (_stdio_user_locking != 2))
4219 +#if defined(__UCLIBC_HAS_THREADS__) && defined(__STDIO_BUFFERS)
4220 +void _stdio_openlist_dec_use(void)
4222 + __STDIO_THREADLOCK_OPENLIST_DEL;
4223 + if ((_stdio_openlist_use_count == 1) && (_stdio_openlist_del_count > 0)) {
4224 + FILE *p = NULL;
4225 + FILE *n;
4226 + FILE *stream;
4228 + __STDIO_THREADLOCK_OPENLIST_ADD;
4229 + for (stream = _stdio_openlist; stream; stream = n) {
4230 +#warning walk the list and clear out all fclosed()d files
4231 + n = stream->__nextopen;
4232 +#warning fix for nonatomic
4233 + if ((stream->__modeflags & (__FLAG_READONLY|__FLAG_WRITEONLY))
4234 + == (__FLAG_READONLY|__FLAG_WRITEONLY)
4235 + ) { /* The file was closed so remove from the list. */
4236 + if (!p) {
4237 + _stdio_openlist = n;
4238 + } else {
4239 + p->__nextopen = n;
4241 + __STDIO_STREAM_FREE_FILE(stream);
4242 + } else {
4243 + p = stream;
4246 + __STDIO_THREADUNLOCK_OPENLIST_DEL;
4248 + --_stdio_openlist_use_count;
4249 + __STDIO_THREADUNLOCK_OPENLIST_DEL;
4251 +#endif
4253 int __fflush_unlocked(register FILE *stream)
4255 @@ -60,23 +87,39 @@ int __fflush_unlocked(register FILE *str
4258 if (!stream) { /* Flush all (lbf) writing streams. */
4259 - __STDIO_THREADLOCK_OPENLIST;
4260 - for (stream = _stdio_openlist; stream ; stream = stream->__nextopen) {
4261 - MY_STDIO_THREADLOCK(stream);
4262 - if (!(((stream->__modeflags | bufmask)
4263 - ^ (__FLAG_WRITING|__FLAG_LBF)
4264 - ) & (__FLAG_WRITING|__MASK_BUFMODE))
4265 - ) {
4266 - if (!__STDIO_COMMIT_WRITE_BUFFER(stream)) {
4267 - __STDIO_STREAM_DISABLE_PUTC(stream);
4268 - __STDIO_STREAM_CLEAR_WRITING(stream);
4269 - } else {
4270 - retval = EOF;
4272 + __STDIO_OPENLIST_INC_USE;
4274 + __STDIO_THREADLOCK_OPENLIST_ADD;
4275 + stream = _stdio_openlist;
4276 + __STDIO_THREADUNLOCK_OPENLIST_ADD;
4278 + while(stream) {
4279 + /* We only care about currently writing streams and do not want to
4280 + * block trying to obtain mutexes on non-writing streams. */
4281 +#warning fix for nonatomic
4282 +#warning unnecessary check if no threads
4283 + if (__STDIO_STREAM_IS_WRITING(stream)) { /* ONLY IF ATOMIC!!! */
4284 + __MY_STDIO_THREADLOCK(stream);
4285 + /* Need to check again once we have the lock. */
4286 + if (!(((stream->__modeflags | bufmask)
4287 + ^ (__FLAG_WRITING|__FLAG_LBF)
4288 + ) & (__FLAG_WRITING|__MASK_BUFMODE))
4289 + ) {
4290 + if (!__STDIO_COMMIT_WRITE_BUFFER(stream)) {
4291 + __STDIO_STREAM_DISABLE_PUTC(stream);
4292 + __STDIO_STREAM_CLEAR_WRITING(stream);
4293 + } else {
4294 + retval = EOF;
4297 + __MY_STDIO_THREADUNLOCK(stream);
4299 - MY_STDIO_THREADUNLOCK(stream);
4300 + stream = stream->__nextopen;
4302 - __STDIO_THREADUNLOCK_OPENLIST;
4304 + __STDIO_OPENLIST_DEC_USE;
4306 } else if (__STDIO_STREAM_IS_WRITING(stream)) {
4307 if (!__STDIO_COMMIT_WRITE_BUFFER(stream)) {
4308 __STDIO_STREAM_DISABLE_PUTC(stream);
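Two things are worth noting in the fflush.c hunk. First, _stdio_openlist_dec_use() is where the deferred removals from fclose() actually happen: it only prunes the list when it is the last user and deletions are pending. Second, the fflush(NULL) walk now tests __STDIO_STREAM_IS_WRITING() before taking each stream's lock and tests the mode flags again afterwards, so non-writing streams are never blocked on; the patch's own #warning notes that the unlocked test assumes the flag can be read atomically. A standalone sketch of that check-lock-recheck pattern, using plain POSIX threads rather than the uClibc macros:

    /* Check, lock, re-check -- illustrative only. */
    #include <pthread.h>

    struct ostream {
        pthread_mutex_t lock;
        int writing;                /* assumed readable without the lock */
    };

    static void flush_if_writing(struct ostream *s)
    {
        if (!s->writing)            /* cheap unlocked test first */
            return;
        pthread_mutex_lock(&s->lock);
        if (s->writing) {           /* re-check now that we own the lock */
            /* ... commit the write buffer here ... */
            s->writing = 0;
        }
        pthread_mutex_unlock(&s->lock);
    }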
4309 diff --git a/libc/stdio/flockfile.c b/libc/stdio/flockfile.c
4310 index 0dcc7c2..3fad711 100644
4311 --- a/libc/stdio/flockfile.c
4312 +++ b/libc/stdio/flockfile.c
4313 @@ -11,6 +11,6 @@ void flockfile(FILE *stream)
4315 __STDIO_STREAM_VALIDATE(stream);
4317 - __STDIO_ALWAYS_THREADLOCK(stream);
4318 + __STDIO_ALWAYS_THREADLOCK_CANCEL_UNSAFE(stream);
4321 diff --git a/libc/stdio/freopen.c b/libc/stdio/freopen.c
4322 index 0eccaac..36b8488 100644
4323 --- a/libc/stdio/freopen.c
4324 +++ b/libc/stdio/freopen.c
4325 @@ -42,6 +42,8 @@ FILE *freopen(const char * __restrict fi
4327 __STDIO_STREAM_VALIDATE(stream);
4329 + __STDIO_OPENLIST_INC_USE; /* Do not remove the file from the list. */
4331 /* First, flush and close, but don't deallocate, the stream. */
4332 /* This also removes the stream for the open file list. */
4333 dynmode = (stream->__modeflags & (__FLAG_FREEBUF|__FLAG_FREEFILE));
4334 @@ -57,9 +59,16 @@ FILE *freopen(const char * __restrict fi
4336 fp = _stdio_fopen(((intptr_t) filename), mode, stream, FILEDES_ARG);
4338 +#warning if fp is NULL, then we do not free file (but beware stdin,stdout,stderr)
4339 + if (fp) {
4340 + __STDIO_OPENLIST_DEC_DEL_CNT;
4343 /* Reset the allocation flags. */
4344 stream->__modeflags |= dynmode;
4346 + __STDIO_OPENLIST_DEC_USE;
4348 __STDIO_AUTO_THREADUNLOCK(stream);
4350 return fp;
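The freopen() hunk keeps the stream pinned on the open list (__STDIO_OPENLIST_INC_USE before the close, DEC_USE after the reopen) and only drops the deferred-delete count when _stdio_fopen() actually returned a stream. For reference, this is the ordinary caller-visible behaviour the change has to preserve; the file name below is just an example:

    /* Plain freopen() usage (example file name, nothing uClibc-specific). */
    #include <stdio.h>

    int main(void)
    {
        if (freopen("log.txt", "a", stdout) == NULL) {
            perror("freopen");
            return 1;
        }
        puts("this line is appended to log.txt instead of the terminal");
        return fclose(stdout) == 0 ? 0 : 1;
    }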
4351 diff --git a/libc/stdio/ftello.c b/libc/stdio/ftello.c
4352 index 7092f34..69385ce 100644
4353 --- a/libc/stdio/ftello.c
4354 +++ b/libc/stdio/ftello.c
4355 @@ -48,7 +48,10 @@ OFFSET_TYPE FTELL(register FILE *stream)
4357 __STDIO_STREAM_VALIDATE(stream);
4359 - if ((__SEEK(stream, &pos, SEEK_CUR) < 0)
4360 + if ((__SEEK(stream, &pos,
4361 + ((__STDIO_STREAM_IS_WRITING(stream)
4362 + && (stream->__modeflags & __FLAG_APPEND))
4363 + ? SEEK_END : SEEK_CUR)) < 0)
4364 || (__stdio_adjust_position(stream, &pos) < 0)) {
4365 pos = -1;
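The ftello.c change reports positions for streams that are currently writing in append mode relative to the end of the file rather than the descriptor's current offset, which matches where an O_APPEND write will actually land. The caller-visible expectation, sketched with ftell() and an example file:

    /* The behaviour the hunk is after (illustrative). */
    #include <stdio.h>

    int main(void)
    {
        FILE *f = fopen("appended.txt", "a");
        if (f == NULL)
            return 1;
        fputs("more data", f);      /* still buffered, not yet written out */
        /* With the fix, the reported position is taken from the end of the
         * file (plus pending buffered output), because an append-mode write
         * lands there no matter where the descriptor currently points. */
        printf("position: %ld\n", ftell(f));
        fclose(f);
        return 0;
    }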
4367 diff --git a/libc/stdio/ftrylockfile.c b/libc/stdio/ftrylockfile.c
4368 index d85b8ff..0d2e156 100644
4369 --- a/libc/stdio/ftrylockfile.c
4370 +++ b/libc/stdio/ftrylockfile.c
4371 @@ -15,5 +15,5 @@ int ftrylockfile(FILE *stream)
4373 __STDIO_STREAM_VALIDATE(stream);
4375 - return __STDIO_ALWAYS_THREADTRYLOCK(stream);
4376 + return __STDIO_ALWAYS_THREADTRYLOCK_CANCEL_UNSAFE(stream);
4378 diff --git a/libc/stdio/funlockfile.c b/libc/stdio/funlockfile.c
4379 index 048c093..2ddf097 100644
4380 --- a/libc/stdio/funlockfile.c
4381 +++ b/libc/stdio/funlockfile.c
4382 @@ -11,5 +11,5 @@ void funlockfile(FILE *stream)
4384 __STDIO_STREAM_VALIDATE(stream);
4386 - __STDIO_ALWAYS_THREADUNLOCK(stream);
4387 + __STDIO_ALWAYS_THREADUNLOCK_CANCEL_UNSAFE(stream);
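flockfile(), ftrylockfile() and funlockfile() now use the _CANCEL_UNSAFE flavour of the stream-lock macros. As far as the naming suggests, the distinction this patch introduces is between taking the bare mutex and the cancellation-aware pattern that defers cancellation and registers a cleanup handler so the lock cannot be leaked if the thread is cancelled; flockfile() hands the lock to its caller and releases it in a different call, so it cannot push a handler of its own. A standalone illustration of the two flavours, using the GNU cleanup extensions rather than uClibc's internal macros:

    /* Illustrative only: bare locking vs. cancellation-safe locking. */
    #define _GNU_SOURCE
    #include <pthread.h>

    static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;

    static void cleanup_unlock(void *mutex)
    {
        pthread_mutex_unlock(mutex);
    }

    /* "Cancel-unsafe": just the mutex.  If the thread is cancelled while
     * holding m, m stays locked -- acceptable only when the caller manages
     * the lock lifetime itself, as flockfile()/funlockfile() callers do. */
    static void lock_bare(void)   { pthread_mutex_lock(&m); }
    static void unlock_bare(void) { pthread_mutex_unlock(&m); }

    /* Cancellation-safe pattern: defer cancellation and register a cleanup
     * handler so m is released even if fn() is a cancellation point. */
    static void with_lock(void (*fn)(void))
    {
        pthread_mutex_lock(&m);
        pthread_cleanup_push_defer_np(cleanup_unlock, &m);
        fn();
        pthread_cleanup_pop_restore_np(1);  /* runs cleanup_unlock(&m) */
    }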
4389 diff --git a/libc/stdio/popen.c b/libc/stdio/popen.c
4390 index c7887ad..ab8d296 100644
4391 --- a/libc/stdio/popen.c
4392 +++ b/libc/stdio/popen.c
4393 @@ -14,6 +14,7 @@
4394 * Fix failure exit code for failed execve().
4397 +#warning hmm... susv3 says "Pipe streams are byte-oriented."
4399 #include <stdio.h>
4400 #include <stdlib.h>
4401 @@ -21,6 +22,8 @@
4402 #include <unistd.h>
4403 #include <sys/wait.h>
4405 +#include <bits/uClibc_mutex.h>
4407 /* uClinux-2.0 has vfork, but Linux 2.0 doesn't */
4408 #include <sys/syscall.h>
4409 #if ! defined __NR_vfork
4410 @@ -29,19 +32,11 @@
4411 # define VFORK_UNLOCK ((void) 0)
4412 #endif
4414 -#ifdef __UCLIBC_HAS_THREADS__
4415 -#include <pthread.h>
4416 -static pthread_mutex_t mylock = PTHREAD_MUTEX_INITIALIZER;
4417 -# define LOCK __pthread_mutex_lock(&mylock)
4418 -# define UNLOCK __pthread_mutex_unlock(&mylock);
4419 -#else
4420 -# define LOCK ((void) 0)
4421 -# define UNLOCK ((void) 0)
4422 -#endif
4423 +__UCLIBC_MUTEX_STATIC(mylock, PTHREAD_MUTEX_INITIALIZER);
4425 #ifndef VFORK_LOCK
4426 -# define VFORK_LOCK LOCK
4427 -# define VFORK_UNLOCK UNLOCK
4428 +# define VFORK_LOCK __UCLIBC_MUTEX_LOCK(mylock)
4429 +# define VFORK_UNLOCK __UCLIBC_MUTEX_UNLOCK(mylock)
4430 #endif
4432 struct popen_list_item {
4433 @@ -118,10 +113,10 @@ FILE *popen(const char *command, const c
4434 if (pid > 0) { /* Parent of vfork... */
4435 pi->pid = pid;
4436 pi->f = fp;
4437 - LOCK;
4438 + __UCLIBC_MUTEX_LOCK(mylock);
4439 pi->next = popen_list;
4440 popen_list = pi;
4441 - UNLOCK;
4442 + __UCLIBC_MUTEX_UNLOCK(mylock);
4444 return fp;
4446 @@ -136,6 +131,8 @@ FILE *popen(const char *command, const c
4447 return NULL;
4450 +#warning is pclose correct wrt the new mutex semantics?
4452 int pclose(FILE *stream)
4454 struct popen_list_item *p;
4455 @@ -144,7 +141,7 @@ int pclose(FILE *stream)
4457 /* First, find the list entry corresponding to stream and remove it
4458 * from the list. Set p to the list item (NULL if not found). */
4459 - LOCK;
4460 + __UCLIBC_MUTEX_LOCK(mylock);
4461 if ((p = popen_list) != NULL) {
4462 if (p->f == stream) {
4463 popen_list = p->next;
4464 @@ -163,7 +160,7 @@ int pclose(FILE *stream)
4465 } while (1);
4468 - UNLOCK;
4469 + __UCLIBC_MUTEX_UNLOCK(mylock);
4471 if (p) {
4472 pid = p->pid; /* Save the pid we need */
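popen()/pclose() drop their private LOCK/UNLOCK definitions in favour of the shared __UCLIBC_MUTEX_* wrappers from bits/uClibc_mutex.h, whose exact expansion is not shown in this patch. The shape of the conversion is the familiar "compiles away when threads are disabled" macro pattern; a self-contained approximation with invented names (the real macros presumably also layer cancellation handling on top of this):

    /* Invented approximation -- not the real bits/uClibc_mutex.h macros. */
    #ifdef __UCLIBC_HAS_THREADS__
    # include <pthread.h>
    # define MY_MUTEX(name)        static pthread_mutex_t name = PTHREAD_MUTEX_INITIALIZER
    # define MY_MUTEX_LOCK(name)   pthread_mutex_lock(&(name))
    # define MY_MUTEX_UNLOCK(name) pthread_mutex_unlock(&(name))
    #else
    # define MY_MUTEX(name)        typedef int name##_no_lock_needed  /* keeps the ';' legal */
    # define MY_MUTEX_LOCK(name)   ((void)0)
    # define MY_MUTEX_UNLOCK(name) ((void)0)
    #endif

    MY_MUTEX(popen_list_lock);

    static void list_update(void)
    {
        MY_MUTEX_LOCK(popen_list_lock);
        /* ... manipulate the shared popen list here ... */
        MY_MUTEX_UNLOCK(popen_list_lock);
    }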
4473 diff --git a/libc/stdio/setvbuf.c b/libc/stdio/setvbuf.c
4474 index 3fe62c6..6d53ab1 100644
4475 --- a/libc/stdio/setvbuf.c
4476 +++ b/libc/stdio/setvbuf.c
4477 @@ -75,8 +75,8 @@ int setvbuf(register FILE * __restrict s
4480 stream->__modeflags |= alloc_flag;
4481 - stream->__bufstart = buf;
4482 - stream->__bufend = buf + size;
4483 + stream->__bufstart = (unsigned char *) buf;
4484 + stream->__bufend = (unsigned char *) buf + size;
4485 __STDIO_STREAM_INIT_BUFREAD_BUFPOS(stream);
4486 __STDIO_STREAM_DISABLE_GETC(stream);
4487 __STDIO_STREAM_DISABLE_PUTC(stream);
4488 diff --git a/libc/stdio/vasprintf.c b/libc/stdio/vasprintf.c
4489 index 688ab7c..6d7664d 100644
4490 --- a/libc/stdio/vasprintf.c
4491 +++ b/libc/stdio/vasprintf.c
4492 @@ -63,6 +63,8 @@ int vasprintf(char **__restrict buf, con
4493 free(*buf);
4494 *buf = NULL;
4496 + } else {
4497 + rv = -1;
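The added else branch makes vasprintf() return -1 on a failure path it previously missed (judging from the surrounding context, the case where no usable output buffer could be produced), instead of returning a length while *buf is NULL. Portable callers should therefore treat only a non-negative return as "there is a string to use and free":

    /* Ordinary asprintf()/vasprintf() usage (GNU extension). */
    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
        char *s = NULL;
        int n = asprintf(&s, "%d bottles of %s", 99, "beer");
        if (n < 0)
            return 1;               /* failure: nothing to free */
        puts(s);
        free(s);
        return 0;
    }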
4501 diff --git a/libc/stdio/vdprintf.c b/libc/stdio/vdprintf.c
4502 index de8362c..7cb707f 100644
4503 --- a/libc/stdio/vdprintf.c
4504 +++ b/libc/stdio/vdprintf.c
4505 @@ -15,8 +15,8 @@ int vdprintf(int filedes, const char * _
4506 #ifdef __STDIO_BUFFERS
4507 char buf[64]; /* TODO: provide _optional_ buffering? */
4509 - f.__bufend = buf + sizeof(buf);
4510 - f.__bufstart = buf;
4511 + f.__bufend = (unsigned char *) buf + sizeof(buf);
4512 + f.__bufstart = (unsigned char *) buf;
4513 __STDIO_STREAM_DISABLE_GETC(&f);
4514 __STDIO_STREAM_DISABLE_PUTC(&f);
4515 __STDIO_STREAM_INIT_BUFREAD_BUFPOS(&f);
4516 diff --git a/libc/stdio/vfprintf.c b/libc/stdio/vfprintf.c
4517 index 10114f0..9214e3b 100644
4518 --- a/libc/stdio/vfprintf.c
4519 +++ b/libc/stdio/vfprintf.c
4520 @@ -569,7 +569,7 @@ int _ppfs_init(register ppfs_t *ppfs, co
4521 ppfs->fmtpos = fmt0; /* rewind */
4524 -#ifdef NL_MAX_ARG
4525 +#ifdef NL_ARGMAX
4526 /* If we have positional args, make sure we know all the types. */
4528 register int *p = ppfs->argtype;
4529 @@ -581,7 +581,7 @@ int _ppfs_init(register ppfs_t *ppfs, co
4530 ++p;
4533 -#endif /* NL_MAX_ARG */
4534 +#endif /* NL_ARGMAX */
4536 return 0;
4538 @@ -1214,7 +1214,7 @@ static size_t _fp_out_narrow(FILE *fp, i
4540 len = buflen;
4542 - return r + OUTNSTR(fp, (const char *) buf, len);
4543 + return r + OUTNSTR(fp, (const unsigned char *) buf, len);
4546 #endif /* __STDIO_PRINTF_FLOAT */
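The vfprintf.c change is a guard-macro fix: the positional-argument type check was wrapped in #ifdef NL_MAX_ARG, apparently a misspelling, so it was being compiled out; NL_ARGMAX is the actual <limits.h> constant. The feature whose checking is re-enabled is formats that select their arguments by position:

    /* Positional printf arguments (standard POSIX behaviour). */
    #include <stdio.h>

    int main(void)
    {
        /* "%2$s" and "%1$s" pick arguments by 1-based position. */
        printf("%2$s %1$s\n", "world", "hello");   /* prints "hello world" */
        return 0;
    }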
4547 diff --git a/libc/stdlib/abort.c b/libc/stdlib/abort.c
4548 index 77c2cdc..9f69918 100644
4549 --- a/libc/stdlib/abort.c
4550 +++ b/libc/stdlib/abort.c
4551 @@ -70,16 +70,9 @@ extern void _exit __P((int __status)) __
4552 static int been_there_done_that = 0;
4554 /* Be prepared in case multiple threads try to abort() */
4555 -#ifdef __UCLIBC_HAS_THREADS__
4556 -# include <pthread.h>
4557 -static pthread_mutex_t mylock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
4558 -# define LOCK __pthread_mutex_lock(&mylock)
4559 -# define UNLOCK __pthread_mutex_unlock(&mylock)
4560 -#else
4561 -# define LOCK
4562 -# define UNLOCK
4563 -#endif
4564 +#include <bits/uClibc_mutex.h>
4566 +__UCLIBC_MUTEX_STATIC(mylock, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP);
4568 /* Cause an abnormal program termination with core-dump */
4569 void abort(void)
4570 @@ -87,7 +80,7 @@ void abort(void)
4571 sigset_t sigset;
4573 /* Make sure we acquire the lock before proceeding */
4574 - LOCK;
4575 + __UCLIBC_MUTEX_LOCK_CANCEL_UNSAFE(mylock);
4577 /* Unmask SIGABRT to be sure we can get it */
4578 if (__sigemptyset(&sigset) == 0 && __sigaddset(&sigset, SIGABRT) == 0) {
4579 @@ -110,9 +103,9 @@ void abort(void)
4580 #endif
4582 abort_it:
4583 - UNLOCK;
4584 + __UCLIBC_MUTEX_UNLOCK_CANCEL_UNSAFE(mylock);
4585 raise(SIGABRT);
4586 - LOCK;
4587 + __UCLIBC_MUTEX_LOCK_CANCEL_UNSAFE(mylock);
4590 /* Still here? Try to remove any signal handlers */
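abort() keeps its recursive mutex but switches to the _CANCEL_UNSAFE lock variants, still dropping the lock around raise(SIGABRT) and re-taking it afterwards. The recursive type matters because the same thread can end up back in abort() (for instance via a SIGABRT handler that itself aborts) while already holding the lock:

    /* Why a recursive mutex: the owner may re-lock it (illustrative). */
    #define _GNU_SOURCE
    #include <pthread.h>

    static pthread_mutex_t lk = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;

    static void reenter(void)
    {
        pthread_mutex_lock(&lk);
        pthread_mutex_lock(&lk);    /* a normal mutex would deadlock here */
        pthread_mutex_unlock(&lk);
        pthread_mutex_unlock(&lk);
    }

    int main(void)
    {
        reenter();
        return 0;
    }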
4591 diff --git a/libc/stdlib/atexit.c b/libc/stdlib/atexit.c
4592 index 280f42c..b028068 100644
4593 --- a/libc/stdlib/atexit.c
4594 +++ b/libc/stdlib/atexit.c
4595 @@ -40,17 +40,9 @@
4596 #include <stdlib.h>
4597 #include <errno.h>
4599 +#include <bits/uClibc_mutex.h>
4601 -#ifdef __UCLIBC_HAS_THREADS__
4602 -#include <pthread.h>
4603 -extern pthread_mutex_t mylock;
4604 -# define LOCK __pthread_mutex_lock(&mylock)
4605 -# define UNLOCK __pthread_mutex_unlock(&mylock);
4606 -#else
4607 -# define LOCK
4608 -# define UNLOCK
4609 -#endif
4611 +__UCLIBC_MUTEX_EXTERN(__atexit_lock);
4613 typedef void (*aefuncp) (void); /* atexit function pointer */
4614 typedef void (*oefuncp) (int, void *); /* on_exit function pointer */
4615 @@ -90,8 +82,9 @@ extern struct exit_function __exit_funct
4616 int atexit(aefuncp func)
4618 struct exit_function *efp;
4619 + int rv = -1;
4621 - LOCK;
4622 + __UCLIBC_MUTEX_LOCK(__atexit_lock);
4623 if (func) {
4624 #ifdef __UCLIBC_DYNAMIC_ATEXIT__
4625 /* If we are out of function table slots, make some more */
4626 @@ -99,18 +92,16 @@ int atexit(aefuncp func)
4627 efp=realloc(__exit_function_table,
4628 (__exit_slots+20)*sizeof(struct exit_function));
4629 if (efp==NULL) {
4630 - UNLOCK;
4631 __set_errno(ENOMEM);
4632 - return -1;
4633 + goto DONE;
4635 __exit_function_table = efp;
4636 __exit_slots+=20;
4638 #else
4639 if (__exit_count >= __UCLIBC_MAX_ATEXIT) {
4640 - UNLOCK;
4641 __set_errno(ENOMEM);
4642 - return -1;
4643 + goto DONE;
4645 #endif
4646 __exit_cleanup = __exit_handler; /* enable cleanup */
4647 @@ -118,8 +109,12 @@ int atexit(aefuncp func)
4648 efp->type = ef_atexit;
4649 efp->funcs.atexit = func;
4651 - UNLOCK;
4652 - return 0;
4654 + rv = 0;
4656 + DONE:
4657 + __UCLIBC_MUTEX_UNLOCK(__atexit_lock);
4658 + return rv;
4660 #endif
4662 @@ -133,8 +128,9 @@ int atexit(aefuncp func)
4663 int on_exit(oefuncp func, void *arg)
4665 struct exit_function *efp;
4666 + int rv = -1;
4668 - LOCK;
4669 + __UCLIBC_MUTEX_LOCK(__atexit_lock);
4670 if (func) {
4671 #ifdef __UCLIBC_DYNAMIC_ATEXIT__
4672 /* If we are out of function table slots, make some more */
4673 @@ -142,18 +138,16 @@ int on_exit(oefuncp func, void *arg)
4674 efp=realloc(__exit_function_table,
4675 (__exit_slots+20)*sizeof(struct exit_function));
4676 if (efp==NULL) {
4677 - UNLOCK;
4678 __set_errno(ENOMEM);
4679 - return -1;
4680 + goto DONE;
4682 __exit_function_table=efp;
4683 __exit_slots+=20;
4685 #else
4686 if (__exit_count >= __UCLIBC_MAX_ATEXIT) {
4687 - UNLOCK;
4688 __set_errno(ENOMEM);
4689 - return -1;
4690 + goto DONE;
4692 #endif
4694 @@ -163,8 +157,12 @@ int on_exit(oefuncp func, void *arg)
4695 efp->funcs.on_exit.func = func;
4696 efp->funcs.on_exit.arg = arg;
4698 - UNLOCK;
4699 - return 0;
4701 + rv = 0;
4703 + DONE:
4704 + __UCLIBC_MUTEX_UNLOCK(__atexit_lock);
4705 + return rv;
4707 #endif
4709 @@ -214,9 +212,8 @@ void __exit_handler(int status)
4710 #ifdef L_exit
4711 extern void weak_function _stdio_term(void);
4712 void (*__exit_cleanup) (int) = 0;
4713 -#ifdef __UCLIBC_HAS_THREADS__
4714 -pthread_mutex_t mylock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
4715 -#endif
4717 +__UCLIBC_MUTEX_INIT(__atexit_lock, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP);
4719 #ifdef __UCLIBC_CTOR_DTOR__
4720 extern void (*__app_fini)(void);
4721 @@ -229,11 +226,11 @@ extern void (*__rtld_fini)(void);
4722 void exit(int rv)
4724 /* Perform exit-specific cleanup (atexit and on_exit) */
4725 - LOCK;
4726 + __UCLIBC_MUTEX_LOCK(__atexit_lock);
4727 if (__exit_cleanup) {
4728 __exit_cleanup(rv);
4730 - UNLOCK;
4731 + __UCLIBC_MUTEX_UNLOCK(__atexit_lock);
4733 #ifdef __UCLIBC_CTOR_DTOR__
4734 if (__app_fini != NULL)
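Both atexit() and on_exit() are reshaped around a single exit path: the early "UNLOCK; return -1;" pairs become "goto DONE", and __atexit_lock (now shared with exit()) is released exactly once at the bottom while rv carries the result. The same pattern in isolation:

    /* Single-exit locking pattern, standalone sketch. */
    #include <pthread.h>

    static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

    static int register_handler(void (*func)(void))
    {
        int rv = -1;                /* pessimistic default */

        pthread_mutex_lock(&table_lock);
        if (func == NULL)
            goto done;              /* every failure path funnels through here */
        /* ... grow the handler table and store func here ... */
        rv = 0;
    done:
        pthread_mutex_unlock(&table_lock);
        return rv;
    }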
4735 diff --git a/libc/stdlib/malloc-simple/alloc.c b/libc/stdlib/malloc-simple/alloc.c
4736 index ed14c37..519a875 100644
4737 --- a/libc/stdlib/malloc-simple/alloc.c
4738 +++ b/libc/stdlib/malloc-simple/alloc.c
4739 @@ -108,15 +108,14 @@ void free(void *ptr)
4740 #endif
4742 #ifdef L_memalign
4743 -#ifdef __UCLIBC_HAS_THREADS__
4744 -#include <pthread.h>
4745 -pthread_mutex_t __malloc_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
4746 -# define LOCK __pthread_mutex_lock(&__malloc_lock)
4747 -# define UNLOCK __pthread_mutex_unlock(&__malloc_lock);
4748 -#else
4749 -# define LOCK
4750 -# define UNLOCK
4751 -#endif
4753 +#include <bits/uClibc_mutex.h>
4755 +__UCLIBC_MUTEX_EXTERN(__malloc_lock);
4757 +#define __MALLOC_LOCK __UCLIBC_MUTEX_LOCK(__malloc_lock)
4758 +#define __MALLOC_UNLOCK __UCLIBC_MUTEX_UNLOCK(__malloc_lock)
4761 /* List of blocks allocated with memalign or valloc */
4762 struct alignlist
4763 @@ -135,7 +134,7 @@ int __libc_free_aligned(void *ptr)
4764 if (ptr == NULL)
4765 return 0;
4767 - LOCK;
4768 + __MALLOC_LOCK;
4769 for (l = _aligned_blocks; l != NULL; l = l->next) {
4770 if (l->aligned == ptr) {
4771 /* Mark the block as free */
4772 @@ -146,7 +145,7 @@ int __libc_free_aligned(void *ptr)
4773 return 1;
4776 - UNLOCK;
4777 + __MALLOC_UNLOCK;
4778 return 0;
4780 void * memalign (size_t alignment, size_t size)
4781 @@ -159,10 +158,10 @@ void * memalign (size_t alignment, size_
4782 return NULL;
4784 adj = (unsigned long int) ((unsigned long int) ((char *) result -
4785 - (char *) NULL)) % alignment;
4786 + (char *) NULL)) % alignment;
4787 if (adj != 0) {
4788 struct alignlist *l;
4789 - LOCK;
4790 + __MALLOC_LOCK;
4791 for (l = _aligned_blocks; l != NULL; l = l->next)
4792 if (l->aligned == NULL)
4793 /* This slot is free. Use it. */
4794 @@ -171,15 +170,16 @@ void * memalign (size_t alignment, size_
4795 l = (struct alignlist *) malloc (sizeof (struct alignlist));
4796 if (l == NULL) {
4797 free(result);
4798 - UNLOCK;
4799 - return NULL;
4800 + result = NULL;
4801 + goto DONE;
4803 l->next = _aligned_blocks;
4804 _aligned_blocks = l;
4806 l->exact = result;
4807 result = l->aligned = (char *) result + alignment - adj;
4808 - UNLOCK;
4809 + DONE:
4810 + __MALLOC_UNLOCK;
4813 return result;
4814 diff --git a/libc/stdlib/malloc-standard/calloc.c b/libc/stdlib/malloc-standard/calloc.c
4815 index a67dad7..4277954 100644
4816 --- a/libc/stdlib/malloc-standard/calloc.c
4817 +++ b/libc/stdlib/malloc-standard/calloc.c
4818 @@ -8,7 +8,7 @@
4819 VERSION 2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
4821 Note: There may be an updated version of this malloc obtainable at
4822 - ftp://gee.cs.oswego.edu/pub/misc/malloc.c
4823 + ftp://gee.cs.oswego.edu/pub/misc/malloc.c
4824 Check before installing!
4826 Hacked up for uClibc by Erik Andersen <andersen@codepoet.org>
4827 @@ -31,63 +31,63 @@ void* calloc(size_t n_elements, size_t e
4828 * to fall through and call malloc(0) */
4829 size = n_elements * elem_size;
4830 if (n_elements && elem_size != (size / n_elements)) {
4831 - __set_errno(ENOMEM);
4832 - return NULL;
4833 + __set_errno(ENOMEM);
4834 + return NULL;
4837 - LOCK;
4838 + __MALLOC_LOCK;
4839 mem = malloc(size);
4840 if (mem != 0) {
4841 - p = mem2chunk(mem);
4842 + p = mem2chunk(mem);
4844 - if (!chunk_is_mmapped(p))
4846 - /*
4847 - Unroll clear of <= 36 bytes (72 if 8byte sizes)
4848 - We know that contents have an odd number of
4849 - size_t-sized words; minimally 3.
4850 - */
4852 - d = (size_t*)mem;
4853 - clearsize = chunksize(p) - (sizeof(size_t));
4854 - nclears = clearsize / sizeof(size_t);
4855 - assert(nclears >= 3);
4857 - if (nclears > 9)
4858 - memset(d, 0, clearsize);
4860 - else {
4861 - *(d+0) = 0;
4862 - *(d+1) = 0;
4863 - *(d+2) = 0;
4864 - if (nclears > 4) {
4865 - *(d+3) = 0;
4866 - *(d+4) = 0;
4867 - if (nclears > 6) {
4868 - *(d+5) = 0;
4869 - *(d+6) = 0;
4870 - if (nclears > 8) {
4871 - *(d+7) = 0;
4872 - *(d+8) = 0;
4873 + if (!chunk_is_mmapped(p))
4875 + /*
4876 + Unroll clear of <= 36 bytes (72 if 8byte sizes)
4877 + We know that contents have an odd number of
4878 + size_t-sized words; minimally 3.
4879 + */
4881 + d = (size_t*)mem;
4882 + clearsize = chunksize(p) - (sizeof(size_t));
4883 + nclears = clearsize / sizeof(size_t);
4884 + assert(nclears >= 3);
4886 + if (nclears > 9)
4887 + memset(d, 0, clearsize);
4889 + else {
4890 + *(d+0) = 0;
4891 + *(d+1) = 0;
4892 + *(d+2) = 0;
4893 + if (nclears > 4) {
4894 + *(d+3) = 0;
4895 + *(d+4) = 0;
4896 + if (nclears > 6) {
4897 + *(d+5) = 0;
4898 + *(d+6) = 0;
4899 + if (nclears > 8) {
4900 + *(d+7) = 0;
4901 + *(d+8) = 0;
4911 #if 0
4912 - else
4914 - /* Standard unix mmap using /dev/zero clears memory so calloc
4915 - * doesn't need to actually zero anything....
4916 - */
4917 - d = (size_t*)mem;
4918 - /* Note the additional (sizeof(size_t)) */
4919 - clearsize = chunksize(p) - 2*(sizeof(size_t));
4920 - memset(d, 0, clearsize);
4922 + else
4924 + /* Standard unix mmap using /dev/zero clears memory so calloc
4925 + * doesn't need to actually zero anything....
4926 + */
4927 + d = (size_t*)mem;
4928 + /* Note the additional (sizeof(size_t)) */
4929 + clearsize = chunksize(p) - 2*(sizeof(size_t));
4930 + memset(d, 0, clearsize);
4932 #endif
4934 - UNLOCK;
4935 + __MALLOC_UNLOCK;
4936 return mem;
4939 diff --git a/libc/stdlib/malloc-standard/free.c b/libc/stdlib/malloc-standard/free.c
4940 index 94e1d65..4e08ef7 100644
4941 --- a/libc/stdlib/malloc-standard/free.c
4942 +++ b/libc/stdlib/malloc-standard/free.c
4943 @@ -8,7 +8,7 @@
4944 VERSION 2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
4946 Note: There may be an updated version of this malloc obtainable at
4947 - ftp://gee.cs.oswego.edu/pub/misc/malloc.c
4948 + ftp://gee.cs.oswego.edu/pub/misc/malloc.c
4949 Check before installing!
4951 Hacked up for uClibc by Erik Andersen <andersen@codepoet.org>
4952 @@ -42,71 +42,71 @@ static int __malloc_trim(size_t pad, mst
4954 if (extra > 0) {
4956 - /*
4957 - Only proceed if end of memory is where we last set it.
4958 - This avoids problems if there were foreign sbrk calls.
4959 - */
4960 - current_brk = (char*)(MORECORE(0));
4961 - if (current_brk == (char*)(av->top) + top_size) {
4963 - /*
4964 - Attempt to release memory. We ignore MORECORE return value,
4965 - and instead call again to find out where new end of memory is.
4966 - This avoids problems if first call releases less than we asked,
4967 - of if failure somehow altered brk value. (We could still
4968 - encounter problems if it altered brk in some very bad way,
4969 - but the only thing we can do is adjust anyway, which will cause
4970 - some downstream failure.)
4971 - */
4973 - MORECORE(-extra);
4974 - new_brk = (char*)(MORECORE(0));
4976 - if (new_brk != (char*)MORECORE_FAILURE) {
4977 - released = (long)(current_brk - new_brk);
4979 - if (released != 0) {
4980 - /* Success. Adjust top. */
4981 - av->sbrked_mem -= released;
4982 - set_head(av->top, (top_size - released) | PREV_INUSE);
4983 - check_malloc_state();
4984 - return 1;
4985 + /*
4986 + Only proceed if end of memory is where we last set it.
4987 + This avoids problems if there were foreign sbrk calls.
4988 + */
4989 + current_brk = (char*)(MORECORE(0));
4990 + if (current_brk == (char*)(av->top) + top_size) {
4992 + /*
4993 + Attempt to release memory. We ignore MORECORE return value,
4994 + and instead call again to find out where new end of memory is.
4995 + This avoids problems if first call releases less than we asked,
4996 + of if failure somehow altered brk value. (We could still
4997 + encounter problems if it altered brk in some very bad way,
4998 + but the only thing we can do is adjust anyway, which will cause
4999 + some downstream failure.)
5000 + */
5002 + MORECORE(-extra);
5003 + new_brk = (char*)(MORECORE(0));
5005 + if (new_brk != (char*)MORECORE_FAILURE) {
5006 + released = (long)(current_brk - new_brk);
5008 + if (released != 0) {
5009 + /* Success. Adjust top. */
5010 + av->sbrked_mem -= released;
5011 + set_head(av->top, (top_size - released) | PREV_INUSE);
5012 + check_malloc_state();
5013 + return 1;
5020 return 0;
5023 /* ------------------------- malloc_trim -------------------------
5024 - malloc_trim(size_t pad);
5025 + malloc_trim(size_t pad);
5027 - If possible, gives memory back to the system (via negative
5028 - arguments to sbrk) if there is unused memory at the `high' end of
5029 - the malloc pool. You can call this after freeing large blocks of
5030 - memory to potentially reduce the system-level memory requirements
5031 - of a program. However, it cannot guarantee to reduce memory. Under
5032 - some allocation patterns, some large free blocks of memory will be
5033 - locked between two used chunks, so they cannot be given back to
5034 - the system.
5036 - The `pad' argument to malloc_trim represents the amount of free
5037 - trailing space to leave untrimmed. If this argument is zero,
5038 - only the minimum amount of memory to maintain internal data
5039 - structures will be left (one page or less). Non-zero arguments
5040 - can be supplied to maintain enough trailing space to service
5041 - future expected allocations without having to re-obtain memory
5042 - from the system.
5044 - Malloc_trim returns 1 if it actually released any memory, else 0.
5045 - On systems that do not support "negative sbrks", it will always
5046 - return 0.
5047 + If possible, gives memory back to the system (via negative
5048 + arguments to sbrk) if there is unused memory at the `high' end of
5049 + the malloc pool. You can call this after freeing large blocks of
5050 + memory to potentially reduce the system-level memory requirements
5051 + of a program. However, it cannot guarantee to reduce memory. Under
5052 + some allocation patterns, some large free blocks of memory will be
5053 + locked between two used chunks, so they cannot be given back to
5054 + the system.
5056 + The `pad' argument to malloc_trim represents the amount of free
5057 + trailing space to leave untrimmed. If this argument is zero,
5058 + only the minimum amount of memory to maintain internal data
5059 + structures will be left (one page or less). Non-zero arguments
5060 + can be supplied to maintain enough trailing space to service
5061 + future expected allocations without having to re-obtain memory
5062 + from the system.
5064 + Malloc_trim returns 1 if it actually released any memory, else 0.
5065 + On systems that do not support "negative sbrks", it will always
5066 + return 0.
5068 int malloc_trim(size_t pad)
5070 - mstate av = get_malloc_state();
5071 - __malloc_consolidate(av);
5072 - return __malloc_trim(pad, av);
5073 + mstate av = get_malloc_state();
5074 + __malloc_consolidate(av);
5075 + return __malloc_trim(pad, av);
5079 @@ -125,8 +125,8 @@ static void malloc_init_state(mstate av)
5081 /* Establish circular links for normal bins */
5082 for (i = 1; i < NBINS; ++i) {
5083 - bin = bin_at(av,i);
5084 - bin->fd = bin->bk = bin;
5085 + bin = bin_at(av,i);
5086 + bin->fd = bin->bk = bin;
5089 av->top_pad = DEFAULT_TOP_PAD;
5090 @@ -157,15 +157,15 @@ static void malloc_init_state(mstate av)
5092 /* ------------------------- __malloc_consolidate -------------------------
5094 - __malloc_consolidate is a specialized version of free() that tears
5095 - down chunks held in fastbins. Free itself cannot be used for this
5096 - purpose since, among other things, it might place chunks back onto
5097 - fastbins. So, instead, we need to use a minor variant of the same
5098 - code.
5100 - Also, because this routine needs to be called the first time through
5101 - malloc anyway, it turns out to be the perfect place to trigger
5102 - initialization code.
5103 +__malloc_consolidate is a specialized version of free() that tears
5104 +down chunks held in fastbins. Free itself cannot be used for this
5105 +purpose since, among other things, it might place chunks back onto
5106 +fastbins. So, instead, we need to use a minor variant of the same
5107 +code.
5109 +Also, because this routine needs to be called the first time through
5110 +malloc anyway, it turns out to be the perfect place to trigger
5111 +initialization code.
5113 void __malloc_consolidate(mstate av)
5115 @@ -186,78 +186,78 @@ void __malloc_consolidate(mstate av)
5116 mchunkptr fwd;
5119 - If max_fast is 0, we know that av hasn't
5120 - yet been initialized, in which case do so below
5121 - */
5122 + If max_fast is 0, we know that av hasn't
5123 + yet been initialized, in which case do so below
5124 + */
5126 if (av->max_fast != 0) {
5127 - clear_fastchunks(av);
5128 + clear_fastchunks(av);
5130 - unsorted_bin = unsorted_chunks(av);
5131 + unsorted_bin = unsorted_chunks(av);
5133 - /*
5134 - Remove each chunk from fast bin and consolidate it, placing it
5135 - then in unsorted bin. Among other reasons for doing this,
5136 - placing in unsorted bin avoids needing to calculate actual bins
5137 - until malloc is sure that chunks aren't immediately going to be
5138 - reused anyway.
5139 - */
5141 - maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
5142 - fb = &(av->fastbins[0]);
5143 - do {
5144 - if ( (p = *fb) != 0) {
5145 - *fb = 0;
5146 + /*
5147 + Remove each chunk from fast bin and consolidate it, placing it
5148 + then in unsorted bin. Among other reasons for doing this,
5149 + placing in unsorted bin avoids needing to calculate actual bins
5150 + until malloc is sure that chunks aren't immediately going to be
5151 + reused anyway.
5152 + */
5154 + maxfb = &(av->fastbins[fastbin_index(av->max_fast)]);
5155 + fb = &(av->fastbins[0]);
5156 do {
5157 - check_inuse_chunk(p);
5158 - nextp = p->fd;
5159 + if ( (p = *fb) != 0) {
5160 + *fb = 0;
5162 - /* Slightly streamlined version of consolidation code in free() */
5163 - size = p->size & ~PREV_INUSE;
5164 - nextchunk = chunk_at_offset(p, size);
5165 - nextsize = chunksize(nextchunk);
5166 + do {
5167 + check_inuse_chunk(p);
5168 + nextp = p->fd;
5170 + /* Slightly streamlined version of consolidation code in free() */
5171 + size = p->size & ~PREV_INUSE;
5172 + nextchunk = chunk_at_offset(p, size);
5173 + nextsize = chunksize(nextchunk);
5175 + if (!prev_inuse(p)) {
5176 + prevsize = p->prev_size;
5177 + size += prevsize;
5178 + p = chunk_at_offset(p, -((long) prevsize));
5179 + unlink(p, bck, fwd);
5182 + if (nextchunk != av->top) {
5183 + nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
5184 + set_head(nextchunk, nextsize);
5186 + if (!nextinuse) {
5187 + size += nextsize;
5188 + unlink(nextchunk, bck, fwd);
5191 + first_unsorted = unsorted_bin->fd;
5192 + unsorted_bin->fd = p;
5193 + first_unsorted->bk = p;
5195 + set_head(p, size | PREV_INUSE);
5196 + p->bk = unsorted_bin;
5197 + p->fd = first_unsorted;
5198 + set_foot(p, size);
5201 + else {
5202 + size += nextsize;
5203 + set_head(p, size | PREV_INUSE);
5204 + av->top = p;
5207 - if (!prev_inuse(p)) {
5208 - prevsize = p->prev_size;
5209 - size += prevsize;
5210 - p = chunk_at_offset(p, -((long) prevsize));
5211 - unlink(p, bck, fwd);
5213 + } while ( (p = nextp) != 0);
5215 - if (nextchunk != av->top) {
5216 - nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
5217 - set_head(nextchunk, nextsize);
5219 - if (!nextinuse) {
5220 - size += nextsize;
5221 - unlink(nextchunk, bck, fwd);
5224 - first_unsorted = unsorted_bin->fd;
5225 - unsorted_bin->fd = p;
5226 - first_unsorted->bk = p;
5228 - set_head(p, size | PREV_INUSE);
5229 - p->bk = unsorted_bin;
5230 - p->fd = first_unsorted;
5231 - set_foot(p, size);
5234 - else {
5235 - size += nextsize;
5236 - set_head(p, size | PREV_INUSE);
5237 - av->top = p;
5240 - } while ( (p = nextp) != 0);
5243 - } while (fb++ != maxfb);
5244 + } while (fb++ != maxfb);
5246 else {
5247 - malloc_init_state(av);
5248 - check_malloc_state();
5249 + malloc_init_state(av);
5250 + check_malloc_state();
5254 @@ -279,9 +279,9 @@ void free(void* mem)
5256 /* free(0) has no effect */
5257 if (mem == NULL)
5258 - return;
5259 + return;
5261 - LOCK;
5262 + __MALLOC_LOCK;
5263 av = get_malloc_state();
5264 p = mem2chunk(mem);
5265 size = chunksize(p);
5266 @@ -289,9 +289,9 @@ void free(void* mem)
5267 check_inuse_chunk(p);
5270 - If eligible, place chunk on a fastbin so it can be found
5271 - and used quickly in malloc.
5272 - */
5273 + If eligible, place chunk on a fastbin so it can be found
5274 + and used quickly in malloc.
5275 + */
5277 if ((unsigned long)(size) <= (unsigned long)(av->max_fast)
5279 @@ -300,114 +300,114 @@ void free(void* mem)
5280 bordering top into fastbins */
5281 && (chunk_at_offset(p, size) != av->top)
5282 #endif
5283 - ) {
5284 + ) {
5286 - set_fastchunks(av);
5287 - fb = &(av->fastbins[fastbin_index(size)]);
5288 - p->fd = *fb;
5289 - *fb = p;
5290 + set_fastchunks(av);
5291 + fb = &(av->fastbins[fastbin_index(size)]);
5292 + p->fd = *fb;
5293 + *fb = p;
5297 - Consolidate other non-mmapped chunks as they arrive.
5298 - */
5299 + Consolidate other non-mmapped chunks as they arrive.
5300 + */
5302 else if (!chunk_is_mmapped(p)) {
5303 - set_anychunks(av);
5304 + set_anychunks(av);
5306 + nextchunk = chunk_at_offset(p, size);
5307 + nextsize = chunksize(nextchunk);
5309 + /* consolidate backward */
5310 + if (!prev_inuse(p)) {
5311 + prevsize = p->prev_size;
5312 + size += prevsize;
5313 + p = chunk_at_offset(p, -((long) prevsize));
5314 + unlink(p, bck, fwd);
5317 + if (nextchunk != av->top) {
5318 + /* get and clear inuse bit */
5319 + nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
5320 + set_head(nextchunk, nextsize);
5322 + /* consolidate forward */
5323 + if (!nextinuse) {
5324 + unlink(nextchunk, bck, fwd);
5325 + size += nextsize;
5328 + /*
5329 + Place the chunk in unsorted chunk list. Chunks are
5330 + not placed into regular bins until after they have
5331 + been given one chance to be used in malloc.
5332 + */
5334 + bck = unsorted_chunks(av);
5335 + fwd = bck->fd;
5336 + p->bk = bck;
5337 + p->fd = fwd;
5338 + bck->fd = p;
5339 + fwd->bk = p;
5341 - nextchunk = chunk_at_offset(p, size);
5342 - nextsize = chunksize(nextchunk);
5343 + set_head(p, size | PREV_INUSE);
5344 + set_foot(p, size);
5346 + check_free_chunk(p);
5349 + /*
5350 + If the chunk borders the current high end of memory,
5351 + consolidate into top
5352 + */
5354 - /* consolidate backward */
5355 - if (!prev_inuse(p)) {
5356 - prevsize = p->prev_size;
5357 - size += prevsize;
5358 - p = chunk_at_offset(p, -((long) prevsize));
5359 - unlink(p, bck, fwd);
5362 - if (nextchunk != av->top) {
5363 - /* get and clear inuse bit */
5364 - nextinuse = inuse_bit_at_offset(nextchunk, nextsize);
5365 - set_head(nextchunk, nextsize);
5367 - /* consolidate forward */
5368 - if (!nextinuse) {
5369 - unlink(nextchunk, bck, fwd);
5370 - size += nextsize;
5373 - /*
5374 - Place the chunk in unsorted chunk list. Chunks are
5375 - not placed into regular bins until after they have
5376 - been given one chance to be used in malloc.
5377 - */
5379 - bck = unsorted_chunks(av);
5380 - fwd = bck->fd;
5381 - p->bk = bck;
5382 - p->fd = fwd;
5383 - bck->fd = p;
5384 - fwd->bk = p;
5386 - set_head(p, size | PREV_INUSE);
5387 - set_foot(p, size);
5389 - check_free_chunk(p);
5392 - /*
5393 - If the chunk borders the current high end of memory,
5394 - consolidate into top
5395 - */
5397 - else {
5398 - size += nextsize;
5399 - set_head(p, size | PREV_INUSE);
5400 - av->top = p;
5401 - check_chunk(p);
5404 - /*
5405 - If freeing a large space, consolidate possibly-surrounding
5406 - chunks. Then, if the total unused topmost memory exceeds trim
5407 - threshold, ask malloc_trim to reduce top.
5409 - Unless max_fast is 0, we don't know if there are fastbins
5410 - bordering top, so we cannot tell for sure whether threshold
5411 - has been reached unless fastbins are consolidated. But we
5412 - don't want to consolidate on each free. As a compromise,
5413 - consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
5414 - is reached.
5415 - */
5417 - if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
5418 - if (have_fastchunks(av))
5419 - __malloc_consolidate(av);
5421 - if ((unsigned long)(chunksize(av->top)) >=
5422 - (unsigned long)(av->trim_threshold))
5423 - __malloc_trim(av->top_pad, av);
5425 + else {
5426 + size += nextsize;
5427 + set_head(p, size | PREV_INUSE);
5428 + av->top = p;
5429 + check_chunk(p);
5432 + /*
5433 + If freeing a large space, consolidate possibly-surrounding
5434 + chunks. Then, if the total unused topmost memory exceeds trim
5435 + threshold, ask malloc_trim to reduce top.
5437 + Unless max_fast is 0, we don't know if there are fastbins
5438 + bordering top, so we cannot tell for sure whether threshold
5439 + has been reached unless fastbins are consolidated. But we
5440 + don't want to consolidate on each free. As a compromise,
5441 + consolidation is performed if FASTBIN_CONSOLIDATION_THRESHOLD
5442 + is reached.
5443 + */
5445 + if ((unsigned long)(size) >= FASTBIN_CONSOLIDATION_THRESHOLD) {
5446 + if (have_fastchunks(av))
5447 + __malloc_consolidate(av);
5449 + if ((unsigned long)(chunksize(av->top)) >=
5450 + (unsigned long)(av->trim_threshold))
5451 + __malloc_trim(av->top_pad, av);
5456 - If the chunk was allocated via mmap, release via munmap()
5457 - Note that if HAVE_MMAP is false but chunk_is_mmapped is
5458 - true, then user must have overwritten memory. There's nothing
5459 - we can do to catch this error unless DEBUG is set, in which case
5460 - check_inuse_chunk (above) will have triggered error.
5461 - */
5462 + If the chunk was allocated via mmap, release via munmap()
5463 + Note that if HAVE_MMAP is false but chunk_is_mmapped is
5464 + true, then user must have overwritten memory. There's nothing
5465 + we can do to catch this error unless DEBUG is set, in which case
5466 + check_inuse_chunk (above) will have triggered error.
5467 + */
5469 else {
5470 - int ret;
5471 - size_t offset = p->prev_size;
5472 - av->n_mmaps--;
5473 - av->mmapped_mem -= (size + offset);
5474 - ret = munmap((char*)p - offset, size + offset);
5475 - /* munmap returns non-zero on failure */
5476 - assert(ret == 0);
5477 + int ret;
5478 + size_t offset = p->prev_size;
5479 + av->n_mmaps--;
5480 + av->mmapped_mem -= (size + offset);
5481 + ret = munmap((char*)p - offset, size + offset);
5482 + /* munmap returns non-zero on failure */
5483 + assert(ret == 0);
5485 - UNLOCK;
5486 + __MALLOC_UNLOCK;
5489 diff --git a/libc/stdlib/malloc-standard/mallinfo.c b/libc/stdlib/malloc-standard/mallinfo.c
5490 index 51ac423..1e0875c 100644
5491 --- a/libc/stdlib/malloc-standard/mallinfo.c
5492 +++ b/libc/stdlib/malloc-standard/mallinfo.c
5493 @@ -8,7 +8,7 @@
5494 VERSION 2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
5496 Note: There may be an updated version of this malloc obtainable at
5497 - ftp://gee.cs.oswego.edu/pub/misc/malloc.c
5498 + ftp://gee.cs.oswego.edu/pub/misc/malloc.c
5499 Check before installing!
5501 Hacked up for uClibc by Erik Andersen <andersen@codepoet.org>
5502 @@ -30,11 +30,11 @@ struct mallinfo mallinfo(void)
5503 int nblocks;
5504 int nfastblocks;
5506 - LOCK;
5507 + __MALLOC_LOCK;
5508 av = get_malloc_state();
5509 /* Ensure initialization */
5510 if (av->top == 0) {
5511 - __malloc_consolidate(av);
5512 + __malloc_consolidate(av);
5515 check_malloc_state();
5516 @@ -48,21 +48,21 @@ struct mallinfo mallinfo(void)
5517 fastavail = 0;
5519 for (i = 0; i < NFASTBINS; ++i) {
5520 - for (p = av->fastbins[i]; p != 0; p = p->fd) {
5521 - ++nfastblocks;
5522 - fastavail += chunksize(p);
5524 + for (p = av->fastbins[i]; p != 0; p = p->fd) {
5525 + ++nfastblocks;
5526 + fastavail += chunksize(p);
5530 avail += fastavail;
5532 /* traverse regular bins */
5533 for (i = 1; i < NBINS; ++i) {
5534 - b = bin_at(av, i);
5535 - for (p = last(b); p != b; p = p->bk) {
5536 - ++nblocks;
5537 - avail += chunksize(p);
5539 + b = bin_at(av, i);
5540 + for (p = last(b); p != b; p = p->bk) {
5541 + ++nblocks;
5542 + avail += chunksize(p);
5546 mi.smblks = nfastblocks;
5547 @@ -75,7 +75,7 @@ struct mallinfo mallinfo(void)
5548 mi.fsmblks = fastavail;
5549 mi.keepcost = chunksize(av->top);
5550 mi.usmblks = av->max_total_mem;
5551 - UNLOCK;
5552 + __MALLOC_UNLOCK;
5553 return mi;
5556 @@ -84,23 +84,40 @@ void malloc_stats(FILE *file)
5557 struct mallinfo mi;
5559 if (file==NULL) {
5560 - file = stderr;
5561 + file = stderr;
5564 mi = mallinfo();
5565 - fprintf(file, "total bytes allocated = %10u\n", (unsigned int)(mi.arena + mi.hblkhd));
5566 - fprintf(file, "total bytes in use bytes = %10u\n", (unsigned int)(mi.uordblks + mi.hblkhd));
5567 - fprintf(file, "total non-mmapped bytes allocated = %10d\n", mi.arena);
5568 - fprintf(file, "number of mmapped regions = %10d\n", mi.hblks);
5569 - fprintf(file, "total allocated mmap space = %10d\n", mi.hblkhd);
5570 - fprintf(file, "total allocated sbrk space = %10d\n", mi.uordblks);
5571 + fprintf(file,
5572 + "total bytes allocated = %10u\n"
5573 + "total bytes in use bytes = %10u\n"
5574 + "total non-mmapped bytes allocated = %10d\n"
5575 + "number of mmapped regions = %10d\n"
5576 + "total allocated mmap space = %10d\n"
5577 + "total allocated sbrk space = %10d\n"
5578 #if 0
5579 - fprintf(file, "number of free chunks = %10d\n", mi.ordblks);
5580 - fprintf(file, "number of fastbin blocks = %10d\n", mi.smblks);
5581 - fprintf(file, "space in freed fastbin blocks = %10d\n", mi.fsmblks);
5582 + "number of free chunks = %10d\n"
5583 + "number of fastbin blocks = %10d\n"
5584 + "space in freed fastbin blocks = %10d\n"
5585 #endif
5586 - fprintf(file, "maximum total allocated space = %10d\n", mi.usmblks);
5587 - fprintf(file, "total free space = %10d\n", mi.fordblks);
5588 - fprintf(file, "memory releasable via malloc_trim = %10d\n", mi.keepcost);
5589 + "maximum total allocated space = %10d\n"
5590 + "total free space = %10d\n"
5591 + "memory releasable via malloc_trim = %10d\n",
5593 + (unsigned int)(mi.arena + mi.hblkhd),
5594 + (unsigned int)(mi.uordblks + mi.hblkhd),
5595 + mi.arena,
5596 + mi.hblks,
5597 + mi.hblkhd,
5598 + mi.uordblks,
5599 +#if 0
5600 + mi.ordblks,
5601 + mi.smblks,
5602 + mi.fsmblks,
5603 +#endif
5604 + mi.usmblks,
5605 + mi.fordblks,
5606 + mi.keepcost
5607 + );
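malloc_stats() now emits its report with a single fprintf(): adjacent string literals concatenate into one format string, and the #if 0 block appears twice so the disabled format lines stay paired with their disabled arguments. In miniature:

    /* String-literal concatenation keeps one fprintf() call (illustrative). */
    #include <stdio.h>

    static void print_two(FILE *out, unsigned a, unsigned b)
    {
        fprintf(out,
                "first  = %10u\n"
                "second = %10u\n",
                a,
                b);
    }

    int main(void)
    {
        print_two(stdout, 123u, 456u);
        return 0;
    }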
5610 diff --git a/libc/stdlib/malloc-standard/malloc.c b/libc/stdlib/malloc-standard/malloc.c
5611 index 7025e83..60494a0 100644
5612 --- a/libc/stdlib/malloc-standard/malloc.c
5613 +++ b/libc/stdlib/malloc-standard/malloc.c
5614 @@ -8,7 +8,7 @@
5615 VERSION 2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
5617 Note: There may be an updated version of this malloc obtainable at
5618 - ftp://gee.cs.oswego.edu/pub/misc/malloc.c
5619 + ftp://gee.cs.oswego.edu/pub/misc/malloc.c
5620 Check before installing!
5622 Hacked up for uClibc by Erik Andersen <andersen@codepoet.org>
5623 @@ -17,17 +17,14 @@
5624 #define _GNU_SOURCE
5625 #include "malloc.h"
5628 -#ifdef __UCLIBC_HAS_THREADS__
5629 -pthread_mutex_t __malloc_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
5630 -#endif
5631 +__UCLIBC_MUTEX_INIT(__malloc_lock, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP);
5634 - There is exactly one instance of this struct in this malloc.
5635 - If you are adapting this malloc in a way that does NOT use a static
5636 - malloc_state, you MUST explicitly zero-fill it before using. This
5637 - malloc relies on the property that malloc_state is initialized to
5638 - all zeroes (as is true of C statics).
5639 + There is exactly one instance of this struct in this malloc.
5640 + If you are adapting this malloc in a way that does NOT use a static
5641 + malloc_state, you MUST explicitly zero-fill it before using. This
5642 + malloc relies on the property that malloc_state is initialized to
5643 + all zeroes (as is true of C statics).
5645 struct malloc_state __malloc_state; /* never directly referenced */
5647 @@ -77,30 +74,30 @@ void __do_check_chunk(mchunkptr p)
5649 if (!chunk_is_mmapped(p)) {
5651 - /* Has legal address ... */
5652 - if (p != av->top) {
5653 - if (contiguous(av)) {
5654 - assert(((char*)p) >= min_address);
5655 - assert(((char*)p + sz) <= ((char*)(av->top)));
5658 - else {
5659 - /* top size is always at least MINSIZE */
5660 - assert((unsigned long)(sz) >= MINSIZE);
5661 - /* top predecessor always marked inuse */
5662 - assert(prev_inuse(p));
5664 + /* Has legal address ... */
5665 + if (p != av->top) {
5666 + if (contiguous(av)) {
5667 + assert(((char*)p) >= min_address);
5668 + assert(((char*)p + sz) <= ((char*)(av->top)));
5671 + else {
5672 + /* top size is always at least MINSIZE */
5673 + assert((unsigned long)(sz) >= MINSIZE);
5674 + /* top predecessor always marked inuse */
5675 + assert(prev_inuse(p));
5679 else {
5680 - /* address is outside main heap */
5681 - if (contiguous(av) && av->top != initial_top(av)) {
5682 - assert(((char*)p) < min_address || ((char*)p) > max_address);
5684 - /* chunk is page-aligned */
5685 - assert(((p->prev_size + sz) & (av->pagesize-1)) == 0);
5686 - /* mem is aligned */
5687 - assert(aligned_OK(chunk2mem(p)));
5688 + /* address is outside main heap */
5689 + if (contiguous(av) && av->top != initial_top(av)) {
5690 + assert(((char*)p) < min_address || ((char*)p) > max_address);
5692 + /* chunk is page-aligned */
5693 + assert(((p->prev_size + sz) & (av->pagesize-1)) == 0);
5694 + /* mem is aligned */
5695 + assert(aligned_OK(chunk2mem(p)));
5699 @@ -121,21 +118,21 @@ void __do_check_free_chunk(mchunkptr p)
5701 /* Unless a special marker, must have OK fields */
5702 if ((unsigned long)(sz) >= MINSIZE)
5704 - assert((sz & MALLOC_ALIGN_MASK) == 0);
5705 - assert(aligned_OK(chunk2mem(p)));
5706 - /* ... matching footer field */
5707 - assert(next->prev_size == sz);
5708 - /* ... and is fully consolidated */
5709 - assert(prev_inuse(p));
5710 - assert (next == av->top || inuse(next));
5712 - /* ... and has minimally sane links */
5713 - assert(p->fd->bk == p);
5714 - assert(p->bk->fd == p);
5717 + assert((sz & MALLOC_ALIGN_MASK) == 0);
5718 + assert(aligned_OK(chunk2mem(p)));
5719 + /* ... matching footer field */
5720 + assert(next->prev_size == sz);
5721 + /* ... and is fully consolidated */
5722 + assert(prev_inuse(p));
5723 + assert (next == av->top || inuse(next));
5725 + /* ... and has minimally sane links */
5726 + assert(p->fd->bk == p);
5727 + assert(p->bk->fd == p);
5729 else /* markers are always of size (sizeof(size_t)) */
5730 - assert(sz == (sizeof(size_t)));
5731 + assert(sz == (sizeof(size_t)));
5734 /* Properties of inuse chunks */
5735 @@ -146,7 +143,7 @@ void __do_check_inuse_chunk(mchunkptr p)
5736 __do_check_chunk(p);
5738 if (chunk_is_mmapped(p))
5739 - return; /* mmapped chunks have no next/prev */
5740 + return; /* mmapped chunks have no next/prev */
5742 /* Check whether it claims to be in use ... */
5743 assert(inuse(p));
5744 @@ -156,20 +153,20 @@ void __do_check_inuse_chunk(mchunkptr p)
5745 /* ... and is surrounded by OK chunks.
5746 Since more things can be checked with free chunks than inuse ones,
5747 if an inuse chunk borders them and debug is on, it's worth doing them.
5748 - */
5749 + */
5750 if (!prev_inuse(p)) {
5751 - /* Note that we cannot even look at prev unless it is not inuse */
5752 - mchunkptr prv = prev_chunk(p);
5753 - assert(next_chunk(prv) == p);
5754 - __do_check_free_chunk(prv);
5755 + /* Note that we cannot even look at prev unless it is not inuse */
5756 + mchunkptr prv = prev_chunk(p);
5757 + assert(next_chunk(prv) == p);
5758 + __do_check_free_chunk(prv);
5761 if (next == av->top) {
5762 - assert(prev_inuse(next));
5763 - assert(chunksize(next) >= MINSIZE);
5764 + assert(prev_inuse(next));
5765 + assert(chunksize(next) >= MINSIZE);
5767 else if (!inuse(next))
5768 - __do_check_free_chunk(next);
5769 + __do_check_free_chunk(next);
5772 /* Properties of chunks recycled from fastbins */
5773 @@ -198,14 +195,14 @@ void __do_check_malloced_chunk(mchunkptr
5774 __do_check_remalloced_chunk(p, s);
5777 - ... plus, must obey implementation invariant that prev_inuse is
5778 - always true of any allocated chunk; i.e., that each allocated
5779 - chunk borders either a previously allocated and still in-use
5780 - chunk, or the base of its memory arena. This is ensured
5781 - by making all allocations from the the `lowest' part of any found
5782 - chunk. This does not necessarily hold however for chunks
5783 - recycled via fastbins.
5784 - */
5785 + ... plus, must obey implementation invariant that prev_inuse is
5786 + always true of any allocated chunk; i.e., that each allocated
5787 + chunk borders either a previously allocated and still in-use
5788 + chunk, or the base of its memory arena. This is ensured
5789 + by making all allocations from the the `lowest' part of any found
5790 + chunk. This does not necessarily hold however for chunks
5791 + recycled via fastbins.
5792 + */
5794 assert(prev_inuse(p));
5796 @@ -243,7 +240,7 @@ void __do_check_malloc_state(void)
5798 /* cannot run remaining checks until fully initialized */
5799 if (av->top == 0 || av->top == initial_top(av))
5800 - return;
5801 + return;
5803 /* pagesize is a power of 2 */
5804 assert((av->pagesize & (av->pagesize-1)) == 0);
5805 @@ -256,64 +253,64 @@ void __do_check_malloc_state(void)
5806 max_fast_bin = fastbin_index(av->max_fast);
5808 for (i = 0; i < NFASTBINS; ++i) {
5809 - p = av->fastbins[i];
5810 + p = av->fastbins[i];
5812 - /* all bins past max_fast are empty */
5813 - if (i > max_fast_bin)
5814 - assert(p == 0);
5816 - while (p != 0) {
5817 - /* each chunk claims to be inuse */
5818 - __do_check_inuse_chunk(p);
5819 - total += chunksize(p);
5820 - /* chunk belongs in this bin */
5821 - assert(fastbin_index(chunksize(p)) == i);
5822 - p = p->fd;
5824 + /* all bins past max_fast are empty */
5825 + if (i > max_fast_bin)
5826 + assert(p == 0);
5828 + while (p != 0) {
5829 + /* each chunk claims to be inuse */
5830 + __do_check_inuse_chunk(p);
5831 + total += chunksize(p);
5832 + /* chunk belongs in this bin */
5833 + assert(fastbin_index(chunksize(p)) == i);
5834 + p = p->fd;
5838 if (total != 0)
5839 - assert(have_fastchunks(av));
5840 + assert(have_fastchunks(av));
5841 else if (!have_fastchunks(av))
5842 - assert(total == 0);
5843 + assert(total == 0);
5845 /* check normal bins */
5846 for (i = 1; i < NBINS; ++i) {
5847 - b = bin_at(av,i);
5848 + b = bin_at(av,i);
5850 - /* binmap is accurate (except for bin 1 == unsorted_chunks) */
5851 - if (i >= 2) {
5852 - binbit = get_binmap(av,i);
5853 - empty = last(b) == b;
5854 - if (!binbit)
5855 - assert(empty);
5856 - else if (!empty)
5857 - assert(binbit);
5860 - for (p = last(b); p != b; p = p->bk) {
5861 - /* each chunk claims to be free */
5862 - __do_check_free_chunk(p);
5863 - size = chunksize(p);
5864 - total += size;
5865 - if (i >= 2) {
5866 - /* chunk belongs in bin */
5867 - idx = bin_index(size);
5868 - assert(idx == i);
5869 - /* lists are sorted */
5870 - if ((unsigned long) size >= (unsigned long)(FIRST_SORTED_BIN_SIZE)) {
5871 - assert(p->bk == b ||
5872 - (unsigned long)chunksize(p->bk) >=
5873 - (unsigned long)chunksize(p));
5876 - /* chunk is followed by a legal chain of inuse chunks */
5877 - for (q = next_chunk(p);
5878 - (q != av->top && inuse(q) &&
5879 - (unsigned long)(chunksize(q)) >= MINSIZE);
5880 - q = next_chunk(q))
5881 - __do_check_inuse_chunk(q);
5883 + /* binmap is accurate (except for bin 1 == unsorted_chunks) */
5884 + if (i >= 2) {
5885 + binbit = get_binmap(av,i);
5886 + empty = last(b) == b;
5887 + if (!binbit)
5888 + assert(empty);
5889 + else if (!empty)
5890 + assert(binbit);
5893 + for (p = last(b); p != b; p = p->bk) {
5894 + /* each chunk claims to be free */
5895 + __do_check_free_chunk(p);
5896 + size = chunksize(p);
5897 + total += size;
5898 + if (i >= 2) {
5899 + /* chunk belongs in bin */
5900 + idx = bin_index(size);
5901 + assert(idx == i);
5902 + /* lists are sorted */
5903 + if ((unsigned long) size >= (unsigned long)(FIRST_SORTED_BIN_SIZE)) {
5904 + assert(p->bk == b ||
5905 + (unsigned long)chunksize(p->bk) >=
5906 + (unsigned long)chunksize(p));
5909 + /* chunk is followed by a legal chain of inuse chunks */
5910 + for (q = next_chunk(p);
5911 + (q != av->top && inuse(q) &&
5912 + (unsigned long)(chunksize(q)) >= MINSIZE);
5913 + q = next_chunk(q))
5914 + __do_check_inuse_chunk(q);
5918 /* top chunk is OK */
5919 @@ -326,13 +323,13 @@ void __do_check_malloc_state(void)
5920 assert(av->n_mmaps <= av->max_n_mmaps);
5922 assert((unsigned long)(av->sbrked_mem) <=
5923 - (unsigned long)(av->max_sbrked_mem));
5924 + (unsigned long)(av->max_sbrked_mem));
5926 assert((unsigned long)(av->mmapped_mem) <=
5927 - (unsigned long)(av->max_mmapped_mem));
5928 + (unsigned long)(av->max_mmapped_mem));
5930 assert((unsigned long)(av->max_total_mem) >=
5931 - (unsigned long)(av->mmapped_mem) + (unsigned long)(av->sbrked_mem));
5932 + (unsigned long)(av->mmapped_mem) + (unsigned long)(av->sbrked_mem));
5934 #endif
5936 @@ -370,84 +367,84 @@ static void* __malloc_alloc(size_t nb, m
5937 size_t pagemask = av->pagesize - 1;
5940 - If there is space available in fastbins, consolidate and retry
5941 - malloc from scratch rather than getting memory from system. This
5942 - can occur only if nb is in smallbin range so we didn't consolidate
5943 - upon entry to malloc. It is much easier to handle this case here
5944 - than in malloc proper.
5945 - */
5946 + If there is space available in fastbins, consolidate and retry
5947 + malloc from scratch rather than getting memory from system. This
5948 + can occur only if nb is in smallbin range so we didn't consolidate
5949 + upon entry to malloc. It is much easier to handle this case here
5950 + than in malloc proper.
5951 + */
5953 if (have_fastchunks(av)) {
5954 - assert(in_smallbin_range(nb));
5955 - __malloc_consolidate(av);
5956 - return malloc(nb - MALLOC_ALIGN_MASK);
5957 + assert(in_smallbin_range(nb));
5958 + __malloc_consolidate(av);
5959 + return malloc(nb - MALLOC_ALIGN_MASK);
5964 - If have mmap, and the request size meets the mmap threshold, and
5965 - the system supports mmap, and there are few enough currently
5966 - allocated mmapped regions, try to directly map this request
5967 - rather than expanding top.
5968 - */
5969 + If have mmap, and the request size meets the mmap threshold, and
5970 + the system supports mmap, and there are few enough currently
5971 + allocated mmapped regions, try to directly map this request
5972 + rather than expanding top.
5973 + */
5975 if ((unsigned long)(nb) >= (unsigned long)(av->mmap_threshold) &&
5976 (av->n_mmaps < av->n_mmaps_max)) {
5978 - char* mm; /* return value from mmap call*/
5980 - /*
5981 - Round up size to nearest page. For mmapped chunks, the overhead
5982 - is one (sizeof(size_t)) unit larger than for normal chunks, because there
5983 - is no following chunk whose prev_size field could be used.
5984 - */
5985 - size = (nb + (sizeof(size_t)) + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
5987 - /* Don't try if size wraps around 0 */
5988 - if ((unsigned long)(size) > (unsigned long)(nb)) {
5990 - mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE));
5992 - if (mm != (char*)(MORECORE_FAILURE)) {
5993 + char* mm; /* return value from mmap call*/
5996 - The offset to the start of the mmapped region is stored
5997 - in the prev_size field of the chunk. This allows us to adjust
5998 - returned start address to meet alignment requirements here
5999 - and in memalign(), and still be able to compute proper
6000 - address argument for later munmap in free() and realloc().
6001 - */
6003 - front_misalign = (size_t)chunk2mem(mm) & MALLOC_ALIGN_MASK;
6004 - if (front_misalign > 0) {
6005 - correction = MALLOC_ALIGNMENT - front_misalign;
6006 - p = (mchunkptr)(mm + correction);
6007 - p->prev_size = correction;
6008 - set_head(p, (size - correction) |IS_MMAPPED);
6010 - else {
6011 - p = (mchunkptr)mm;
6012 - p->prev_size = 0;
6013 - set_head(p, size|IS_MMAPPED);
6015 + Round up size to nearest page. For mmapped chunks, the overhead
6016 + is one (sizeof(size_t)) unit larger than for normal chunks, because there
6017 + is no following chunk whose prev_size field could be used.
6018 + */
6019 + size = (nb + (sizeof(size_t)) + MALLOC_ALIGN_MASK + pagemask) & ~pagemask;
6021 + /* Don't try if size wraps around 0 */
6022 + if ((unsigned long)(size) > (unsigned long)(nb)) {
6024 + mm = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE));
6026 + if (mm != (char*)(MORECORE_FAILURE)) {
6028 + /*
6029 + The offset to the start of the mmapped region is stored
6030 + in the prev_size field of the chunk. This allows us to adjust
6031 + returned start address to meet alignment requirements here
6032 + and in memalign(), and still be able to compute proper
6033 + address argument for later munmap in free() and realloc().
6034 + */
6036 + front_misalign = (size_t)chunk2mem(mm) & MALLOC_ALIGN_MASK;
6037 + if (front_misalign > 0) {
6038 + correction = MALLOC_ALIGNMENT - front_misalign;
6039 + p = (mchunkptr)(mm + correction);
6040 + p->prev_size = correction;
6041 + set_head(p, (size - correction) |IS_MMAPPED);
6043 + else {
6044 + p = (mchunkptr)mm;
6045 + p->prev_size = 0;
6046 + set_head(p, size|IS_MMAPPED);
6049 + /* update statistics */
6051 + if (++av->n_mmaps > av->max_n_mmaps)
6052 + av->max_n_mmaps = av->n_mmaps;
6054 + sum = av->mmapped_mem += size;
6055 + if (sum > (unsigned long)(av->max_mmapped_mem))
6056 + av->max_mmapped_mem = sum;
6057 + sum += av->sbrked_mem;
6058 + if (sum > (unsigned long)(av->max_total_mem))
6059 + av->max_total_mem = sum;
6061 - /* update statistics */
6062 + check_chunk(p);
6064 - if (++av->n_mmaps > av->max_n_mmaps)
6065 - av->max_n_mmaps = av->n_mmaps;
6067 - sum = av->mmapped_mem += size;
6068 - if (sum > (unsigned long)(av->max_mmapped_mem))
6069 - av->max_mmapped_mem = sum;
6070 - sum += av->sbrked_mem;
6071 - if (sum > (unsigned long)(av->max_total_mem))
6072 - av->max_total_mem = sum;
6074 - check_chunk(p);
6076 - return chunk2mem(p);
6079 + return chunk2mem(p);
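The mmap branch above rounds the padded request up to whole pages, adding one extra size_t of overhead because an mmapped chunk has no successor whose prev_size field it could borrow, and it records any front-alignment correction in prev_size so that free() and realloc() can recover the real mapping base for munmap. A minimal standalone sketch of the sizing arithmetic, assuming (this excerpt does not state it) that MALLOC_ALIGN_MASK is 2*sizeof(size_t)-1 as in classic dlmalloc layouts:

/* hedged sketch: the request size, alignment mask and page size below are
 * illustrative; only the rounding formula is taken from the hunk above */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    size_t nb = 100000;                           /* padded request size */
    size_t pagesize = (size_t)sysconf(_SC_PAGESIZE);
    size_t pagemask = pagesize - 1;
    size_t align_mask = 2 * sizeof(size_t) - 1;   /* assumed MALLOC_ALIGN_MASK */

    /* one extra size_t of header, then round up to a whole number of pages */
    size_t size = (nb + sizeof(size_t) + align_mask + pagemask) & ~pagemask;

    printf("request %zu -> mmap length %zu (%zu pages)\n",
           nb, size, size / pagesize);
    return 0;
}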
6084 /* Record incoming configuration of top */
6085 @@ -462,8 +459,8 @@ static void* __malloc_alloc(size_t nb, m
6086 * be at least MINSIZE and to have prev_inuse set. */
6088 assert((old_top == initial_top(av) && old_size == 0) ||
6089 - ((unsigned long) (old_size) >= MINSIZE &&
6090 - prev_inuse(old_top)));
6091 + ((unsigned long) (old_size) >= MINSIZE &&
6092 + prev_inuse(old_top)));
6094 /* Precondition: not enough current space to satisfy nb request */
6095 assert((unsigned long)(old_size) < (unsigned long)(nb + MINSIZE));
6096 @@ -477,272 +474,272 @@ static void* __malloc_alloc(size_t nb, m
6097 size = nb + av->top_pad + MINSIZE;
6100 - If contiguous, we can subtract out existing space that we hope to
6101 - combine with new space. We add it back later only if
6102 - we don't actually get contiguous space.
6103 - */
6104 + If contiguous, we can subtract out existing space that we hope to
6105 + combine with new space. We add it back later only if
6106 + we don't actually get contiguous space.
6107 + */
6109 if (contiguous(av))
6110 - size -= old_size;
6111 + size -= old_size;
6114 - Round to a multiple of page size.
6115 - If MORECORE is not contiguous, this ensures that we only call it
6116 - with whole-page arguments. And if MORECORE is contiguous and
6117 - this is not first time through, this preserves page-alignment of
6118 - previous calls. Otherwise, we correct to page-align below.
6119 - */
6120 + Round to a multiple of page size.
6121 + If MORECORE is not contiguous, this ensures that we only call it
6122 + with whole-page arguments. And if MORECORE is contiguous and
6123 + this is not first time through, this preserves page-alignment of
6124 + previous calls. Otherwise, we correct to page-align below.
6125 + */
6127 size = (size + pagemask) & ~pagemask;
6130 - Don't try to call MORECORE if argument is so big as to appear
6131 - negative. Note that since mmap takes size_t arg, it may succeed
6132 - below even if we cannot call MORECORE.
6133 - */
6134 + Don't try to call MORECORE if argument is so big as to appear
6135 + negative. Note that since mmap takes size_t arg, it may succeed
6136 + below even if we cannot call MORECORE.
6137 + */
6139 if (size > 0)
6140 - brk = (char*)(MORECORE(size));
6141 + brk = (char*)(MORECORE(size));
6144 - If have mmap, try using it as a backup when MORECORE fails or
6145 - cannot be used. This is worth doing on systems that have "holes" in
6146 - address space, so sbrk cannot extend to give contiguous space, but
6147 - space is available elsewhere. Note that we ignore mmap max count
6148 - and threshold limits, since the space will not be used as a
6149 - segregated mmap region.
6150 - */
6151 + If have mmap, try using it as a backup when MORECORE fails or
6152 + cannot be used. This is worth doing on systems that have "holes" in
6153 + address space, so sbrk cannot extend to give contiguous space, but
6154 + space is available elsewhere. Note that we ignore mmap max count
6155 + and threshold limits, since the space will not be used as a
6156 + segregated mmap region.
6157 + */
6159 if (brk == (char*)(MORECORE_FAILURE)) {
6161 - /* Cannot merge with old top, so add its size back in */
6162 - if (contiguous(av))
6163 - size = (size + old_size + pagemask) & ~pagemask;
6165 - /* If we are relying on mmap as backup, then use larger units */
6166 - if ((unsigned long)(size) < (unsigned long)(MMAP_AS_MORECORE_SIZE))
6167 - size = MMAP_AS_MORECORE_SIZE;
6169 - /* Don't try if size wraps around 0 */
6170 - if ((unsigned long)(size) > (unsigned long)(nb)) {
6172 - brk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE));
6174 - if (brk != (char*)(MORECORE_FAILURE)) {
6176 - /* We do not need, and cannot use, another sbrk call to find end */
6177 - snd_brk = brk + size;
6179 - /* Record that we no longer have a contiguous sbrk region.
6180 - After the first time mmap is used as backup, we do not
6181 - ever rely on contiguous space since this could incorrectly
6182 - bridge regions.
6183 - */
6184 - set_noncontiguous(av);
6187 + /* Cannot merge with old top, so add its size back in */
6188 + if (contiguous(av))
6189 + size = (size + old_size + pagemask) & ~pagemask;
6191 + /* If we are relying on mmap as backup, then use larger units */
6192 + if ((unsigned long)(size) < (unsigned long)(MMAP_AS_MORECORE_SIZE))
6193 + size = MMAP_AS_MORECORE_SIZE;
6195 + /* Don't try if size wraps around 0 */
6196 + if ((unsigned long)(size) > (unsigned long)(nb)) {
6198 + brk = (char*)(MMAP(0, size, PROT_READ|PROT_WRITE));
6200 + if (brk != (char*)(MORECORE_FAILURE)) {
6202 + /* We do not need, and cannot use, another sbrk call to find end */
6203 + snd_brk = brk + size;
6205 + /* Record that we no longer have a contiguous sbrk region.
6206 + After the first time mmap is used as backup, we do not
6207 + ever rely on contiguous space since this could incorrectly
6208 + bridge regions.
6209 + */
6210 + set_noncontiguous(av);
6215 if (brk != (char*)(MORECORE_FAILURE)) {
6216 - av->sbrked_mem += size;
6217 + av->sbrked_mem += size;
6219 - /*
6220 - If MORECORE extends previous space, we can likewise extend top size.
6221 - */
6223 - if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE)) {
6224 - set_head(old_top, (size + old_size) | PREV_INUSE);
6227 - /*
6228 - Otherwise, make adjustments:
6230 - * If the first time through or noncontiguous, we need to call sbrk
6231 - just to find out where the end of memory lies.
6233 - * We need to ensure that all returned chunks from malloc will meet
6234 - MALLOC_ALIGNMENT
6236 - * If there was an intervening foreign sbrk, we need to adjust sbrk
6237 - request size to account for fact that we will not be able to
6238 - combine new space with existing space in old_top.
6240 - * Almost all systems internally allocate whole pages at a time, in
6241 - which case we might as well use the whole last page of request.
6242 - So we allocate enough more memory to hit a page boundary now,
6243 - which in turn causes future contiguous calls to page-align.
6244 - */
6246 - else {
6247 - front_misalign = 0;
6248 - end_misalign = 0;
6249 - correction = 0;
6250 - aligned_brk = brk;
6252 - /*
6253 - If MORECORE returns an address lower than we have seen before,
6254 - we know it isn't really contiguous. This and some subsequent
6255 - checks help cope with non-conforming MORECORE functions and
6256 - the presence of "foreign" calls to MORECORE from outside of
6257 - malloc or by other threads. We cannot guarantee to detect
6258 - these in all cases, but cope with the ones we do detect.
6259 - */
6260 - if (contiguous(av) && old_size != 0 && brk < old_end) {
6261 - set_noncontiguous(av);
6264 - /* handle contiguous cases */
6265 - if (contiguous(av)) {
6267 - /* We can tolerate forward non-contiguities here (usually due
6268 - to foreign calls) but treat them as part of our space for
6269 - stats reporting. */
6270 - if (old_size != 0)
6271 - av->sbrked_mem += brk - old_end;
6273 - /* Guarantee alignment of first new chunk made from this space */
6275 - front_misalign = (size_t)chunk2mem(brk) & MALLOC_ALIGN_MASK;
6276 - if (front_misalign > 0) {
6278 - /*
6279 - Skip over some bytes to arrive at an aligned position.
6280 - We don't need to specially mark these wasted front bytes.
6281 - They will never be accessed anyway because
6282 - prev_inuse of av->top (and any chunk created from its start)
6283 - is always true after initialization.
6284 - */
6285 + /*
6286 + If MORECORE extends previous space, we can likewise extend top size.
6287 + */
6289 - correction = MALLOC_ALIGNMENT - front_misalign;
6290 - aligned_brk += correction;
6291 + if (brk == old_end && snd_brk == (char*)(MORECORE_FAILURE)) {
6292 + set_head(old_top, (size + old_size) | PREV_INUSE);
6296 - If this isn't adjacent to existing space, then we will not
6297 - be able to merge with old_top space, so must add to 2nd request.
6298 - */
6300 - correction += old_size;
6302 - /* Extend the end address to hit a page boundary */
6303 - end_misalign = (size_t)(brk + size + correction);
6304 - correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
6306 - assert(correction >= 0);
6307 - snd_brk = (char*)(MORECORE(correction));
6309 - if (snd_brk == (char*)(MORECORE_FAILURE)) {
6310 - /*
6311 - If can't allocate correction, try to at least find out current
6312 - brk. It might be enough to proceed without failing.
6313 - */
6314 - correction = 0;
6315 - snd_brk = (char*)(MORECORE(0));
6317 - else if (snd_brk < brk) {
6318 - /*
6319 - If the second call gives noncontiguous space even though
6320 - it says it won't, the only course of action is to ignore
6321 - results of second call, and conservatively estimate where
6322 - the first call left us. Also set noncontiguous, so this
6323 - won't happen again, leaving at most one hole.
6325 - Note that this check is intrinsically incomplete. Because
6326 - MORECORE is allowed to give more space than we ask for,
6327 - there is no reliable way to detect a noncontiguity
6328 - producing a forward gap for the second call.
6329 - */
6330 - snd_brk = brk + size;
6331 - correction = 0;
6332 - set_noncontiguous(av);
6337 - /* handle non-contiguous cases */
6338 - else {
6339 - /* MORECORE/mmap must correctly align */
6340 - assert(aligned_OK(chunk2mem(brk)));
6342 - /* Find out current end of memory */
6343 - if (snd_brk == (char*)(MORECORE_FAILURE)) {
6344 - snd_brk = (char*)(MORECORE(0));
6345 - av->sbrked_mem += snd_brk - brk - size;
6349 - /* Adjust top based on results of second sbrk */
6350 - if (snd_brk != (char*)(MORECORE_FAILURE)) {
6351 - av->top = (mchunkptr)aligned_brk;
6352 - set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
6353 - av->sbrked_mem += correction;
6354 + Otherwise, make adjustments:
6356 - /*
6357 - If not the first time through, we either have a
6358 - gap due to foreign sbrk or a non-contiguous region. Insert a
6359 - double fencepost at old_top to prevent consolidation with space
6360 - we don't own. These fenceposts are artificial chunks that are
6361 - marked as inuse and are in any case too small to use. We need
6362 - two to make sizes and alignments work out.
6363 - */
6365 - if (old_size != 0) {
6366 - /* Shrink old_top to insert fenceposts, keeping size a
6367 - multiple of MALLOC_ALIGNMENT. We know there is at least
6368 - enough space in old_top to do this.
6369 - */
6370 - old_size = (old_size - 3*(sizeof(size_t))) & ~MALLOC_ALIGN_MASK;
6371 - set_head(old_top, old_size | PREV_INUSE);
6373 - /*
6374 - Note that the following assignments completely overwrite
6375 - old_top when old_size was previously MINSIZE. This is
6376 - intentional. We need the fencepost, even if old_top otherwise gets
6377 - lost.
6378 - */
6379 - chunk_at_offset(old_top, old_size )->size =
6380 - (sizeof(size_t))|PREV_INUSE;
6382 - chunk_at_offset(old_top, old_size + (sizeof(size_t)))->size =
6383 - (sizeof(size_t))|PREV_INUSE;
6385 - /* If possible, release the rest, suppressing trimming. */
6386 - if (old_size >= MINSIZE) {
6387 - size_t tt = av->trim_threshold;
6388 - av->trim_threshold = (size_t)(-1);
6389 - free(chunk2mem(old_top));
6390 - av->trim_threshold = tt;
6396 - /* Update statistics */
6397 - sum = av->sbrked_mem;
6398 - if (sum > (unsigned long)(av->max_sbrked_mem))
6399 - av->max_sbrked_mem = sum;
6401 - sum += av->mmapped_mem;
6402 - if (sum > (unsigned long)(av->max_total_mem))
6403 - av->max_total_mem = sum;
6405 - check_malloc_state();
6407 - /* finally, do the allocation */
6409 - p = av->top;
6410 - size = chunksize(p);
6412 - /* check that one of the above allocation paths succeeded */
6413 - if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
6414 - remainder_size = size - nb;
6415 - remainder = chunk_at_offset(p, nb);
6416 - av->top = remainder;
6417 - set_head(p, nb | PREV_INUSE);
6418 - set_head(remainder, remainder_size | PREV_INUSE);
6419 - check_malloced_chunk(p, nb);
6420 - return chunk2mem(p);
6422 + * If the first time through or noncontiguous, we need to call sbrk
6423 + just to find out where the end of memory lies.
6425 + * We need to ensure that all returned chunks from malloc will meet
6426 + MALLOC_ALIGNMENT
6428 + * If there was an intervening foreign sbrk, we need to adjust sbrk
6429 + request size to account for fact that we will not be able to
6430 + combine new space with existing space in old_top.
6432 + * Almost all systems internally allocate whole pages at a time, in
6433 + which case we might as well use the whole last page of request.
6434 + So we allocate enough more memory to hit a page boundary now,
6435 + which in turn causes future contiguous calls to page-align.
6436 + */
6438 + else {
6439 + front_misalign = 0;
6440 + end_misalign = 0;
6441 + correction = 0;
6442 + aligned_brk = brk;
6444 + /*
6445 + If MORECORE returns an address lower than we have seen before,
6446 + we know it isn't really contiguous. This and some subsequent
6447 + checks help cope with non-conforming MORECORE functions and
6448 + the presence of "foreign" calls to MORECORE from outside of
6449 + malloc or by other threads. We cannot guarantee to detect
6450 + these in all cases, but cope with the ones we do detect.
6451 + */
6452 + if (contiguous(av) && old_size != 0 && brk < old_end) {
6453 + set_noncontiguous(av);
6456 + /* handle contiguous cases */
6457 + if (contiguous(av)) {
6459 + /* We can tolerate forward non-contiguities here (usually due
6460 + to foreign calls) but treat them as part of our space for
6461 + stats reporting. */
6462 + if (old_size != 0)
6463 + av->sbrked_mem += brk - old_end;
6465 + /* Guarantee alignment of first new chunk made from this space */
6467 + front_misalign = (size_t)chunk2mem(brk) & MALLOC_ALIGN_MASK;
6468 + if (front_misalign > 0) {
6470 + /*
6471 + Skip over some bytes to arrive at an aligned position.
6472 + We don't need to specially mark these wasted front bytes.
6473 + They will never be accessed anyway because
6474 + prev_inuse of av->top (and any chunk created from its start)
6475 + is always true after initialization.
6476 + */
6478 + correction = MALLOC_ALIGNMENT - front_misalign;
6479 + aligned_brk += correction;
6482 + /*
6483 + If this isn't adjacent to existing space, then we will not
6484 + be able to merge with old_top space, so must add to 2nd request.
6485 + */
6487 + correction += old_size;
6489 + /* Extend the end address to hit a page boundary */
6490 + end_misalign = (size_t)(brk + size + correction);
6491 + correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;
6493 + assert(correction >= 0);
6494 + snd_brk = (char*)(MORECORE(correction));
6496 + if (snd_brk == (char*)(MORECORE_FAILURE)) {
6497 + /*
6498 + If can't allocate correction, try to at least find out current
6499 + brk. It might be enough to proceed without failing.
6500 + */
6501 + correction = 0;
6502 + snd_brk = (char*)(MORECORE(0));
6504 + else if (snd_brk < brk) {
6505 + /*
6506 + If the second call gives noncontiguous space even though
6507 + it says it won't, the only course of action is to ignore
6508 + results of second call, and conservatively estimate where
6509 + the first call left us. Also set noncontiguous, so this
6510 + won't happen again, leaving at most one hole.
6512 + Note that this check is intrinsically incomplete. Because
6513 + MORECORE is allowed to give more space than we ask for,
6514 + there is no reliable way to detect a noncontiguity
6515 + producing a forward gap for the second call.
6516 + */
6517 + snd_brk = brk + size;
6518 + correction = 0;
6519 + set_noncontiguous(av);
6524 + /* handle non-contiguous cases */
6525 + else {
6526 + /* MORECORE/mmap must correctly align */
6527 + assert(aligned_OK(chunk2mem(brk)));
6529 + /* Find out current end of memory */
6530 + if (snd_brk == (char*)(MORECORE_FAILURE)) {
6531 + snd_brk = (char*)(MORECORE(0));
6532 + av->sbrked_mem += snd_brk - brk - size;
6536 + /* Adjust top based on results of second sbrk */
6537 + if (snd_brk != (char*)(MORECORE_FAILURE)) {
6538 + av->top = (mchunkptr)aligned_brk;
6539 + set_head(av->top, (snd_brk - aligned_brk + correction) | PREV_INUSE);
6540 + av->sbrked_mem += correction;
6542 + /*
6543 + If not the first time through, we either have a
6544 + gap due to foreign sbrk or a non-contiguous region. Insert a
6545 + double fencepost at old_top to prevent consolidation with space
6546 + we don't own. These fenceposts are artificial chunks that are
6547 + marked as inuse and are in any case too small to use. We need
6548 + two to make sizes and alignments work out.
6549 + */
6551 + if (old_size != 0) {
6552 + /* Shrink old_top to insert fenceposts, keeping size a
6553 + multiple of MALLOC_ALIGNMENT. We know there is at least
6554 + enough space in old_top to do this.
6555 + */
6556 + old_size = (old_size - 3*(sizeof(size_t))) & ~MALLOC_ALIGN_MASK;
6557 + set_head(old_top, old_size | PREV_INUSE);
6559 + /*
6560 + Note that the following assignments completely overwrite
6561 + old_top when old_size was previously MINSIZE. This is
6562 + intentional. We need the fencepost, even if old_top otherwise gets
6563 + lost.
6564 + */
6565 + chunk_at_offset(old_top, old_size )->size =
6566 + (sizeof(size_t))|PREV_INUSE;
6568 + chunk_at_offset(old_top, old_size + (sizeof(size_t)))->size =
6569 + (sizeof(size_t))|PREV_INUSE;
6571 + /* If possible, release the rest, suppressing trimming. */
6572 + if (old_size >= MINSIZE) {
6573 + size_t tt = av->trim_threshold;
6574 + av->trim_threshold = (size_t)(-1);
6575 + free(chunk2mem(old_top));
6576 + av->trim_threshold = tt;
6582 + /* Update statistics */
6583 + sum = av->sbrked_mem;
6584 + if (sum > (unsigned long)(av->max_sbrked_mem))
6585 + av->max_sbrked_mem = sum;
6587 + sum += av->mmapped_mem;
6588 + if (sum > (unsigned long)(av->max_total_mem))
6589 + av->max_total_mem = sum;
6591 + check_malloc_state();
6593 + /* finally, do the allocation */
6595 + p = av->top;
6596 + size = chunksize(p);
6598 + /* check that one of the above allocation paths succeeded */
6599 + if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
6600 + remainder_size = size - nb;
6601 + remainder = chunk_at_offset(p, nb);
6602 + av->top = remainder;
6603 + set_head(p, nb | PREV_INUSE);
6604 + set_head(remainder, remainder_size | PREV_INUSE);
6605 + check_malloced_chunk(p, nb);
6606 + return chunk2mem(p);
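A detail worth isolating from the hunk above is the second MORECORE call: once the front misalignment has been measured (and old_size folded in when the new space is not adjacent to the old top), the allocator asks for just enough additional bytes that the new end of memory lands on a page boundary. A toy computation with made-up numbers:

/* hedged sketch: brk, size and the initial correction are invented values;
 * only the end_misalign rounding step mirrors the code above */
#include <stdio.h>

int main(void)
{
    unsigned long pagesize = 4096;
    unsigned long pagemask = pagesize - 1;
    unsigned long brk = 0x0804f123UL;      /* hypothetical current break */
    unsigned long size = 0x21000UL;        /* bytes obtained so far */
    unsigned long correction = 0x10UL;     /* front-alignment fix-up so far */

    unsigned long end_misalign = brk + size + correction;
    correction += ((end_misalign + pagemask) & ~pagemask) - end_misalign;

    printf("second MORECORE request: %lu bytes, new end 0x%lx\n",
           correction, brk + size + correction);
    return 0;
}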
6611 @@ -767,25 +764,25 @@ static int __malloc_largebin_index(unsig
6612 #if defined(__GNUC__) && defined(i386)
6614 __asm__("bsrl %1,%0\n\t"
6615 - : "=r" (m)
6616 - : "g" (x));
6617 + : "=r" (m)
6618 + : "g" (x));
6620 #else
6622 - /*
6623 - Based on branch-free nlz algorithm in chapter 5 of Henry
6624 - S. Warren Jr's book "Hacker's Delight".
6625 - */
6627 - unsigned int n = ((x - 0x100) >> 16) & 8;
6628 - x <<= n;
6629 - m = ((x - 0x1000) >> 16) & 4;
6630 - n += m;
6631 - x <<= m;
6632 - m = ((x - 0x4000) >> 16) & 2;
6633 - n += m;
6634 - x = (x << m) >> 14;
6635 - m = 13 - n + (x & ~(x>>1));
6636 + /*
6637 + Based on branch-free nlz algorithm in chapter 5 of Henry
6638 + S. Warren Jr's book "Hacker's Delight".
6639 + */
6641 + unsigned int n = ((x - 0x100) >> 16) & 8;
6642 + x <<= n;
6643 + m = ((x - 0x1000) >> 16) & 4;
6644 + n += m;
6645 + x <<= m;
6646 + m = ((x - 0x4000) >> 16) & 2;
6647 + n += m;
6648 + x = (x << m) >> 14;
6649 + m = 13 - n + (x & ~(x>>1));
6651 #endif
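The #else branch above is the branch-free most-significant-bit computation from Hacker's Delight, playing the same role as the bsrl instruction in the i386 branch. A self-contained check of that arithmetic; __builtin_clz() appears only as an independent reference, and the test values stay within 16 bits, where the identity holds (larger inputs are presumably filtered out before this point in the allocator, which this excerpt does not show):

#include <assert.h>
#include <stdio.h>

static unsigned int msb16(unsigned int x)
{
    /* same structure as the branch-free computation above */
    unsigned int m;
    unsigned int n = ((x - 0x100) >> 16) & 8;
    x <<= n;
    m = ((x - 0x1000) >> 16) & 4;
    n += m;
    x <<= m;
    m = ((x - 0x4000) >> 16) & 2;
    n += m;
    x = (x << m) >> 14;
    m = 13 - n + (x & ~(x >> 1));
    return m;
}

int main(void)
{
    unsigned int tests[] = { 1, 2, 3, 0x40, 0x1ff, 0x1000, 0xffff };
    for (unsigned int i = 0; i < sizeof(tests) / sizeof(tests[0]); i++) {
        unsigned int x = tests[i];
        assert(msb16(x) == 31u - (unsigned int)__builtin_clz(x));
        printf("msb(0x%04x) = %u\n", x, msb16(x));
    }
    return 0;
}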
6653 @@ -826,69 +823,70 @@ void* malloc(size_t bytes)
6654 mchunkptr fwd; /* misc temp for linking */
6655 mchunkptr bck; /* misc temp for linking */
6656 void * sysmem;
6657 + void * retval;
6659 #if !defined(__MALLOC_GLIBC_COMPAT__)
6660 if (!bytes) return NULL;
6661 #endif
6663 - LOCK;
6664 + __MALLOC_LOCK;
6665 av = get_malloc_state();
6667 - Convert request size to internal form by adding (sizeof(size_t)) bytes
6668 - overhead plus possibly more to obtain necessary alignment and/or
6669 - to obtain a size of at least MINSIZE, the smallest allocatable
6670 - size. Also, checked_request2size traps (returning 0) request sizes
6671 - that are so large that they wrap around zero when padded and
6672 - aligned.
6673 - */
6674 + Convert request size to internal form by adding (sizeof(size_t)) bytes
6675 + overhead plus possibly more to obtain necessary alignment and/or
6676 + to obtain a size of at least MINSIZE, the smallest allocatable
6677 + size. Also, checked_request2size traps (returning 0) request sizes
6678 + that are so large that they wrap around zero when padded and
6679 + aligned.
6680 + */
6682 checked_request2size(bytes, nb);
6685 - Bypass search if no frees yet
6686 - */
6687 + Bypass search if no frees yet
6688 + */
6689 if (!have_anychunks(av)) {
6690 - if (av->max_fast == 0) /* initialization check */
6691 - __malloc_consolidate(av);
6692 - goto use_top;
6693 + if (av->max_fast == 0) /* initialization check */
6694 + __malloc_consolidate(av);
6695 + goto use_top;
6699 - If the size qualifies as a fastbin, first check corresponding bin.
6700 - */
6701 + If the size qualifies as a fastbin, first check corresponding bin.
6702 + */
6704 if ((unsigned long)(nb) <= (unsigned long)(av->max_fast)) {
6705 - fb = &(av->fastbins[(fastbin_index(nb))]);
6706 - if ( (victim = *fb) != 0) {
6707 - *fb = victim->fd;
6708 - check_remalloced_chunk(victim, nb);
6709 - UNLOCK;
6710 - return chunk2mem(victim);
6712 + fb = &(av->fastbins[(fastbin_index(nb))]);
6713 + if ( (victim = *fb) != 0) {
6714 + *fb = victim->fd;
6715 + check_remalloced_chunk(victim, nb);
6716 + retval = chunk2mem(victim);
6717 + goto DONE;
6722 - If a small request, check regular bin. Since these "smallbins"
6723 - hold one size each, no searching within bins is necessary.
6724 - (For a large request, we need to wait until unsorted chunks are
6725 - processed to find best fit. But for small ones, fits are exact
6726 - anyway, so we can check now, which is faster.)
6727 - */
6728 + If a small request, check regular bin. Since these "smallbins"
6729 + hold one size each, no searching within bins is necessary.
6730 + (For a large request, we need to wait until unsorted chunks are
6731 + processed to find best fit. But for small ones, fits are exact
6732 + anyway, so we can check now, which is faster.)
6733 + */
6735 if (in_smallbin_range(nb)) {
6736 - idx = smallbin_index(nb);
6737 - bin = bin_at(av,idx);
6738 + idx = smallbin_index(nb);
6739 + bin = bin_at(av,idx);
6741 - if ( (victim = last(bin)) != bin) {
6742 - bck = victim->bk;
6743 - set_inuse_bit_at_offset(victim, nb);
6744 - bin->bk = bck;
6745 - bck->fd = bin;
6747 - check_malloced_chunk(victim, nb);
6748 - UNLOCK;
6749 - return chunk2mem(victim);
6751 + if ( (victim = last(bin)) != bin) {
6752 + bck = victim->bk;
6753 + set_inuse_bit_at_offset(victim, nb);
6754 + bin->bk = bck;
6755 + bck->fd = bin;
6757 + check_malloced_chunk(victim, nb);
6758 + retval = chunk2mem(victim);
6759 + goto DONE;
6763 /* If this is a large request, consolidate fastbins before continuing.
6764 @@ -899,154 +897,154 @@ void* malloc(size_t bytes)
6765 large requests, but less often mixtures, so consolidation is not
6766 invoked all that often in most programs. And the programs that
6767 it is called frequently in otherwise tend to fragment.
6768 - */
6769 + */
6771 else {
6772 - idx = __malloc_largebin_index(nb);
6773 - if (have_fastchunks(av))
6774 - __malloc_consolidate(av);
6775 + idx = __malloc_largebin_index(nb);
6776 + if (have_fastchunks(av))
6777 + __malloc_consolidate(av);
6781 - Process recently freed or remaindered chunks, taking one only if
6782 - it is exact fit, or, if this is a small request, the chunk is remainder from
6783 - the most recent non-exact fit. Place other traversed chunks in
6784 - bins. Note that this step is the only place in any routine where
6785 - chunks are placed in bins.
6786 - */
6787 + Process recently freed or remaindered chunks, taking one only if
6788 + it is exact fit, or, if this is a small request, the chunk is remainder from
6789 + the most recent non-exact fit. Place other traversed chunks in
6790 + bins. Note that this step is the only place in any routine where
6791 + chunks are placed in bins.
6792 + */
6794 while ( (victim = unsorted_chunks(av)->bk) != unsorted_chunks(av)) {
6795 - bck = victim->bk;
6796 - size = chunksize(victim);
6797 + bck = victim->bk;
6798 + size = chunksize(victim);
6800 + /* If a small request, try to use last remainder if it is the
6801 + only chunk in unsorted bin. This helps promote locality for
6802 + runs of consecutive small requests. This is the only
6803 + exception to best-fit, and applies only when there is
6804 + no exact fit for a small chunk.
6805 + */
6807 + if (in_smallbin_range(nb) &&
6808 + bck == unsorted_chunks(av) &&
6809 + victim == av->last_remainder &&
6810 + (unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
6812 + /* split and reattach remainder */
6813 + remainder_size = size - nb;
6814 + remainder = chunk_at_offset(victim, nb);
6815 + unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
6816 + av->last_remainder = remainder;
6817 + remainder->bk = remainder->fd = unsorted_chunks(av);
6819 + set_head(victim, nb | PREV_INUSE);
6820 + set_head(remainder, remainder_size | PREV_INUSE);
6821 + set_foot(remainder, remainder_size);
6823 + check_malloced_chunk(victim, nb);
6824 + retval = chunk2mem(victim);
6825 + goto DONE;
6828 + /* remove from unsorted list */
6829 + unsorted_chunks(av)->bk = bck;
6830 + bck->fd = unsorted_chunks(av);
6832 + /* Take now instead of binning if exact fit */
6834 + if (size == nb) {
6835 + set_inuse_bit_at_offset(victim, size);
6836 + check_malloced_chunk(victim, nb);
6837 + retval = chunk2mem(victim);
6838 + goto DONE;
6841 + /* place chunk in bin */
6843 - /* If a small request, try to use last remainder if it is the
6844 - only chunk in unsorted bin. This helps promote locality for
6845 - runs of consecutive small requests. This is the only
6846 - exception to best-fit, and applies only when there is
6847 - no exact fit for a small chunk.
6848 - */
6850 - if (in_smallbin_range(nb) &&
6851 - bck == unsorted_chunks(av) &&
6852 - victim == av->last_remainder &&
6853 - (unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
6855 - /* split and reattach remainder */
6856 - remainder_size = size - nb;
6857 - remainder = chunk_at_offset(victim, nb);
6858 - unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
6859 - av->last_remainder = remainder;
6860 - remainder->bk = remainder->fd = unsorted_chunks(av);
6862 - set_head(victim, nb | PREV_INUSE);
6863 - set_head(remainder, remainder_size | PREV_INUSE);
6864 - set_foot(remainder, remainder_size);
6866 - check_malloced_chunk(victim, nb);
6867 - UNLOCK;
6868 - return chunk2mem(victim);
6871 - /* remove from unsorted list */
6872 - unsorted_chunks(av)->bk = bck;
6873 - bck->fd = unsorted_chunks(av);
6875 - /* Take now instead of binning if exact fit */
6877 - if (size == nb) {
6878 - set_inuse_bit_at_offset(victim, size);
6879 - check_malloced_chunk(victim, nb);
6880 - UNLOCK;
6881 - return chunk2mem(victim);
6884 - /* place chunk in bin */
6886 - if (in_smallbin_range(size)) {
6887 - victim_index = smallbin_index(size);
6888 - bck = bin_at(av, victim_index);
6889 - fwd = bck->fd;
6891 - else {
6892 - victim_index = __malloc_largebin_index(size);
6893 - bck = bin_at(av, victim_index);
6894 - fwd = bck->fd;
6896 - if (fwd != bck) {
6897 - /* if smaller than smallest, place first */
6898 - if ((unsigned long)(size) < (unsigned long)(bck->bk->size)) {
6899 - fwd = bck;
6900 - bck = bck->bk;
6902 - else if ((unsigned long)(size) >=
6903 - (unsigned long)(FIRST_SORTED_BIN_SIZE)) {
6905 - /* maintain large bins in sorted order */
6906 - size |= PREV_INUSE; /* Or with inuse bit to speed comparisons */
6907 - while ((unsigned long)(size) < (unsigned long)(fwd->size))
6908 - fwd = fwd->fd;
6909 - bck = fwd->bk;
6914 - mark_bin(av, victim_index);
6915 - victim->bk = bck;
6916 - victim->fd = fwd;
6917 - fwd->bk = victim;
6918 - bck->fd = victim;
6919 + if (in_smallbin_range(size)) {
6920 + victim_index = smallbin_index(size);
6921 + bck = bin_at(av, victim_index);
6922 + fwd = bck->fd;
6924 + else {
6925 + victim_index = __malloc_largebin_index(size);
6926 + bck = bin_at(av, victim_index);
6927 + fwd = bck->fd;
6929 + if (fwd != bck) {
6930 + /* if smaller than smallest, place first */
6931 + if ((unsigned long)(size) < (unsigned long)(bck->bk->size)) {
6932 + fwd = bck;
6933 + bck = bck->bk;
6935 + else if ((unsigned long)(size) >=
6936 + (unsigned long)(FIRST_SORTED_BIN_SIZE)) {
6938 + /* maintain large bins in sorted order */
6939 + size |= PREV_INUSE; /* Or with inuse bit to speed comparisons */
6940 + while ((unsigned long)(size) < (unsigned long)(fwd->size))
6941 + fwd = fwd->fd;
6942 + bck = fwd->bk;
6947 + mark_bin(av, victim_index);
6948 + victim->bk = bck;
6949 + victim->fd = fwd;
6950 + fwd->bk = victim;
6951 + bck->fd = victim;
6955 - If a large request, scan through the chunks of current bin to
6956 - find one that fits. (This will be the smallest that fits unless
6957 - FIRST_SORTED_BIN_SIZE has been changed from default.) This is
6958 - the only step where an unbounded number of chunks might be
6959 - scanned without doing anything useful with them. However the
6960 - lists tend to be short.
6961 - */
6962 + If a large request, scan through the chunks of current bin to
6963 + find one that fits. (This will be the smallest that fits unless
6964 + FIRST_SORTED_BIN_SIZE has been changed from default.) This is
6965 + the only step where an unbounded number of chunks might be
6966 + scanned without doing anything useful with them. However the
6967 + lists tend to be short.
6968 + */
6970 if (!in_smallbin_range(nb)) {
6971 - bin = bin_at(av, idx);
6973 - for (victim = last(bin); victim != bin; victim = victim->bk) {
6974 - size = chunksize(victim);
6975 + bin = bin_at(av, idx);
6977 - if ((unsigned long)(size) >= (unsigned long)(nb)) {
6978 - remainder_size = size - nb;
6979 - unlink(victim, bck, fwd);
6980 + for (victim = last(bin); victim != bin; victim = victim->bk) {
6981 + size = chunksize(victim);
6983 - /* Exhaust */
6984 - if (remainder_size < MINSIZE) {
6985 - set_inuse_bit_at_offset(victim, size);
6986 - check_malloced_chunk(victim, nb);
6987 - UNLOCK;
6988 - return chunk2mem(victim);
6990 - /* Split */
6991 - else {
6992 - remainder = chunk_at_offset(victim, nb);
6993 - unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
6994 - remainder->bk = remainder->fd = unsorted_chunks(av);
6995 - set_head(victim, nb | PREV_INUSE);
6996 - set_head(remainder, remainder_size | PREV_INUSE);
6997 - set_foot(remainder, remainder_size);
6998 - check_malloced_chunk(victim, nb);
6999 - UNLOCK;
7000 - return chunk2mem(victim);
7001 + if ((unsigned long)(size) >= (unsigned long)(nb)) {
7002 + remainder_size = size - nb;
7003 + unlink(victim, bck, fwd);
7005 + /* Exhaust */
7006 + if (remainder_size < MINSIZE) {
7007 + set_inuse_bit_at_offset(victim, size);
7008 + check_malloced_chunk(victim, nb);
7009 + retval = chunk2mem(victim);
7010 + goto DONE;
7012 + /* Split */
7013 + else {
7014 + remainder = chunk_at_offset(victim, nb);
7015 + unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
7016 + remainder->bk = remainder->fd = unsorted_chunks(av);
7017 + set_head(victim, nb | PREV_INUSE);
7018 + set_head(remainder, remainder_size | PREV_INUSE);
7019 + set_foot(remainder, remainder_size);
7020 + check_malloced_chunk(victim, nb);
7021 + retval = chunk2mem(victim);
7022 + goto DONE;
7031 - Search for a chunk by scanning bins, starting with next largest
7032 - bin. This search is strictly by best-fit; i.e., the smallest
7033 - (with ties going to approximately the least recently used) chunk
7034 - that fits is selected.
7035 + Search for a chunk by scanning bins, starting with next largest
7036 + bin. This search is strictly by best-fit; i.e., the smallest
7037 + (with ties going to approximately the least recently used) chunk
7038 + that fits is selected.
7040 - The bitmap avoids needing to check that most blocks are nonempty.
7041 - */
7042 + The bitmap avoids needing to check that most blocks are nonempty.
7043 + */
7045 ++idx;
7046 bin = bin_at(av,idx);
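The comment above, and the loop that follows in the next hunk, lean on a bitmap with one bit per bin, so that whole blocks of empty bins can be skipped with a single word test. A toy model of that idea; NBINS, MAPBITS and the helper names are simplified stand-ins, not the allocator's real BINMAPSIZE/BINMAPSHIFT machinery:

#include <stdio.h>

#define NBINS   96
#define MAPBITS 32
#define NMAPS   ((NBINS + MAPBITS - 1) / MAPBITS)

static unsigned int binmap[NMAPS];

static void mark_bin(int idx)  { binmap[idx / MAPBITS] |=  (1u << (idx % MAPBITS)); }
static void clear_bin(int idx) { binmap[idx / MAPBITS] &= ~(1u << (idx % MAPBITS)); }

/* first possibly non-empty bin >= idx, or -1 when every later bin is empty */
static int next_marked_bin(int idx)
{
    int block = idx / MAPBITS;
    unsigned int bit = 1u << (idx % MAPBITS);
    unsigned int map = binmap[block];

    if (bit > map || bit == 0) {            /* no set bits left in this word */
        do {
            if (++block >= NMAPS)
                return -1;                  /* out of bins */
        } while ((map = binmap[block]) == 0);
        idx = block * MAPBITS;
        bit = 1;
    }
    while ((bit & map) == 0) {              /* advance to the set bit */
        bit <<= 1;
        idx++;
    }
    return idx;
}

int main(void)
{
    mark_bin(7);
    mark_bin(70);
    printf("%d %d %d\n", next_marked_bin(0), next_marked_bin(8), next_marked_bin(71));
    clear_bin(70);                          /* a bin found empty has its bit cleared */
    printf("%d\n", next_marked_bin(8));
    return 0;
}

In the allocator the bit is only a hint: an empty bin whose bit is still set is treated as a false alarm and the bit is cleared on the spot, exactly as the write-through in the next hunk does.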
7047 @@ -1056,109 +1054,111 @@ void* malloc(size_t bytes)
7049 for (;;) {
7051 - /* Skip rest of block if there are no more set bits in this block. */
7052 - if (bit > map || bit == 0) {
7053 - do {
7054 - if (++block >= BINMAPSIZE) /* out of bins */
7055 - goto use_top;
7056 - } while ( (map = av->binmap[block]) == 0);
7058 - bin = bin_at(av, (block << BINMAPSHIFT));
7059 - bit = 1;
7062 - /* Advance to bin with set bit. There must be one. */
7063 - while ((bit & map) == 0) {
7064 - bin = next_bin(bin);
7065 - bit <<= 1;
7066 - assert(bit != 0);
7069 - /* Inspect the bin. It is likely to be non-empty */
7070 - victim = last(bin);
7072 - /* If a false alarm (empty bin), clear the bit. */
7073 - if (victim == bin) {
7074 - av->binmap[block] = map &= ~bit; /* Write through */
7075 - bin = next_bin(bin);
7076 - bit <<= 1;
7079 - else {
7080 - size = chunksize(victim);
7082 - /* We know the first chunk in this bin is big enough to use. */
7083 - assert((unsigned long)(size) >= (unsigned long)(nb));
7085 - remainder_size = size - nb;
7087 - /* unlink */
7088 - bck = victim->bk;
7089 - bin->bk = bck;
7090 - bck->fd = bin;
7092 - /* Exhaust */
7093 - if (remainder_size < MINSIZE) {
7094 - set_inuse_bit_at_offset(victim, size);
7095 - check_malloced_chunk(victim, nb);
7096 - UNLOCK;
7097 - return chunk2mem(victim);
7099 + /* Skip rest of block if there are no more set bits in this block. */
7100 + if (bit > map || bit == 0) {
7101 + do {
7102 + if (++block >= BINMAPSIZE) /* out of bins */
7103 + goto use_top;
7104 + } while ( (map = av->binmap[block]) == 0);
7106 - /* Split */
7107 - else {
7108 - remainder = chunk_at_offset(victim, nb);
7109 + bin = bin_at(av, (block << BINMAPSHIFT));
7110 + bit = 1;
7113 - unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
7114 - remainder->bk = remainder->fd = unsorted_chunks(av);
7115 - /* advertise as last remainder */
7116 - if (in_smallbin_range(nb))
7117 - av->last_remainder = remainder;
7118 + /* Advance to bin with set bit. There must be one. */
7119 + while ((bit & map) == 0) {
7120 + bin = next_bin(bin);
7121 + bit <<= 1;
7122 + assert(bit != 0);
7125 - set_head(victim, nb | PREV_INUSE);
7126 - set_head(remainder, remainder_size | PREV_INUSE);
7127 - set_foot(remainder, remainder_size);
7128 - check_malloced_chunk(victim, nb);
7129 - UNLOCK;
7130 - return chunk2mem(victim);
7133 + /* Inspect the bin. It is likely to be non-empty */
7134 + victim = last(bin);
7136 + /* If a false alarm (empty bin), clear the bit. */
7137 + if (victim == bin) {
7138 + av->binmap[block] = map &= ~bit; /* Write through */
7139 + bin = next_bin(bin);
7140 + bit <<= 1;
7143 + else {
7144 + size = chunksize(victim);
7146 + /* We know the first chunk in this bin is big enough to use. */
7147 + assert((unsigned long)(size) >= (unsigned long)(nb));
7149 + remainder_size = size - nb;
7151 + /* unlink */
7152 + bck = victim->bk;
7153 + bin->bk = bck;
7154 + bck->fd = bin;
7156 + /* Exhaust */
7157 + if (remainder_size < MINSIZE) {
7158 + set_inuse_bit_at_offset(victim, size);
7159 + check_malloced_chunk(victim, nb);
7160 + retval = chunk2mem(victim);
7161 + goto DONE;
7164 + /* Split */
7165 + else {
7166 + remainder = chunk_at_offset(victim, nb);
7168 + unsorted_chunks(av)->bk = unsorted_chunks(av)->fd = remainder;
7169 + remainder->bk = remainder->fd = unsorted_chunks(av);
7170 + /* advertise as last remainder */
7171 + if (in_smallbin_range(nb))
7172 + av->last_remainder = remainder;
7174 + set_head(victim, nb | PREV_INUSE);
7175 + set_head(remainder, remainder_size | PREV_INUSE);
7176 + set_foot(remainder, remainder_size);
7177 + check_malloced_chunk(victim, nb);
7178 + retval = chunk2mem(victim);
7179 + goto DONE;
7184 -use_top:
7185 + use_top:
7187 - If large enough, split off the chunk bordering the end of memory
7188 - (held in av->top). Note that this is in accord with the best-fit
7189 - search rule. In effect, av->top is treated as larger (and thus
7190 - less well fitting) than any other available chunk since it can
7191 - be extended to be as large as necessary (up to system
7192 - limitations).
7194 - We require that av->top always exists (i.e., has size >=
7195 - MINSIZE) after initialization, so if it would otherwise be
7196 - exhausted by current request, it is replenished. (The main
7197 - reason for ensuring it exists is that we may need MINSIZE space
7198 - to put in fenceposts in sysmalloc.)
7199 - */
7200 + If large enough, split off the chunk bordering the end of memory
7201 + (held in av->top). Note that this is in accord with the best-fit
7202 + search rule. In effect, av->top is treated as larger (and thus
7203 + less well fitting) than any other available chunk since it can
7204 + be extended to be as large as necessary (up to system
7205 + limitations).
7207 + We require that av->top always exists (i.e., has size >=
7208 + MINSIZE) after initialization, so if it would otherwise be
7209 + exhausted by current request, it is replenished. (The main
7210 + reason for ensuring it exists is that we may need MINSIZE space
7211 + to put in fenceposts in sysmalloc.)
7212 + */
7214 victim = av->top;
7215 size = chunksize(victim);
7217 if ((unsigned long)(size) >= (unsigned long)(nb + MINSIZE)) {
7218 - remainder_size = size - nb;
7219 - remainder = chunk_at_offset(victim, nb);
7220 - av->top = remainder;
7221 - set_head(victim, nb | PREV_INUSE);
7222 - set_head(remainder, remainder_size | PREV_INUSE);
7224 - check_malloced_chunk(victim, nb);
7225 - UNLOCK;
7226 - return chunk2mem(victim);
7227 + remainder_size = size - nb;
7228 + remainder = chunk_at_offset(victim, nb);
7229 + av->top = remainder;
7230 + set_head(victim, nb | PREV_INUSE);
7231 + set_head(remainder, remainder_size | PREV_INUSE);
7233 + check_malloced_chunk(victim, nb);
7234 + retval = chunk2mem(victim);
7235 + goto DONE;
7238 /* If no space in top, relay to handle system-dependent cases */
7239 sysmem = __malloc_alloc(nb, av);
7240 - UNLOCK;
7241 - return sysmem;
7242 + retval = sysmem;
7243 + DONE:
7244 + __MALLOC_UNLOCK;
7245 + return retval;
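Setting the re-indentation aside, the substantive change to malloc() above is mechanical: every early "UNLOCK; return x;" exit becomes "retval = x; goto DONE;", so the lock is taken and released in exactly one place. A minimal before/after sketch of that shape, using plain pthread calls and invented function names:

#include <pthread.h>

static pthread_mutex_t mylock = PTHREAD_MUTEX_INITIALIZER;
static int table[16];

/* before: each early return pairs with its own unlock */
int get_old(int idx)
{
    int value;
    pthread_mutex_lock(&mylock);
    if (idx < 0 || idx >= 16) {
        pthread_mutex_unlock(&mylock);
        return -1;
    }
    value = table[idx];
    pthread_mutex_unlock(&mylock);
    return value;
}

/* after: a single exit path with a single unlock */
int get_new(int idx)
{
    int retval = -1;
    pthread_mutex_lock(&mylock);
    if (idx < 0 || idx >= 16)
        goto DONE;
    retval = table[idx];
DONE:
    pthread_mutex_unlock(&mylock);
    return retval;
}

Funnelling every exit through one unlock is what lets the locking primitive itself be swapped out behind the __MALLOC_LOCK/__MALLOC_UNLOCK macros defined in malloc.h below without touching each return site.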
7248 diff --git a/libc/stdlib/malloc-standard/malloc.h b/libc/stdlib/malloc-standard/malloc.h
7249 index fbc1492..14a0dd9 100644
7250 --- a/libc/stdlib/malloc-standard/malloc.h
7251 +++ b/libc/stdlib/malloc-standard/malloc.h
7252 @@ -22,16 +22,12 @@
7253 #include <malloc.h>
7254 #include <stdlib.h>
7256 +#include <bits/uClibc_mutex.h>
7258 -#ifdef __UCLIBC_HAS_THREADS__
7259 -#include <pthread.h>
7260 -extern pthread_mutex_t __malloc_lock;
7261 -# define LOCK __pthread_mutex_lock(&__malloc_lock)
7262 -# define UNLOCK __pthread_mutex_unlock(&__malloc_lock);
7263 -#else
7264 -# define LOCK
7265 -# define UNLOCK
7266 -#endif
7267 +__UCLIBC_MUTEX_EXTERN(__malloc_lock);
7269 +#define __MALLOC_LOCK __UCLIBC_MUTEX_LOCK(__malloc_lock)
7270 +#define __MALLOC_UNLOCK __UCLIBC_MUTEX_UNLOCK(__malloc_lock)
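The new locking goes through bits/uClibc_mutex.h, which this excerpt does not include. Purely for orientation, here is one reduced sketch of what such macros could expand to; this is an assumption, not the real header (the actual implementation may, for instance, also hook into pthread cleanup handling), and the no-threads branch is just one plausible way of keeping the trailing semicolons legal:

#ifdef __UCLIBC_HAS_THREADS__
# include <pthread.h>
# define __UCLIBC_MUTEX_STATIC(M, I)  static pthread_mutex_t M = I
# define __UCLIBC_MUTEX_EXTERN(M)     extern pthread_mutex_t M
# define __UCLIBC_MUTEX_LOCK(M)       __pthread_mutex_lock(&(M))
# define __UCLIBC_MUTEX_UNLOCK(M)     __pthread_mutex_unlock(&(M))
#else
# define __UCLIBC_MUTEX_STATIC(M, I)  extern void *__uclibc_mutex_unused_##M
# define __UCLIBC_MUTEX_EXTERN(M)     extern void *__uclibc_mutex_unused_##M
# define __UCLIBC_MUTEX_LOCK(M)       ((void)0)
# define __UCLIBC_MUTEX_UNLOCK(M)     ((void)0)
#endif

Usage then looks exactly as in the patch: __UCLIBC_MUTEX_STATIC(mylock, PTHREAD_MUTEX_INITIALIZER); at file scope, with __UCLIBC_MUTEX_LOCK(mylock) and __UCLIBC_MUTEX_UNLOCK(mylock) bracketing the critical section.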
7274 diff --git a/libc/stdlib/malloc-standard/mallopt.c b/libc/stdlib/malloc-standard/mallopt.c
7275 index e287920..41aa614 100644
7276 --- a/libc/stdlib/malloc-standard/mallopt.c
7277 +++ b/libc/stdlib/malloc-standard/mallopt.c
7278 @@ -8,7 +8,7 @@
7279 VERSION 2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
7281 Note: There may be an updated version of this malloc obtainable at
7282 - ftp://gee.cs.oswego.edu/pub/misc/malloc.c
7283 + ftp://gee.cs.oswego.edu/pub/misc/malloc.c
7284 Check before installing!
7286 Hacked up for uClibc by Erik Andersen <andersen@codepoet.org>
7287 @@ -25,40 +25,40 @@ int mallopt(int param_number, int value)
7289 ret = 0;
7291 - LOCK;
7292 + __MALLOC_LOCK;
7293 av = get_malloc_state();
7294 /* Ensure initialization/consolidation */
7295 __malloc_consolidate(av);
7297 switch(param_number) {
7298 - case M_MXFAST:
7299 - if (value >= 0 && value <= MAX_FAST_SIZE) {
7300 - set_max_fast(av, value);
7301 - ret = 1;
7303 - break;
7305 - case M_TRIM_THRESHOLD:
7306 - av->trim_threshold = value;
7307 - ret = 1;
7308 - break;
7310 - case M_TOP_PAD:
7311 - av->top_pad = value;
7312 - ret = 1;
7313 - break;
7315 - case M_MMAP_THRESHOLD:
7316 - av->mmap_threshold = value;
7317 - ret = 1;
7318 - break;
7320 - case M_MMAP_MAX:
7321 - av->n_mmaps_max = value;
7322 - ret = 1;
7323 - break;
7324 + case M_MXFAST:
7325 + if (value >= 0 && value <= MAX_FAST_SIZE) {
7326 + set_max_fast(av, value);
7327 + ret = 1;
7329 + break;
7331 + case M_TRIM_THRESHOLD:
7332 + av->trim_threshold = value;
7333 + ret = 1;
7334 + break;
7336 + case M_TOP_PAD:
7337 + av->top_pad = value;
7338 + ret = 1;
7339 + break;
7341 + case M_MMAP_THRESHOLD:
7342 + av->mmap_threshold = value;
7343 + ret = 1;
7344 + break;
7346 + case M_MMAP_MAX:
7347 + av->n_mmaps_max = value;
7348 + ret = 1;
7349 + break;
7351 - UNLOCK;
7352 + __MALLOC_UNLOCK;
7353 return ret;
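For reference, the cases handled in the switch above are the ordinary mallopt() tunables declared in <malloc.h>, and the function returns 1 on success and 0 for an unrecognised or out-of-range parameter. A short caller-side example; the numeric values are arbitrary illustrations, not recommended settings:

#include <malloc.h>
#include <stdio.h>

int main(void)
{
    /* send allocations of 256 KiB and larger straight to mmap */
    if (mallopt(M_MMAP_THRESHOLD, 256 * 1024) != 1)
        fprintf(stderr, "M_MMAP_THRESHOLD rejected\n");

    /* keep up to 128 KiB free at the top of the heap before trimming */
    if (mallopt(M_TRIM_THRESHOLD, 128 * 1024) != 1)
        fprintf(stderr, "M_TRIM_THRESHOLD rejected\n");

    return 0;
}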
7356 diff --git a/libc/stdlib/malloc-standard/memalign.c b/libc/stdlib/malloc-standard/memalign.c
7357 index bd95362..e78d752 100644
7358 --- a/libc/stdlib/malloc-standard/memalign.c
7359 +++ b/libc/stdlib/malloc-standard/memalign.c
7360 @@ -8,7 +8,7 @@
7361 VERSION 2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
7363 Note: There may be an updated version of this malloc obtainable at
7364 - ftp://gee.cs.oswego.edu/pub/misc/malloc.c
7365 + ftp://gee.cs.oswego.edu/pub/misc/malloc.c
7366 Check before installing!
7368 Hacked up for uClibc by Erik Andersen <andersen@codepoet.org>
7369 @@ -35,6 +35,7 @@ void* memalign(size_t alignment, size_t
7370 mchunkptr remainder; /* spare room at end to split off */
7371 unsigned long remainder_size; /* its size */
7372 size_t size;
7373 + void *retval;
7375 /* If need less alignment than we give anyway, just relay to malloc */
7377 @@ -46,12 +47,12 @@ void* memalign(size_t alignment, size_t
7379 /* Make sure alignment is power of 2 (in case MINSIZE is not). */
7380 if ((alignment & (alignment - 1)) != 0) {
7381 - size_t a = MALLOC_ALIGNMENT * 2;
7382 - while ((unsigned long)a < (unsigned long)alignment) a <<= 1;
7383 - alignment = a;
7384 + size_t a = MALLOC_ALIGNMENT * 2;
7385 + while ((unsigned long)a < (unsigned long)alignment) a <<= 1;
7386 + alignment = a;
7389 - LOCK;
7390 + __MALLOC_LOCK;
7391 checked_request2size(bytes, nb);
7393 /* Strategy: find a spot within that chunk that meets the alignment
7394 @@ -63,64 +64,67 @@ void* memalign(size_t alignment, size_t
7395 m = (char*)(malloc(nb + alignment + MINSIZE));
7397 if (m == 0) {
7398 - UNLOCK;
7399 - return 0; /* propagate failure */
7400 + retval = 0; /* propagate failure */
7401 + goto DONE;
7404 p = mem2chunk(m);
7406 if ((((unsigned long)(m)) % alignment) != 0) { /* misaligned */
7408 - /*
7409 - Find an aligned spot inside chunk. Since we need to give back
7410 - leading space in a chunk of at least MINSIZE, if the first
7411 - calculation places us at a spot with less than MINSIZE leader,
7412 - we can move to the next aligned spot -- we've allocated enough
7413 - total room so that this is always possible.
7414 - */
7416 - brk = (char*)mem2chunk((unsigned long)(((unsigned long)(m + alignment - 1)) &
7417 - -((signed long) alignment)));
7418 - if ((unsigned long)(brk - (char*)(p)) < MINSIZE)
7419 - brk += alignment;
7421 - newp = (mchunkptr)brk;
7422 - leadsize = brk - (char*)(p);
7423 - newsize = chunksize(p) - leadsize;
7425 - /* For mmapped chunks, just adjust offset */
7426 - if (chunk_is_mmapped(p)) {
7427 - newp->prev_size = p->prev_size + leadsize;
7428 - set_head(newp, newsize|IS_MMAPPED);
7429 - UNLOCK;
7430 - return chunk2mem(newp);
7433 - /* Otherwise, give back leader, use the rest */
7434 - set_head(newp, newsize | PREV_INUSE);
7435 - set_inuse_bit_at_offset(newp, newsize);
7436 - set_head_size(p, leadsize);
7437 - free(chunk2mem(p));
7438 - p = newp;
7439 + /*
7440 + Find an aligned spot inside chunk. Since we need to give back
7441 + leading space in a chunk of at least MINSIZE, if the first
7442 + calculation places us at a spot with less than MINSIZE leader,
7443 + we can move to the next aligned spot -- we've allocated enough
7444 + total room so that this is always possible.
7445 + */
7447 + brk = (char*)mem2chunk((unsigned long)(((unsigned long)(m + alignment - 1)) &
7448 + -((signed long) alignment)));
7449 + if ((unsigned long)(brk - (char*)(p)) < MINSIZE)
7450 + brk += alignment;
7452 + newp = (mchunkptr)brk;
7453 + leadsize = brk - (char*)(p);
7454 + newsize = chunksize(p) - leadsize;
7456 + /* For mmapped chunks, just adjust offset */
7457 + if (chunk_is_mmapped(p)) {
7458 + newp->prev_size = p->prev_size + leadsize;
7459 + set_head(newp, newsize|IS_MMAPPED);
7460 + retval = chunk2mem(newp);
7461 + goto DONE;
7464 + /* Otherwise, give back leader, use the rest */
7465 + set_head(newp, newsize | PREV_INUSE);
7466 + set_inuse_bit_at_offset(newp, newsize);
7467 + set_head_size(p, leadsize);
7468 + free(chunk2mem(p));
7469 + p = newp;
7471 - assert (newsize >= nb &&
7472 - (((unsigned long)(chunk2mem(p))) % alignment) == 0);
7473 + assert (newsize >= nb &&
7474 + (((unsigned long)(chunk2mem(p))) % alignment) == 0);
7477 /* Also give back spare room at the end */
7478 if (!chunk_is_mmapped(p)) {
7479 - size = chunksize(p);
7480 - if ((unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
7481 - remainder_size = size - nb;
7482 - remainder = chunk_at_offset(p, nb);
7483 - set_head(remainder, remainder_size | PREV_INUSE);
7484 - set_head_size(p, nb);
7485 - free(chunk2mem(remainder));
7487 + size = chunksize(p);
7488 + if ((unsigned long)(size) > (unsigned long)(nb + MINSIZE)) {
7489 + remainder_size = size - nb;
7490 + remainder = chunk_at_offset(p, nb);
7491 + set_head(remainder, remainder_size | PREV_INUSE);
7492 + set_head_size(p, nb);
7493 + free(chunk2mem(remainder));
7497 check_inuse_chunk(p);
7498 - UNLOCK;
7499 - return chunk2mem(p);
7500 + retval = chunk2mem(p);
7502 + DONE:
7503 + __MALLOC_UNLOCK;
7504 + return retval;
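Two pieces of pointer arithmetic carry the memalign() hunks above: a non-power-of-two alignment is rounded up to the next power of two no smaller than MALLOC_ALIGNMENT * 2, and an address is bumped to the next boundary with (addr + alignment - 1) & -alignment. The same steps isolated with integers standing in for pointers (the concrete numbers are invented):

#include <stdio.h>

int main(void)
{
    unsigned long base_align = 16;   /* stand-in for MALLOC_ALIGNMENT */
    unsigned long alignment = 48;    /* caller-requested, not a power of two */

    if ((alignment & (alignment - 1)) != 0) {
        unsigned long a = base_align * 2;
        while (a < alignment)
            a <<= 1;
        alignment = a;               /* 48 rounds up to 64 */
    }

    unsigned long m = 0x804b00cUL;   /* hypothetical malloc() result */
    unsigned long aligned = (m + alignment - 1) & -alignment;

    printf("alignment %lu: 0x%lx -> 0x%lx\n", alignment, m, aligned);
    return 0;
}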
7507 diff --git a/libc/stdlib/malloc-standard/realloc.c b/libc/stdlib/malloc-standard/realloc.c
7508 index 1950130..9ca4b26 100644
7509 --- a/libc/stdlib/malloc-standard/realloc.c
7510 +++ b/libc/stdlib/malloc-standard/realloc.c
7511 @@ -8,7 +8,7 @@
7512 VERSION 2.7.2 Sat Aug 17 09:07:30 2002 Doug Lea (dl at gee)
7514 Note: There may be an updated version of this malloc obtainable at
7515 - ftp://gee.cs.oswego.edu/pub/misc/malloc.c
7516 + ftp://gee.cs.oswego.edu/pub/misc/malloc.c
7517 Check before installing!
7519 Hacked up for uClibc by Erik Andersen <andersen@codepoet.org>
7520 @@ -23,14 +23,14 @@ void* realloc(void* oldmem, size_t bytes
7522 mstate av;
7524 - size_t nb; /* padded request size */
7525 + size_t nb; /* padded request size */
7527 mchunkptr oldp; /* chunk corresponding to oldmem */
7528 - size_t oldsize; /* its size */
7529 + size_t oldsize; /* its size */
7531 mchunkptr newp; /* chunk to return */
7532 - size_t newsize; /* its size */
7533 - void* newmem; /* corresponding user mem */
7534 + size_t newsize; /* its size */
7535 + void* newmem; /* corresponding user mem */
7537 mchunkptr next; /* next contiguous chunk after oldp */
7539 @@ -40,21 +40,23 @@ void* realloc(void* oldmem, size_t bytes
7540 mchunkptr bck; /* misc temp for linking */
7541 mchunkptr fwd; /* misc temp for linking */
7543 - unsigned long copysize; /* bytes to copy */
7544 + unsigned long copysize; /* bytes to copy */
7545 unsigned int ncopies; /* size_t words to copy */
7546 - size_t* s; /* copy source */
7547 - size_t* d; /* copy destination */
7548 + size_t* s; /* copy source */
7549 + size_t* d; /* copy destination */
7551 + void *retval;
7554 /* Check for special cases. */
7555 if (! oldmem)
7556 - return malloc(bytes);
7557 + return malloc(bytes);
7558 if (! bytes) {
7559 - free (oldmem);
7560 - return malloc(bytes);
7561 + free (oldmem);
7562 + return malloc(bytes);
7565 - LOCK;
7566 + __MALLOC_LOCK;
7567 av = get_malloc_state();
7568 checked_request2size(bytes, nb);
7570 @@ -65,173 +67,176 @@ void* realloc(void* oldmem, size_t bytes
7572 if (!chunk_is_mmapped(oldp)) {
7574 - if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
7575 - /* already big enough; split below */
7576 - newp = oldp;
7577 - newsize = oldsize;
7580 - else {
7581 - next = chunk_at_offset(oldp, oldsize);
7583 - /* Try to expand forward into top */
7584 - if (next == av->top &&
7585 - (unsigned long)(newsize = oldsize + chunksize(next)) >=
7586 - (unsigned long)(nb + MINSIZE)) {
7587 - set_head_size(oldp, nb);
7588 - av->top = chunk_at_offset(oldp, nb);
7589 - set_head(av->top, (newsize - nb) | PREV_INUSE);
7590 - UNLOCK;
7591 - return chunk2mem(oldp);
7594 - /* Try to expand forward into next chunk; split off remainder below */
7595 - else if (next != av->top &&
7596 - !inuse(next) &&
7597 - (unsigned long)(newsize = oldsize + chunksize(next)) >=
7598 - (unsigned long)(nb)) {
7599 - newp = oldp;
7600 - unlink(next, bck, fwd);
7603 - /* allocate, copy, free */
7604 - else {
7605 - newmem = malloc(nb - MALLOC_ALIGN_MASK);
7606 - if (newmem == 0) {
7607 - UNLOCK;
7608 - return 0; /* propagate failure */
7611 - newp = mem2chunk(newmem);
7612 - newsize = chunksize(newp);
7614 - /*
7615 - Avoid copy if newp is next chunk after oldp.
7616 - */
7617 - if (newp == next) {
7618 - newsize += oldsize;
7619 - newp = oldp;
7620 + if ((unsigned long)(oldsize) >= (unsigned long)(nb)) {
7621 + /* already big enough; split below */
7622 + newp = oldp;
7623 + newsize = oldsize;
7626 else {
7627 - /*
7628 - Unroll copy of <= 36 bytes (72 if 8byte sizes)
7629 - We know that contents have an odd number of
7630 - size_t-sized words; minimally 3.
7631 - */
7633 - copysize = oldsize - (sizeof(size_t));
7634 - s = (size_t*)(oldmem);
7635 - d = (size_t*)(newmem);
7636 - ncopies = copysize / sizeof(size_t);
7637 - assert(ncopies >= 3);
7639 - if (ncopies > 9)
7640 - memcpy(d, s, copysize);
7642 - else {
7643 - *(d+0) = *(s+0);
7644 - *(d+1) = *(s+1);
7645 - *(d+2) = *(s+2);
7646 - if (ncopies > 4) {
7647 - *(d+3) = *(s+3);
7648 - *(d+4) = *(s+4);
7649 - if (ncopies > 6) {
7650 - *(d+5) = *(s+5);
7651 - *(d+6) = *(s+6);
7652 - if (ncopies > 8) {
7653 - *(d+7) = *(s+7);
7654 - *(d+8) = *(s+8);
7655 + next = chunk_at_offset(oldp, oldsize);
7657 + /* Try to expand forward into top */
7658 + if (next == av->top &&
7659 + (unsigned long)(newsize = oldsize + chunksize(next)) >=
7660 + (unsigned long)(nb + MINSIZE)) {
7661 + set_head_size(oldp, nb);
7662 + av->top = chunk_at_offset(oldp, nb);
7663 + set_head(av->top, (newsize - nb) | PREV_INUSE);
7664 + retval = chunk2mem(oldp);
7665 + goto DONE;
7668 + /* Try to expand forward into next chunk; split off remainder below */
7669 + else if (next != av->top &&
7670 + !inuse(next) &&
7671 + (unsigned long)(newsize = oldsize + chunksize(next)) >=
7672 + (unsigned long)(nb)) {
7673 + newp = oldp;
7674 + unlink(next, bck, fwd);
7677 + /* allocate, copy, free */
7678 + else {
7679 + newmem = malloc(nb - MALLOC_ALIGN_MASK);
7680 + if (newmem == 0) {
7681 + retval = 0; /* propagate failure */
7682 + goto DONE;
7685 + newp = mem2chunk(newmem);
7686 + newsize = chunksize(newp);
7688 + /*
7689 + Avoid copy if newp is next chunk after oldp.
7690 + */
7691 + if (newp == next) {
7692 + newsize += oldsize;
7693 + newp = oldp;
7695 + else {
7696 + /*
7697 + Unroll copy of <= 36 bytes (72 if 8byte sizes)
7698 + We know that contents have an odd number of
7699 + size_t-sized words; minimally 3.
7700 + */
7702 + copysize = oldsize - (sizeof(size_t));
7703 + s = (size_t*)(oldmem);
7704 + d = (size_t*)(newmem);
7705 + ncopies = copysize / sizeof(size_t);
7706 + assert(ncopies >= 3);
7708 + if (ncopies > 9)
7709 + memcpy(d, s, copysize);
7711 + else {
7712 + *(d+0) = *(s+0);
7713 + *(d+1) = *(s+1);
7714 + *(d+2) = *(s+2);
7715 + if (ncopies > 4) {
7716 + *(d+3) = *(s+3);
7717 + *(d+4) = *(s+4);
7718 + if (ncopies > 6) {
7719 + *(d+5) = *(s+5);
7720 + *(d+6) = *(s+6);
7721 + if (ncopies > 8) {
7722 + *(d+7) = *(s+7);
7723 + *(d+8) = *(s+8);
7729 + free(oldmem);
7730 + check_inuse_chunk(newp);
7731 + retval = chunk2mem(newp);
7732 + goto DONE;
7739 + /* If possible, free extra space in old or extended chunk */
7741 + assert((unsigned long)(newsize) >= (unsigned long)(nb));
7743 + remainder_size = newsize - nb;
7745 - free(oldmem);
7746 - check_inuse_chunk(newp);
7747 - UNLOCK;
7748 - return chunk2mem(newp);
7753 - /* If possible, free extra space in old or extended chunk */
7755 - assert((unsigned long)(newsize) >= (unsigned long)(nb));
7757 - remainder_size = newsize - nb;
7759 - if (remainder_size < MINSIZE) { /* not enough extra to split off */
7760 - set_head_size(newp, newsize);
7761 - set_inuse_bit_at_offset(newp, newsize);
7763 - else { /* split remainder */
7764 - remainder = chunk_at_offset(newp, nb);
7765 - set_head_size(newp, nb);
7766 - set_head(remainder, remainder_size | PREV_INUSE);
7767 - /* Mark remainder as inuse so free() won't complain */
7768 - set_inuse_bit_at_offset(remainder, remainder_size);
7769 - free(chunk2mem(remainder));
7772 - check_inuse_chunk(newp);
7773 - UNLOCK;
7774 - return chunk2mem(newp);
7775 + if (remainder_size < MINSIZE) { /* not enough extra to split off */
7776 + set_head_size(newp, newsize);
7777 + set_inuse_bit_at_offset(newp, newsize);
7779 + else { /* split remainder */
7780 + remainder = chunk_at_offset(newp, nb);
7781 + set_head_size(newp, nb);
7782 + set_head(remainder, remainder_size | PREV_INUSE);
7783 + /* Mark remainder as inuse so free() won't complain */
7784 + set_inuse_bit_at_offset(remainder, remainder_size);
7785 + free(chunk2mem(remainder));
7788 + check_inuse_chunk(newp);
7789 + retval = chunk2mem(newp);
7790 + goto DONE;
7794 - Handle mmap cases
7795 - */
7796 + Handle mmap cases
7797 + */
7799 else {
7800 - size_t offset = oldp->prev_size;
7801 - size_t pagemask = av->pagesize - 1;
7802 - char *cp;
7803 - unsigned long sum;
7805 - /* Note the extra (sizeof(size_t)) overhead */
7806 - newsize = (nb + offset + (sizeof(size_t)) + pagemask) & ~pagemask;
7808 - /* don't need to remap if still within same page */
7809 - if (oldsize == newsize - offset) {
7810 - UNLOCK;
7811 - return oldmem;
7814 - cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);
7816 - if (cp != (char*)MORECORE_FAILURE) {
7818 - newp = (mchunkptr)(cp + offset);
7819 - set_head(newp, (newsize - offset)|IS_MMAPPED);
7821 - assert(aligned_OK(chunk2mem(newp)));
7822 - assert((newp->prev_size == offset));
7824 - /* update statistics */
7825 - sum = av->mmapped_mem += newsize - oldsize;
7826 - if (sum > (unsigned long)(av->max_mmapped_mem))
7827 - av->max_mmapped_mem = sum;
7828 - sum += av->sbrked_mem;
7829 - if (sum > (unsigned long)(av->max_total_mem))
7830 - av->max_total_mem = sum;
7832 - UNLOCK;
7833 - return chunk2mem(newp);
7836 - /* Note the extra (sizeof(size_t)) overhead. */
7837 - if ((unsigned long)(oldsize) >= (unsigned long)(nb + (sizeof(size_t))))
7838 - newmem = oldmem; /* do nothing */
7839 - else {
7840 - /* Must alloc, copy, free. */
7841 - newmem = malloc(nb - MALLOC_ALIGN_MASK);
7842 - if (newmem != 0) {
7843 - memcpy(newmem, oldmem, oldsize - 2*(sizeof(size_t)));
7844 - free(oldmem);
7847 - UNLOCK;
7848 - return newmem;
7849 + size_t offset = oldp->prev_size;
7850 + size_t pagemask = av->pagesize - 1;
7851 + char *cp;
7852 + unsigned long sum;
7854 + /* Note the extra (sizeof(size_t)) overhead */
7855 + newsize = (nb + offset + (sizeof(size_t)) + pagemask) & ~pagemask;
7857 + /* don't need to remap if still within same page */
7858 + if (oldsize == newsize - offset) {
7859 + retval = oldmem;
7860 + goto DONE;
7863 + cp = (char*)mremap((char*)oldp - offset, oldsize + offset, newsize, 1);
7865 + if (cp != (char*)MORECORE_FAILURE) {
7867 + newp = (mchunkptr)(cp + offset);
7868 + set_head(newp, (newsize - offset)|IS_MMAPPED);
7870 + assert(aligned_OK(chunk2mem(newp)));
7871 + assert((newp->prev_size == offset));
7873 + /* update statistics */
7874 + sum = av->mmapped_mem += newsize - oldsize;
7875 + if (sum > (unsigned long)(av->max_mmapped_mem))
7876 + av->max_mmapped_mem = sum;
7877 + sum += av->sbrked_mem;
7878 + if (sum > (unsigned long)(av->max_total_mem))
7879 + av->max_total_mem = sum;
7881 + retval = chunk2mem(newp);
7882 + goto DONE;
7885 + /* Note the extra (sizeof(size_t)) overhead. */
7886 + if ((unsigned long)(oldsize) >= (unsigned long)(nb + (sizeof(size_t))))
7887 + newmem = oldmem; /* do nothing */
7888 + else {
7889 + /* Must alloc, copy, free. */
7890 + newmem = malloc(nb - MALLOC_ALIGN_MASK);
7891 + if (newmem != 0) {
7892 + memcpy(newmem, oldmem, oldsize - 2*(sizeof(size_t)));
7893 + free(oldmem);
7896 + retval = newmem;
7899 + DONE:
7900 + __MALLOC_UNLOCK;
7901 + return retval;
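The copy path in realloc() above hand-unrolls moves of at most 9 size_t words (36 bytes with 4-byte size_t, 72 with 8-byte) and falls back to memcpy() beyond that; the allocator guarantees the word count is odd and at least 3. The same routine lifted into a standalone function with a small self-check:

#include <assert.h>
#include <string.h>

static void copy_words(size_t *d, const size_t *s, size_t copysize)
{
    size_t ncopies = copysize / sizeof(size_t);

    assert(ncopies >= 3);            /* mirrors the allocator's invariant */

    if (ncopies > 9) {
        memcpy(d, s, copysize);
        return;
    }
    *(d + 0) = *(s + 0);
    *(d + 1) = *(s + 1);
    *(d + 2) = *(s + 2);
    if (ncopies > 4) {
        *(d + 3) = *(s + 3);
        *(d + 4) = *(s + 4);
        if (ncopies > 6) {
            *(d + 5) = *(s + 5);
            *(d + 6) = *(s + 6);
            if (ncopies > 8) {
                *(d + 7) = *(s + 7);
                *(d + 8) = *(s + 8);
            }
        }
    }
}

int main(void)
{
    size_t src[7] = { 1, 2, 3, 4, 5, 6, 7 };
    size_t dst[7] = { 0 };

    copy_words(dst, src, sizeof(src));
    assert(memcmp(dst, src, sizeof(src)) == 0);
    return 0;
}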
7904 diff --git a/libc/stdlib/random.c b/libc/stdlib/random.c
7905 index b0a00e1..1bd63bc 100644
7906 --- a/libc/stdlib/random.c
7907 +++ b/libc/stdlib/random.c
7908 @@ -27,16 +27,14 @@
7909 #include <limits.h>
7910 #include <stddef.h>
7911 #include <stdlib.h>
7912 -#ifdef __UCLIBC_HAS_THREADS__
7913 -#include <pthread.h>
7915 /* POSIX.1c requires that there is mutual exclusion for the `rand' and
7916 `srand' functions to prevent concurrent calls from modifying common
7917 data. */
7918 -static pthread_mutex_t lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
7919 -#else
7920 -#define __pthread_mutex_lock(x)
7921 -#define __pthread_mutex_unlock(x)
7922 -#endif
7924 +#include <bits/uClibc_mutex.h>
7926 +__UCLIBC_MUTEX_STATIC(mylock, PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP);
7928 /* An improved random number generation package. In addition to the standard
7929 rand()/srand() like interface, this package also has a special state info
7930 @@ -184,9 +182,9 @@ static struct random_data unsafe_state =
7931 for default usage relies on values produced by this routine. */
7932 void srandom (unsigned int x)
7934 - __pthread_mutex_lock(&lock);
7935 + __UCLIBC_MUTEX_LOCK(mylock);
7936 srandom_r (x, &unsafe_state);
7937 - __pthread_mutex_unlock(&lock);
7938 + __UCLIBC_MUTEX_UNLOCK(mylock);
7940 weak_alias (srandom, srand)
7942 @@ -205,10 +203,10 @@ char * initstate (unsigned int seed, cha
7944 int32_t *ostate;
7946 - __pthread_mutex_lock(&lock);
7947 + __UCLIBC_MUTEX_LOCK(mylock);
7948 ostate = &unsafe_state.state[-1];
7949 initstate_r (seed, arg_state, n, &unsafe_state);
7950 - __pthread_mutex_unlock(&lock);
7951 + __UCLIBC_MUTEX_UNLOCK(mylock);
7952 return (char *) ostate;
7955 @@ -224,11 +222,11 @@ char * setstate (char *arg_state)
7957 int32_t *ostate;
7959 - __pthread_mutex_lock(&lock);
7960 + __UCLIBC_MUTEX_LOCK(mylock);
7961 ostate = &unsafe_state.state[-1];
7962 if (setstate_r (arg_state, &unsafe_state) < 0)
7963 ostate = NULL;
7964 - __pthread_mutex_unlock(&lock);
7965 + __UCLIBC_MUTEX_UNLOCK(mylock);
7966 return (char *) ostate;
7969 @@ -247,9 +245,9 @@ long int random ()
7971 int32_t retval;
7973 - __pthread_mutex_lock(&lock);
7974 + __UCLIBC_MUTEX_LOCK(mylock);
7975 random_r (&unsafe_state, &retval);
7976 - __pthread_mutex_unlock(&lock);
7977 + __UCLIBC_MUTEX_UNLOCK(mylock);
7978 return retval;
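The random.c hunk above swaps the raw __pthread_mutex_* calls for the new macros while keeping a recursive initializer for the lock that guards the shared generator state. Roughly what that protection amounts to with plain pthreads, using rand_r() as a stand-in for random_r() and invented demo_* names:

    #define _GNU_SOURCE
    #include <pthread.h>
    #include <stdlib.h>

    /* Stand-in for the static recursive lock around unsafe_state. */
    static pthread_mutex_t prng_lock = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
    static unsigned int prng_state = 1;

    void demo_srandom(unsigned int x)
    {
        pthread_mutex_lock(&prng_lock);
        prng_state = x;
        pthread_mutex_unlock(&prng_lock);
    }

    long demo_random(void)
    {
        long r;

        pthread_mutex_lock(&prng_lock);
        r = rand_r(&prng_state);    /* placeholder for random_r() */
        pthread_mutex_unlock(&prng_lock);
        return r;
    }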
7981 diff --git a/libc/stdlib/setenv.c b/libc/stdlib/setenv.c
7982 index d0cfe52..2d899cc 100644
7983 --- a/libc/stdlib/setenv.c
7984 +++ b/libc/stdlib/setenv.c
7985 @@ -17,7 +17,7 @@
7986 02111-1307 USA.
7988 modified for uClibc by Erik Andersen <andersen@codepoet.org>
7989 - */
7992 #define _GNU_SOURCE
7993 #include <features.h>
7994 @@ -26,16 +26,9 @@
7995 #include <string.h>
7996 #include <unistd.h>
7998 -#ifdef __UCLIBC_HAS_THREADS__
7999 -#include <pthread.h>
8000 -static pthread_mutex_t mylock = PTHREAD_MUTEX_INITIALIZER;
8001 -# define LOCK __pthread_mutex_lock(&mylock)
8002 -# define UNLOCK __pthread_mutex_unlock(&mylock);
8003 -#else
8004 -# define LOCK
8005 -# define UNLOCK
8006 -#endif
8007 +#include <bits/uClibc_mutex.h>
8009 +__UCLIBC_MUTEX_STATIC(mylock, PTHREAD_MUTEX_INITIALIZER);
8011 /* If this variable is not a null pointer we allocated the current
8012 environment. */
8013 @@ -49,14 +42,15 @@ static char **last_environ;
8014 to reuse values once generated for a `setenv' call since we can never
8015 free the strings. */
8016 int __add_to_environ (const char *name, const char *value,
8017 - const char *combined, int replace)
8018 + const char *combined, int replace)
8020 register char **ep;
8021 register size_t size;
8022 const size_t namelen = strlen (name);
8023 const size_t vallen = value != NULL ? strlen (value) + 1 : 0;
8024 + int rv = -1;
8026 - LOCK;
8027 + __UCLIBC_MUTEX_LOCK(mylock);
8029 /* We have to get the pointer now that we have the lock and not earlier
8030 since another thread might have created a new environment. */
8031 @@ -64,72 +58,72 @@ int __add_to_environ (const char *name,
8033 size = 0;
8034 if (ep != NULL) {
8035 - for (; *ep != NULL; ++ep) {
8036 - if (!strncmp (*ep, name, namelen) && (*ep)[namelen] == '=')
8037 - break;
8038 - else
8039 - ++size;
8041 + for (; *ep != NULL; ++ep) {
8042 + if (!strncmp (*ep, name, namelen) && (*ep)[namelen] == '=')
8043 + break;
8044 + else
8045 + ++size;
8049 if (ep == NULL || *ep == NULL) {
8050 - char **new_environ;
8051 + char **new_environ;
8053 - /* We allocated this space; we can extend it. */
8054 - new_environ = (char **) realloc (last_environ,
8055 - (size + 2) * sizeof (char *));
8056 - if (new_environ == NULL) {
8057 - UNLOCK;
8058 - return -1;
8061 - /* If the whole entry is given add it. */
8062 - if (combined != NULL) {
8063 - /* We must not add the string to the search tree since it belongs
8064 - to the user. */
8065 - new_environ[size] = (char *) combined;
8066 - } else {
8067 - /* See whether the value is already known. */
8068 - new_environ[size] = (char *) malloc (namelen + 1 + vallen);
8069 - if (new_environ[size] == NULL) {
8070 - __set_errno (ENOMEM);
8071 - UNLOCK;
8072 - return -1;
8075 - memcpy (new_environ[size], name, namelen);
8076 - new_environ[size][namelen] = '=';
8077 - memcpy (&new_environ[size][namelen + 1], value, vallen);
8080 - if (__environ != last_environ) {
8081 - memcpy ((char *) new_environ, (char *) __environ,
8082 - size * sizeof (char *));
8084 + /* We allocated this space; we can extend it. */
8085 + new_environ = (char **) realloc (last_environ,
8086 + (size + 2) * sizeof (char *));
8087 + if (new_environ == NULL) {
8088 + goto DONE;
8091 + /* If the whole entry is given add it. */
8092 + if (combined != NULL) {
8093 + /* We must not add the string to the search tree since it belongs
8094 + to the user. */
8095 + new_environ[size] = (char *) combined;
8096 + } else {
8097 + /* See whether the value is already known. */
8098 + new_environ[size] = (char *) malloc (namelen + 1 + vallen);
8099 + if (new_environ[size] == NULL) {
8100 + __set_errno (ENOMEM);
8101 + goto DONE;
8104 + memcpy (new_environ[size], name, namelen);
8105 + new_environ[size][namelen] = '=';
8106 + memcpy (&new_environ[size][namelen + 1], value, vallen);
8109 + if (__environ != last_environ) {
8110 + memcpy ((char *) new_environ, (char *) __environ,
8111 + size * sizeof (char *));
8114 - new_environ[size + 1] = NULL;
8115 - last_environ = __environ = new_environ;
8116 + new_environ[size + 1] = NULL;
8117 + last_environ = __environ = new_environ;
8118 } else if (replace) {
8119 - char *np;
8120 + char *np;
8122 - /* Use the user string if given. */
8123 - if (combined != NULL) {
8124 - np = (char *) combined;
8125 - } else {
8126 - np = malloc (namelen + 1 + vallen);
8127 - if (np == NULL) {
8128 - UNLOCK;
8129 - return -1;
8131 - memcpy (np, name, namelen);
8132 - np[namelen] = '=';
8133 - memcpy (&np[namelen + 1], value, vallen);
8135 - *ep = np;
8138 - UNLOCK;
8139 - return 0;
8140 + /* Use the user string if given. */
8141 + if (combined != NULL) {
8142 + np = (char *) combined;
8143 + } else {
8144 + np = malloc (namelen + 1 + vallen);
8145 + if (np == NULL) {
8146 + goto DONE;
8148 + memcpy (np, name, namelen);
8149 + np[namelen] = '=';
8150 + memcpy (&np[namelen + 1], value, vallen);
8152 + *ep = np;
8155 + rv = 0;
8157 + DONE:
8158 + __UCLIBC_MUTEX_UNLOCK(mylock);
8159 + return rv;
8162 int setenv (const char *name, const char *value, int replace)
8163 @@ -143,26 +137,26 @@ int unsetenv (const char *name)
8164 char **ep;
8166 if (name == NULL || *name == '\0' || strchr (name, '=') != NULL) {
8167 - __set_errno (EINVAL);
8168 - return -1;
8169 + __set_errno (EINVAL);
8170 + return -1;
8173 len = strlen (name);
8174 - LOCK;
8175 + __UCLIBC_MUTEX_LOCK(mylock);
8176 ep = __environ;
8177 while (*ep != NULL) {
8178 - if (!strncmp (*ep, name, len) && (*ep)[len] == '=') {
8179 - /* Found it. Remove this pointer by moving later ones back. */
8180 - char **dp = ep;
8181 - do {
8182 - dp[0] = dp[1];
8183 - } while (*dp++);
8184 - /* Continue the loop in case NAME appears again. */
8185 - } else {
8186 - ++ep;
8188 + if (!strncmp (*ep, name, len) && (*ep)[len] == '=') {
8189 + /* Found it. Remove this pointer by moving later ones back. */
8190 + char **dp = ep;
8191 + do {
8192 + dp[0] = dp[1];
8193 + } while (*dp++);
8194 + /* Continue the loop in case NAME appears again. */
8195 + } else {
8196 + ++ep;
8199 - UNLOCK;
8200 + __UCLIBC_MUTEX_UNLOCK(mylock);
8201 return 0;
8204 @@ -171,15 +165,15 @@ int unsetenv (const char *name)
8205 for Fortran 77) requires this function. */
8206 int clearenv (void)
8208 - LOCK;
8209 + __UCLIBC_MUTEX_LOCK(mylock);
8210 if (__environ == last_environ && __environ != NULL) {
8211 - /* We allocated this environment so we can free it. */
8212 - free (__environ);
8213 - last_environ = NULL;
8214 + /* We allocated this environment so we can free it. */
8215 + free (__environ);
8216 + last_environ = NULL;
8218 /* Clear the environment pointer removes the whole environment. */
8219 __environ = NULL;
8220 - UNLOCK;
8221 + __UCLIBC_MUTEX_UNLOCK(mylock);
8222 return 0;
8225 @@ -190,10 +184,10 @@ int putenv (char *string)
8226 const char *const name_end = strchr (string, '=');
8228 if (name_end != NULL) {
8229 - char *name = strndup(string, name_end - string);
8230 - result = __add_to_environ (name, NULL, string, 1);
8231 - free(name);
8232 - return(result);
8233 + char *name = strndup(string, name_end - string);
8234 + result = __add_to_environ (name, NULL, string, 1);
8235 + free(name);
8236 + return(result);
8238 unsetenv (string);
8239 return 0;
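The setenv.c conversion above follows the same convention as the realloc change: initialize rv to -1, turn every early "unlock; return -1;" into a goto DONE, and set rv = 0 only once everything has succeeded. A small self-contained sketch of that shape (the demo_* names and the single-slot table are invented):

    #include <errno.h>
    #include <pthread.h>
    #include <stdlib.h>
    #include <string.h>

    static pthread_mutex_t env_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Build "name=value" into *slot under the lock; any failure jumps
     * to the single unlock at DONE with rv still set to -1. */
    int demo_add_pair(char **slot, const char *name, const char *value)
    {
        int rv = -1;
        char *np;

        pthread_mutex_lock(&env_lock);
        np = malloc(strlen(name) + 1 + strlen(value) + 1);
        if (np == NULL) {
            errno = ENOMEM;
            goto DONE;
        }
        strcpy(np, name);
        strcat(np, "=");
        strcat(np, value);
        *slot = np;
        rv = 0;
    DONE:
        pthread_mutex_unlock(&env_lock);
        return rv;
    }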
8240 diff --git a/libc/sysdeps/linux/common/bits/uClibc_stdio.h b/libc/sysdeps/linux/common/bits/uClibc_stdio.h
8241 index 40cd5fe..3c6911e 100644
8242 --- a/libc/sysdeps/linux/common/bits/uClibc_stdio.h
8243 +++ b/libc/sysdeps/linux/common/bits/uClibc_stdio.h
8244 @@ -116,9 +116,7 @@
8245 #endif
8247 /**********************************************************************/
8248 -#ifdef __UCLIBC_HAS_THREADS__
8249 -/* Need this for pthread_mutex_t. */
8250 -#include <bits/pthreadtypes.h>
8251 +#include <bits/uClibc_mutex.h>
8253 /* user_locking
8254 * 0 : do auto locking/unlocking
8255 @@ -132,43 +130,37 @@
8256 * This way, we avoid calling the weak lock/unlock functions.
8259 -#define __STDIO_AUTO_THREADLOCK_VAR int __infunc_user_locking
8261 -#define __STDIO_AUTO_THREADLOCK(__stream) \
8262 - if ((__infunc_user_locking = (__stream)->__user_locking) == 0) { \
8263 - __pthread_mutex_lock(&(__stream)->__lock); \
8266 -#define __STDIO_AUTO_THREADUNLOCK(__stream) \
8267 - if (__infunc_user_locking == 0) { \
8268 - __pthread_mutex_unlock(&(__stream)->__lock); \
8270 +#define __STDIO_AUTO_THREADLOCK_VAR \
8271 + __UCLIBC_MUTEX_AUTO_LOCK_VAR(__infunc_user_locking)
8273 -#define __STDIO_SET_USER_LOCKING(__stream) ((__stream)->__user_locking = 1)
8274 +#define __STDIO_AUTO_THREADLOCK(__stream) \
8275 + __UCLIBC_MUTEX_AUTO_LOCK((__stream)->__lock, __infunc_user_locking, \
8276 + (__stream)->__user_locking)
8278 -#define __STDIO_ALWAYS_THREADLOCK(__stream) \
8279 - __pthread_mutex_lock(&(__stream)->__lock)
8280 +#define __STDIO_AUTO_THREADUNLOCK(__stream) \
8281 + __UCLIBC_MUTEX_AUTO_UNLOCK((__stream)->__lock, __infunc_user_locking)
8283 -#define __STDIO_ALWAYS_THREADTRYLOCK(__stream) \
8284 - __pthread_mutex_trylock(&(__stream)->__lock)
8285 +#define __STDIO_ALWAYS_THREADLOCK(__stream) \
8286 + __UCLIBC_MUTEX_LOCK((__stream)->__lock)
8288 -#define __STDIO_ALWAYS_THREADUNLOCK(__stream) \
8289 - __pthread_mutex_unlock(&(__stream)->__lock)
8290 +#define __STDIO_ALWAYS_THREADUNLOCK(__stream) \
8291 + __UCLIBC_MUTEX_UNLOCK((__stream)->__lock)
8293 -#else /* __UCLIBC_HAS_THREADS__ */
8294 +#define __STDIO_ALWAYS_THREADLOCK_CANCEL_UNSAFE(__stream) \
8295 + __UCLIBC_MUTEX_LOCK_CANCEL_UNSAFE((__stream)->__lock)
8297 -#define __STDIO_AUTO_THREADLOCK_VAR ((void)0)
8298 +#define __STDIO_ALWAYS_THREADTRYLOCK_CANCEL_UNSAFE(__stream) \
8299 + __UCLIBC_MUTEX_TRYLOCK_CANCEL_UNSAFE((__stream)->__lock)
8301 -#define __STDIO_AUTO_THREADLOCK(__stream) ((void)0)
8302 -#define __STDIO_AUTO_THREADUNLOCK(__stream) ((void)0)
8303 +#define __STDIO_ALWAYS_THREADUNLOCK_CANCEL_UNSAFE(__stream) \
8304 + __UCLIBC_MUTEX_UNLOCK_CANCEL_UNSAFE((__stream)->__lock)
8306 +#ifdef __UCLIBC_HAS_THREADS__
8307 +#define __STDIO_SET_USER_LOCKING(__stream) ((__stream)->__user_locking = 1)
8308 +#else
8309 #define __STDIO_SET_USER_LOCKING(__stream) ((void)0)
8310 +#endif
8312 -#define __STDIO_ALWAYS_THREADLOCK(__stream) ((void)0)
8313 -#define __STDIO_ALWAYS_THREADTRYLOCK(__stream) (0) /* Always succeed. */
8314 -#define __STDIO_ALWAYS_THREADUNLOCK(__stream) ((void)0)
8316 -#endif /* __UCLIBC_HAS_THREADS__ */
8317 /**********************************************************************/
8319 #define __STDIO_IOFBF 0 /* Fully buffered. */
8320 @@ -283,7 +275,7 @@ struct __STDIO_FILE_STRUCT {
8321 #endif
8322 #ifdef __UCLIBC_HAS_THREADS__
8323 int __user_locking;
8324 - pthread_mutex_t __lock;
8325 + __UCLIBC_MUTEX(__lock);
8326 #endif
8327 /* Everything after this is unimplemented... and may be trashed. */
8328 #if __STDIO_BUILTIN_BUF_SIZE > 0
8329 @@ -358,10 +350,14 @@ extern void _stdio_term(void);
8330 extern struct __STDIO_FILE_STRUCT *_stdio_openlist;
8332 #ifdef __UCLIBC_HAS_THREADS__
8333 -extern pthread_mutex_t _stdio_openlist_lock;
8334 -extern int _stdio_openlist_delflag;
8335 +__UCLIBC_MUTEX_EXTERN(_stdio_openlist_add_lock);
8336 +#ifdef __STDIO_BUFFERS
8337 +__UCLIBC_MUTEX_EXTERN(_stdio_openlist_del_lock);
8338 +extern volatile int _stdio_openlist_use_count; /* _stdio_openlist_del_lock */
8339 +extern int _stdio_openlist_del_count; /* _stdio_openlist_del_lock */
8340 +#endif
8341 extern int _stdio_user_locking;
8342 -extern void __stdio_init_mutex(pthread_mutex_t *m);
8343 +extern void __stdio_init_mutex(__UCLIBC_MUTEX_TYPE *m);
8344 #endif
8346 #endif
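For the stdio macros just rewritten: the auto-lock pair only takes the stream mutex when the stream is in automatic-locking mode, and records that decision in a local so the matching unlock agrees with it. A rough equivalent with an invented demo_FILE type (the real expansion goes through __UCLIBC_MUTEX_AUTO_LOCK and the cancellation cleanup handlers):

    #include <pthread.h>

    typedef struct {
        int user_locking;       /* 0: the library locks the stream itself */
        pthread_mutex_t lock;
    } demo_FILE;

    int demo_stdio_op(demo_FILE *fp)
    {
        /* __STDIO_AUTO_THREADLOCK_VAR: remember the mode on entry. */
        int saved_user_locking = fp->user_locking;

        /* __STDIO_AUTO_THREADLOCK: lock only in automatic mode. */
        if (saved_user_locking == 0)
            pthread_mutex_lock(&fp->lock);

        /* ... the actual stream operation would go here ... */

        /* __STDIO_AUTO_THREADUNLOCK: unlock iff we locked above. */
        if (saved_user_locking == 0)
            pthread_mutex_unlock(&fp->lock);
        return 0;
    }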
8347 diff --git a/libc/sysdeps/linux/common/getdents.c b/libc/sysdeps/linux/common/getdents.c
8348 index ab6a276..23463e5 100644
8349 --- a/libc/sysdeps/linux/common/getdents.c
8350 +++ b/libc/sysdeps/linux/common/getdents.c
8351 @@ -30,8 +30,6 @@
8352 #include <sys/syscall.h>
8355 -#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
8357 struct kernel_dirent
8359 long d_ino;
8360 diff --git a/libc/sysdeps/linux/common/sigprocmask.c b/libc/sysdeps/linux/common/sigprocmask.c
8361 index 70ff366..565318d 100644
8362 --- a/libc/sysdeps/linux/common/sigprocmask.c
8363 +++ b/libc/sysdeps/linux/common/sigprocmask.c
8364 @@ -23,6 +23,8 @@ int sigprocmask(int how, const sigset_t
8365 if (set &&
8366 #if (SIG_BLOCK == 0) && (SIG_UNBLOCK == 1) && (SIG_SETMASK == 2)
8367 (((unsigned int) how) > 2)
8368 +#elif (SIG_BLOCK == 1) && (SIG_UNBLOCK == 2) && (SIG_SETMASK == 3)
8369 + (((unsigned int)(how-1)) > 2)
8370 #else
8371 #warning "compile time assumption violated.. slow path..."
8372 ((how != SIG_BLOCK) && (how != SIG_UNBLOCK)
8373 @@ -48,6 +50,8 @@ int sigprocmask(int how, const sigset_t
8374 if (set &&
8375 #if (SIG_BLOCK == 0) && (SIG_UNBLOCK == 1) && (SIG_SETMASK == 2)
8376 (((unsigned int) how) > 2)
8377 +#elif (SIG_BLOCK == 1) && (SIG_UNBLOCK == 2) && (SIG_SETMASK == 3)
8378 + (((unsigned int)(how-1)) > 2)
8379 #else
8380 #warning "compile time assumption violated.. slow path..."
8381 ((how != SIG_BLOCK) && (how != SIG_UNBLOCK)
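The sigprocmask.c hunk adds a branch for ports whose signal-mask constants start at 1 (SIG_BLOCK==1, SIG_UNBLOCK==2, SIG_SETMASK==3, as on MIPS). The test is the usual unsigned range-check idiom: shift the valid range down to start at zero and compare once, letting negative values wrap to huge unsigned ones. A tiny standalone check:

    #include <assert.h>

    /* Valid codes are 1..3, so (how - 1) is 0..2; anything else,
     * including how <= 0, becomes a large unsigned value and fails. */
    static int demo_how_is_invalid(int how)
    {
        return ((unsigned int)(how - 1)) > 2;
    }

    int main(void)
    {
        assert(demo_how_is_invalid(0));     /* below the range */
        assert(!demo_how_is_invalid(1));    /* SIG_BLOCK */
        assert(!demo_how_is_invalid(3));    /* SIG_SETMASK */
        assert(demo_how_is_invalid(4));     /* above the range */
        return 0;
    }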
8382 diff --git a/libc/sysdeps/linux/mips/bits/kernel_sigaction.h b/libc/sysdeps/linux/mips/bits/kernel_sigaction.h
8383 index b6f52cc..317e5b3 100644
8384 --- a/libc/sysdeps/linux/mips/bits/kernel_sigaction.h
8385 +++ b/libc/sysdeps/linux/mips/bits/kernel_sigaction.h
8386 @@ -38,3 +38,6 @@ struct kernel_sigaction {
8387 void (*sa_restorer)(void);
8388 int s_resv[1]; /* reserved */
8391 +extern int __syscall_rt_sigaction (int, const struct kernel_sigaction *__unbounded,
8392 + struct kernel_sigaction *__unbounded, size_t);
8393 diff --git a/libc/sysdeps/linux/mips/pipe.S b/libc/sysdeps/linux/mips/pipe.S
8394 index c3afae5..cd88074 100644
8395 --- a/libc/sysdeps/linux/mips/pipe.S
8396 +++ b/libc/sysdeps/linux/mips/pipe.S
8397 @@ -7,25 +7,36 @@
8398 #include <asm/unistd.h>
8399 #include <asm/regdef.h>
8401 - .globl pipe
8402 - .ent pipe, 0
8403 + .globl pipe
8404 + .ent pipe, 0
8405 pipe:
8406 - addiu sp,sp,-24
8407 - sw a0,16(sp)
8408 - li v0,__NR_pipe
8409 - syscall
8410 - beqz a3, 1f
8411 - la t3, errno
8412 - sw v0, (t3)
8413 - li v0, -1
8414 - b 2f
8415 + .frame sp, 24, sp
8416 +#ifdef __PIC__
8417 + .set noreorder
8418 + .cpload $25
8419 + .set reorder
8420 + addiu sp,sp,-24
8421 + .cprestore 16
8422 +#else
8423 + addiu sp,sp,-24
8424 +#endif
8425 + sw a0,16(sp)
8426 + li v0,__NR_pipe
8427 + syscall
8428 + beqz a3, 1f
8429 +#ifdef __PIC__
8430 + la t9, __syscall_error
8431 + jr t9
8432 +#else
8433 + j __syscall_error
8434 +#endif
8436 - lw a0, 16(sp)
8437 - sw v0, 0(a0)
8438 - sw v1, 4(a0)
8439 - li v0, 0
8440 + lw a0, 16(sp)
8441 + sw v0, 0(a0)
8442 + sw v1, 4(a0)
8443 + li v0, 0
8445 - addiu sp,sp,24
8446 - j ra
8447 - .end pipe
8448 - .size pipe,.-pipe
8449 + addiu sp,sp,24
8450 + j ra
8451 + .end pipe
8452 + .size pipe,.-pipe
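The rewritten MIPS pipe stub above delegates error handling to the shared __syscall_error path instead of storing errno inline (and sets up the PIC register via .cpload when built position-independent). Expressed in C, the control flow it implements is roughly the following; the demo_* names are illustrative, and the real work is the assembly above:

    #include <errno.h>

    /* Stand-in for the common __syscall_error routine: record the
     * error code the kernel returned and report failure. */
    static int demo_syscall_error(long err)
    {
        errno = (int)err;
        return -1;
    }

    /* On MIPS the kernel returns the two pipe ends in registers
     * (v0/v1) and flags failure in a3; mirror that convention here. */
    int demo_pipe(int fd[2], long v0, long v1, long a3_failed)
    {
        if (a3_failed)
            return demo_syscall_error(v0);
        fd[0] = (int)v0;
        fd[1] = (int)v1;
        return 0;
    }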
8453 diff --git a/libcrypt/des.c b/libcrypt/des.c
8454 index 3b49a7a..f7a6be1 100644
8455 --- a/libcrypt/des.c
8456 +++ b/libcrypt/des.c
8457 @@ -504,7 +504,7 @@ do_des( u_int32_t l_in, u_int32_t r_in,
8458 kl = kl1;
8459 kr = kr1;
8460 round = 16;
8461 - while (round--) {
8462 + do {
8464 * Expand R to 48 bits (simulate the E-box).
8466 @@ -540,7 +540,7 @@ do_des( u_int32_t l_in, u_int32_t r_in,
8467 f ^= l;
8468 l = r;
8469 r = f;
8471 + } while (--round);
8472 r = l;
8473 l = f;
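The des.c change replaces while (round--) with a do/while that pre-decrements. Both forms execute the 16 rounds; the do/while simply skips the final extra test and leaves the counter at 0 instead of letting it wrap to -1. A tiny standalone check of that equivalence (not the DES code itself):

    #include <assert.h>

    int main(void)
    {
        int round, iterations;

        /* Original form: test-and-post-decrement. */
        round = 16; iterations = 0;
        while (round--)
            ++iterations;
        assert(iterations == 16 && round == -1);

        /* Patched form: body first, pre-decrement test after. */
        round = 16; iterations = 0;
        do {
            ++iterations;
        } while (--round);
        assert(iterations == 16 && round == 0);

        return 0;
    }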
8475 diff --git a/libpthread/linuxthreads/ptfork.c b/libpthread/linuxthreads/ptfork.c
8476 index eb544f3..cfec2b7 100644
8477 --- a/libpthread/linuxthreads/ptfork.c
8478 +++ b/libpthread/linuxthreads/ptfork.c
8479 @@ -26,6 +26,15 @@
8480 #include "pthread.h"
8481 #include "internals.h"
8483 +#warning hack alert... should be sufficient for system(), but what about other libc mutexes?
8484 +#include <bits/uClibc_mutex.h>
8486 +__UCLIBC_MUTEX_EXTERN(__malloc_lock);
8488 +#define __MALLOC_LOCK __UCLIBC_MUTEX_LOCK(__malloc_lock)
8489 +#define __MALLOC_UNLOCK __UCLIBC_MUTEX_UNLOCK(__malloc_lock)
8490 +#warning hack alert block end
8492 struct handler_list {
8493 void (*handler)(void);
8494 struct handler_list * next;
8495 @@ -91,9 +100,18 @@ pid_t __fork(void)
8496 parent = pthread_atfork_parent;
8497 pthread_mutex_unlock(&pthread_atfork_lock);
8498 pthread_call_handlers(prepare);
8500 +#warning hack alert
8501 + __MALLOC_LOCK;
8503 pid = __libc_fork();
8505 +#warning hack alert
8506 + __MALLOC_UNLOCK;
8508 if (pid == 0) {
8509 __pthread_reset_main_thread();
8510 +#warning need to reconsider __fresetlockfiles!
8511 __fresetlockfiles();
8512 pthread_call_handlers(child);
8513 } else {
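The ptfork.c hack above holds the malloc lock across __libc_fork() so the child never inherits a heap mutex that some other (now nonexistent) thread had locked. The same prepare/parent/child discipline is what pthread_atfork() provides in general; a minimal sketch with an illustrative lock rather than the real __malloc_lock plumbing:

    #include <pthread.h>
    #include <sys/types.h>
    #include <unistd.h>

    static pthread_mutex_t demo_heap_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Take the allocator lock before fork() and release it afterwards;
     * the release runs in both parent and child, so the child's copy
     * of the mutex ends up unlocked and its heap stays usable. */
    pid_t demo_fork_with_heap_lock(void)
    {
        pid_t pid;

        pthread_mutex_lock(&demo_heap_lock);
        pid = fork();
        pthread_mutex_unlock(&demo_heap_lock);
        return pid;
    }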
8514 diff -urN -x .git uClibc-0.9.28/libc/sysdeps/linux/common/bits/uClibc_mutex.h uClibc-mjn3/libc/sysdeps/linux/common/bits/uClibc_mutex.h
8515 --- uClibc-0.9.28/libc/sysdeps/linux/common/bits/uClibc_mutex.h 1969-12-31 17:00:00.000000000 -0700
8516 +++ uClibc-mjn3/libc/sysdeps/linux/common/bits/uClibc_mutex.h 2006-03-08 11:21:58.000000000 -0700
8517 @@ -0,0 +1,87 @@
8518 +/* Copyright (C) 2006 Manuel Novoa III <mjn3@codepoet.org>
8520 + * GNU Library General Public License (LGPL) version 2 or later.
8522 + * Dedicated to Toni. See uClibc/DEDICATION.mjn3 for details.
8523 + */
8525 +#ifndef _UCLIBC_MUTEX_H
8526 +#define _UCLIBC_MUTEX_H
8528 +#include <features.h>
8530 +#ifdef __UCLIBC_HAS_THREADS__
8532 +#include <pthread.h>
8534 +#define __UCLIBC_MUTEX_TYPE pthread_mutex_t
8536 +#define __UCLIBC_MUTEX(M) pthread_mutex_t M
8537 +#define __UCLIBC_MUTEX_INIT(M,I) pthread_mutex_t M = I
8538 +#define __UCLIBC_MUTEX_STATIC(M,I) static pthread_mutex_t M = I
8539 +#define __UCLIBC_MUTEX_EXTERN(M) extern pthread_mutex_t M
8541 +#define __UCLIBC_MUTEX_LOCK_CANCEL_UNSAFE(M) \
8542 + __pthread_mutex_lock(&(M))
8544 +#define __UCLIBC_MUTEX_UNLOCK_CANCEL_UNSAFE(M) \
8545 + __pthread_mutex_unlock(&(M))
8547 +#define __UCLIBC_MUTEX_TRYLOCK_CANCEL_UNSAFE(M) \
8548 + __pthread_mutex_trylock(&(M))
8550 +#define __UCLIBC_MUTEX_CONDITIONAL_LOCK(M,C) \
8551 + do { \
8552 + struct _pthread_cleanup_buffer __infunc_pthread_cleanup_buffer; \
8553 + if (C) { \
8554 + _pthread_cleanup_push_defer(&__infunc_pthread_cleanup_buffer, \
8555 + __pthread_mutex_unlock, \
8556 + &(M)); \
8557 + __pthread_mutex_lock(&(M)); \
8558 + } \
8559 + ((void)0)
8561 +#define __UCLIBC_MUTEX_CONDITIONAL_UNLOCK(M,C) \
8562 + if (C) { \
8563 + _pthread_cleanup_pop_restore(&__infunc_pthread_cleanup_buffer,1);\
8564 + } \
8565 + } while (0)
8567 +#define __UCLIBC_MUTEX_AUTO_LOCK_VAR(A) int A
8569 +#define __UCLIBC_MUTEX_AUTO_LOCK(M,A,V) \
8570 + __UCLIBC_MUTEX_CONDITIONAL_LOCK(M,((A=(V)) == 0))
8572 +#define __UCLIBC_MUTEX_AUTO_UNLOCK(M,A) \
8573 + __UCLIBC_MUTEX_CONDITIONAL_UNLOCK(M,(A == 0))
8575 +#define __UCLIBC_MUTEX_LOCK(M) \
8576 + __UCLIBC_MUTEX_CONDITIONAL_LOCK(M, 1)
8578 +#define __UCLIBC_MUTEX_UNLOCK(M) \
8579 + __UCLIBC_MUTEX_CONDITIONAL_UNLOCK(M, 1)
8581 +#else
8583 +#define __UCLIBC_MUTEX(M) void *__UCLIBC_MUTEX_DUMMY_ ## M
8584 +#define __UCLIBC_MUTEX_INIT(M,I) extern void *__UCLIBC_MUTEX_DUMMY_ ## M
8585 +#define __UCLIBC_MUTEX_STATIC(M,I) extern void *__UCLIBC_MUTEX_DUMMY_ ## M
8586 +#define __UCLIBC_MUTEX_EXTERN(M) extern void *__UCLIBC_MUTEX_DUMMY_ ## M
8588 +#define __UCLIBC_MUTEX_LOCK_CANCEL_UNSAFE(M) ((void)0)
8589 +#define __UCLIBC_MUTEX_UNLOCK_CANCEL_UNSAFE(M) ((void)0)
8590 +#define __UCLIBC_MUTEX_TRYLOCK_CANCEL_UNSAFE(M) (0) /* Always succeed? */
8592 +#define __UCLIBC_MUTEX_CONDITIONAL_LOCK(M,C) ((void)0)
8593 +#define __UCLIBC_MUTEX_CONDITIONAL_UNLOCK(M,C) ((void)0)
8595 +#define __UCLIBC_MUTEX_AUTO_LOCK_VAR(A) ((void)0)
8596 +#define __UCLIBC_MUTEX_AUTO_LOCK(M,A,V) ((void)0)
8597 +#define __UCLIBC_MUTEX_AUTO_UNLOCK(M,A) ((void)0)
8599 +#define __UCLIBC_MUTEX_LOCK(M) ((void)0)
8600 +#define __UCLIBC_MUTEX_UNLOCK(M) ((void)0)
8602 +#endif
8604 +#endif /* _UCLIBC_MUTEX_H */
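To tie the new header together, this is the usage pattern the rest of the patch converts files to: declare a static lock with __UCLIBC_MUTEX_STATIC, take it with __UCLIBC_MUTEX_LOCK, funnel every exit through one label, and release it once with __UCLIBC_MUTEX_UNLOCK. The cache example below is invented and only builds inside a uClibc tree carrying this patch, but the macro names are the ones defined above:

    #include <bits/uClibc_mutex.h>

    __UCLIBC_MUTEX_STATIC(mylock, PTHREAD_MUTEX_INITIALIZER);

    static int cached_value;

    int demo_get_cached(int refresh)
    {
        int rv = -1;

        __UCLIBC_MUTEX_LOCK(mylock);
        if (refresh < 0) {
            goto DONE;          /* error: rv stays -1, lock still released */
        }
        if (refresh > 0) {
            cached_value = refresh;
        }
        rv = cached_value;
     DONE:
        __UCLIBC_MUTEX_UNLOCK(mylock);
        return rv;
    }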
8605 diff -urN -x .git uClibc-0.9.28/libc/sysdeps/linux/mips/pipe.c uClibc-mjn3/libc/sysdeps/linux/mips/pipe.c
8606 --- uClibc-0.9.28/libc/sysdeps/linux/mips/pipe.c 2005-08-17 16:49:44.000000000 -0600
8607 +++ uClibc-mjn3/libc/sysdeps/linux/mips/pipe.c 1969-12-31 17:00:00.000000000 -0700
8608 @@ -1,23 +0,0 @@
8609 -/* pipe system call for Linux/MIPS */
8611 -/*see uClibc's sh/pipe.c and glibc-2.2.4's mips/pipe.S */
8613 -#include <errno.h>
8614 -#include <unistd.h>
8615 -#include <syscall.h>
8617 -int pipe(int *fd)
8619 - register long int res __asm__ ("$2"); // v0
8620 - register long int res2 __asm__ ("$3"); // v1
8622 - asm ("move\t$4,%2\n\t" // $4 = a0
8623 - "syscall" /* Perform the system call. */
8624 - : "=r" (res)
8625 - : "0" (__NR_pipe), "r" (fd)
8626 - : "$4", "$7");
8628 - fd[0] = res;
8629 - fd[1] = res2;
8630 - return(0);