/* Copyright (C) 1993, 1995, 1997-1999, 2000 Free Software Foundation, Inc.
   This file is part of the GNU IO Library.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2, or (at
   your option) any later version.

   This library is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this library; see the file COPYING.  If not, write to
   the Free Software Foundation, 59 Temple Place - Suite 330, Boston,
   MA 02111-1307, USA.

   As a special exception, if you link this library with files
   compiled with a GNU compiler to produce an executable, this does
   not cause the resulting executable to be covered by the GNU General
   Public License.  This exception does not however invalidate any
   other reasons why the executable file might be covered by the GNU
   General Public License.  */

/* Generic or default I/O operations.  */

#include "libioP.h"
#include <stdlib.h>
#include <string.h>

#ifdef _IO_MTSAFE_IO
static _IO_lock_t list_all_lock = _IO_lock_initializer;
#endif

void
_IO_un_link (fp)
     struct _IO_FILE_plus *fp;
{
  if (fp->file._flags & _IO_LINKED)
    {
      struct _IO_FILE_plus **f;
#ifdef _IO_MTSAFE_IO
      _IO_lock_lock (list_all_lock);
#endif
      for (f = &_IO_list_all; *f;
           f = (struct _IO_FILE_plus **) &(*f)->file._chain)
        if (*f == fp)
          {
            *f = (struct _IO_FILE_plus *) fp->file._chain;
            break;
          }
#ifdef _IO_MTSAFE_IO
      _IO_lock_unlock (list_all_lock);
#endif
      fp->file._flags &= ~_IO_LINKED;
    }
}

void
_IO_link_in (fp)
     struct _IO_FILE_plus *fp;
{
  if ((fp->file._flags & _IO_LINKED) == 0)
    {
      fp->file._flags |= _IO_LINKED;
#ifdef _IO_MTSAFE_IO
      _IO_lock_lock (list_all_lock);
#endif
      fp->file._chain = (_IO_FILE *) _IO_list_all;
      _IO_list_all = fp;
#ifdef _IO_MTSAFE_IO
      _IO_lock_unlock (list_all_lock);
#endif
    }
}

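/* Every stream that _IO_link_in has put on the chain can be reached by
   walking _IO_list_all through the _chain field, exactly as
   _IO_flush_all and _IO_flush_all_linebuffered do below.  The sketch
   inside the `#if 0' block is illustrative only (it is never compiled)
   and simply shows that traversal pattern.  */
#if 0
{
  _IO_FILE *p;
  for (p = (_IO_FILE *) _IO_list_all; p != NULL; p = p->_chain)
    /* Inspect *p here; the list lock should be held while walking.  */ ;
}
#endif
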
/* Return the minimum _pos of all markers on FP.
   Assumes the current get area is the main get area.  */
_IO_ssize_t _IO_least_marker __P ((_IO_FILE *fp, char *end_p));

_IO_ssize_t
_IO_least_marker (fp, end_p)
     _IO_FILE *fp;
     char *end_p;
{
  _IO_ssize_t least_so_far = end_p - fp->_IO_read_base;
  struct _IO_marker *mark;
  for (mark = fp->_markers; mark != NULL; mark = mark->_next)
    if (mark->_pos < least_so_far)
      least_so_far = mark->_pos;
  return least_so_far;
}

/* Switch current get area from backup buffer to (start of) main get area.  */

void
_IO_switch_to_main_get_area (fp)
     _IO_FILE *fp;
{
  char *tmp;
  fp->_flags &= ~_IO_IN_BACKUP;
  /* Swap _IO_read_end and _IO_save_end. */
  tmp = fp->_IO_read_end;
  fp->_IO_read_end = fp->_IO_save_end;
  fp->_IO_save_end = tmp;
  /* Swap _IO_read_base and _IO_save_base. */
  tmp = fp->_IO_read_base;
  fp->_IO_read_base = fp->_IO_save_base;
  fp->_IO_save_base = tmp;
  /* Set _IO_read_ptr. */
  fp->_IO_read_ptr = fp->_IO_read_base;
}

/* Switch current get area from main get area to (end of) backup area.  */

void
_IO_switch_to_backup_area (fp)
     _IO_FILE *fp;
{
  char *tmp;
  fp->_flags |= _IO_IN_BACKUP;
  /* Swap _IO_read_end and _IO_save_end. */
  tmp = fp->_IO_read_end;
  fp->_IO_read_end = fp->_IO_save_end;
  fp->_IO_save_end = tmp;
  /* Swap _IO_read_base and _IO_save_base. */
  tmp = fp->_IO_read_base;
  fp->_IO_read_base = fp->_IO_save_base;
  fp->_IO_save_base = tmp;
  /* Set _IO_read_ptr. */
  fp->_IO_read_ptr = fp->_IO_read_end;
}

int
_IO_switch_to_get_mode (fp)
     _IO_FILE *fp;
{
  if (fp->_IO_write_ptr > fp->_IO_write_base)
    if (_IO_OVERFLOW (fp, EOF) == EOF)
      return EOF;
  if (_IO_in_backup (fp))
    fp->_IO_read_base = fp->_IO_backup_base;
  else
    {
      fp->_IO_read_base = fp->_IO_buf_base;
      if (fp->_IO_write_ptr > fp->_IO_read_end)
        fp->_IO_read_end = fp->_IO_write_ptr;
    }
  fp->_IO_read_ptr = fp->_IO_write_ptr;

  fp->_IO_write_base = fp->_IO_write_ptr = fp->_IO_write_end = fp->_IO_read_ptr;

  fp->_flags &= ~_IO_CURRENTLY_PUTTING;
  return 0;
}

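/* After a successful switch to get mode the put area is empty: all
   three write pointers equal _IO_read_ptr, any pending output has been
   handed to _IO_OVERFLOW above, and _IO_CURRENTLY_PUTTING is clear.  */
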
void
_IO_free_backup_area (fp)
     _IO_FILE *fp;
{
  if (_IO_in_backup (fp))
    _IO_switch_to_main_get_area (fp);  /* Just in case. */
  free (fp->_IO_save_base);
  fp->_IO_save_base = NULL;
  fp->_IO_save_end = NULL;
  fp->_IO_backup_base = NULL;
}

void
_IO_switch_to_put_mode (fp)
     _IO_FILE *fp;
{
  fp->_IO_write_base = fp->_IO_read_ptr;
  fp->_IO_write_ptr = fp->_IO_read_ptr;
  /* Following is wrong if line- or un-buffered? */
  fp->_IO_write_end = (fp->_flags & _IO_IN_BACKUP
                       ? fp->_IO_read_end : fp->_IO_buf_end);

  fp->_IO_read_ptr = fp->_IO_read_end;
  fp->_IO_read_base = fp->_IO_read_end;

  fp->_flags |= _IO_CURRENTLY_PUTTING;
}

int
__overflow (f, ch)
     _IO_FILE *f;
     int ch;
{
  /* This is a single-byte stream.  */
  return _IO_OVERFLOW (f, ch);
}

static int save_for_backup __P ((_IO_FILE *fp, char *end_p));

static int
save_for_backup (fp, end_p)
     _IO_FILE *fp;
     char *end_p;
{
  /* Append [_IO_read_base..end_p] to backup area. */
  _IO_ssize_t least_mark = _IO_least_marker (fp, end_p);
  /* needed_size is how much space we need in the backup area. */
  _IO_size_t needed_size = (end_p - fp->_IO_read_base) - least_mark;
  /* FIXME: Dubious arithmetic if pointers are NULL */
  _IO_size_t current_Bsize = fp->_IO_save_end - fp->_IO_save_base;
  _IO_size_t avail; /* Extra space available for future expansion. */
  _IO_ssize_t delta;
  struct _IO_marker *mark;
  if (needed_size > current_Bsize)
    {
      char *new_buffer;
      avail = 100;
      new_buffer = (char *) malloc (avail + needed_size);
      if (new_buffer == NULL)
        return EOF;             /* FIXME */
      if (least_mark < 0)
        {
#ifdef _LIBC
          __mempcpy (__mempcpy (new_buffer + avail,
                                fp->_IO_save_end + least_mark,
                                -least_mark),
                     fp->_IO_read_base,
                     end_p - fp->_IO_read_base);
#else
          memcpy (new_buffer + avail,
                  fp->_IO_save_end + least_mark,
                  -least_mark);
          memcpy (new_buffer + avail - least_mark,
                  fp->_IO_read_base,
                  end_p - fp->_IO_read_base);
#endif
        }
      else
        memcpy (new_buffer + avail,
                fp->_IO_read_base + least_mark,
                needed_size);
      if (fp->_IO_save_base)
        free (fp->_IO_save_base);
      fp->_IO_save_base = new_buffer;
      fp->_IO_save_end = new_buffer + avail + needed_size;
    }
  else
    {
      avail = current_Bsize - needed_size;
      if (least_mark < 0)
        {
          memmove (fp->_IO_save_base + avail,
                   fp->_IO_save_end + least_mark,
                   -least_mark);
          memcpy (fp->_IO_save_base + avail - least_mark,
                  fp->_IO_read_base,
                  end_p - fp->_IO_read_base);
        }
      else if (needed_size > 0)
        memcpy (fp->_IO_save_base + avail,
                fp->_IO_read_base + least_mark,
                needed_size);
    }
  fp->_IO_backup_base = fp->_IO_save_base + avail;
  /* Adjust all the stream markers. */
  delta = end_p - fp->_IO_read_base;
  for (mark = fp->_markers; mark != NULL; mark = mark->_next)
    mark->_pos -= delta;
  return 0;
}

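/* After save_for_backup returns 0 the backup area holds every byte
   from the least marker position up to END_P, preceded by AVAIL bytes
   of slack for future growth; _IO_backup_base points at the first
   saved byte, and every marker's _pos has been rebased by the length
   of the region that was saved.  */
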
int
__underflow (fp)
     _IO_FILE *fp;
{
  if (fp->_vtable_offset == 0 && _IO_fwide (fp, -1) != -1)
    return EOF;

  if (_IO_in_put_mode (fp))
    if (_IO_switch_to_get_mode (fp) == EOF)
      return EOF;
  if (fp->_IO_read_ptr < fp->_IO_read_end)
    return *(unsigned char *) fp->_IO_read_ptr;
  if (_IO_in_backup (fp))
    {
      _IO_switch_to_main_get_area (fp);
      if (fp->_IO_read_ptr < fp->_IO_read_end)
        return *(unsigned char *) fp->_IO_read_ptr;
    }
  if (_IO_have_markers (fp))
    {
      if (save_for_backup (fp, fp->_IO_read_end))
        return EOF;
    }
  else if (_IO_have_backup (fp))
    _IO_free_backup_area (fp);
  return _IO_UNDERFLOW (fp);
}

int
__uflow (fp)
     _IO_FILE *fp;
{
  if (fp->_vtable_offset == 0 && _IO_fwide (fp, -1) != -1)
    return EOF;

  if (_IO_in_put_mode (fp))
    if (_IO_switch_to_get_mode (fp) == EOF)
      return EOF;
  if (fp->_IO_read_ptr < fp->_IO_read_end)
    return *(unsigned char *) fp->_IO_read_ptr++;
  if (_IO_in_backup (fp))
    {
      _IO_switch_to_main_get_area (fp);
      if (fp->_IO_read_ptr < fp->_IO_read_end)
        return *(unsigned char *) fp->_IO_read_ptr++;
    }
  if (_IO_have_markers (fp))
    {
      if (save_for_backup (fp, fp->_IO_read_end))
        return EOF;
    }
  else if (_IO_have_backup (fp))
    _IO_free_backup_area (fp);
  return _IO_UFLOW (fp);
}

void
_IO_setb (f, b, eb, a)
     _IO_FILE *f;
     char *b;
     char *eb;
     int a;
{
  if (f->_IO_buf_base && !(f->_flags & _IO_USER_BUF))
    FREE_BUF (f->_IO_buf_base, _IO_blen (f));
  f->_IO_buf_base = b;
  f->_IO_buf_end = eb;
  if (a)
    f->_flags &= ~_IO_USER_BUF;
  else
    f->_flags |= _IO_USER_BUF;
}

void
_IO_doallocbuf (fp)
     _IO_FILE *fp;
{
  if (fp->_IO_buf_base)
    return;
  if (!(fp->_flags & _IO_UNBUFFERED))
    if (_IO_DOALLOCATE (fp) != EOF)
      return;
  _IO_setb (fp, fp->_shortbuf, fp->_shortbuf+1, 0);
}

int
_IO_default_underflow (fp)
     _IO_FILE *fp;
{
  return EOF;
}

int
_IO_default_uflow (fp)
     _IO_FILE *fp;
{
  int ch = _IO_UNDERFLOW (fp);
  if (ch == EOF)
    return EOF;
  return *(unsigned char *) fp->_IO_read_ptr++;
}

_IO_size_t
_IO_default_xsputn (f, data, n)
     _IO_FILE *f;
     const void *data;
     _IO_size_t n;
{
  const char *s = (const char *) data;
  _IO_size_t more = n;
  if (more == 0)
    return 0;
  for (;;)
    {
      /* Space available. */
      _IO_ssize_t count = f->_IO_write_end - f->_IO_write_ptr;
      if (count > 0)
        {
          if ((_IO_size_t) count > more)
            count = more;
          if (count > 20)
            {
              /* Long runs are copied in bulk.  */
#ifdef _LIBC
              f->_IO_write_ptr = __mempcpy (f->_IO_write_ptr, s, count);
#else
              memcpy (f->_IO_write_ptr, s, count);
              f->_IO_write_ptr += count;
#endif
              s += count;
            }
          else
            {
              /* Short runs are copied byte by byte.  */
              char *p = f->_IO_write_ptr;
              _IO_ssize_t i;
              for (i = count; --i >= 0; )
                *p++ = *s++;
              f->_IO_write_ptr = p;
            }
          more -= count;
        }
      if (more == 0 || _IO_OVERFLOW (f, (unsigned char) *s++) == EOF)
        break;
      more--;
    }
  return n - more;
}

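/* When the put area fills up, one pending byte is passed to
   _IO_OVERFLOW above; the overflow handler flushes the buffer and
   stores that byte, and the loop then continues with the remaining
   input.  The value returned is the number of bytes actually
   consumed.  */
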
_IO_size_t
_IO_sgetn (fp, data, n)
     _IO_FILE *fp;
     void *data;
     _IO_size_t n;
{
  /* FIXME handle putback buffer here! */
  return _IO_XSGETN (fp, data, n);
}

_IO_size_t
_IO_default_xsgetn (fp, data, n)
     _IO_FILE *fp;
     void *data;
     _IO_size_t n;
{
  _IO_size_t more = n;
  char *s = (char *) data;
  for (;;)
    {
      /* Data available. */
      _IO_ssize_t count = fp->_IO_read_end - fp->_IO_read_ptr;
      if (count > 0)
        {
          if ((_IO_size_t) count > more)
            count = more;
          if (count > 20)
            {
#ifdef _LIBC
              s = __mempcpy (s, fp->_IO_read_ptr, count);
#else
              memcpy (s, fp->_IO_read_ptr, count);
              s += count;
#endif
              fp->_IO_read_ptr += count;
            }
          else
            {
              char *p = fp->_IO_read_ptr;
              _IO_ssize_t i;
              for (i = count; --i >= 0; )
                *s++ = *p++;
              fp->_IO_read_ptr = p;
            }
          more -= count;
        }
      if (more == 0 || __underflow (fp) == EOF)
        break;
    }
  return n - more;
}

/* Seems not to be needed.  --drepper */

_IO_FILE *
_IO_default_setbuf (fp, p, len)
     _IO_FILE *fp;
     char *p;
     _IO_ssize_t len;
{
  if (_IO_SYNC (fp) == EOF)
    return NULL;
  if (p == NULL || len == 0)
    {
      fp->_flags |= _IO_UNBUFFERED;
      _IO_setb (fp, fp->_shortbuf, fp->_shortbuf+1, 0);
    }
  else
    {
      fp->_flags &= ~_IO_UNBUFFERED;
      _IO_setb (fp, p, p+len, 0);
    }
  fp->_IO_write_base = fp->_IO_write_ptr = fp->_IO_write_end = 0;
  fp->_IO_read_base = fp->_IO_read_ptr = fp->_IO_read_end = 0;
  return fp;
}

_IO_off64_t
_IO_default_seekpos (fp, pos, mode)
     _IO_FILE *fp; _IO_off64_t pos; int mode;
{
  return _IO_SEEKOFF (fp, pos, 0, mode);
}

int
_IO_default_doallocate (fp)
     _IO_FILE *fp;
{
  char *buf;

  ALLOC_BUF (buf, _IO_BUFSIZ, EOF);
  _IO_setb (fp, buf, buf+_IO_BUFSIZ, 1);
  return 1;
}

void
_IO_init (fp, flags)
     _IO_FILE *fp;
     int flags;
{
  _IO_no_init (fp, flags, -1, NULL, NULL);
}

void
_IO_no_init (fp, flags, orientation, wd, jmp)
     _IO_FILE *fp;
     int flags;
     int orientation;
     struct _IO_wide_data *wd;
     struct _IO_jump_t *jmp;
{
  fp->_flags = _IO_MAGIC|flags;
  fp->_IO_buf_base = NULL;
  fp->_IO_buf_end = NULL;
  fp->_IO_read_base = NULL;
  fp->_IO_read_ptr = NULL;
  fp->_IO_read_end = NULL;
  fp->_IO_write_base = NULL;
  fp->_IO_write_ptr = NULL;
  fp->_IO_write_end = NULL;
  fp->_chain = NULL; /* Not necessary. */

  fp->_IO_save_base = NULL;
  fp->_IO_backup_base = NULL;
  fp->_IO_save_end = NULL;
  fp->_markers = NULL;
  fp->_cur_column = 0;
  fp->_vtable_offset = 0;
#ifdef _IO_MTSAFE_IO
  _IO_lock_init (*fp->_lock);
#endif
  fp->_mode = orientation;
  if (orientation >= 0)
    {
      fp->_wide_data = wd;
      fp->_wide_data->_IO_buf_base = NULL;
      fp->_wide_data->_IO_buf_end = NULL;
      fp->_wide_data->_IO_read_base = NULL;
      fp->_wide_data->_IO_read_ptr = NULL;
      fp->_wide_data->_IO_read_end = NULL;
      fp->_wide_data->_IO_write_base = NULL;
      fp->_wide_data->_IO_write_ptr = NULL;
      fp->_wide_data->_IO_write_end = NULL;
      fp->_wide_data->_IO_save_base = NULL;
      fp->_wide_data->_IO_backup_base = NULL;
      fp->_wide_data->_IO_save_end = NULL;

      fp->_wide_data->_wide_vtable = jmp;
    }
}

int
_IO_default_sync (fp)
     _IO_FILE *fp;
{
  return 0;
}

/* The way the C++ classes are mapped into the C functions in the
   current implementation, this function can get called twice!  */

void
_IO_default_finish (fp, dummy)
     _IO_FILE *fp;
     int dummy;
{
  struct _IO_marker *mark;
  if (fp->_IO_buf_base && !(fp->_flags & _IO_USER_BUF))
    {
      FREE_BUF (fp->_IO_buf_base, _IO_blen (fp));
      fp->_IO_buf_base = fp->_IO_buf_end = NULL;
    }

  for (mark = fp->_markers; mark != NULL; mark = mark->_next)
    mark->_sbuf = NULL;

  if (fp->_IO_save_base)
    {
      free (fp->_IO_save_base);
      fp->_IO_save_base = NULL;
    }

#ifdef _IO_MTSAFE_IO
  _IO_lock_fini (*fp->_lock);
#endif

  _IO_un_link ((struct _IO_FILE_plus *) fp);
}

_IO_off64_t
_IO_default_seekoff (fp, offset, dir, mode)
     _IO_FILE *fp; _IO_off64_t offset; int dir; int mode;
{
  return _IO_pos_BAD;
}

int
_IO_sputbackc (fp, c)
     _IO_FILE *fp;
     int c;
{
  int result;

  if (fp->_IO_read_ptr > fp->_IO_read_base
      && (unsigned char) fp->_IO_read_ptr[-1] == (unsigned char) c)
    {
      fp->_IO_read_ptr--;
      result = (unsigned char) c;
    }
  else
    result = _IO_PBACKFAIL (fp, c);

  if (result != EOF)
    fp->_flags &= ~_IO_EOF_SEEN;

  return result;
}

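/* Illustrative only, never compiled: pushing back the character that
   was just read simply backs up _IO_read_ptr, while pushing back a
   different character goes through _IO_PBACKFAIL (hypothetical usage
   sketch, not part of the library).  */
#if 0
{
  int ch = __uflow (fp);
  if (ch != EOF && _IO_sputbackc (fp, ch) != EOF)
    /* The next read will return CH again.  */ ;
}
#endif
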
int
_IO_sungetc (fp)
     _IO_FILE *fp;
{
  int result;

  if (fp->_IO_read_ptr > fp->_IO_read_base)
    {
      fp->_IO_read_ptr--;
      result = (unsigned char) *fp->_IO_read_ptr;
    }
  else
    result = _IO_PBACKFAIL (fp, EOF);

  if (result != EOF)
    fp->_flags &= ~_IO_EOF_SEEN;

  return result;
}

#if 0 /* Work in progress */
/* Seems not to be needed.  */
#if 0
void
_IO_set_column (fp, c)
     _IO_FILE *fp;
     int c;
{
  fp->_column = c - (fp->_IO_write_ptr - fp->_IO_write_base);
}
#else
int
_IO_set_column (fp, i)
     _IO_FILE *fp;
     int i;
{
  fp->_cur_column = i + 1;
  return 0;
}
#endif
#endif

unsigned
_IO_adjust_column (start, line, count)
     unsigned start;
     const char *line;
     int count;
{
  const char *ptr = line + count;
  while (ptr > line)
    if (*--ptr == '\n')
      return line + count - ptr - 1;
  return start + count;
}

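/* Example: if LINE holds "ab\ncd" (COUNT == 5), the last newline is two
   characters before the end, so the result is 2 regardless of START;
   if it holds "abc" with no newline, the result is START + 3, i.e. the
   old column advanced by the number of characters written.  */
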
#if 0
/* Seems not to be needed.  --drepper */
int
_IO_get_column (fp)
     _IO_FILE *fp;
{
  if (fp->_cur_column)
    return _IO_adjust_column (fp->_cur_column - 1,
                              fp->_IO_write_base,
                              fp->_IO_write_ptr - fp->_IO_write_base);
  return -1;
}
#endif

int
_IO_flush_all ()
{
  int result = 0;
  _IO_FILE *fp;
  for (fp = (_IO_FILE *) _IO_list_all; fp; fp = fp->_chain)
    if (((fp->_mode < 0 && fp->_IO_write_ptr > fp->_IO_write_base)
         || (fp->_vtable_offset == 0
             && fp->_mode > 0 && (fp->_wide_data->_IO_write_ptr
                                  > fp->_wide_data->_IO_write_base)))
        && _IO_OVERFLOW (fp, EOF) == EOF)
      result = EOF;
  return result;
}

void
_IO_flush_all_linebuffered ()
{
  _IO_FILE *fp;
  for (fp = (_IO_FILE *) _IO_list_all; fp; fp = fp->_chain)
    if ((fp->_flags & _IO_NO_WRITES) == 0 && fp->_flags & _IO_LINE_BUF)
      _IO_OVERFLOW (fp, EOF);
}

static void _IO_unbuffer_write __P ((void));

static void
_IO_unbuffer_write ()
{
  _IO_FILE *fp;
  for (fp = (_IO_FILE *) _IO_list_all; fp; fp = fp->_chain)
    if (! (fp->_flags & _IO_UNBUFFERED)
        && (! (fp->_flags & _IO_NO_WRITES)
            || (fp->_flags & _IO_IS_APPENDING)))
      _IO_SETBUF (fp, NULL, 0);
}

int
_IO_cleanup ()
{
  int result = _IO_flush_all ();

  /* We currently don't have a reliable mechanism for making sure that
     C++ static destructors are executed in the correct order.
     So it is possible that other static destructors might want to
     write to cout - and they're supposed to be able to do so.

     The following will make the standard streambufs be unbuffered,
     which forces any output from late destructors to be written out.  */
  _IO_unbuffer_write ();

  return result;
}

void
_IO_init_marker (marker, fp)
     struct _IO_marker *marker;
     _IO_FILE *fp;
{
  marker->_sbuf = fp;
  if (_IO_in_put_mode (fp))
    _IO_switch_to_get_mode (fp);
  if (_IO_in_backup (fp))
    marker->_pos = fp->_IO_read_ptr - fp->_IO_read_end;
  else
    marker->_pos = fp->_IO_read_ptr - fp->_IO_read_base;

  /* Should perhaps sort the chain? */
  marker->_next = fp->_markers;
  fp->_markers = marker;
}

void
_IO_remove_marker (marker)
     struct _IO_marker *marker;
{
  /* Unlink from sb's chain. */
  struct _IO_marker **ptr = &marker->_sbuf->_markers;
  for (; ; ptr = &(*ptr)->_next)
    {
      if (*ptr == NULL)
        break;
      else if (*ptr == marker)
        {
          *ptr = marker->_next;
          return;
        }
    }
  /* If _sbuf has a backup area that is no longer needed, should we
     delete it now, or wait until the next underflow?  */
}

#define BAD_DELTA EOF

int
_IO_marker_difference (mark1, mark2)
     struct _IO_marker *mark1;
     struct _IO_marker *mark2;
{
  return mark1->_pos - mark2->_pos;
}

/* Return difference between MARK and current position of MARK's stream. */
int
_IO_marker_delta (mark)
     struct _IO_marker *mark;
{
  int cur_pos;
  if (mark->_sbuf == NULL)
    return BAD_DELTA;
  if (_IO_in_backup (mark->_sbuf))
    cur_pos = mark->_sbuf->_IO_read_ptr - mark->_sbuf->_IO_read_end;
  else
    cur_pos = mark->_sbuf->_IO_read_ptr - mark->_sbuf->_IO_read_base;
  return mark->_pos - cur_pos;
}

int
_IO_seekmark (fp, mark, delta)
     _IO_FILE *fp;
     struct _IO_marker *mark;
     int delta;
{
  if (mark->_sbuf != fp)
    return EOF;
  if (mark->_pos >= 0)
    {
      if (_IO_in_backup (fp))
        _IO_switch_to_main_get_area (fp);
      fp->_IO_read_ptr = fp->_IO_read_base + mark->_pos;
    }
  else
    {
      if (!_IO_in_backup (fp))
        _IO_switch_to_backup_area (fp);
      fp->_IO_read_ptr = fp->_IO_read_end + mark->_pos;
    }
  return 0;
}

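/* Illustrative only, never compiled: a marker remembers a position in
   the get area so a reader can back up to it later (hypothetical usage
   sketch; the real callers are in the C++ streambuf layer).  */
#if 0
{
  struct _IO_marker mark;
  _IO_init_marker (&mark, fp);
  /* ... read ahead using __uflow or _IO_XSGETN ... */
  _IO_seekmark (fp, &mark, 0);   /* Return to the remembered position.  */
  _IO_remove_marker (&mark);
}
#endif
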
void
_IO_unsave_markers (fp)
     _IO_FILE *fp;
{
  struct _IO_marker *mark = fp->_markers;
  if (mark)
    {
#ifdef TODO
      streampos offset = seekoff (0, ios::cur, ios::in);
      if (offset != EOF)
        {
          offset += eGptr () - Gbase ();
          for ( ; mark != NULL; mark = mark->_next)
            mark->set_streampos (mark->_pos + offset);
        }
      else
        {
          for ( ; mark != NULL; mark = mark->_next)
            mark->set_streampos (EOF);
        }
#endif
      fp->_markers = 0;
    }

  if (_IO_have_backup (fp))
    _IO_free_backup_area (fp);
}

#if 0
/* Seems not to be needed.  --drepper */
int
_IO_nobackup_pbackfail (fp, c)
     _IO_FILE *fp;
     int c;
{
  if (fp->_IO_read_ptr > fp->_IO_read_base)
    fp->_IO_read_ptr--;
  if (c != EOF && *fp->_IO_read_ptr != c)
    *fp->_IO_read_ptr = c;
  return (unsigned char) c;
}
#endif

int
_IO_default_pbackfail (fp, c)
     _IO_FILE *fp;
     int c;
{
  if (fp->_IO_read_ptr > fp->_IO_read_base && !_IO_in_backup (fp)
      && (unsigned char) fp->_IO_read_ptr[-1] == c)
    --fp->_IO_read_ptr;
  else
    {
      /* Need to handle a filebuf in write mode (switch to read mode). FIXME!*/
      if (!_IO_in_backup (fp))
        {
          /* We need to keep the invariant that the main get area
             logically follows the backup area.  */
          if (fp->_IO_read_ptr > fp->_IO_read_base && _IO_have_backup (fp))
            {
              if (save_for_backup (fp, fp->_IO_read_ptr))
                return EOF;
            }
          else if (!_IO_have_backup (fp))
            {
              /* No backup buffer: allocate one. */
              /* Use nshort buffer, if unused? (probably not)  FIXME */
              int backup_size = 128;
              char *bbuf = (char *) malloc (backup_size);
              if (bbuf == NULL)
                return EOF;
              fp->_IO_save_base = bbuf;
              fp->_IO_save_end = fp->_IO_save_base + backup_size;
              fp->_IO_backup_base = fp->_IO_save_end;
            }
          fp->_IO_read_base = fp->_IO_read_ptr;
          _IO_switch_to_backup_area (fp);
        }
      else if (fp->_IO_read_ptr <= fp->_IO_read_base)
        {
          /* Increase size of existing backup buffer. */
          _IO_size_t new_size;
          _IO_size_t old_size = fp->_IO_read_end - fp->_IO_read_base;
          char *new_buf;
          new_size = 2 * old_size;
          new_buf = (char *) malloc (new_size);
          if (new_buf == NULL)
            return EOF;
          memcpy (new_buf + (new_size - old_size), fp->_IO_read_base,
                  old_size);
          free (fp->_IO_read_base);
          _IO_setg (fp, new_buf, new_buf + (new_size - old_size),
                    new_buf + new_size);
          fp->_IO_backup_base = fp->_IO_read_ptr;
        }

      *--fp->_IO_read_ptr = c;
    }
  return (unsigned char) c;
}

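/* When the backup buffer runs out of room (the read pointer has
   reached its base), the code above doubles the buffer, copies the old
   contents to the top of the new one, and keeps storing pushed-back
   characters downwards from _IO_read_ptr.  */
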
_IO_off64_t
_IO_default_seek (fp, offset, dir)
     _IO_FILE *fp; _IO_off64_t offset; int dir;
{
  return _IO_pos_BAD;
}

int
_IO_default_stat (fp, st)
     _IO_FILE *fp; void *st;
{
  return EOF;
}

_IO_ssize_t
_IO_default_read (fp, data, n)
     _IO_FILE *fp; void *data; _IO_ssize_t n;
{
  return -1;
}

_IO_ssize_t
_IO_default_write (fp, data, n)
     _IO_FILE *fp; const void *data; _IO_ssize_t n;
{
  return 0;
}

int
_IO_default_showmanyc (fp)
     _IO_FILE *fp;
{
  return -1;
}

void
_IO_default_imbue (fp, locale)
     _IO_FILE *fp; void *locale;
{
}

_IO_ITER
_IO_iter_begin ()
{
  return (_IO_ITER) _IO_list_all;
}

_IO_ITER
_IO_iter_next (iter)
     _IO_ITER iter;
{
  return iter->_chain;
}

void
_IO_list_lock ()
{
#ifdef _IO_MTSAFE_IO
  _IO_lock_lock (list_all_lock);
#endif
}

void
_IO_list_unlock ()
{
#ifdef _IO_MTSAFE_IO
  _IO_lock_unlock (list_all_lock);
#endif
}

void
_IO_list_resetlock ()
{
#ifdef _IO_MTSAFE_IO
  _IO_lock_init (list_all_lock);
#endif
}

#define IO_CLEANUP ;

#ifdef __cplusplus
struct __io_defs
{
  ~__io_defs() { _IO_cleanup (); }
};
__io_defs io_defs__;
#endif

weak_alias (_IO_cleanup, _cleanup)

#ifdef text_set_element
text_set_element(__libc_atexit, _cleanup);
#endif