/* Copyright (C) 1993,1995,1997-1999,2000,2001 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, write to the Free
   Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA
   02111-1307 USA.

   As a special exception, if you link the code in this file with
   files compiled with a GNU compiler to produce an executable,
   that does not cause the resulting executable to be covered by
   the GNU Lesser General Public License.  This exception does not
   however invalidate any other reasons why the executable file
   might be covered by the GNU Lesser General Public License.
   This exception applies to code released by its copyright holders
   in files containing the exception.  */
/* Generic or default I/O operations.  */

#include "libioP.h"
#include <stdlib.h>
#include <string.h>
#ifdef _IO_MTSAFE_IO
static _IO_lock_t list_all_lock = _IO_lock_initializer;
#endif

/* Used to signal modifications to the list of FILE descriptors.  */
static int _IO_list_all_stamp;

static _IO_FILE *run_fp;
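
/* Cleanup handler used while walking the list of all open files: release
   the per-stream lock held by the walker (if any) and the list lock.  */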
static void
flush_cleanup (void *not_used)
{
  if (run_fp != NULL)
    _IO_funlockfile (run_fp);
  _IO_lock_unlock (list_all_lock);
}
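
/* Remove FP from the linked list of all open files headed by _IO_list_all.  */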
void
_IO_un_link (fp)
     struct _IO_FILE_plus *fp;
{
  if (fp->file._flags & _IO_LINKED)
    {
      struct _IO_FILE_plus **f;
#ifdef _IO_MTSAFE_IO
      _IO_cleanup_region_start_noarg (flush_cleanup);
      _IO_lock_lock (list_all_lock);
      run_fp = (_IO_FILE *) fp;
      _IO_flockfile ((_IO_FILE *) fp);
#endif
      for (f = &_IO_list_all; *f;
           f = (struct _IO_FILE_plus **) &(*f)->file._chain)
        if (*f == fp)
          {
            *f = (struct _IO_FILE_plus *) fp->file._chain;
            ++_IO_list_all_stamp;
            break;
          }
      fp->file._flags &= ~_IO_LINKED;
#ifdef _IO_MTSAFE_IO
      _IO_funlockfile ((_IO_FILE *) fp);
      run_fp = NULL;
      _IO_lock_unlock (list_all_lock);
      _IO_cleanup_region_end (0);
#endif
    }
}
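
/* Insert FP at the head of the list of all open files, unless it is
   already linked in.  */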
void
_IO_link_in (fp)
     struct _IO_FILE_plus *fp;
{
  if ((fp->file._flags & _IO_LINKED) == 0)
    {
      fp->file._flags |= _IO_LINKED;
#ifdef _IO_MTSAFE_IO
      _IO_cleanup_region_start_noarg (flush_cleanup);
      _IO_lock_lock (list_all_lock);
      run_fp = (_IO_FILE *) fp;
      _IO_flockfile ((_IO_FILE *) fp);
#endif
      fp->file._chain = (_IO_FILE *) _IO_list_all;
      _IO_list_all = fp;
      ++_IO_list_all_stamp;
#ifdef _IO_MTSAFE_IO
      _IO_funlockfile ((_IO_FILE *) fp);
      run_fp = NULL;
      _IO_lock_unlock (list_all_lock);
      _IO_cleanup_region_end (0);
#endif
    }
}
/* Return the minimum _pos of all the stream's markers.
   Assumes the current get area is the main get area.  */
_IO_ssize_t _IO_least_marker __P ((_IO_FILE *fp, char *end_p));

_IO_ssize_t
_IO_least_marker (fp, end_p)
     _IO_FILE *fp;
     char *end_p;
{
  _IO_ssize_t least_so_far = end_p - fp->_IO_read_base;
  struct _IO_marker *mark;
  for (mark = fp->_markers; mark != NULL; mark = mark->_next)
    if (mark->_pos < least_so_far)
      least_so_far = mark->_pos;
  return least_so_far;
}
/* Switch current get area from backup buffer to (start of) main get area.  */

void
_IO_switch_to_main_get_area (fp)
     _IO_FILE *fp;
{
  char *tmp;
  fp->_flags &= ~_IO_IN_BACKUP;
  /* Swap _IO_read_end and _IO_save_end.  */
  tmp = fp->_IO_read_end;
  fp->_IO_read_end = fp->_IO_save_end;
  fp->_IO_save_end = tmp;
  /* Swap _IO_read_base and _IO_save_base.  */
  tmp = fp->_IO_read_base;
  fp->_IO_read_base = fp->_IO_save_base;
  fp->_IO_save_base = tmp;
  /* Set _IO_read_ptr.  */
  fp->_IO_read_ptr = fp->_IO_read_base;
}
/* Switch current get area from main get area to (end of) backup area.  */

void
_IO_switch_to_backup_area (fp)
     _IO_FILE *fp;
{
  char *tmp;
  fp->_flags |= _IO_IN_BACKUP;
  /* Swap _IO_read_end and _IO_save_end.  */
  tmp = fp->_IO_read_end;
  fp->_IO_read_end = fp->_IO_save_end;
  fp->_IO_save_end = tmp;
  /* Swap _IO_read_base and _IO_save_base.  */
  tmp = fp->_IO_read_base;
  fp->_IO_read_base = fp->_IO_save_base;
  fp->_IO_save_base = tmp;
  /* Set _IO_read_ptr.  */
  fp->_IO_read_ptr = fp->_IO_read_end;
}
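
/* Prepare FP for reading: flush any pending output and point the read
   pointers at the data currently buffered.  Returns EOF if the flush
   fails, 0 otherwise.  */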
int
_IO_switch_to_get_mode (fp)
     _IO_FILE *fp;
{
  if (fp->_IO_write_ptr > fp->_IO_write_base)
    if (_IO_OVERFLOW (fp, EOF) == EOF)
      return EOF;
  if (_IO_in_backup (fp))
    fp->_IO_read_base = fp->_IO_backup_base;
  else
    {
      fp->_IO_read_base = fp->_IO_buf_base;
      if (fp->_IO_write_ptr > fp->_IO_read_end)
        fp->_IO_read_end = fp->_IO_write_ptr;
    }
  fp->_IO_read_ptr = fp->_IO_write_ptr;

  fp->_IO_write_base = fp->_IO_write_ptr = fp->_IO_write_end = fp->_IO_read_ptr;

  fp->_flags &= ~_IO_CURRENTLY_PUTTING;
  return 0;
}
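
/* Discard the pushback (backup) buffer of FP, switching back to the main
   get area first if necessary.  */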
void
_IO_free_backup_area (fp)
     _IO_FILE *fp;
{
  if (_IO_in_backup (fp))
    _IO_switch_to_main_get_area (fp);  /* Just in case.  */
  free (fp->_IO_save_base);
  fp->_IO_save_base = NULL;
  fp->_IO_save_end = NULL;
  fp->_IO_backup_base = NULL;
}
int
_IO_switch_to_put_mode (fp)
     _IO_FILE *fp;
{
  fp->_IO_write_base = fp->_IO_read_ptr;
  fp->_IO_write_ptr = fp->_IO_read_ptr;
  /* Following is wrong if line- or un-buffered?  */
  fp->_IO_write_end = (fp->_flags & _IO_IN_BACKUP
                       ? fp->_IO_read_end : fp->_IO_buf_end);

  fp->_IO_read_ptr = fp->_IO_read_end;
  fp->_IO_read_base = fp->_IO_read_end;

  fp->_flags |= _IO_CURRENTLY_PUTTING;
  return 0;
}
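
/* External entry point used when a character must be written and the put
   area is full; it forwards to the stream's overflow method after making
   sure the stream is byte oriented.  */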
int
__overflow (f, ch)
     _IO_FILE *f;
     int ch;
{
  /* This is a single-byte stream.  */
  if (f->_mode == 0)
    _IO_fwide (f, -1);
  return _IO_OVERFLOW (f, ch);
}
static int save_for_backup __P ((_IO_FILE *fp, char *end_p));

static int
save_for_backup (fp, end_p)
     _IO_FILE *fp;
     char *end_p;
{
  /* Append [_IO_read_base..end_p] to backup area.  */
  _IO_ssize_t least_mark = _IO_least_marker (fp, end_p);
  /* needed_size is how much space we need in the backup area.  */
  _IO_size_t needed_size = (end_p - fp->_IO_read_base) - least_mark;
  /* FIXME: Dubious arithmetic if pointers are NULL.  */
  _IO_size_t current_Bsize = fp->_IO_save_end - fp->_IO_save_base;
  _IO_size_t avail;  /* Extra space available for future expansion.  */
  _IO_ssize_t delta;
  struct _IO_marker *mark;
  if (needed_size > current_Bsize)
    {
      char *new_buffer;
      avail = 100;
      new_buffer = (char *) malloc (avail + needed_size);
      if (new_buffer == NULL)
        return EOF;  /* FIXME */
      if (least_mark < 0)
        {
#ifdef _LIBC
          __mempcpy (__mempcpy (new_buffer + avail,
                                fp->_IO_save_end + least_mark,
                                -least_mark),
                     fp->_IO_read_base,
                     end_p - fp->_IO_read_base);
#else
          memcpy (new_buffer + avail,
                  fp->_IO_save_end + least_mark,
                  -least_mark);
          memcpy (new_buffer + avail - least_mark,
                  fp->_IO_read_base,
                  end_p - fp->_IO_read_base);
#endif
        }
      else
        memcpy (new_buffer + avail,
                fp->_IO_read_base + least_mark,
                needed_size);
      if (fp->_IO_save_base)
        free (fp->_IO_save_base);
      fp->_IO_save_base = new_buffer;
      fp->_IO_save_end = new_buffer + avail + needed_size;
    }
  else
    {
      avail = current_Bsize - needed_size;
      if (least_mark < 0)
        {
          memmove (fp->_IO_save_base + avail,
                   fp->_IO_save_end + least_mark,
                   -least_mark);
          memcpy (fp->_IO_save_base + avail - least_mark,
                  fp->_IO_read_base,
                  end_p - fp->_IO_read_base);
        }
      else if (needed_size > 0)
        memcpy (fp->_IO_save_base + avail,
                fp->_IO_read_base + least_mark,
                needed_size);
    }
  fp->_IO_backup_base = fp->_IO_save_base + avail;
  /* Adjust all the streammarkers.  */
  delta = end_p - fp->_IO_read_base;
  for (mark = fp->_markers; mark != NULL; mark = mark->_next)
    mark->_pos -= delta;
  return 0;
}
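
/* External entry point used when the get area is empty: make more input
   available and return the next character without consuming it, or EOF.  */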
int
__underflow (fp)
     _IO_FILE *fp;
{
#if defined _LIBC || defined _GLIBCPP_USE_WCHAR_T
  if (fp->_vtable_offset == 0 && _IO_fwide (fp, -1) != -1)
    return EOF;
#endif

  if (fp->_mode == 0)
    _IO_fwide (fp, -1);
  if (_IO_in_put_mode (fp))
    if (_IO_switch_to_get_mode (fp) == EOF)
      return EOF;
  if (fp->_IO_read_ptr < fp->_IO_read_end)
    return *(unsigned char *) fp->_IO_read_ptr;
  if (_IO_in_backup (fp))
    {
      _IO_switch_to_main_get_area (fp);
      if (fp->_IO_read_ptr < fp->_IO_read_end)
        return *(unsigned char *) fp->_IO_read_ptr;
    }
  if (_IO_have_markers (fp))
    {
      if (save_for_backup (fp, fp->_IO_read_end))
        return EOF;
    }
  else if (_IO_have_backup (fp))
    _IO_free_backup_area (fp);
  return _IO_UNDERFLOW (fp);
}
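
/* Like __underflow, but the returned character is consumed, i.e. the read
   pointer is advanced past it.  */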
int
__uflow (fp)
     _IO_FILE *fp;
{
#if defined _LIBC || defined _GLIBCPP_USE_WCHAR_T
  if (fp->_vtable_offset == 0 && _IO_fwide (fp, -1) != -1)
    return EOF;
#endif

  if (fp->_mode == 0)
    _IO_fwide (fp, -1);
  if (_IO_in_put_mode (fp))
    if (_IO_switch_to_get_mode (fp) == EOF)
      return EOF;
  if (fp->_IO_read_ptr < fp->_IO_read_end)
    return *(unsigned char *) fp->_IO_read_ptr++;
  if (_IO_in_backup (fp))
    {
      _IO_switch_to_main_get_area (fp);
      if (fp->_IO_read_ptr < fp->_IO_read_end)
        return *(unsigned char *) fp->_IO_read_ptr++;
    }
  if (_IO_have_markers (fp))
    {
      if (save_for_backup (fp, fp->_IO_read_end))
        return EOF;
    }
  else if (_IO_have_backup (fp))
    _IO_free_backup_area (fp);
  return _IO_UFLOW (fp);
}
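
/* Install B..EB as the buffer of F.  A non-zero A means libio owns the
   buffer and may free it later; otherwise it is a user-supplied buffer.  */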
void
_IO_setb (f, b, eb, a)
     _IO_FILE *f;
     char *b;
     char *eb;
     int a;
{
  if (f->_IO_buf_base && !(f->_flags & _IO_USER_BUF))
    FREE_BUF (f->_IO_buf_base, _IO_blen (f));
  f->_IO_buf_base = b;
  f->_IO_buf_end = eb;
  if (a)
    f->_flags &= ~_IO_USER_BUF;
  else
    f->_flags |= _IO_USER_BUF;
}
void
_IO_doallocbuf (fp)
     _IO_FILE *fp;
{
  if (fp->_IO_buf_base)
    return;
  if (!(fp->_flags & _IO_UNBUFFERED) || fp->_mode > 0)
    if (_IO_DOALLOCATE (fp) != EOF)
      return;
  _IO_setb (fp, fp->_shortbuf, fp->_shortbuf+1, 0);
}
int
_IO_default_underflow (fp)
     _IO_FILE *fp;
{
  return EOF;
}
int
_IO_default_uflow (fp)
     _IO_FILE *fp;
{
  int ch = _IO_UNDERFLOW (fp);
  if (ch == EOF)
    return EOF;
  return *(unsigned char *) fp->_IO_read_ptr++;
}
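
/* Default xsputn method: copy N bytes from DATA into the put area, calling
   the overflow method whenever the buffer fills up.  Returns the number of
   bytes actually written.  */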
_IO_size_t
_IO_default_xsputn (f, data, n)
     _IO_FILE *f;
     const void *data;
     _IO_size_t n;
{
  const char *s = (char *) data;
  _IO_size_t more = n;
  if (more <= 0)
    return 0;
  for (;;)
    {
      /* Space available.  */
      _IO_ssize_t count = f->_IO_write_end - f->_IO_write_ptr;
      if (count > 0)
        {
          if ((_IO_size_t) count > more)
            count = more;
          if (count > 20)
            {
#ifdef _LIBC
              f->_IO_write_ptr = __mempcpy (f->_IO_write_ptr, s, count);
#else
              memcpy (f->_IO_write_ptr, s, count);
              f->_IO_write_ptr += count;
#endif
              s += count;
            }
          else if (count <= 0)
            count = 0;
          else
            {
              char *p = f->_IO_write_ptr;
              _IO_ssize_t i;
              for (i = count; --i >= 0; )
                *p++ = *s++;
              f->_IO_write_ptr = p;
            }
          more -= count;
        }
      if (more == 0 || _IO_OVERFLOW (f, (unsigned char) *s++) == EOF)
        break;
      more--;
    }
  return n - more;
}
_IO_size_t
_IO_sgetn (fp, data, n)
     _IO_FILE *fp;
     void *data;
     _IO_size_t n;
{
  /* FIXME handle putback buffer here!  */
  return _IO_XSGETN (fp, data, n);
}
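
/* Default xsgetn method: copy up to N bytes from the get area into DATA,
   refilling the buffer via __underflow as needed.  Returns the number of
   bytes actually read.  */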
_IO_size_t
_IO_default_xsgetn (fp, data, n)
     _IO_FILE *fp;
     void *data;
     _IO_size_t n;
{
  _IO_size_t more = n;
  char *s = (char*) data;
  for (;;)
    {
      /* Data available.  */
      _IO_ssize_t count = fp->_IO_read_end - fp->_IO_read_ptr;
      if (count > 0)
        {
          if ((_IO_size_t) count > more)
            count = more;
          if (count > 20)
            {
#ifdef _LIBC
              s = __mempcpy (s, fp->_IO_read_ptr, count);
#else
              memcpy (s, fp->_IO_read_ptr, count);
              s += count;
#endif
              fp->_IO_read_ptr += count;
            }
          else if (count <= 0)
            count = 0;
          else
            {
              char *p = fp->_IO_read_ptr;
              int i = (int) count;
              while (--i >= 0)
                *s++ = *p++;
              fp->_IO_read_ptr = p;
            }
          more -= count;
        }
      if (more == 0 || __underflow (fp) == EOF)
        break;
    }
  return n - more;
}
/* Seems not to be needed. --drepper */
_IO_FILE *
_IO_default_setbuf (fp, p, len)
     _IO_FILE *fp;
     char *p;
     _IO_ssize_t len;
{
  if (_IO_SYNC (fp) == EOF)
    return NULL;
  if (p == NULL || len == 0)
    {
      fp->_flags |= _IO_UNBUFFERED;
      _IO_setb (fp, fp->_shortbuf, fp->_shortbuf+1, 0);
    }
  else
    {
      fp->_flags &= ~_IO_UNBUFFERED;
      _IO_setb (fp, p, p+len, 0);
    }
  fp->_IO_write_base = fp->_IO_write_ptr = fp->_IO_write_end = 0;
  fp->_IO_read_base = fp->_IO_read_ptr = fp->_IO_read_end = 0;
  return fp;
}
_IO_off64_t
_IO_default_seekpos (fp, pos, mode)
     _IO_FILE *fp;
     _IO_off64_t pos;
     int mode;
{
  return _IO_SEEKOFF (fp, pos, 0, mode);
}
int
_IO_default_doallocate (fp)
     _IO_FILE *fp;
{
  char *buf;

  ALLOC_BUF (buf, _IO_BUFSIZ, EOF);
  _IO_setb (fp, buf, buf+_IO_BUFSIZ, 1);
  return 1;
}
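
/* Basic initialization of FP, without setting up wide-character data;
   the real work is done by _IO_no_init below.  */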
void
_IO_init (fp, flags)
     _IO_FILE *fp;
     int flags;
{
  _IO_no_init (fp, flags, -1, NULL, NULL);
}
void
_IO_no_init (fp, flags, orientation, wd, jmp)
     _IO_FILE *fp;
     int flags;
     int orientation;
     struct _IO_wide_data *wd;
     struct _IO_jump_t *jmp;
{
  fp->_flags = _IO_MAGIC|flags;
  fp->_IO_buf_base = NULL;
  fp->_IO_buf_end = NULL;
  fp->_IO_read_base = NULL;
  fp->_IO_read_ptr = NULL;
  fp->_IO_read_end = NULL;
  fp->_IO_write_base = NULL;
  fp->_IO_write_ptr = NULL;
  fp->_IO_write_end = NULL;
  fp->_chain = NULL;  /* Not necessary.  */

  fp->_IO_save_base = NULL;
  fp->_IO_backup_base = NULL;
  fp->_IO_save_end = NULL;
  fp->_markers = NULL;
  fp->_cur_column = 0;
  fp->_vtable_offset = 0;
#ifdef _IO_MTSAFE_IO
  _IO_lock_init (*fp->_lock);
#endif
  fp->_mode = orientation;
#if defined _LIBC || defined _GLIBCPP_USE_WCHAR_T
  if (orientation >= 0)
    {
      fp->_wide_data = wd;
      fp->_wide_data->_IO_buf_base = NULL;
      fp->_wide_data->_IO_buf_end = NULL;
      fp->_wide_data->_IO_read_base = NULL;
      fp->_wide_data->_IO_read_ptr = NULL;
      fp->_wide_data->_IO_read_end = NULL;
      fp->_wide_data->_IO_write_base = NULL;
      fp->_wide_data->_IO_write_ptr = NULL;
      fp->_wide_data->_IO_write_end = NULL;
      fp->_wide_data->_IO_save_base = NULL;
      fp->_wide_data->_IO_backup_base = NULL;
      fp->_wide_data->_IO_save_end = NULL;

      fp->_wide_data->_wide_vtable = jmp;
    }
#endif
}
int
_IO_default_sync (fp)
     _IO_FILE *fp;
{
  return 0;
}
/* The way the C++ classes are mapped into the C functions in the
   current implementation, this function can get called twice!  */

void
_IO_default_finish (fp, dummy)
     _IO_FILE *fp;
     int dummy;
{
  struct _IO_marker *mark;
  if (fp->_IO_buf_base && !(fp->_flags & _IO_USER_BUF))
    {
      FREE_BUF (fp->_IO_buf_base, _IO_blen (fp));
      fp->_IO_buf_base = fp->_IO_buf_end = NULL;
    }

  for (mark = fp->_markers; mark != NULL; mark = mark->_next)
    mark->_sbuf = NULL;

  if (fp->_IO_save_base)
    {
      free (fp->_IO_save_base);
      fp->_IO_save_base = NULL;
    }

#ifdef _IO_MTSAFE_IO
  _IO_lock_fini (*fp->_lock);
#endif

  _IO_un_link ((struct _IO_FILE_plus *) fp);
}
_IO_off64_t
_IO_default_seekoff (fp, offset, dir, mode)
     _IO_FILE *fp;
     _IO_off64_t offset;
     int dir;
     int mode;
{
  return _IO_pos_BAD;
}
int
_IO_sputbackc (fp, c)
     _IO_FILE *fp;
     int c;
{
  int result;

  if (fp->_IO_read_ptr > fp->_IO_read_base
      && (unsigned char)fp->_IO_read_ptr[-1] == (unsigned char)c)
    {
      fp->_IO_read_ptr--;
      result = (unsigned char) c;
    }
  else
    result = _IO_PBACKFAIL (fp, c);

  if (result != EOF)
    fp->_flags &= ~_IO_EOF_SEEN;

  return result;
}
int
_IO_sungetc (fp)
     _IO_FILE *fp;
{
  int result;

  if (fp->_IO_read_ptr > fp->_IO_read_base)
    {
      fp->_IO_read_ptr--;
      result = (unsigned char) *fp->_IO_read_ptr;
    }
  else
    result = _IO_PBACKFAIL (fp, EOF);

  if (result != EOF)
    fp->_flags &= ~_IO_EOF_SEEN;

  return result;
}
#if 0 /* Work in progress */
/* Seems not to be needed.  */
#if 0
void
_IO_set_column (fp, c)
     _IO_FILE *fp;
     int c;
{
  if (c == -1)
    fp->_column = -1;
  else
    fp->_column = c - (fp->_IO_write_ptr - fp->_IO_write_base);
}
#else
int
_IO_set_column (fp, i)
     _IO_FILE *fp;
     int i;
{
  fp->_cur_column = i + 1;
  return 0;
}
#endif
#endif
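
/* Compute the output column reached after writing the COUNT bytes at LINE,
   assuming the column before was START.  A newline resets the count, so
   e.g. start 3 with line "ab\ncd" (count 5) yields column 2.  */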
unsigned
_IO_adjust_column (start, line, count)
     unsigned start;
     const char *line;
     int count;
{
  const char *ptr = line + count;
  while (ptr > line)
    if (*--ptr == '\n')
      return line + count - ptr - 1;
  return start + count;
}
#if 0
/* Seems not to be needed. --drepper */
int
_IO_get_column (fp)
     _IO_FILE *fp;
{
  if (fp->_cur_column)
    return _IO_adjust_column (fp->_cur_column - 1,
                              fp->_IO_write_base,
                              fp->_IO_write_ptr - fp->_IO_write_base);
  return -1;
}
#endif
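
/* Flush every stream on the open-file list.  If the list is modified while
   we walk it (detected via _IO_list_all_stamp), start over from the head.
   Returns EOF if flushing any stream fails.  */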
int
_IO_flush_all ()
{
  int result = 0;
  struct _IO_FILE *fp;
  int last_stamp;

#ifdef _IO_MTSAFE_IO
  _IO_cleanup_region_start_noarg (flush_cleanup);
  _IO_lock_lock (list_all_lock);
#endif

  last_stamp = _IO_list_all_stamp;
  fp = (_IO_FILE *) _IO_list_all;
  while (fp != NULL)
    {
      run_fp = fp;
      _IO_flockfile (fp);

      if (((fp->_mode <= 0 && fp->_IO_write_ptr > fp->_IO_write_base)
#if defined _LIBC || defined _GLIBCPP_USE_WCHAR_T
           || (fp->_vtable_offset == 0
               && fp->_mode > 0 && (fp->_wide_data->_IO_write_ptr
                                    > fp->_wide_data->_IO_write_base))
#endif
           )
          && _IO_OVERFLOW (fp, EOF) == EOF)
        result = EOF;

      _IO_funlockfile (fp);
      run_fp = NULL;

      if (last_stamp != _IO_list_all_stamp)
        {
          /* Something was added to the list.  Start all over again.  */
          fp = (_IO_FILE *) _IO_list_all;
          last_stamp = _IO_list_all_stamp;
        }
      else
        fp = fp->_chain;
    }

#ifdef _IO_MTSAFE_IO
  _IO_lock_unlock (list_all_lock);
  _IO_cleanup_region_end (0);
#endif

  return result;
}
void
_IO_flush_all_linebuffered ()
{
  struct _IO_FILE *fp;
  int last_stamp;

#ifdef _IO_MTSAFE_IO
  _IO_cleanup_region_start_noarg (flush_cleanup);
  _IO_lock_lock (list_all_lock);
#endif

  last_stamp = _IO_list_all_stamp;
  fp = (_IO_FILE *) _IO_list_all;
  while (fp != NULL)
    {
      run_fp = fp;
      _IO_flockfile (fp);

      if ((fp->_flags & _IO_NO_WRITES) == 0 && fp->_flags & _IO_LINE_BUF)
        _IO_OVERFLOW (fp, EOF);

      _IO_funlockfile (fp);
      run_fp = NULL;

      if (last_stamp != _IO_list_all_stamp)
        {
          /* Something was added to the list.  Start all over again.  */
          fp = (_IO_FILE *) _IO_list_all;
          last_stamp = _IO_list_all_stamp;
        }
      else
        fp = fp->_chain;
    }

#ifdef _IO_MTSAFE_IO
  _IO_lock_unlock (list_all_lock);
  _IO_cleanup_region_end (0);
#endif
}
weak_alias (_IO_flush_all_linebuffered, _flushlbf)
static void _IO_unbuffer_write __P ((void));

static void
_IO_unbuffer_write ()
{
  struct _IO_FILE *fp;
  for (fp = (_IO_FILE *) _IO_list_all; fp; fp = fp->_chain)
    if (! (fp->_flags & _IO_UNBUFFERED)
        && (! (fp->_flags & _IO_NO_WRITES)
            || (fp->_flags & _IO_IS_APPENDING))
        /* Iff stream is un-orientated, it wasn't used.  */
        && fp->_mode != 0)
      _IO_SETBUF (fp, NULL, 0);
}
int
_IO_cleanup ()
{
  int result = _IO_flush_all ();

  /* We currently don't have a reliable mechanism for making sure that
     C++ static destructors are executed in the correct order.
     So it is possible that other static destructors might want to
     write to cout - and they're supposed to be able to do so.

     The following will make the standard streambufs be unbuffered,
     which forces any output from late destructors to be written out.  */
  _IO_unbuffer_write ();

  return result;
}
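
/* Stream markers remember a position in the get area so it can be returned
   to later.  _IO_init_marker records the current position of FP in MARKER
   and links it into the stream's marker chain.  */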
void
_IO_init_marker (marker, fp)
     struct _IO_marker *marker;
     _IO_FILE *fp;
{
  marker->_sbuf = fp;
  if (_IO_in_put_mode (fp))
    _IO_switch_to_get_mode (fp);
  if (_IO_in_backup (fp))
    marker->_pos = fp->_IO_read_ptr - fp->_IO_read_end;
  else
    marker->_pos = fp->_IO_read_ptr - fp->_IO_read_base;

  /* Should perhaps sort the chain?  */
  marker->_next = fp->_markers;
  fp->_markers = marker;
}
void
_IO_remove_marker (marker)
     struct _IO_marker *marker;
{
  /* Unlink from sb's chain.  */
  struct _IO_marker **ptr = &marker->_sbuf->_markers;
  for (; ; ptr = &(*ptr)->_next)
    {
      if (*ptr == NULL)
        break;
      else if (*ptr == marker)
        {
          *ptr = marker->_next;
          return;
        }
    }
#if 0
    if _sbuf has a backup area that is no longer needed, should we delete
    it now, or wait until the next underflow?
#endif
}
#define BAD_DELTA EOF

int
_IO_marker_difference (mark1, mark2)
     struct _IO_marker *mark1;
     struct _IO_marker *mark2;
{
  return mark1->_pos - mark2->_pos;
}
/* Return difference between MARK and current position of MARK's stream.  */
int
_IO_marker_delta (mark)
     struct _IO_marker *mark;
{
  int cur_pos;
  if (mark->_sbuf == NULL)
    return BAD_DELTA;
  if (_IO_in_backup (mark->_sbuf))
    cur_pos = mark->_sbuf->_IO_read_ptr - mark->_sbuf->_IO_read_end;
  else
    cur_pos = mark->_sbuf->_IO_read_ptr - mark->_sbuf->_IO_read_base;
  return mark->_pos - cur_pos;
}
int
_IO_seekmark (fp, mark, delta)
     _IO_FILE *fp;
     struct _IO_marker *mark;
     int delta;
{
  if (mark->_sbuf != fp)
    return EOF;
  if (mark->_pos >= 0)
    {
      if (_IO_in_backup (fp))
        _IO_switch_to_main_get_area (fp);
      fp->_IO_read_ptr = fp->_IO_read_base + mark->_pos;
    }
  else
    {
      if (!_IO_in_backup (fp))
        _IO_switch_to_backup_area (fp);
      fp->_IO_read_ptr = fp->_IO_read_end + mark->_pos;
    }
  return 0;
}
void
_IO_unsave_markers (fp)
     _IO_FILE *fp;
{
  struct _IO_marker *mark = fp->_markers;
  if (mark)
    {
#ifdef TODO
      streampos offset = seekoff (0, ios::cur, ios::in);
      if (offset != EOF)
        {
          offset += eGptr () - Gbase ();
          for ( ; mark != NULL; mark = mark->_next)
            mark->set_streampos (mark->_pos + offset);
        }
      else
        {
          for ( ; mark != NULL; mark = mark->_next)
            mark->set_streampos (EOF);
        }
#endif
      fp->_markers = 0;
    }

  if (_IO_have_backup (fp))
    _IO_free_backup_area (fp);
}
#if 0
/* Seems not to be needed. --drepper */
int
_IO_nobackup_pbackfail (fp, c)
     _IO_FILE *fp;
     int c;
{
  if (fp->_IO_read_ptr > fp->_IO_read_base)
    fp->_IO_read_ptr--;
  if (c != EOF && *fp->_IO_read_ptr != c)
    *fp->_IO_read_ptr = c;
  return (unsigned char) c;
}
#endif
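
/* Default pbackfail method: handle a pushback (ungetc) that does not fit
   in front of the current get area by switching to, or growing, the
   backup buffer.  */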
int
_IO_default_pbackfail (fp, c)
     _IO_FILE *fp;
     int c;
{
  if (fp->_IO_read_ptr > fp->_IO_read_base && !_IO_in_backup (fp)
      && (unsigned char) fp->_IO_read_ptr[-1] == c)
    --fp->_IO_read_ptr;
  else
    {
      /* Need to handle a filebuf in write mode (switch to read mode).  FIXME!  */
      if (!_IO_in_backup (fp))
        {
          /* We need to keep the invariant that the main get area
             logically follows the backup area.  */
          if (fp->_IO_read_ptr > fp->_IO_read_base && _IO_have_backup (fp))
            {
              if (save_for_backup (fp, fp->_IO_read_ptr))
                return EOF;
            }
          else if (!_IO_have_backup (fp))
            {
              /* No backup buffer: allocate one.  */
              /* Use nshort buffer, if unused? (probably not)  FIXME */
              int backup_size = 128;
              char *bbuf = (char *) malloc (backup_size);
              if (bbuf == NULL)
                return EOF;
              fp->_IO_save_base = bbuf;
              fp->_IO_save_end = fp->_IO_save_base + backup_size;
              fp->_IO_backup_base = fp->_IO_save_end;
            }
          fp->_IO_read_base = fp->_IO_read_ptr;
          _IO_switch_to_backup_area (fp);
        }
      else if (fp->_IO_read_ptr <= fp->_IO_read_base)
        {
          /* Increase size of existing backup buffer.  */
          _IO_size_t new_size;
          _IO_size_t old_size = fp->_IO_read_end - fp->_IO_read_base;
          char *new_buf;
          new_size = 2 * old_size;
          new_buf = (char *) malloc (new_size);
          if (new_buf == NULL)
            return EOF;
          memcpy (new_buf + (new_size - old_size), fp->_IO_read_base,
                  old_size);
          free (fp->_IO_read_base);
          _IO_setg (fp, new_buf, new_buf + (new_size - old_size),
                    new_buf + new_size);
          fp->_IO_backup_base = fp->_IO_read_ptr;
        }

      *--fp->_IO_read_ptr = c;
    }
  return (unsigned char) c;
}
_IO_off64_t
_IO_default_seek (fp, offset, dir)
     _IO_FILE *fp;
     _IO_off64_t offset;
     int dir;
{
  return _IO_pos_BAD;
}
int
_IO_default_stat (fp, st)
     _IO_FILE *fp;
     void *st;
{
  return EOF;
}
_IO_ssize_t
_IO_default_read (fp, data, n)
     _IO_FILE *fp;
     void *data;
     _IO_ssize_t n;
{
  return -1;
}
_IO_ssize_t
_IO_default_write (fp, data, n)
     _IO_FILE *fp;
     const void *data;
     _IO_ssize_t n;
{
  return 0;
}
int
_IO_default_showmanyc (fp)
     _IO_FILE *fp;
{
  return -1;
}
void
_IO_default_imbue (fp, locale)
     _IO_FILE *fp;
     void *locale;
{
}
_IO_ITER
_IO_iter_begin ()
{
  return (_IO_ITER) _IO_list_all;
}

_IO_ITER
_IO_iter_next (iter)
     _IO_ITER iter;
{
  return iter->_chain;
}
void
_IO_list_lock ()
{
#ifdef _IO_MTSAFE_IO
  _IO_lock_lock (list_all_lock);
#endif
}

void
_IO_list_unlock ()
{
#ifdef _IO_MTSAFE_IO
  _IO_lock_unlock (list_all_lock);
#endif
}

void
_IO_list_resetlock ()
{
#ifdef _IO_MTSAFE_IO
  _IO_lock_init (list_all_lock);
#endif
}
#ifdef TODO
#if defined(linux)
#define IO_CLEANUP ;
#endif

#ifdef IO_CLEANUP
  IO_CLEANUP
#else
struct __io_defs {
    __io_defs() { }
    ~__io_defs() { _IO_cleanup (); }
};
__io_defs io_defs__;
#endif
#endif /* TODO */
weak_alias (_IO_cleanup, _cleanup)
#ifdef text_set_element
text_set_element(__libc_atexit, _cleanup);
#endif
);