/* Copyright (C) 1993, 1995, 1997, 1998 Free Software Foundation, Inc.
   This file is part of the GNU IO Library.

   This library is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License as
   published by the Free Software Foundation; either version 2, or (at
   your option) any later version.

   This library is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this library; see the file COPYING.  If not, write to
   the Free Software Foundation, 59 Temple Place - Suite 330, Boston,
   MA 02111-1307, USA.

   As a special exception, if you link this library with files
   compiled with a GNU compiler to produce an executable, this does
   not cause the resulting executable to be covered by the GNU General
   Public License.  This exception does not however invalidate any
   other reasons why the executable file might be covered by the GNU
   General Public License.  */
/* Generic or default I/O operations. */
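/* Remove FP from the linked list of all open streams (_IO_list_all),
   if it is currently on that list.  */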
  if (fp->_flags & _IO_LINKED)
    {
      for (f = &_IO_list_all; *f != NULL; f = &(*f)->_chain)
        {
          if (*f == fp)
            {
              *f = fp->_chain;
              break;
            }
        }
      fp->_flags &= ~_IO_LINKED;
    }
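/* Insert FP at the head of _IO_list_all, unless it is already linked in.  */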
  if ((fp->_flags & _IO_LINKED) == 0)
    {
      fp->_flags |= _IO_LINKED;
      fp->_chain = _IO_list_all;
      _IO_list_all = fp;
    }
/* Return the minimum _pos of FP's markers, never more than the length of
   the main get area.  Assumes the current get area is the main get area.  */
static _IO_size_t _IO_least_marker __P ((_IO_FILE *fp));
  _IO_ssize_t least_so_far = fp->_IO_read_end - fp->_IO_read_base;
  struct _IO_marker *mark;
  for (mark = fp->_markers; mark != NULL; mark = mark->_next)
    if (mark->_pos < least_so_far)
      least_so_far = mark->_pos;
  return least_so_far;
/* Switch current get area from backup buffer to (start of) main get area.  */

_IO_switch_to_main_get_area (fp)
{
  char *tmp;
  fp->_flags &= ~_IO_IN_BACKUP;
  /* Swap _IO_read_end and _IO_save_end. */
  tmp = fp->_IO_read_end;
  fp->_IO_read_end = fp->_IO_save_end;
  fp->_IO_save_end = tmp;
  /* Swap _IO_read_base and _IO_save_base. */
  tmp = fp->_IO_read_base;
  fp->_IO_read_base = fp->_IO_save_base;
  fp->_IO_save_base = tmp;
  /* Swap _IO_read_ptr and _IO_save_ptr. */
  tmp = fp->_IO_read_ptr;
  fp->_IO_read_ptr = fp->_IO_save_ptr;
  fp->_IO_save_ptr = tmp;
}
/* Switch current get area from main get area to (end of) backup area.  */

_IO_switch_to_backup_area (fp)
{
  char *tmp;
  fp->_flags |= _IO_IN_BACKUP;
  /* Swap _IO_read_end and _IO_save_end. */
  tmp = fp->_IO_read_end;
  fp->_IO_read_end = fp->_IO_save_end;
  fp->_IO_save_end = tmp;
  /* Swap _IO_read_base and _IO_save_base. */
  tmp = fp->_IO_read_base;
  fp->_IO_read_base = fp->_IO_save_base;
  fp->_IO_save_base = tmp;
  /* Save _IO_read_ptr and point it at the end of the backup area. */
  fp->_IO_save_ptr = fp->_IO_read_ptr;
  fp->_IO_read_ptr = fp->_IO_read_end;
}
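/* Streams are either in "get" (reading) or "put" (writing) mode.  The code
   below switches FP to get mode: any pending output is flushed through the
   overflow method, the get-area pointers are adjusted to cover the buffered
   data, and the put area is collapsed to empty.  */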
_IO_switch_to_get_mode (fp)
{
  if (fp->_IO_write_ptr > fp->_IO_write_base)
    if (_IO_OVERFLOW (fp, EOF) == EOF)
      return EOF;
  if (_IO_in_backup (fp))
    fp->_IO_read_base = fp->_IO_backup_base;
  else
    {
      fp->_IO_read_base = fp->_IO_buf_base;
      if (fp->_IO_write_ptr > fp->_IO_read_end)
        fp->_IO_read_end = fp->_IO_write_ptr;
    }
  fp->_IO_read_ptr = fp->_IO_write_ptr;

  fp->_IO_write_base = fp->_IO_write_ptr = fp->_IO_write_end = fp->_IO_read_ptr;

  fp->_flags &= ~_IO_CURRENTLY_PUTTING;
  return 0;
}
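/* Discard the backup (putback) buffer, switching back to the main get area
   first if it is currently active.  */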
_IO_free_backup_area (fp)
{
  if (_IO_in_backup (fp))
    _IO_switch_to_main_get_area (fp);  /* Just in case. */
  free (fp->_IO_save_base);
  fp->_IO_save_base = NULL;
  fp->_IO_save_end = NULL;
  fp->_IO_backup_base = NULL;
}
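/* Switch FP to put mode: the put area starts where reading stopped, and the
   get area is marked empty.  */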
_IO_switch_to_put_mode (fp)
{
  fp->_IO_write_base = fp->_IO_read_ptr;
  fp->_IO_write_ptr = fp->_IO_read_ptr;
  /* Following is wrong if line- or un-buffered? */
  fp->_IO_write_end = (fp->_flags & _IO_IN_BACKUP
                       ? fp->_IO_read_end : fp->_IO_buf_end);

  fp->_IO_read_ptr = fp->_IO_read_end;
  fp->_IO_read_base = fp->_IO_read_end;

  fp->_flags |= _IO_CURRENTLY_PUTTING;
}
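/* __overflow simply dispatches to the stream's overflow method.  */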
  return _IO_OVERFLOW (f, ch);
static int save_for_backup __P ((_IO_FILE *fp));
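/* Before the get area is refilled, copy the data that active markers may
   still need (everything from the least marker position up to _IO_read_end)
   into the backup area, growing or reallocating that area as necessary.  */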
  /* Append [_IO_read_base.._IO_read_end] to backup area. */
  int least_mark = _IO_least_marker (fp);
  /* needed_size is how much space we need in the backup area. */
  int needed_size = (fp->_IO_read_end - fp->_IO_read_base) - least_mark;
  int current_Bsize = fp->_IO_save_end - fp->_IO_save_base;
  int avail;  /* Extra space available for future expansion. */
  int delta;
  struct _IO_marker *mark;
  if (needed_size > current_Bsize)
    {
      char *new_buffer;
      avail = 100;
      new_buffer = (char *) malloc (avail + needed_size);
      if (new_buffer == NULL)
        return EOF;  /* FIXME */
      if (least_mark < 0)
        {
#ifdef _LIBC
          __mempcpy (__mempcpy (new_buffer + avail,
                                fp->_IO_save_end + least_mark,
                                -least_mark),
                     fp->_IO_read_base,
                     fp->_IO_read_end - fp->_IO_read_base);
#else
          memcpy (new_buffer + avail,
                  fp->_IO_save_end + least_mark,
                  -least_mark);
          memcpy (new_buffer + avail - least_mark,
                  fp->_IO_read_base,
                  fp->_IO_read_end - fp->_IO_read_base);
#endif
        }
      else
        memcpy (new_buffer + avail,
                fp->_IO_read_base + least_mark,
                needed_size);
      if (fp->_IO_save_base)
        free (fp->_IO_save_base);
      fp->_IO_save_base = new_buffer;
      fp->_IO_save_end = new_buffer + avail + needed_size;
    }
  else
    {
      avail = current_Bsize - needed_size;
      if (least_mark < 0)
        {
          memmove (fp->_IO_save_base + avail,
                   fp->_IO_save_end + least_mark,
                   -least_mark);
          memcpy (fp->_IO_save_base + avail - least_mark,
                  fp->_IO_read_base,
                  fp->_IO_read_end - fp->_IO_read_base);
        }
      else if (needed_size > 0)
        memcpy (fp->_IO_save_base + avail,
                fp->_IO_read_base + least_mark,
                needed_size);
    }
  /* FIXME: Dubious arithmetic if pointers are NULL */
  fp->_IO_backup_base = fp->_IO_save_base + avail;
  /* Adjust all the streammarkers. */
  delta = fp->_IO_read_end - fp->_IO_read_base;
  for (mark = fp->_markers; mark != NULL; mark = mark->_next)
    mark->_pos -= delta;
  return 0;
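/* __underflow: make input available and return the next character without
   consuming it, or EOF.  Pending output is flushed first; if the stream is
   reading from the backup area it switches back to the main get area; if
   markers are active the current get area is saved for backup, otherwise a
   stale backup area is freed; finally the stream's own underflow method
   refills the buffer.  */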
  if (_IO_in_put_mode (fp))
    if (_IO_switch_to_get_mode (fp) == EOF)
      return EOF;
  if (fp->_IO_read_ptr < fp->_IO_read_end)
    return *(unsigned char *) fp->_IO_read_ptr;
  if (_IO_in_backup (fp))
    {
      _IO_switch_to_main_get_area (fp);
      if (fp->_IO_read_ptr < fp->_IO_read_end)
        return *(unsigned char *) fp->_IO_read_ptr;
    }
  if (_IO_have_markers (fp))
    {
      if (save_for_backup (fp))
        return EOF;
    }
  else if (_IO_have_backup (fp))
    _IO_free_backup_area (fp);
  return _IO_UNDERFLOW (fp);
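/* __uflow: same as __underflow above, but the returned character is also
   consumed (the read pointer advances past it).  */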
  if (_IO_in_put_mode (fp))
    if (_IO_switch_to_get_mode (fp) == EOF)
      return EOF;
  if (fp->_IO_read_ptr < fp->_IO_read_end)
    return *(unsigned char *) fp->_IO_read_ptr++;
  if (_IO_in_backup (fp))
    {
      _IO_switch_to_main_get_area (fp);
      if (fp->_IO_read_ptr < fp->_IO_read_end)
        return *(unsigned char *) fp->_IO_read_ptr++;
    }
  if (_IO_have_markers (fp))
    {
      if (save_for_backup (fp))
        return EOF;
    }
  else if (_IO_have_backup (fp))
    _IO_free_backup_area (fp);
  return _IO_UFLOW (fp);
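/* Install the buffer B..EB for stream F.  A non-zero A means the library
   owns the buffer (and may free it later); otherwise it is treated as a
   user-supplied buffer.  Any previously installed library-owned buffer is
   freed.  */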
_IO_setb (f, b, eb, a)
{
  if (f->_IO_buf_base && !(f->_flags & _IO_USER_BUF))
    FREE_BUF (f->_IO_buf_base, _IO_blen (f));
  f->_IO_buf_base = b;
  f->_IO_buf_end = eb;
  if (a)
    f->_flags &= ~_IO_USER_BUF;
  else
    f->_flags |= _IO_USER_BUF;
}
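/* Make sure FP has a buffer: if none is present, ask the stream's
   doallocate method for one, falling back to the one-byte _shortbuf for
   unbuffered streams or on allocation failure.  */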
  if (fp->_IO_buf_base)
    return;
  if (!(fp->_flags & _IO_UNBUFFERED))
    if (_IO_DOALLOCATE (fp) != EOF)
      return;
  _IO_setb (fp, fp->_shortbuf, fp->_shortbuf+1, 0);
_IO_default_underflow (fp)
_IO_default_uflow (fp)
{
  int ch = _IO_UNDERFLOW (fp);
  if (ch == EOF)
    return EOF;
  return *(unsigned char *) fp->_IO_read_ptr++;
}
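/* Default xsputn: write N bytes from DATA, copying into the put area in
   bulk (memcpy for long runs, a byte loop for short ones) and calling
   __overflow whenever the buffer fills.  Returns the number of bytes
   actually written.  */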
_IO_default_xsputn (f, data, n)
{
  const char *s = (char *) data;
  _IO_size_t more = n;
  if (more <= 0)
    return 0;
  for (;;)
    {
      /* Space available. */
      _IO_ssize_t count = f->_IO_write_end - f->_IO_write_ptr;
      if (count > 0)
        {
          if ((_IO_size_t) count > more)
            count = more;
          if (count > 20)
            {
#ifdef _LIBC
              f->_IO_write_ptr = __mempcpy (f->_IO_write_ptr, s, count);
#else
              memcpy (f->_IO_write_ptr, s, count);
              f->_IO_write_ptr += count;
#endif
              s += count;
            }
          else
            {
              char *p = f->_IO_write_ptr;
              _IO_ssize_t i;
              for (i = count; --i >= 0; )
                *p++ = *s++;
              f->_IO_write_ptr = p;
            }
          more -= count;
        }
      if (more == 0 || __overflow (f, (unsigned char) *s++) == EOF)
        break;
      more--;
    }
  return n - more;
}
_IO_sgetn (fp, data, n)
{
  /* FIXME handle putback buffer here! */
  return _IO_XSGETN (fp, data, n);
}
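/* Default xsgetn: read up to N bytes into DATA from the get area, calling
   __underflow to refill it as needed; returns the number of bytes read.  */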
_IO_default_xsgetn (fp, data, n)
{
  char *s = (char *) data;
  _IO_size_t more = n;
  for (;;)
    {
      /* Data available. */
      _IO_ssize_t count = fp->_IO_read_end - fp->_IO_read_ptr;
      if (count > 0)
        {
          if ((_IO_size_t) count > more)
            count = more;
          if (count > 20)
            {
#ifdef _LIBC
              s = __mempcpy (s, fp->_IO_read_ptr, count);
#else
              memcpy (s, fp->_IO_read_ptr, count);
              s += count;
#endif
              fp->_IO_read_ptr += count;
            }
          else
            {
              char *p = fp->_IO_read_ptr;
              _IO_ssize_t i;
              for (i = count; --i >= 0; )
                *s++ = *p++;
              fp->_IO_read_ptr = p;
            }
          more -= count;
        }
      if (more == 0 || __underflow (fp) == EOF)
        break;
    }
  return n - more;
}
/* Seems not to be needed. --drepper */
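/* Default setbuf: flush the stream, then either install the user-supplied
   buffer P of length LEN or, if P is NULL or LEN is 0, switch the stream to
   unbuffered mode using _shortbuf.  */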
_IO_default_setbuf (fp, p, len)
{
  if (_IO_SYNC (fp) == EOF)
    return NULL;
  if (p == NULL || len == 0)
    {
      fp->_flags |= _IO_UNBUFFERED;
      _IO_setb (fp, fp->_shortbuf, fp->_shortbuf+1, 0);
    }
  else
    {
      fp->_flags &= ~_IO_UNBUFFERED;
      _IO_setb (fp, p, p+len, 0);
    }
  fp->_IO_write_base = fp->_IO_write_ptr = fp->_IO_write_end = 0;
  fp->_IO_read_base = fp->_IO_read_ptr = fp->_IO_read_end = 0;
  return fp;
}
_IO_default_seekpos (fp, pos, mode)
{
  return _IO_SEEKOFF (fp, _IO_pos_as_off (pos), 0, mode);
}
_IO_default_doallocate (fp)
{
  char *buf;

  ALLOC_BUF (buf, _IO_BUFSIZ, EOF);
  _IO_setb (fp, buf, buf+_IO_BUFSIZ, 1);
  return 1;
}
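/* Initialize a freshly allocated _IO_FILE: set the magic number and FLAGS,
   clear every buffer pointer and the backup area, and initialize the lock.  */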
  fp->_flags = _IO_MAGIC|flags;
  fp->_IO_buf_base = NULL;
  fp->_IO_buf_end = NULL;
  fp->_IO_read_base = NULL;
  fp->_IO_read_ptr = NULL;
  fp->_IO_read_end = NULL;
  fp->_IO_write_base = NULL;
  fp->_IO_write_ptr = NULL;
  fp->_IO_write_end = NULL;
  fp->_chain = NULL;  /* Not necessary. */

  fp->_IO_save_base = NULL;
  fp->_IO_backup_base = NULL;
  fp->_IO_save_end = NULL;

  fp->_vtable_offset = 0;

  _IO_lock_init (*fp->_lock);
_IO_default_sync (fp)
/* The way the C++ classes are mapped into the C functions in the
   current implementation, this function can get called twice!  */
_IO_default_finish (fp, dummy)
{
  struct _IO_marker *mark;
  if (fp->_IO_buf_base && !(fp->_flags & _IO_USER_BUF))
    {
      FREE_BUF (fp->_IO_buf_base, _IO_blen (fp));
      fp->_IO_buf_base = fp->_IO_buf_end = NULL;
    }

  for (mark = fp->_markers; mark != NULL; mark = mark->_next)
    mark->_sbuf = NULL;

  if (fp->_IO_save_base)
    {
      free (fp->_IO_save_base);
      fp->_IO_save_base = NULL;
    }

  _IO_lock_fini (*fp->_lock);

  _IO_un_link (fp);
}
_IO_default_seekoff (fp, offset, dir, mode)
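/* Push C back onto FP's input.  If C is simply the last character read, the
   read pointer is backed up over it; otherwise the stream's pbackfail method
   has to make room for it.  A successful putback clears the EOF flag.  */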
_IO_sputbackc (fp, c)
{
  int result;

  if (fp->_IO_read_ptr > fp->_IO_read_base
      && (unsigned char)fp->_IO_read_ptr[-1] == (unsigned char)c)
    {
      fp->_IO_read_ptr--;
      result = (unsigned char) c;
    }
  else
    result = _IO_PBACKFAIL (fp, c);

  if (result != EOF)
    fp->_flags &= ~_IO_EOF_SEEN;

  return result;
}
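/* Back up over the last character read (unget), falling back to the
   pbackfail method when the read pointer is already at the start of the
   get area.  */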
  if (fp->_IO_read_ptr > fp->_IO_read_base)
    {
      fp->_IO_read_ptr--;
      result = (unsigned char) *fp->_IO_read_ptr;
    }
  else
    result = _IO_PBACKFAIL (fp, EOF);

  if (result != EOF)
    fp->_flags &= ~_IO_EOF_SEEN;

  return result;
#if 0 /* Work in progress */
/* Seems not to be needed. */

_IO_set_column (fp, c)
{
  fp->_column = c - (fp->_IO_write_ptr - fp->_IO_write_base);
}

_IO_set_column (fp, i)
{
  fp->_cur_column = i + 1;
}
#endif
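/* Compute the output column after writing the COUNT characters at LINE,
   given that the column before was START: if the text contains a newline the
   column restarts after the last one, otherwise it simply advances by COUNT.  */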
_IO_adjust_column (start, line, count)
{
  const char *ptr = line + count;
  while (ptr > line)
    if (*--ptr == '\n')
      return line + count - ptr - 1;
  return start + count;
}
/* Seems not to be needed. --drepper */

  return _IO_adjust_column (fp->_cur_column - 1,
                            fp->_IO_write_base,
                            fp->_IO_write_ptr - fp->_IO_write_base);
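/* Flush every stream on the _IO_list_all chain that has pending output;
   the result is EOF if any individual flush fails.  */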
  for (fp = _IO_list_all; fp != NULL; fp = fp->_chain)
    if (fp->_IO_write_ptr > fp->_IO_write_base
        && _IO_OVERFLOW (fp, EOF) == EOF)
      result = EOF;
  return result;
_IO_flush_all_linebuffered ()
{
  for (fp = _IO_list_all; fp != NULL; fp = fp->_chain)
    if ((fp->_flags & _IO_NO_WRITES) == 0 && fp->_flags & _IO_LINE_BUF)
      _IO_OVERFLOW (fp, EOF);
}
static void _IO_unbuffer_all __P ((void));
  for (fp = _IO_list_all; fp != NULL; fp = fp->_chain)
    if (! (fp->_flags & _IO_UNBUFFERED))
      _IO_SETBUF (fp, NULL, 0);
  int result = _IO_flush_all ();

  /* We currently don't have a reliable mechanism for making sure that
     C++ static destructors are executed in the correct order.
     So it is possible that other static destructors might want to
     write to cout - and they're supposed to be able to do so.

     The following will make the standard streambufs be unbuffered,
     which forces any output from late destructors to be written out. */
  _IO_unbuffer_all ();

  return result;
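/* Record the current read position of FP in MARKER and link the marker into
   FP's marker chain, so the position survives later refills of the buffer.  */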
_IO_init_marker (marker, fp)
     struct _IO_marker *marker;
     _IO_FILE *fp;
{
  marker->_sbuf = fp;
  if (_IO_in_put_mode (fp))
    _IO_switch_to_get_mode (fp);

  if (_IO_in_backup (fp))
    marker->_pos = fp->_IO_read_ptr - fp->_IO_read_end;
  else
    marker->_pos = fp->_IO_read_ptr - fp->_IO_read_base;

  /* Should perhaps sort the chain? */
  marker->_next = fp->_markers;
  fp->_markers = marker;
}
_IO_remove_marker (marker)
     struct _IO_marker *marker;
{
  /* Unlink from sb's chain. */
  struct _IO_marker **ptr = &marker->_sbuf->_markers;
  for (; ; ptr = &(*ptr)->_next)
    {
      if (*ptr == NULL)
        break;
      else if (*ptr == marker)
        {
          *ptr = marker->_next;
          return;
        }
    }
}
/* If _sbuf has a backup area that is no longer needed, should we delete
   it now, or wait until the next underflow?  */
#define BAD_DELTA EOF
_IO_marker_difference (mark1, mark2)
     struct _IO_marker *mark1;
     struct _IO_marker *mark2;
{
  return mark1->_pos - mark2->_pos;
}
/* Return difference between MARK and current position of MARK's stream. */
_IO_marker_delta (mark)
     struct _IO_marker *mark;
{
  int cur_pos;
  if (mark->_sbuf == NULL)
    return BAD_DELTA;
  if (_IO_in_backup (mark->_sbuf))
    cur_pos = mark->_sbuf->_IO_read_ptr - mark->_sbuf->_IO_read_end;
  else
    cur_pos = mark->_sbuf->_IO_read_ptr - mark->_sbuf->_IO_read_base;
  return mark->_pos - cur_pos;
}
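/* Reposition FP's read pointer to the location recorded in MARK, switching
   between the main get area and the backup area as the sign of the saved
   position requires.  Returns 0 on success, EOF if MARK belongs to another
   stream.  */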
_IO_seekmark (fp, mark, delta)
     struct _IO_marker *mark;
{
  if (mark->_sbuf != fp)
    return EOF;
  if (mark->_pos >= 0)
    {
      if (_IO_in_backup (fp))
        _IO_switch_to_main_get_area (fp);
      fp->_IO_read_ptr = fp->_IO_read_base + mark->_pos;
    }
  else
    {
      if (!_IO_in_backup (fp))
        _IO_switch_to_backup_area (fp);
      fp->_IO_read_ptr = fp->_IO_read_end + mark->_pos;
    }
  return 0;
}
_IO_unsave_markers (fp)
{
  struct _IO_marker *mark = fp->_markers;
  if (mark)
    {
#ifdef TODO
      streampos offset = seekoff (0, ios::cur, ios::in);
      if (offset != EOF)
        {
          offset += eGptr () - Gbase ();
          for ( ; mark != NULL; mark = mark->_next)
            mark->set_streampos (mark->_pos + offset);
        }
      else
        {
          for ( ; mark != NULL; mark = mark->_next)
            mark->set_streampos (EOF);
        }
#endif
      fp->_markers = 0;
    }

  if (_IO_have_backup (fp))
    _IO_free_backup_area (fp);
}
/* Seems not to be needed. --drepper */
_IO_nobackup_pbackfail (fp, c)
{
  if (fp->_IO_read_ptr > fp->_IO_read_base)
    fp->_IO_read_ptr--;
  if (c != EOF && *fp->_IO_read_ptr != c)
    *fp->_IO_read_ptr = c;
  return (unsigned char) c;
}
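/* Default pbackfail: called when a character cannot simply be pushed back
   into the current get area.  It switches to (or allocates, or enlarges) the
   backup buffer and stores the character there.  */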
_IO_default_pbackfail (fp, c)
{
  if (fp->_IO_read_ptr > fp->_IO_read_base && !_IO_in_backup (fp)
      && (unsigned char) fp->_IO_read_ptr[-1] == c)
    --fp->_IO_read_ptr;
  else
    {
      /* Need to handle a filebuf in write mode (switch to read mode). FIXME!*/
      if (_IO_have_backup (fp) && !_IO_in_backup (fp))
        _IO_switch_to_backup_area (fp);

      if (!_IO_have_backup (fp))
        {
          /* No backup buffer: allocate one. */
          /* Use nshort buffer, if unused? (probably not)  FIXME */
          int backup_size = 128;
          char *bbuf = (char *) malloc (backup_size);
          if (bbuf == NULL)
            return EOF;
          fp->_IO_save_base = bbuf;
          fp->_IO_save_end = fp->_IO_save_base + backup_size;
          fp->_IO_backup_base = fp->_IO_save_end;
          _IO_switch_to_backup_area (fp);
        }
      else if (fp->_IO_read_ptr <= fp->_IO_read_base)
        {
          /* Increase size of existing backup buffer. */
          _IO_size_t new_size;
          _IO_size_t old_size = fp->_IO_read_end - fp->_IO_read_base;
          char *new_buf;
          new_size = 2 * old_size;
          new_buf = (char *) malloc (new_size);
          if (new_buf == NULL)
            return EOF;
          memcpy (new_buf + (new_size - old_size), fp->_IO_read_base,
                  old_size);
          free (fp->_IO_read_base);
          _IO_setg (fp, new_buf, new_buf + (new_size - old_size),
                    new_buf + new_size);
          fp->_IO_backup_base = fp->_IO_read_ptr;
        }

      *--fp->_IO_read_ptr = c;
    }
  return (unsigned char) c;
}
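/* The remaining _IO_default_* entries are placeholders for operations a
   stream type does not support; they report failure or do nothing.  */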
_IO_default_seek (fp, offset, dir)

_IO_default_stat (fp, st)

_IO_default_read (fp, data, n)

_IO_default_write (fp, data, n)

_IO_default_showmanyc (fp)

_IO_default_imbue (fp, locale)
~__io_defs() { _IO_cleanup (); }

weak_alias (_IO_cleanup, _cleanup)

#ifdef text_set_element
text_set_element(__libc_atexit, _cleanup);
#endif