2 The io module provides the Python interfaces to stream handling. The
3 builtin open function is defined in this module.
5 At the top of the I/O hierarchy is the abstract base class IOBase. It
6 defines the basic interface to a stream. Note, however, that there is no
7 separation between reading and writing to streams; implementations are
8 allowed to throw an IOError if they do not support a given operation.
10 Extending IOBase is RawIOBase which deals simply with the reading and
11 writing of raw bytes to a stream. FileIO subclasses RawIOBase to provide
12 an interface to OS files.
14 BufferedIOBase deals with buffering on a raw byte stream (RawIOBase). Its
15 subclasses, BufferedWriter, BufferedReader, and BufferedRWPair buffer
16 streams that are readable, writable, and both respectively.
17 BufferedRandom provides a buffered interface to random access
18 streams. BytesIO is a simple stream of in-memory bytes.
20 Another IOBase subclass, TextIOBase, deals with the encoding and decoding
21 of streams into text. TextIOWrapper, which extends it, is a buffered text
22 interface to a buffered raw stream (`BufferedIOBase`). Finally, StringIO
23 is an in-memory stream for text.
25 Argument names are not part of the specification, and only the arguments
26 of open() are intended to be used as keyword arguments.
32 An int containing the default buffer size used by the module's buffered
33 I/O classes. open() uses the file's blksize (as obtained by os.stat) if
36 # New I/O library conforming to PEP 3116.
38 # This is a prototype; hopefully eventually some of this will be
41 # XXX edge cases when switching between reading/writing
42 # XXX need to support 1 meaning line-buffered
43 # XXX whenever an argument is None, use the default value
44 # XXX read/write ops should check readable/writable
45 # XXX buffered readinto should work with arbitrary buffer objects
46 # XXX use incremental encoder for text output, at least for UTF-16 and UTF-8-SIG
47 # XXX check writable, readable and seekable in appropriate places
48 from __future__
import print_function
49 from __future__
import unicode_literals
51 __author__
= ("Guido van Rossum <guido@python.org>, "
52 "Mike Verdone <mike.verdone@gmail.com>, "
53 "Mark Russell <mark.russell@zen.co.uk>")
55 __all__
= ["BlockingIOError", "open", "IOBase", "RawIOBase", "FileIO",
56 "BytesIO", "StringIO", "BufferedIOBase",
57 "BufferedReader", "BufferedWriter", "BufferedRWPair",
58 "BufferedRandom", "TextIOBase", "TextIOWrapper"]
# Default chunk size (in bytes) for the module's buffered I/O classes.
# open() prefers the file's st_blksize (via os.fstat) when it is available.
DEFAULT_BUFFER_SIZE = 8192  # == 8 * 1024 bytes
69 # py3k has only new style classes
class BlockingIOError(IOError):

    """Exception raised when I/O would block on a non-blocking I/O stream."""

    def __init__(self, errno, strerror, characters_written=0):
        # Keep IOError's ordinary (errno, strerror) initialization intact.
        super(BlockingIOError, self).__init__(errno, strerror)
        # How much data was successfully written before the stream would
        # have blocked; buffered writers use this to trim their buffers.
        self.characters_written = characters_written
81 def open(file, mode
="r", buffering
=None, encoding
=None, errors
=None,
82 newline
=None, closefd
=True):
83 r
"""Open file and return a stream. If the file cannot be opened, an IOError is
86 file is either a string giving the name (and the path if the file
87 isn't in the current working directory) of the file to be opened or an
88 integer file descriptor of the file to be wrapped. (If a file
89 descriptor is given, it is closed when the returned I/O object is
90 closed, unless closefd is set to False.)
92 mode is an optional string that specifies the mode in which the file
93 is opened. It defaults to 'r' which means open for reading in text
94 mode. Other common values are 'w' for writing (truncating the file if
95 it already exists), and 'a' for appending (which on some Unix systems,
96 means that all writes append to the end of the file regardless of the
97 current seek position). In text mode, if encoding is not specified the
98 encoding used is platform dependent. (For reading and writing raw
99 bytes use binary mode and leave encoding unspecified.) The available
102 ========= ===============================================================
104 --------- ---------------------------------------------------------------
105 'r' open for reading (default)
106 'w' open for writing, truncating the file first
107 'a' open for writing, appending to the end of the file if it exists
109 't' text mode (default)
110 '+' open a disk file for updating (reading and writing)
111 'U' universal newline mode (for backwards compatibility; unneeded
113 ========= ===============================================================
115 The default mode is 'rt' (open for reading text). For binary random
116 access, the mode 'w+b' opens and truncates the file to 0 bytes, while
117 'r+b' opens the file without truncation.
119 Python distinguishes between files opened in binary and text modes,
120 even when the underlying operating system doesn't. Files opened in
121 binary mode (appending 'b' to the mode argument) return contents as
122 bytes objects without any decoding. In text mode (the default, or when
123 't' is appended to the mode argument), the contents of the file are
124 returned as strings, the bytes having been first decoded using a
125 platform-dependent encoding or using the specified encoding if given.
127 buffering is an optional integer used to set the buffering policy. By
128 default full buffering is on. Pass 0 to switch buffering off (only
129 allowed in binary mode), 1 to set line buffering, and an integer > 1
132 encoding is the name of the encoding used to decode or encode the
133 file. This should only be used in text mode. The default encoding is
134 platform dependent, but any encoding supported by Python can be
135 passed. See the codecs module for the list of supported encodings.
137 errors is an optional string that specifies how encoding errors are to
138 be handled---this argument should not be used in binary mode. Pass
139 'strict' to raise a ValueError exception if there is an encoding error
140 (the default of None has the same effect), or pass 'ignore' to ignore
141 errors. (Note that ignoring encoding errors can lead to data loss.)
142 See the documentation for codecs.register for a list of the permitted
143 encoding error strings.
145 newline controls how universal newlines works (it only applies to text
146 mode). It can be None, '', '\n', '\r', and '\r\n'. It works as
149 * On input, if newline is None, universal newlines mode is
150 enabled. Lines in the input can end in '\n', '\r', or '\r\n', and
151 these are translated into '\n' before being returned to the
152 caller. If it is '', universal newline mode is enabled, but line
153 endings are returned to the caller untranslated. If it has any of
154 the other legal values, input lines are only terminated by the given
155 string, and the line ending is returned to the caller untranslated.
157 * On output, if newline is None, any '\n' characters written are
158 translated to the system default line separator, os.linesep. If
159 newline is '', no translation takes place. If newline is any of the
160 other legal values, any '\n' characters written are translated to
163 If closefd is False, the underlying file descriptor will be kept open
164 when the file is closed. This does not work when a file name is given
165 and must be True in that case.
167 open() returns a file object whose type depends on the mode, and
168 through which the standard file operations such as reading and writing
169 are performed. When open() is used to open a file in a text mode ('w',
170 'r', 'wt', 'rt', etc.), it returns a TextIOWrapper. When used to open
171 a file in a binary mode, the returned class varies: in read binary
172 mode, it returns a BufferedReader; in write binary and append binary
173 modes, it returns a BufferedWriter, and in read/write mode, it returns
176 It is also possible to use a string or bytearray as a file for both
177 reading and writing. For strings StringIO can be used like a file
178 opened in a text mode, and for bytes a BytesIO can be used like a file
179 opened in a binary mode.
181 if not isinstance(file, (basestring
, int)):
182 raise TypeError("invalid file: %r" % file)
183 if not isinstance(mode
, basestring
):
184 raise TypeError("invalid mode: %r" % mode
)
185 if buffering
is not None and not isinstance(buffering
, int):
186 raise TypeError("invalid buffering: %r" % buffering
)
187 if encoding
is not None and not isinstance(encoding
, basestring
):
188 raise TypeError("invalid encoding: %r" % encoding
)
189 if errors
is not None and not isinstance(errors
, basestring
):
190 raise TypeError("invalid errors: %r" % errors
)
192 if modes
- set("arwb+tU") or len(mode
) > len(modes
):
193 raise ValueError("invalid mode: %r" % mode
)
194 reading
= "r" in modes
195 writing
= "w" in modes
196 appending
= "a" in modes
197 updating
= "+" in modes
199 binary
= "b" in modes
201 if writing
or appending
:
202 raise ValueError("can't use U and writing mode at once")
205 raise ValueError("can't have text and binary mode at once")
206 if reading
+ writing
+ appending
> 1:
207 raise ValueError("can't have read/write/append mode at once")
208 if not (reading
or writing
or appending
):
209 raise ValueError("must have exactly one of read/write/append mode")
210 if binary
and encoding
is not None:
211 raise ValueError("binary mode doesn't take an encoding argument")
212 if binary
and errors
is not None:
213 raise ValueError("binary mode doesn't take an errors argument")
214 if binary
and newline
is not None:
215 raise ValueError("binary mode doesn't take a newline argument")
217 (reading
and "r" or "") +
218 (writing
and "w" or "") +
219 (appending
and "a" or "") +
220 (updating
and "+" or ""),
222 if buffering
is None:
224 line_buffering
= False
225 if buffering
== 1 or buffering
< 0 and raw
.isatty():
227 line_buffering
= True
229 buffering
= DEFAULT_BUFFER_SIZE
231 bs
= os
.fstat(raw
.fileno()).st_blksize
232 except (os
.error
, AttributeError):
238 raise ValueError("invalid buffering size")
242 raise ValueError("can't have unbuffered text I/O")
244 buffer = BufferedRandom(raw
, buffering
)
245 elif writing
or appending
:
246 buffer = BufferedWriter(raw
, buffering
)
248 buffer = BufferedReader(raw
, buffering
)
250 raise ValueError("unknown mode: %r" % mode
)
253 text
= TextIOWrapper(buffer, encoding
, errors
, newline
, line_buffering
)
257 class _DocDescriptor
:
258 """Helper for builtins.open.__doc__
260 def __get__(self
, obj
, typ
):
262 "open(file, mode='r', buffering=None, encoding=None, "
263 "errors=None, newline=None, closefd=True)\n\n" +
267 """Wrapper for builtins.open
269 Trick so that open won't become a bound method when stored
270 as a class variable (as dumbdbm does).
272 See initstdio() in Python/pythonrun.c.
274 __doc__
= _DocDescriptor()
276 def __new__(cls
, *args
, **kwargs
):
277 return open(*args
, **kwargs
)
class UnsupportedOperation(ValueError, IOError):
    """Raised when an unsupported operation is requested on a stream."""
284 class IOBase(object):
286 """The abstract base class for all I/O classes, acting on streams of
287 bytes. There is no public constructor.
289 This class provides dummy implementations for many methods that
290 derived classes can override selectively; the default implementations
291 represent a file that cannot be read, written or seeked.
293 Even though IOBase does not declare read, readinto, or write because
294 their signatures will vary, implementations and clients should
295 consider those methods part of the interface. Also, implementations
296 may raise a IOError when operations they do not support are called.
298 The basic type used for binary data read from or written to a file is
299 bytes. bytearrays are accepted too, and in some cases (such as
300 readinto) needed. Text I/O classes work with str data.
302 Note that calling any method (even inquiries) on a closed stream is
303 undefined. Implementations may raise IOError in this case.
305 IOBase (and its subclasses) support the iterator protocol, meaning
306 that an IOBase object can be iterated over yielding the lines in a
309 IOBase also supports the :keyword:`with` statement. In this example,
310 fp is closed after the suite of the with statement is complete:
312 with open('spam.txt', 'r') as fp:
313 fp.write('Spam and eggs!')
316 __metaclass__
= abc
.ABCMeta
def _unsupported(self, name):
    """Internal helper: signal that this stream does not implement *name*."""
    message = "%s.%s() not supported" % (self.__class__.__name__, name)
    raise UnsupportedOperation(message)
def seek(self, pos, whence=0):
    """Change stream position.

    Change the stream position to byte offset pos.  pos is
    interpreted relative to the position indicated by whence.  Values
    for whence are:

    * 0 -- start of stream (the default); offset should be zero or positive
    * 1 -- current stream position; offset may be negative
    * 2 -- end of stream; offset is usually negative

    Return the new absolute position.
    """
    # Base class stub: concrete streams must override this.
    self._unsupported("seek")
343 """Return current stream position."""
344 return self
.seek(0, 1)
def truncate(self, pos=None):
    """Truncate file to size bytes.

    Size defaults to the current IO position as reported by tell().
    """
    # Base class stub: concrete streams must override this.
    self._unsupported("truncate")
354 ### Flush and close ###
357 """Flush write buffers, if applicable.
359 This is not implemented for read-only and non-blocking streams.
361 # XXX Should this return the number of bytes written???
366 """Flush and close the IO object.
368 This method has no effect if the file is already closed.
370 if not self
.__closed
:
374 pass # If flush() fails, just give up
378 """Destructor. Calls close()."""
379 # The try/except block is in case this is called at program
380 # exit time, when it's possible that globals have already been
381 # deleted, and then the close() call might fail. Since
382 # there's nothing we can do about such failures and they annoy
383 # the end users, we suppress the traceback.
392 """Return whether object supports random access.
394 If False, seek(), tell() and truncate() will raise IOError.
395 This method may need to do a test seek().
def _checkSeekable(self, msg=None):
    """Internal: raise an IOError if file is not seekable.

    msg, when given, replaces the default error message.
    """
    if not self.seekable():
        raise IOError("File or stream is not seekable."
                      if msg is None else msg)
408 """Return whether object was opened for reading.
410 If False, read() will raise IOError.
def _checkReadable(self, msg=None):
    """Internal: raise an IOError if file is not readable.

    msg, when given, replaces the default error message.
    """
    if not self.readable():
        raise IOError("File or stream is not readable."
                      if msg is None else msg)
422 """Return whether object was opened for writing.
424 If False, write() and truncate() will raise IOError.
def _checkWritable(self, msg=None):
    """Internal: raise an IOError if file is not writable.

    msg, when given, replaces the default error message.
    """
    if not self.writable():
        raise IOError("File or stream is not writable."
                      if msg is None else msg)
437 """closed: bool. True iff the file has been closed.
439 For backwards compatibility, this is a property, not a predicate.
443 def _checkClosed(self
, msg
=None):
444 """Internal: raise an ValueError if file is closed
447 raise ValueError("I/O operation on closed file."
448 if msg
is None else msg
)
450 ### Context manager ###
453 """Context management protocol. Returns self."""
457 def __exit__(self
, *args
):
458 """Context management protocol. Calls close()"""
461 ### Lower-level APIs ###
463 # XXX Should these be present even if unimplemented?
466 """Returns underlying file descriptor if one exists.
468 An IOError is raised if the IO object does not use a file descriptor.
470 self
._unsupported
("fileno")
473 """Return whether this is an 'interactive' stream.
475 Return False if it can't be determined.
480 ### Readline[s] and writelines ###
482 def readline(self
, limit
= -1):
483 r
"""Read and return a line from the stream.
485 If limit is specified, at most limit bytes will be read.
487 The line terminator is always b'\n' for binary files; for text
488 files, the newlines argument to open can be used to select the line
489 terminator(s) recognized.
492 if hasattr(self
, "peek"):
494 readahead
= self
.peek(1)
497 n
= (readahead
.find(b
"\n") + 1) or len(readahead
)
506 if not isinstance(limit
, (int, long)):
507 raise TypeError("limit must be an integer")
509 while limit
< 0 or len(res
) < limit
:
510 b
= self
.read(nreadahead())
514 if res
.endswith(b
"\n"):
523 line
= self
.readline()
528 def readlines(self
, hint
=None):
529 """Return a list of lines from the stream.
531 hint can be specified to control the number of lines read: no more
532 lines will be read if the total size (in bytes/characters) of all
533 lines so far exceeds hint.
537 if not isinstance(hint
, (int, long)):
538 raise TypeError("hint must be an integer")
550 def writelines(self
, lines
):
556 class RawIOBase(IOBase
):
558 """Base class for raw binary I/O."""
560 # The read() method is implemented by calling readinto(); derived
561 # classes that want to support read() only need to implement
562 # readinto() as a primitive operation. In general, readinto() can be
563 # more efficient than read().
565 # (It would be tempting to also provide an implementation of
566 # readinto() in terms of read(), in case the latter is a more suitable
567 # primitive operation, but that would lead to nasty recursion in case
568 # a subclass doesn't implement either.)
570 def read(self
, n
= -1):
571 """Read and return up to n bytes.
573 Returns an empty bytes array on EOF, or None if the object is
574 set not to block and has no data to read.
579 return self
.readall()
580 b
= bytearray(n
.__index
__())
586 """Read until EOF, using multiple read() call."""
589 data
= self
.read(DEFAULT_BUFFER_SIZE
)
def readinto(self, b):
    """Read up to len(b) bytes into b.

    Returns number of bytes read (0 for EOF), or None if the object
    is set not to block and has no data to read.
    """
    # Primitive operation: subclasses implement read() in terms of this.
    self._unsupported("readinto")
604 """Write the given buffer to the IO stream.
606 Returns the number of bytes written, which may be less than len(b).
608 self
._unsupported
("write")
611 class FileIO(_fileio
._FileIO
, RawIOBase
):
613 """Raw I/O implementation for OS files."""
615 # This multiply inherits from _FileIO and RawIOBase to make
616 # isinstance(io.FileIO(), io.RawIOBase) return True without requiring
617 # that _fileio._FileIO inherits from io.RawIOBase (which would be hard
618 # to do since _fileio.c is written in C).
620 def __init__(self
, name
, mode
="r", closefd
=True):
621 _fileio
._FileIO
.__init
__(self
, name
, mode
, closefd
)
625 _fileio
._FileIO
.close(self
)
626 RawIOBase
.close(self
)
633 class BufferedIOBase(IOBase
):
635 """Base class for buffered IO objects.
637 The main difference with RawIOBase is that the read() method
638 supports omitting the size argument, and does not have a default
639 implementation that defers to readinto().
641 In addition, read(), readinto() and write() may raise
642 BlockingIOError if the underlying raw stream is in non-blocking
643 mode and not ready; unlike their raw counterparts, they will never
646 A typical implementation should not inherit from a RawIOBase
647 implementation, but wrap one.
def read(self, n=None):
    """Read and return up to n bytes.

    If the argument is omitted, None, or negative, reads and
    returns all data until EOF.

    If the argument is positive, and the underlying raw stream is
    not 'interactive', multiple raw reads may be issued to satisfy
    the byte count (unless EOF is reached first).  But for
    interactive raw streams (XXX and for pipes?), at most one raw
    read will be issued, and a short result does not imply that
    EOF is imminent.

    Returns an empty bytes array on EOF.

    Raises BlockingIOError if the underlying raw stream has no
    data at the moment.
    """
    # Abstract: buffered subclasses provide the real implementation.
    self._unsupported("read")
670 def readinto(self
, b
):
671 """Read up to len(b) bytes into b.
673 Like read(), this may issue multiple reads to the underlying raw
674 stream, unless the latter is 'interactive'.
676 Returns the number of bytes read (0 for EOF).
678 Raises BlockingIOError if the underlying raw stream has no
681 # XXX This ought to work with anything that supports the buffer API
682 data
= self
.read(len(b
))
686 except TypeError as err
:
688 if not isinstance(b
, array
.array
):
690 b
[:n
] = array
.array(b
'b', data
)
694 """Write the given buffer to the IO stream.
696 Return the number of bytes written, which is never less than
699 Raises BlockingIOError if the buffer is full and the
700 underlying raw stream cannot accept more data at the moment.
702 self
._unsupported
("write")
705 class _BufferedIOMixin(BufferedIOBase
):
707 """A mixin implementation of BufferedIOBase with an underlying raw stream.
709 This passes most requests on to the underlying raw stream. It
710 does *not* provide implementations of read(), readinto() or
714 def __init__(self
, raw
):
def seek(self, pos, whence=0):
    # Delegate positioning straight to the underlying raw stream.
    return self.raw.seek(pos, whence)
723 return self
.raw
.tell()
725 def truncate(self
, pos
=None):
726 # Flush the stream. We're mixing buffered I/O with lower-level I/O,
727 # and a flush may be necessary to synch both views of the current
733 # XXX: Should seek() be used, instead of passing the position
734 # XXX directly to truncate?
735 return self
.raw
.truncate(pos
)
737 ### Flush and close ###
747 pass # If flush() fails, just give up
753 return self
.raw
.seekable()
756 return self
.raw
.readable()
759 return self
.raw
.writable()
763 return self
.raw
.closed
773 ### Lower-level APIs ###
776 return self
.raw
.fileno()
779 return self
.raw
.isatty()
782 class _BytesIO(BufferedIOBase
):
784 """Buffered I/O implementation using an in-memory bytes buffer."""
788 def __init__(self
, initial_bytes
=None):
790 if initial_bytes
is not None:
791 buf
+= bytearray(initial_bytes
)
796 """Return the bytes value (contents) of the buffer
799 raise ValueError("getvalue on closed file")
800 return bytes(self
._buffer
)
802 def read(self
, n
=None):
804 raise ValueError("read from closed file")
807 if not isinstance(n
, (int, long)):
808 raise TypeError("argument must be an integer")
810 n
= len(self
._buffer
)
811 if len(self
._buffer
) <= self
._pos
:
813 newpos
= min(len(self
._buffer
), self
._pos
+ n
)
814 b
= self
._buffer
[self
._pos
: newpos
]
819 """this is the same as read.
825 raise ValueError("write to closed file")
826 if isinstance(b
, unicode):
827 raise TypeError("can't write unicode to binary stream")
832 if pos
> len(self
._buffer
):
833 # Inserts null bytes between the current end of the file
834 # and the new write position.
835 padding
= b
'\x00' * (pos
- len(self
._buffer
))
836 self
._buffer
+= padding
837 self
._buffer
[pos
:pos
+ n
] = b
841 def seek(self
, pos
, whence
=0):
843 raise ValueError("seek on closed file")
845 pos
= pos
.__index
__()
846 except AttributeError as err
:
847 raise TypeError("an integer is required") # from err
850 raise ValueError("negative seek position %r" % (pos
,))
853 self
._pos
= max(0, self
._pos
+ pos
)
855 self
._pos
= max(0, len(self
._buffer
) + pos
)
857 raise ValueError("invalid whence value")
862 raise ValueError("tell on closed file")
865 def truncate(self
, pos
=None):
867 raise ValueError("truncate on closed file")
871 raise ValueError("negative truncate position %r" % (pos
,))
872 del self
._buffer
[pos
:]
873 return self
.seek(pos
)
884 # Use the faster implementation of BytesIO if available
888 class BytesIO(_bytesio
._BytesIO
, BufferedIOBase
):
889 __doc__
= _bytesio
._BytesIO
.__doc
__
895 class BufferedReader(_BufferedIOMixin
):
897 """BufferedReader(raw[, buffer_size])
899 A buffer for a readable, sequential BaseRawIO object.
901 The constructor creates a BufferedReader for the given readable raw
902 stream and buffer_size. If buffer_size is omitted, DEFAULT_BUFFER_SIZE
906 def __init__(self
, raw
, buffer_size
=DEFAULT_BUFFER_SIZE
):
907 """Create a new buffered reader using the given readable raw IO object.
910 _BufferedIOMixin
.__init
__(self
, raw
)
911 self
.buffer_size
= buffer_size
912 self
._reset
_read
_buf
()
913 self
._read
_lock
= threading
.Lock()
915 def _reset_read_buf(self
):
919 def read(self
, n
=None):
922 Returns exactly n bytes of data unless the underlying raw IO
923 stream reaches EOF or if the call would block in non-blocking
924 mode. If n is negative, read until EOF or until read() would
927 with self
._read
_lock
:
928 return self
._read
_unlocked
(n
)
930 def _read_unlocked(self
, n
=None):
932 empty_values
= (b
"", None)
936 # Special case for when the number of bytes to read is unspecified.
937 if n
is None or n
== -1:
938 self
._reset
_read
_buf
()
939 chunks
= [buf
[pos
:]] # Strip the consumed bytes.
942 # Read until EOF or until read() would block.
943 chunk
= self
.raw
.read()
944 if chunk
in empty_values
:
947 current_size
+= len(chunk
)
949 return b
"".join(chunks
) or nodata_val
951 # The number of bytes to read is specified, return at most n bytes.
952 avail
= len(buf
) - pos
# Length of the available buffered data.
954 # Fast path: the data to read is fully buffered.
956 return buf
[pos
:pos
+n
]
957 # Slow path: read from the stream until enough bytes are read,
958 # or until an EOF occurs or until read() would block.
960 wanted
= max(self
.buffer_size
, n
)
962 chunk
= self
.raw
.read(wanted
)
963 if chunk
in empty_values
:
968 # n is more than avail only when an EOF occurred or when
969 # read() would have blocked.
971 out
= b
"".join(chunks
)
972 self
._read
_buf
= out
[n
:] # Save the extra data in the buffer.
974 return out
[:n
] if out
else nodata_val
977 """Returns buffered bytes without advancing the position.
979 The argument indicates a desired minimal number of bytes; we
980 do at most one raw read to satisfy it. We never return more
981 than self.buffer_size.
983 with self
._read
_lock
:
984 return self
._peek
_unlocked
(n
)
986 def _peek_unlocked(self
, n
=0):
987 want
= min(n
, self
.buffer_size
)
988 have
= len(self
._read
_buf
) - self
._read
_pos
990 to_read
= self
.buffer_size
- have
991 current
= self
.raw
.read(to_read
)
993 self
._read
_buf
= self
._read
_buf
[self
._read
_pos
:] + current
995 return self
._read
_buf
[self
._read
_pos
:]
998 """Reads up to n bytes, with at most one read() system call."""
999 # Returns up to n bytes. If at least one byte is buffered, we
1000 # only return buffered bytes. Otherwise, we do one raw read.
1003 with self
._read
_lock
:
1004 self
._peek
_unlocked
(1)
1005 return self
._read
_unlocked
(
1006 min(n
, len(self
._read
_buf
) - self
._read
_pos
))
1009 return self
.raw
.tell() - len(self
._read
_buf
) + self
._read
_pos
1011 def seek(self
, pos
, whence
=0):
1012 with self
._read
_lock
:
1014 pos
-= len(self
._read
_buf
) - self
._read
_pos
1015 pos
= self
.raw
.seek(pos
, whence
)
1016 self
._reset
_read
_buf
()
1020 class BufferedWriter(_BufferedIOMixin
):
1022 """A buffer for a writeable sequential RawIO object.
1024 The constructor creates a BufferedWriter for the given writeable raw
1025 stream. If the buffer_size is not given, it defaults to
1026 DEFAULT_BUFFER_SIZE. If max_buffer_size is omitted, it defaults to
1027 twice the buffer size.
def __init__(self, raw,
             buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
    """Create a buffered writer wrapping the writable raw stream *raw*."""
    # Fail fast if the raw stream cannot be written to.
    raw._checkWritable()
    _BufferedIOMixin.__init__(self, raw)
    self.buffer_size = buffer_size
    # max_buffer_size bounds how much pending data we hold when the raw
    # stream blocks; it defaults to twice the nominal buffer size.
    self.max_buffer_size = (2*buffer_size
                            if max_buffer_size is None
                            else max_buffer_size)
    # Pending bytes not yet flushed to the raw stream.
    self._write_buf = bytearray()
    # Serializes all mutations of _write_buf.
    self._write_lock = threading.Lock()
1043 raise ValueError("write to closed file")
1044 if isinstance(b
, unicode):
1045 raise TypeError("can't write unicode to binary stream")
1046 with self
._write
_lock
:
1047 # XXX we can implement some more tricks to try and avoid
1049 if len(self
._write
_buf
) > self
.buffer_size
:
1050 # We're full, so let's pre-flush the buffer
1052 self
._flush
_unlocked
()
1053 except BlockingIOError
as e
:
1054 # We can't accept anything else.
1055 # XXX Why not just let the exception pass through?
1056 raise BlockingIOError(e
.errno
, e
.strerror
, 0)
1057 before
= len(self
._write
_buf
)
1058 self
._write
_buf
.extend(b
)
1059 written
= len(self
._write
_buf
) - before
1060 if len(self
._write
_buf
) > self
.buffer_size
:
1062 self
._flush
_unlocked
()
1063 except BlockingIOError
as e
:
1064 if len(self
._write
_buf
) > self
.max_buffer_size
:
1065 # We've hit max_buffer_size. We have to accept a
1066 # partial write and cut back our buffer.
1067 overage
= len(self
._write
_buf
) - self
.max_buffer_size
1068 self
._write
_buf
= self
._write
_buf
[:self
.max_buffer_size
]
1069 raise BlockingIOError(e
.errno
, e
.strerror
, overage
)
1072 def truncate(self
, pos
=None):
1073 with self
._write
_lock
:
1074 self
._flush
_unlocked
()
1076 pos
= self
.raw
.tell()
1077 return self
.raw
.truncate(pos
)
1080 with self
._write
_lock
:
1081 self
._flush
_unlocked
()
1083 def _flush_unlocked(self
):
1085 raise ValueError("flush of closed file")
1088 while self
._write
_buf
:
1089 n
= self
.raw
.write(self
._write
_buf
)
1090 del self
._write
_buf
[:n
]
1092 except BlockingIOError
as e
:
1093 n
= e
.characters_written
1094 del self
._write
_buf
[:n
]
1096 raise BlockingIOError(e
.errno
, e
.strerror
, written
)
1099 return self
.raw
.tell() + len(self
._write
_buf
)
def seek(self, pos, whence=0):
    # Flush pending writes first so the raw stream's position (and the
    # data on disk) is authoritative before repositioning.
    with self._write_lock:
        self._flush_unlocked()
        return self.raw.seek(pos, whence)
1107 class BufferedRWPair(BufferedIOBase
):
1109 """A buffered reader and writer object together.
1111 A buffered reader object and buffered writer object put together to
1112 form a sequential IO object that can read and write. This is typically
1113 used with a socket or two-way pipe.
1115 reader and writer are RawIOBase objects that are readable and
1116 writeable respectively. If the buffer_size is omitted it defaults to
1117 DEFAULT_BUFFER_SIZE. The max_buffer_size (for the buffered writer)
1118 defaults to twice the buffer size.
1121 # XXX The usefulness of this (compared to having two separate IO
1122 # objects) is questionable.
1124 def __init__(self
, reader
, writer
,
1125 buffer_size
=DEFAULT_BUFFER_SIZE
, max_buffer_size
=None):
1128 The arguments are two RawIO instances.
1130 reader
._checkReadable
()
1131 writer
._checkWritable
()
1132 self
.reader
= BufferedReader(reader
, buffer_size
)
1133 self
.writer
= BufferedWriter(writer
, buffer_size
, max_buffer_size
)
1135 def read(self
, n
=None):
1138 return self
.reader
.read(n
)
def readinto(self, b):
    # Reads are served entirely by the reader half of the pair.
    return self.reader.readinto(b)
1144 return self
.writer
.write(b
)
def peek(self, n=0):
    # Peeking only makes sense on the read side; delegate to the reader.
    return self.reader.peek(n)
1150 return self
.reader
.read1(n
)
1153 return self
.reader
.readable()
1156 return self
.writer
.writable()
1159 return self
.writer
.flush()
1166 return self
.reader
.isatty() or self
.writer
.isatty()
1170 return self
.writer
.closed()
1173 class BufferedRandom(BufferedWriter
, BufferedReader
):
1175 """A buffered interface to random access streams.
1177 The constructor creates a reader and writer for a seekable stream,
1178 raw, given in the first argument. If the buffer_size is omitted it
1179 defaults to DEFAULT_BUFFER_SIZE. The max_buffer_size (for the buffered
1180 writer) defaults to twice the buffer size.
def __init__(self, raw,
             buffer_size=DEFAULT_BUFFER_SIZE, max_buffer_size=None):
    """Create a buffered random-access wrapper over seekable stream *raw*."""
    # Random access requires a seekable raw stream; check up front.
    raw._checkSeekable()
    # Initialize both parents; BufferedReader and BufferedWriter keep
    # independent read/write buffers over the same raw stream.
    BufferedReader.__init__(self, raw, buffer_size)
    BufferedWriter.__init__(self, raw, buffer_size, max_buffer_size)
1189 def seek(self
, pos
, whence
=0):
1191 # First do the raw seek, then empty the read buffer, so that
1192 # if the raw seek fails, we don't lose buffered data forever.
1193 pos
= self
.raw
.seek(pos
, whence
)
1194 with self
._read
_lock
:
1195 self
._reset
_read
_buf
()
1200 return self
.raw
.tell() + len(self
._write
_buf
)
1202 return BufferedReader
.tell(self
)
1204 def truncate(self
, pos
=None):
1207 # Use seek to flush the read buffer.
1209 return BufferedWriter
.truncate(self
)
1211 def read(self
, n
=None):
1215 return BufferedReader
.read(self
, n
)
1217 def readinto(self
, b
):
1219 return BufferedReader
.readinto(self
, b
)
1221 def peek(self
, n
=0):
1223 return BufferedReader
.peek(self
, n
)
1227 return BufferedReader
.read1(self
, n
)
1232 with self
._read
_lock
:
1233 self
.raw
.seek(self
._read
_pos
- len(self
._read
_buf
), 1)
1234 self
._reset
_read
_buf
()
1235 return BufferedWriter
.write(self
, b
)
class TextIOBase(IOBase):

    """Base class for text I/O.

    This class provides a character and line based interface to stream
    I/O. There is no readinto method because Python's character strings
    are immutable. There is no public constructor.
    """

    def read(self, n=-1):
        """Read at most n characters from stream.

        Read from underlying buffer until we have n characters or we hit EOF.
        If n is negative or omitted, read until EOF.
        """
        self._unsupported("read")

    def write(self, s):
        """Write string s to stream."""
        self._unsupported("write")

    def truncate(self, pos=None):
        """Truncate size to pos."""
        self._unsupported("truncate")

    def readline(self):
        """Read until newline or EOF.

        Returns an empty string if EOF is hit immediately.
        """
        self._unsupported("readline")

    @property
    def encoding(self):
        """Subclasses should override."""
        return None

    @property
    def newlines(self):
        """Line endings translated so far.

        Only line endings translated during reading are considered.

        Subclasses should override.
        """
        return None
class IncrementalNewlineDecoder(codecs.IncrementalDecoder):
    r"""Codec used when reading a file in universal newlines mode.
    It wraps another incremental decoder, translating \r\n and \r into \n.
    It also records the types of newlines encountered.
    When used with translate=False, it ensures that the newline sequence is
    returned in one piece.
    """

    # Bit flags accumulated in self.seennl; together they index the
    # lookup tuple in the ``newlines`` property.
    _LF = 1
    _CR = 2
    _CRLF = 4

    def __init__(self, decoder, translate, errors='strict'):
        codecs.IncrementalDecoder.__init__(self, errors=errors)
        self.translate = translate
        self.decoder = decoder
        self.seennl = 0
        # True while a trailing '\r' is being held back between calls.
        self.pendingcr = False

    def decode(self, input, final=False):
        # Decode input, prepending the eventual '\r' kept from a previous
        # pass.
        output = self.decoder.decode(input, final=final)
        if self.pendingcr and (output or final):
            output = "\r" + output
            self.pendingcr = False

        # Retain the last '\r' even when not translating data: that way
        # readline() is sure to get '\r\n' in one pass.
        if output.endswith("\r") and not final:
            output = output[:-1]
            self.pendingcr = True

        # Record which kinds of newlines were seen.
        crlf = output.count('\r\n')
        cr = output.count('\r') - crlf
        lf = output.count('\n') - crlf
        self.seennl |= (lf and self._LF) | (cr and self._CR) \
                    | (crlf and self._CRLF)

        if self.translate:
            if crlf:
                output = output.replace("\r\n", "\n")
            if cr:
                output = output.replace("\r", "\n")

        return output

    def getstate(self):
        # Fold pendingcr into the low bit of the wrapped decoder's flags
        # (the mirror image of setstate below).
        buf, flag = self.decoder.getstate()
        flag <<= 1
        if self.pendingcr:
            flag |= 1
        return buf, flag

    def setstate(self, state):
        buf, flag = state
        self.pendingcr = bool(flag & 1)
        self.decoder.setstate((buf, flag >> 1))

    def reset(self):
        self.seennl = 0
        self.pendingcr = False
        self.decoder.reset()

    @property
    def newlines(self):
        """Newline kinds seen so far: None, a string, or a tuple of them."""
        return (None,
                "\n",
                "\r",
                ("\r", "\n"),
                "\r\n",
                ("\n", "\r\n"),
                ("\r", "\r\n"),
                ("\r", "\n", "\r\n"),
               )[self.seennl]
class TextIOWrapper(TextIOBase):

    r"""Character and line based layer over a BufferedIOBase object, buffer.

    encoding gives the name of the encoding that the stream will be
    decoded or encoded with. It defaults to locale.getpreferredencoding.

    errors determines the strictness of encoding and decoding (see the
    codecs.register) and defaults to "strict".

    newline can be None, '', '\n', '\r', or '\r\n'. It controls the
    handling of line endings. If it is None, universal newlines is
    enabled. With this enabled, on input, the line endings '\n', '\r',
    or '\r\n' are translated to '\n' before being returned to the
    caller. Conversely, on output, '\n' is translated to the system
    default line separator, os.linesep. If newline is any other of its
    legal values, that newline becomes the newline when the file is read
    and it is returned untranslated. On output, '\n' is converted to the
    newline.

    If line_buffering is True, a call to flush is implied when a call to
    write contains a newline character.
    """

    # Number of bytes fed to the decoder per _read_chunk() call; restored
    # here because _read_chunk() reads self._CHUNK_SIZE but no definition
    # survives elsewhere in this file (value per CPython's io.py).
    _CHUNK_SIZE = 128
1388 def __init__(self
, buffer, encoding
=None, errors
=None, newline
=None,
1389 line_buffering
=False):
1390 if newline
not in (None, "", "\n", "\r", "\r\n"):
1391 raise ValueError("illegal newline value: %r" % (newline
,))
1392 if encoding
is None:
1394 encoding
= os
.device_encoding(buffer.fileno())
1395 except (AttributeError, UnsupportedOperation
):
1397 if encoding
is None:
1401 # Importing locale may fail if Python is being built
1404 encoding
= locale
.getpreferredencoding()
1406 if not isinstance(encoding
, basestring
):
1407 raise ValueError("invalid encoding: %r" % encoding
)
1412 if not isinstance(errors
, basestring
):
1413 raise ValueError("invalid errors: %r" % errors
)
1415 self
.buffer = buffer
1416 self
._line
_buffering
= line_buffering
1417 self
._encoding
= encoding
1418 self
._errors
= errors
1419 self
._readuniversal
= not newline
1420 self
._readtranslate
= newline
is None
1421 self
._readnl
= newline
1422 self
._writetranslate
= newline
!= ''
1423 self
._writenl
= newline
or os
.linesep
1424 self
._encoder
= None
1425 self
._decoder
= None
1426 self
._decoded
_chars
= '' # buffer for text returned from decoder
1427 self
._decoded
_chars
_used
= 0 # offset into _decoded_chars for read()
1428 self
._snapshot
= None # info for reconstructing decoder state
1429 self
._seekable
= self
._telling
= self
.buffer.seekable()
1431 # self._snapshot is either None, or a tuple (dec_flags, next_input)
1432 # where dec_flags is the second (integer) item of the decoder state
1433 # and next_input is the chunk of input bytes that comes next after the
1434 # snapshot point. We use this to reconstruct decoder states in tell().
1436 # Naming convention:
1437 # - "bytes_..." for integer variables that count input bytes
1438 # - "chars_..." for integer variables that count decoded characters
1442 return self
._encoding
1449 def line_buffering(self
):
1450 return self
._line
_buffering
1453 return self
._seekable
1456 return self
.buffer.readable()
1459 return self
.buffer.writable()
1463 self
._telling
= self
._seekable
1469 pass # If flush() fails, just give up
1474 return self
.buffer.closed
1478 return self
.buffer.name
1481 return self
.buffer.fileno()
1484 return self
.buffer.isatty()
1488 raise ValueError("write to closed file")
1489 if not isinstance(s
, unicode):
1490 raise TypeError("can't write %s to text stream" %
1491 s
.__class
__.__name
__)
1493 haslf
= (self
._writetranslate
or self
._line
_buffering
) and "\n" in s
1494 if haslf
and self
._writetranslate
and self
._writenl
!= "\n":
1495 s
= s
.replace("\n", self
._writenl
)
1496 encoder
= self
._encoder
or self
._get
_encoder
()
1497 # XXX What if we were just reading?
1498 b
= encoder
.encode(s
)
1499 self
.buffer.write(b
)
1500 if self
._line
_buffering
and (haslf
or "\r" in s
):
1502 self
._snapshot
= None
1504 self
._decoder
.reset()
1507 def _get_encoder(self
):
1508 make_encoder
= codecs
.getincrementalencoder(self
._encoding
)
1509 self
._encoder
= make_encoder(self
._errors
)
1510 return self
._encoder
1512 def _get_decoder(self
):
1513 make_decoder
= codecs
.getincrementaldecoder(self
._encoding
)
1514 decoder
= make_decoder(self
._errors
)
1515 if self
._readuniversal
:
1516 decoder
= IncrementalNewlineDecoder(decoder
, self
._readtranslate
)
1517 self
._decoder
= decoder
1520 # The following three methods implement an ADT for _decoded_chars.
1521 # Text returned from the decoder is buffered here until the client
1522 # requests it by calling our read() or readline() method.
1523 def _set_decoded_chars(self
, chars
):
1524 """Set the _decoded_chars buffer."""
1525 self
._decoded
_chars
= chars
1526 self
._decoded
_chars
_used
= 0
1528 def _get_decoded_chars(self
, n
=None):
1529 """Advance into the _decoded_chars buffer."""
1530 offset
= self
._decoded
_chars
_used
1532 chars
= self
._decoded
_chars
[offset
:]
1534 chars
= self
._decoded
_chars
[offset
:offset
+ n
]
1535 self
._decoded
_chars
_used
+= len(chars
)
1538 def _rewind_decoded_chars(self
, n
):
1539 """Rewind the _decoded_chars buffer."""
1540 if self
._decoded
_chars
_used
< n
:
1541 raise AssertionError("rewind decoded_chars out of bounds")
1542 self
._decoded
_chars
_used
-= n
1544 def _read_chunk(self
):
1546 Read and decode the next chunk of data from the BufferedReader.
1548 The return value is True unless EOF was reached. The decoded string
1549 is placed in self._decoded_chars (replacing its previous value).
1550 The entire input chunk is sent to the decoder, though some of it
1551 may remain buffered in the decoder, yet to be converted.
1554 if self
._decoder
is None:
1555 raise ValueError("no decoder")
1558 # To prepare for tell(), we need to snapshot a point in the
1559 # file where the decoder's input buffer is empty.
1561 dec_buffer
, dec_flags
= self
._decoder
.getstate()
1562 # Given this, we know there was a valid snapshot point
1563 # len(dec_buffer) bytes ago with decoder state (b'', dec_flags).
1565 # Read a chunk, decode it, and put the result in self._decoded_chars.
1566 input_chunk
= self
.buffer.read1(self
._CHUNK
_SIZE
)
1567 eof
= not input_chunk
1568 self
._set
_decoded
_chars
(self
._decoder
.decode(input_chunk
, eof
))
1571 # At the snapshot point, len(dec_buffer) bytes before the read,
1572 # the next input to be decoded is dec_buffer + input_chunk.
1573 self
._snapshot
= (dec_flags
, dec_buffer
+ input_chunk
)
1577 def _pack_cookie(self
, position
, dec_flags
=0,
1578 bytes_to_feed
=0, need_eof
=0, chars_to_skip
=0):
1579 # The meaning of a tell() cookie is: seek to position, set the
1580 # decoder flags to dec_flags, read bytes_to_feed bytes, feed them
1581 # into the decoder with need_eof as the EOF flag, then skip
1582 # chars_to_skip characters of the decoded result. For most simple
1583 # decoders, tell() will often just give a byte offset in the file.
1584 return (position |
(dec_flags
<<64) |
(bytes_to_feed
<<128) |
1585 (chars_to_skip
<<192) |
bool(need_eof
)<<256)
1587 def _unpack_cookie(self
, bigint
):
1588 rest
, position
= divmod(bigint
, 1<<64)
1589 rest
, dec_flags
= divmod(rest
, 1<<64)
1590 rest
, bytes_to_feed
= divmod(rest
, 1<<64)
1591 need_eof
, chars_to_skip
= divmod(rest
, 1<<64)
1592 return position
, dec_flags
, bytes_to_feed
, need_eof
, chars_to_skip
1595 if not self
._seekable
:
1596 raise IOError("underlying stream is not seekable")
1597 if not self
._telling
:
1598 raise IOError("telling position disabled by next() call")
1600 position
= self
.buffer.tell()
1601 decoder
= self
._decoder
1602 if decoder
is None or self
._snapshot
is None:
1603 if self
._decoded
_chars
:
1604 # This should never happen.
1605 raise AssertionError("pending decoded text")
1608 # Skip backward to the snapshot point (see _read_chunk).
1609 dec_flags
, next_input
= self
._snapshot
1610 position
-= len(next_input
)
1612 # How many decoded characters have been used up since the snapshot?
1613 chars_to_skip
= self
._decoded
_chars
_used
1614 if chars_to_skip
== 0:
1615 # We haven't moved from the snapshot point.
1616 return self
._pack
_cookie
(position
, dec_flags
)
1618 # Starting from the snapshot position, we will walk the decoder
1619 # forward until it gives us enough decoded characters.
1620 saved_state
= decoder
.getstate()
1622 # Note our initial start point.
1623 decoder
.setstate((b
'', dec_flags
))
1624 start_pos
= position
1625 start_flags
, bytes_fed
, chars_decoded
= dec_flags
, 0, 0
1628 # Feed the decoder one byte at a time. As we go, note the
1629 # nearest "safe start point" before the current location
1630 # (a point where the decoder has nothing buffered, so seek()
1631 # can safely start from there and advance to this location).
1632 for next_byte
in next_input
:
1634 chars_decoded
+= len(decoder
.decode(next_byte
))
1635 dec_buffer
, dec_flags
= decoder
.getstate()
1636 if not dec_buffer
and chars_decoded
<= chars_to_skip
:
1637 # Decoder buffer is empty, so this is a safe start point.
1638 start_pos
+= bytes_fed
1639 chars_to_skip
-= chars_decoded
1640 start_flags
, bytes_fed
, chars_decoded
= dec_flags
, 0, 0
1641 if chars_decoded
>= chars_to_skip
:
1644 # We didn't get enough decoded data; signal EOF to get more.
1645 chars_decoded
+= len(decoder
.decode(b
'', final
=True))
1647 if chars_decoded
< chars_to_skip
:
1648 raise IOError("can't reconstruct logical file position")
1650 # The returned cookie corresponds to the last safe start point.
1651 return self
._pack
_cookie
(
1652 start_pos
, start_flags
, bytes_fed
, need_eof
, chars_to_skip
)
1654 decoder
.setstate(saved_state
)
1656 def truncate(self
, pos
=None):
1661 return self
.buffer.truncate()
1663 def seek(self
, cookie
, whence
=0):
1665 raise ValueError("tell on closed file")
1666 if not self
._seekable
:
1667 raise IOError("underlying stream is not seekable")
1668 if whence
== 1: # seek relative to current position
1670 raise IOError("can't do nonzero cur-relative seeks")
1671 # Seeking to the current position should attempt to
1672 # sync the underlying buffer with the current position.
1674 cookie
= self
.tell()
1675 if whence
== 2: # seek relative to end of file
1677 raise IOError("can't do nonzero end-relative seeks")
1679 position
= self
.buffer.seek(0, 2)
1680 self
._set
_decoded
_chars
('')
1681 self
._snapshot
= None
1683 self
._decoder
.reset()
1686 raise ValueError("invalid whence (%r, should be 0, 1 or 2)" %
1689 raise ValueError("negative seek position %r" % (cookie
,))
1692 # The strategy of seek() is to go back to the safe start point
1693 # and replay the effect of read(chars_to_skip) from there.
1694 start_pos
, dec_flags
, bytes_to_feed
, need_eof
, chars_to_skip
= \
1695 self
._unpack
_cookie
(cookie
)
1697 # Seek back to the safe start point.
1698 self
.buffer.seek(start_pos
)
1699 self
._set
_decoded
_chars
('')
1700 self
._snapshot
= None
1702 # Restore the decoder to its state from the safe start point.
1703 if self
._decoder
or dec_flags
or chars_to_skip
:
1704 self
._decoder
= self
._decoder
or self
._get
_decoder
()
1705 self
._decoder
.setstate((b
'', dec_flags
))
1706 self
._snapshot
= (dec_flags
, b
'')
1709 # Just like _read_chunk, feed the decoder and save a snapshot.
1710 input_chunk
= self
.buffer.read(bytes_to_feed
)
1711 self
._set
_decoded
_chars
(
1712 self
._decoder
.decode(input_chunk
, need_eof
))
1713 self
._snapshot
= (dec_flags
, input_chunk
)
1715 # Skip chars_to_skip of the decoded characters.
1716 if len(self
._decoded
_chars
) < chars_to_skip
:
1717 raise IOError("can't restore logical file position")
1718 self
._decoded
_chars
_used
= chars_to_skip
1722 def read(self
, n
=None):
1725 decoder
= self
._decoder
or self
._get
_decoder
()
1728 result
= (self
._get
_decoded
_chars
() +
1729 decoder
.decode(self
.buffer.read(), final
=True))
1730 self
._set
_decoded
_chars
('')
1731 self
._snapshot
= None
1734 # Keep reading chunks until we have n characters to return.
1736 result
= self
._get
_decoded
_chars
(n
)
1737 while len(result
) < n
and not eof
:
1738 eof
= not self
._read
_chunk
()
1739 result
+= self
._get
_decoded
_chars
(n
- len(result
))
1743 self
._telling
= False
1744 line
= self
.readline()
1746 self
._snapshot
= None
1747 self
._telling
= self
._seekable
1751 def readline(self
, limit
=None):
1753 raise ValueError("read from closed file")
1756 if not isinstance(limit
, (int, long)):
1757 raise TypeError("limit must be an integer")
1759 # Grab all the decoded text (we will rewind any extra bits later).
1760 line
= self
._get
_decoded
_chars
()
1763 decoder
= self
._decoder
or self
._get
_decoder
()
1767 if self
._readtranslate
:
1768 # Newlines are already translated, only search for \n
1769 pos
= line
.find('\n', start
)
1776 elif self
._readuniversal
:
1777 # Universal newline search. Find any of \r, \r\n, \n
1778 # The decoder ensures that \r\n are not split in two pieces
1780 # In C we'd look for these in parallel of course.
1781 nlpos
= line
.find("\n", start
)
1782 crpos
= line
.find("\r", start
)
1799 elif nlpos
== crpos
+ 1:
1809 pos
= line
.find(self
._readnl
)
1811 endpos
= pos
+ len(self
._readnl
)
1814 if limit
>= 0 and len(line
) >= limit
:
1815 endpos
= limit
# reached length limit
1818 # No line ending seen yet - get more data
1820 while self
._read
_chunk
():
1821 if self
._decoded
_chars
:
1823 if self
._decoded
_chars
:
1824 line
+= self
._get
_decoded
_chars
()
1827 self
._set
_decoded
_chars
('')
1828 self
._snapshot
= None
1831 if limit
>= 0 and endpos
> limit
:
1832 endpos
= limit
# don't exceed limit
1834 # Rewind _decoded_chars to just after the line ending we found.
1835 self
._rewind
_decoded
_chars
(len(line
) - endpos
)
1836 return line
[:endpos
]
1840 return self
._decoder
.newlines
if self
._decoder
else None
class StringIO(TextIOWrapper):

    """An in-memory stream for text. The initial_value argument sets the
    value of object. The other arguments are like those of TextIOWrapper's
    constructor.
    """

    def __init__(self, initial_value="", encoding="utf-8",
                 errors="strict", newline="\n"):
        super(StringIO, self).__init__(BytesIO(),
                                       encoding=encoding,
                                       errors=errors,
                                       newline=newline)
        if initial_value:
            if not isinstance(initial_value, unicode):
                initial_value = unicode(initial_value)
            self.write(initial_value)
            # Leave the stream positioned at the start, like a fresh file.
            self.seek(0)

    def getvalue(self):
        """Return the entire contents of the stream as a text string."""
        self.flush()
        return self.buffer.getvalue().decode(self._encoding, self._errors)