1 // Copyright 2011 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
5 // Package csv reads and writes comma-separated values (CSV) files.
6 // There are many kinds of CSV files; this package supports the format
7 // described in RFC 4180.
9 // A csv file contains zero or more records of one or more fields per record.
10 // Each record is separated by the newline character. The final record may
11 // optionally be followed by a newline character.
13 // field1,field2,field3
15 // White space is considered part of a field.
17 // Carriage returns before newline characters are silently removed.
19 // Blank lines are ignored. A line with only whitespace characters (excluding
20 // the ending newline character) is not considered a blank line.
// Fields which start and stop with the quote character " are called
// quoted-fields. The beginning and ending quote are not part of the
// field.
//
// The source:
//
//	normal string,"quoted-field"
30 // results in the fields
32 // {`normal string`, `quoted-field`}
34 // Within a quoted-field a quote character followed by a second quote
35 // character is considered a single quote.
37 // "the ""word"" is true","a ""quoted-field"""
41 // {`the "word" is true`, `a "quoted-field"`}
// Newlines and commas may be included in a quoted-field
//
//	"Multi-line
//	field","comma is ,"
//
// results in the fields
//
//	{`Multi-line
//	field`, `comma is ,`}
64 // A ParseError is returned for parsing errors.
65 // Line numbers are 1-indexed and columns are 0-indexed.
66 type ParseError
struct {
67 StartLine
int // Line where the record starts
68 Line
int // Line where the error occurred
69 Column
int // Column (rune index) where the error occurred
70 Err error
// The actual error
73 func (e
*ParseError
) Error() string {
74 if e
.Err
== ErrFieldCount
{
75 return fmt
.Sprintf("record on line %d: %v", e
.Line
, e
.Err
)
77 if e
.StartLine
!= e
.Line
{
78 return fmt
.Sprintf("record on line %d; parse error on line %d, column %d: %v", e
.StartLine
, e
.Line
, e
.Column
, e
.Err
)
80 return fmt
.Sprintf("parse error on line %d, column %d: %v", e
.Line
, e
.Column
, e
.Err
)
83 // These are the errors that can be returned in ParseError.Err.
85 ErrTrailingComma
= errors
.New("extra delimiter at end of line") // Deprecated: No longer used.
86 ErrBareQuote
= errors
.New("bare \" in non-quoted-field")
87 ErrQuote
= errors
.New("extraneous or missing \" in quoted-field")
88 ErrFieldCount
= errors
.New("wrong number of fields")
91 var errInvalidDelim
= errors
.New("csv: invalid field or comment delimiter")
93 func validDelim(r rune
) bool {
94 return r
!= 0 && r
!= '"' && r
!= '\r' && r
!= '\n' && utf8
.ValidRune(r
) && r
!= utf8
.RuneError
97 // A Reader reads records from a CSV-encoded file.
99 // As returned by NewReader, a Reader expects input conforming to RFC 4180.
100 // The exported fields can be changed to customize the details before the
101 // first call to Read or ReadAll.
103 // The Reader converts all \r\n sequences in its input to plain \n,
104 // including in multiline field values, so that the returned data does
105 // not depend on which line-ending convention an input file uses.
107 // Comma is the field delimiter.
108 // It is set to comma (',') by NewReader.
109 // Comma must be a valid rune and must not be \r, \n,
110 // or the Unicode replacement character (0xFFFD).
113 // Comment, if not 0, is the comment character. Lines beginning with the
114 // Comment character without preceding whitespace are ignored.
115 // With leading whitespace the Comment character becomes part of the
116 // field, even if TrimLeadingSpace is true.
117 // Comment must be a valid rune and must not be \r, \n,
118 // or the Unicode replacement character (0xFFFD).
119 // It must also not be equal to Comma.
122 // FieldsPerRecord is the number of expected fields per record.
123 // If FieldsPerRecord is positive, Read requires each record to
124 // have the given number of fields. If FieldsPerRecord is 0, Read sets it to
125 // the number of fields in the first record, so that future records must
126 // have the same field count. If FieldsPerRecord is negative, no check is
127 // made and records may have a variable number of fields.
130 // If LazyQuotes is true, a quote may appear in an unquoted field and a
131 // non-doubled quote may appear in a quoted field.
134 // If TrimLeadingSpace is true, leading white space in a field is ignored.
135 // This is done even if the field delimiter, Comma, is white space.
136 TrimLeadingSpace
bool
138 // ReuseRecord controls whether calls to Read may return a slice sharing
139 // the backing array of the previous call's returned slice for performance.
140 // By default, each call to Read returns newly allocated memory owned by the caller.
143 TrailingComma
bool // Deprecated: No longer used.
147 // numLine is the current line being read in the CSV file.
150 // rawBuffer is a line buffer only used by the readLine method.
153 // recordBuffer holds the unescaped fields, one after another.
154 // The fields can be accessed by using the indexes in fieldIndexes.
155 // E.g., For the row `a,"b","c""d",e`, recordBuffer will contain `abc"de`
156 // and fieldIndexes will contain the indexes [1, 2, 5, 6].
159 // fieldIndexes is an index of fields inside recordBuffer.
160 // The i'th field ends at offset fieldIndexes[i] in recordBuffer.
163 // lastRecord is a record cache and only used when ReuseRecord == true.
167 // NewReader returns a new Reader that reads from r.
168 func NewReader(r io
.Reader
) *Reader
{
171 r
: bufio
.NewReader(r
),
175 // Read reads one record (a slice of fields) from r.
176 // If the record has an unexpected number of fields,
177 // Read returns the record along with the error ErrFieldCount.
178 // Except for that case, Read always returns either a non-nil
179 // record or a non-nil error, but not both.
180 // If there is no data left to be read, Read returns nil, io.EOF.
181 // If ReuseRecord is true, the returned slice may be shared
182 // between multiple calls to Read.
183 func (r
*Reader
) Read() (record
[]string, err error
) {
185 record
, err
= r
.readRecord(r
.lastRecord
)
186 r
.lastRecord
= record
188 record
, err
= r
.readRecord(nil)
193 // ReadAll reads all the remaining records from r.
194 // Each record is a slice of fields.
195 // A successful call returns err == nil, not err == io.EOF. Because ReadAll is
196 // defined to read until EOF, it does not treat end of file as an error to be
198 func (r
*Reader
) ReadAll() (records
[][]string, err error
) {
200 record
, err
:= r
.readRecord(nil)
207 records
= append(records
, record
)
211 // readLine reads the next line (with the trailing endline).
212 // If EOF is hit without a trailing endline, it will be omitted.
213 // If some bytes were read, then the error is never io.EOF.
214 // The result is only valid until the next call to readLine.
215 func (r
*Reader
) readLine() ([]byte, error
) {
216 line
, err
:= r
.r
.ReadSlice('\n')
217 if err
== bufio
.ErrBufferFull
{
218 r
.rawBuffer
= append(r
.rawBuffer
[:0], line
...)
219 for err
== bufio
.ErrBufferFull
{
220 line
, err
= r
.r
.ReadSlice('\n')
221 r
.rawBuffer
= append(r
.rawBuffer
, line
...)
225 if len(line
) > 0 && err
== io
.EOF
{
227 // For backwards compatibility, drop trailing \r before EOF.
228 if line
[len(line
)-1] == '\r' {
229 line
= line
[:len(line
)-1]
233 // Normalize \r\n to \n on all input lines.
234 if n
:= len(line
); n
>= 2 && line
[n
-2] == '\r' && line
[n
-1] == '\n' {
241 // lengthNL reports the number of bytes for the trailing \n.
242 func lengthNL(b
[]byte) int {
243 if len(b
) > 0 && b
[len(b
)-1] == '\n' {
249 // nextRune returns the next rune in b or utf8.RuneError.
250 func nextRune(b
[]byte) rune
{
251 r
, _
:= utf8
.DecodeRune(b
)
255 func (r
*Reader
) readRecord(dst
[]string) ([]string, error
) {
256 if r
.Comma
== r
.Comment ||
!validDelim(r
.Comma
) ||
(r
.Comment
!= 0 && !validDelim(r
.Comment
)) {
257 return nil, errInvalidDelim
260 // Read line (automatically skipping past empty lines and any comments).
261 var line
, fullLine
[]byte
264 line
, errRead
= r
.readLine()
265 if r
.Comment
!= 0 && nextRune(line
) == r
.Comment
{
267 continue // Skip comment lines
269 if errRead
== nil && len(line
) == lengthNL(line
) {
271 continue // Skip empty lines
276 if errRead
== io
.EOF
{
280 // Parse each field in the record.
282 const quoteLen
= len(`"`)
283 commaLen
:= utf8
.RuneLen(r
.Comma
)
284 recLine
:= r
.numLine
// Starting line for record
285 r
.recordBuffer
= r
.recordBuffer
[:0]
286 r
.fieldIndexes
= r
.fieldIndexes
[:0]
289 if r
.TrimLeadingSpace
{
290 line
= bytes
.TrimLeftFunc(line
, unicode
.IsSpace
)
292 if len(line
) == 0 || line
[0] != '"' {
293 // Non-quoted string field
294 i
:= bytes
.IndexRune(line
, r
.Comma
)
299 field
= field
[:len(field
)-lengthNL(field
)]
301 // Check to make sure a quote does not appear in field.
303 if j
:= bytes
.IndexByte(field
, '"'); j
>= 0 {
304 col
:= utf8
.RuneCount(fullLine
[:len(fullLine
)-len(line
[j
:])])
305 err
= &ParseError
{StartLine
: recLine
, Line
: r
.numLine
, Column
: col
, Err
: ErrBareQuote
}
309 r
.recordBuffer
= append(r
.recordBuffer
, field
...)
310 r
.fieldIndexes
= append(r
.fieldIndexes
, len(r
.recordBuffer
))
312 line
= line
[i
+commaLen
:]
317 // Quoted string field
318 line
= line
[quoteLen
:]
320 i
:= bytes
.IndexByte(line
, '"')
323 r
.recordBuffer
= append(r
.recordBuffer
, line
[:i
]...)
324 line
= line
[i
+quoteLen
:]
325 switch rn
:= nextRune(line
); {
327 // `""` sequence (append quote).
328 r
.recordBuffer
= append(r
.recordBuffer
, '"')
329 line
= line
[quoteLen
:]
331 // `",` sequence (end of field).
332 line
= line
[commaLen
:]
333 r
.fieldIndexes
= append(r
.fieldIndexes
, len(r
.recordBuffer
))
335 case lengthNL(line
) == len(line
):
336 // `"\n` sequence (end of line).
337 r
.fieldIndexes
= append(r
.fieldIndexes
, len(r
.recordBuffer
))
340 // `"` sequence (bare quote).
341 r
.recordBuffer
= append(r
.recordBuffer
, '"')
343 // `"*` sequence (invalid non-escaped quote).
344 col
:= utf8
.RuneCount(fullLine
[:len(fullLine
)-len(line
)-quoteLen
])
345 err
= &ParseError
{StartLine
: recLine
, Line
: r
.numLine
, Column
: col
, Err
: ErrQuote
}
348 } else if len(line
) > 0 {
349 // Hit end of line (copy all data so far).
350 r
.recordBuffer
= append(r
.recordBuffer
, line
...)
354 line
, errRead
= r
.readLine()
355 if errRead
== io
.EOF
{
360 // Abrupt end of file (EOF or error).
361 if !r
.LazyQuotes
&& errRead
== nil {
362 col
:= utf8
.RuneCount(fullLine
)
363 err
= &ParseError
{StartLine
: recLine
, Line
: r
.numLine
, Column
: col
, Err
: ErrQuote
}
366 r
.fieldIndexes
= append(r
.fieldIndexes
, len(r
.recordBuffer
))
376 // Create a single string and create slices out of it.
377 // This pins the memory of the fields together, but allocates once.
378 str
:= string(r
.recordBuffer
) // Convert to string once to batch allocations
380 if cap(dst
) < len(r
.fieldIndexes
) {
381 dst
= make([]string, len(r
.fieldIndexes
))
383 dst
= dst
[:len(r
.fieldIndexes
)]
385 for i
, idx
:= range r
.fieldIndexes
{
386 dst
[i
] = str
[preIdx
:idx
]
390 // Check or update the expected fields per record.
391 if r
.FieldsPerRecord
> 0 {
392 if len(dst
) != r
.FieldsPerRecord
&& err
== nil {
393 err
= &ParseError
{StartLine
: recLine
, Line
: recLine
, Err
: ErrFieldCount
}
395 } else if r
.FieldsPerRecord
== 0 {
396 r
.FieldsPerRecord
= len(dst
)