// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Tests that involve both reading and writing.

package zip

import (
	"bytes"
	"fmt"
	"hash"
	"io"
	"io/ioutil"
	"sort"
	"strings"
	"testing"
	"time"
)

func TestOver65kFiles(t *testing.T) {
	buf := new(bytes.Buffer)
	w := NewWriter(buf)
	const nFiles = (1 << 16) + 42
	for i := 0; i < nFiles; i++ {
		_, err := w.CreateHeader(&FileHeader{
			Name:   fmt.Sprintf("%d.dat", i),
			Method: Store, // avoid Issue 6136 and Issue 6138
		})
		if err != nil {
			t.Fatalf("creating file %d: %v", i, err)
		}
	}
	if err := w.Close(); err != nil {
		t.Fatalf("Writer.Close: %v", err)
	}
	s := buf.String()
	zr, err := NewReader(strings.NewReader(s), int64(len(s)))
	if err != nil {
		t.Fatalf("NewReader: %v", err)
	}
	if got := len(zr.File); got != nFiles {
		t.Fatalf("File contains %d files, want %d", got, nFiles)
	}
	for i := 0; i < nFiles; i++ {
		want := fmt.Sprintf("%d.dat", i)
		if zr.File[i].Name != want {
			t.Fatalf("File(%d) = %q, want %q", i, zr.File[i].Name, want)
		}
	}
}

func TestModTime(t *testing.T) {
	var testTime = time.Date(2009, time.November, 10, 23, 45, 58, 0, time.UTC)
	fh := new(FileHeader)
	fh.SetModTime(testTime)
	outTime := fh.ModTime()
	if !outTime.Equal(testTime) {
		t.Errorf("times don't match: got %s, want %s", outTime, testTime)
	}
}

func testHeaderRoundTrip(fh *FileHeader, wantUncompressedSize uint32, wantUncompressedSize64 uint64, t *testing.T) {
	fi := fh.FileInfo()
	fh2, err := FileInfoHeader(fi)
	if err != nil {
		t.Fatal(err)
	}
	if got, want := fh2.Name, fh.Name; got != want {
		t.Errorf("Name: got %s, want %s\n", got, want)
	}
	if got, want := fh2.UncompressedSize, wantUncompressedSize; got != want {
		t.Errorf("UncompressedSize: got %d, want %d\n", got, want)
	}
	if got, want := fh2.UncompressedSize64, wantUncompressedSize64; got != want {
		t.Errorf("UncompressedSize64: got %d, want %d\n", got, want)
	}
	if got, want := fh2.ModifiedTime, fh.ModifiedTime; got != want {
		t.Errorf("ModifiedTime: got %d, want %d\n", got, want)
	}
	if got, want := fh2.ModifiedDate, fh.ModifiedDate; got != want {
		t.Errorf("ModifiedDate: got %d, want %d\n", got, want)
	}

	if sysfh, ok := fi.Sys().(*FileHeader); !ok && sysfh != fh {
		t.Errorf("Sys didn't return original *FileHeader")
	}
}

func TestFileHeaderRoundTrip(t *testing.T) {
	fh := &FileHeader{
		Name:             "foo.txt",
		UncompressedSize: 987654321,
		ModifiedTime:     1234,
		ModifiedDate:     5678,
	}
	testHeaderRoundTrip(fh, fh.UncompressedSize, uint64(fh.UncompressedSize), t)
}

func TestFileHeaderRoundTrip64(t *testing.T) {
	fh := &FileHeader{
		Name:               "foo.txt",
		UncompressedSize64: 9876543210,
		ModifiedTime:       1234,
		ModifiedDate:       5678,
	}
	testHeaderRoundTrip(fh, uint32max, fh.UncompressedSize64, t)
}

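// repeatedByte is a single run within an rleBuffer: n copies of byte b
// starting at offset off.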
type repeatedByte struct {
	off int64
	b   byte
	n   int64
}

// rleBuffer is a run-length-encoded byte buffer.
// It's an io.Writer (like a bytes.Buffer) and also an io.ReaderAt,
// allowing random-access reads.
type rleBuffer struct {
	buf []repeatedByte
}

func (r *rleBuffer) Size() int64 {
	if len(r.buf) == 0 {
		return 0
	}
	last := &r.buf[len(r.buf)-1]
	return last.off + last.n
}

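// Write appends p to the buffer, extending the last run where consecutive
// bytes repeat and starting new runs where they change.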
func (r *rleBuffer) Write(p []byte) (n int, err error) {
	var rp *repeatedByte
	if len(r.buf) > 0 {
		rp = &r.buf[len(r.buf)-1]
		// Fast path, if p is entirely the same byte repeated.
		if lastByte := rp.b; len(p) > 0 && p[0] == lastByte {
			all := true
			for _, b := range p {
				if b != lastByte {
					all = false
					break
				}
			}
			if all {
				rp.n += int64(len(p))
				return len(p), nil
			}
		}
	}

	for _, b := range p {
		if rp == nil || rp.b != b {
			r.buf = append(r.buf, repeatedByte{r.Size(), b, 1})
			rp = &r.buf[len(r.buf)-1]
		} else {
			rp.n++
		}
	}
	return len(p), nil
}

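// ReadAt fills p with the bytes stored at offset off, walking the runs
// that cover that range. It reports io.ErrUnexpectedEOF if the buffer
// ends before p is full.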
func (r *rleBuffer) ReadAt(p []byte, off int64) (n int, err error) {
	if len(p) == 0 {
		return
	}
	skipParts := sort.Search(len(r.buf), func(i int) bool {
		part := &r.buf[i]
		return part.off+part.n > off
	})
	parts := r.buf[skipParts:]
	if len(parts) > 0 {
		skipBytes := off - parts[0].off
		for len(parts) > 0 {
			part := parts[0]
			for i := skipBytes; i < part.n; i++ {
				if n == len(p) {
					return
				}
				p[n] = part.b
				n++
			}
			parts = parts[1:]
			skipBytes = 0
		}
	}
	if n != len(p) {
		err = io.ErrUnexpectedEOF
	}
	return
}

// Just testing the rleBuffer used in the Zip64 test below. Not used by the zip code.
func TestRLEBuffer(t *testing.T) {
	b := new(rleBuffer)
	var all []byte
	writes := []string{"abcdeee", "eeeeeee", "eeeefghaaiii"}
	for _, w := range writes {
		b.Write([]byte(w))
		all = append(all, w...)
	}
	if len(b.buf) != 10 {
		t.Fatalf("len(b.buf) = %d; want 10", len(b.buf))
	}

	for i := 0; i < len(all); i++ {
		for j := 0; j < len(all)-i; j++ {
			buf := make([]byte, j)
			n, err := b.ReadAt(buf, int64(i))
			if err != nil || n != len(buf) {
				t.Errorf("ReadAt(%d, %d) = %d, %v; want %d, nil", i, j, n, err, len(buf))
			}
			if !bytes.Equal(buf, all[i:i+j]) {
				t.Errorf("ReadAt(%d, %d) = %q; want %q", i, j, buf, all[i:i+j])
			}
		}
	}
}

// fakeHash32 is a dummy Hash32 that always returns 0.
type fakeHash32 struct {
	hash.Hash32
}

func (fakeHash32) Write(p []byte) (int, error) { return len(p), nil }
func (fakeHash32) Sum32() uint32               { return 0 }

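// TestZip64 writes and reads back an archive holding a single stored file
// slightly larger than 4 GB, exercising the zip64 size fields. It is
// skipped in -short mode.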
func TestZip64(t *testing.T) {
	if testing.Short() {
		t.Skip("slow test; skipping")
	}
	const size = 1 << 32 // before the "END\n" part
	testZip64(t, size)
}

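// testZip64 writes size bytes plus "END\n" into a stored entry and reads
// them back. The run-length-encoded rleBuffer and the no-op fakeHash32
// keep the test fast despite the amount of data involved.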
func testZip64(t testing.TB, size int64) {
	const chunkSize = 1024
	chunks := int(size / chunkSize)
	// write 2^32 bytes plus "END\n" to a zip file
	buf := new(rleBuffer)
	w := NewWriter(buf)
	f, err := w.CreateHeader(&FileHeader{
		Name:   "huge.txt",
		Method: Store,
	})
	if err != nil {
		t.Fatal(err)
	}
	f.(*fileWriter).crc32 = fakeHash32{}
	chunk := make([]byte, chunkSize)
	for i := range chunk {
		chunk[i] = '.'
	}
	for i := 0; i < chunks; i++ {
		_, err := f.Write(chunk)
		if err != nil {
			t.Fatal("write chunk:", err)
		}
	}
	end := []byte("END\n")
	_, err = f.Write(end)
	if err != nil {
		t.Fatal("write end:", err)
	}
	if err := w.Close(); err != nil {
		t.Fatal(err)
	}

	// read back zip file and check that we get to the end of it
	r, err := NewReader(buf, int64(buf.Size()))
	if err != nil {
		t.Fatal("reader:", err)
	}
	f0 := r.File[0]
	rc, err := f0.Open()
	if err != nil {
		t.Fatal("opening:", err)
	}
	rc.(*checksumReader).hash = fakeHash32{}
	for i := 0; i < chunks; i++ {
		_, err := io.ReadFull(rc, chunk)
		if err != nil {
			t.Fatal("read:", err)
		}
	}
	gotEnd, err := ioutil.ReadAll(rc)
	if err != nil {
		t.Fatal("read end:", err)
	}
	if !bytes.Equal(gotEnd, end) {
		t.Errorf("End of zip64 archive %q, want %q", gotEnd, end)
	}
	err = rc.Close()
	if err != nil {
		t.Fatal("closing:", err)
	}
	if size == 1<<32 {
		if got, want := f0.UncompressedSize, uint32(uint32max); got != want {
			t.Errorf("UncompressedSize %d, want %d", got, want)
		}
	}

	if got, want := f0.UncompressedSize64, uint64(size)+uint64(len(end)); got != want {
		t.Errorf("UncompressedSize64 %d, want %d", got, want)
	}
}

func testInvalidHeader(h *FileHeader, t *testing.T) {
	var buf bytes.Buffer
	z := NewWriter(&buf)

	f, err := z.CreateHeader(h)
	if err != nil {
		t.Fatalf("error creating header: %v", err)
	}
	if _, err := f.Write([]byte("hi")); err != nil {
		t.Fatalf("error writing content: %v", err)
	}
	if err := z.Close(); err != nil {
		t.Fatalf("error closing zip writer: %v", err)
	}

	b := buf.Bytes()
	if _, err = NewReader(bytes.NewReader(b), int64(len(b))); err != ErrFormat {
		t.Fatalf("got %v, expected ErrFormat", err)
	}
}

func testValidHeader(h *FileHeader, t *testing.T) {
	var buf bytes.Buffer
	z := NewWriter(&buf)

	f, err := z.CreateHeader(h)
	if err != nil {
		t.Fatalf("error creating header: %v", err)
	}
	if _, err := f.Write([]byte("hi")); err != nil {
		t.Fatalf("error writing content: %v", err)
	}
	if err := z.Close(); err != nil {
		t.Fatalf("error closing zip writer: %v", err)
	}

	b := buf.Bytes()
	if _, err = NewReader(bytes.NewReader(b), int64(len(b))); err != nil {
		t.Fatalf("got %v, expected nil", err)
	}
}

// Issue 4302.
func TestHeaderInvalidTagAndSize(t *testing.T) {
	const timeFormat = "20060102T150405.000.txt"

	ts := time.Now()
	filename := ts.Format(timeFormat)

	h := FileHeader{
		Name:   filename,
		Method: Deflate,
		Extra:  []byte(ts.Format(time.RFC3339Nano)), // missing tag and len
	}
	h.SetModTime(ts)

	testInvalidHeader(&h, t)
}

func TestHeaderTooShort(t *testing.T) {
	h := FileHeader{
		Name:   "foo.txt",
		Method: Deflate,
		Extra:  []byte{zip64ExtraId}, // missing size
	}
	testInvalidHeader(&h, t)
}

// Issue 4393. It is valid to have an extra data header
// which contains no body.
func TestZeroLengthHeader(t *testing.T) {
	h := FileHeader{
		Name:   "extadata.txt",
		Method: Deflate,
		Extra: []byte{
			85, 84, 5, 0, 3, 154, 144, 195, 77, // tag 21589 size 5
			85, 120, 0, 0, // tag 30805 size 0
		},
	}
	testValidHeader(&h, t)
}

// Just benchmarking how fast the Zip64 test above is. Not related to
// our zip performance, since the test above disabled CRC32 and flate.
func BenchmarkZip64Test(b *testing.B) {
	for i := 0; i < b.N; i++ {
		testZip64(b, 1<<26)
	}
}