/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*-
 * vim: set ts=8 sts=2 et sw=2 tw=80:
 * This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "vm/Compression.h"

#include "mozilla/DebugOnly.h"
#include "mozilla/MemoryChecking.h"
#include "mozilla/PodOperations.h"
#include "mozilla/ScopeExit.h"

#include "js/Utility.h"
#include "util/Memory.h"

using namespace js;

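// Allocation hooks handed to zlib below (via z_stream::zalloc/zfree) so that
// zlib's internal allocations go through the SpiderMonkey heap functions.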
static void* zlib_alloc(void* cx, uInt items, uInt size) {
  return js_calloc(items, size);
}

static void zlib_free(void* cx, void* addr) { js_free(addr); }

Compressor::Compressor(const unsigned char* inp, size_t inplen)
    : inp(inp),
      inplen(inplen),
      initialized(false),
      finished(false),
      currentChunkSize(0) {
  MOZ_ASSERT(inplen > 0, "data to compress can't be empty");

  zs.opaque = nullptr;
  zs.next_in = (Bytef*)inp;
  zs.avail_in = 0;
  zs.next_out = nullptr;
  zs.avail_out = 0;
  zs.zalloc = zlib_alloc;
  zs.zfree = zlib_free;
  zs.total_in = 0;
  zs.total_out = 0;
  zs.msg = nullptr;
  zs.state = nullptr;
  zs.data_type = 0;
  zs.adler = 0;
  zs.reserved = 0;

  // Reserve space for the CompressedDataHeader.
  outbytes = sizeof(CompressedDataHeader);
}

Compressor::~Compressor() {
  if (initialized) {
    int ret = deflateEnd(&zs);
    if (ret != Z_OK) {
      // If we finished early, we can get a Z_DATA_ERROR.
      MOZ_ASSERT(ret == Z_DATA_ERROR);
      MOZ_ASSERT(!finished);
    }
  }
}

// According to the zlib docs, the default value for windowBits is 15. Passing
// -15 is treated the same, but it also forces 'raw deflate' (no zlib header or
// trailer). Raw deflate is necessary for chunked decompression.
static const int WindowBits = -15;

bool Compressor::init() {
  if (inplen >= UINT32_MAX) {
    return false;
  }
  // zlib is slow and we'd rather be done with compression sooner, even if it
  // means decompression is slower, which penalizes Function.toString().
  int ret = deflateInit2(&zs, Z_BEST_SPEED, Z_DEFLATED, WindowBits, 8,
                         Z_DEFAULT_STRATEGY);
  if (ret != Z_OK) {
    MOZ_ASSERT(ret == Z_MEM_ERROR);
    return false;
  }
  initialized = true;
  return true;
}

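// (Re)point the deflate stream at the caller's output buffer. The first
// `outbytes` bytes are skipped so that the reserved header space and any
// output already produced are preserved when the caller supplies a larger
// buffer after a MOREOUTPUT result.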
void Compressor::setOutput(unsigned char* out, size_t outlen) {
  MOZ_ASSERT(outlen > outbytes);
  zs.next_out = out + outbytes;
  zs.avail_out = outlen - outbytes;
}

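// Compress another slice of the input. At most MAX_INPUT_SIZE bytes are
// handed to zlib per call (z_stream::avail_in is a uInt, not a size_t), and
// the input is carved into CHUNK_SIZE-sized chunks: every chunk boundary is
// deflated with Z_FULL_FLUSH (Z_FINISH for the final chunk), which flushes
// and byte-aligns the output and resets the compression state so that each
// chunk can later be inflated independently by js::DecompressStringChunk.
// The compressed end offset of each chunk is appended to chunkOffsets.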
Compressor::Status Compressor::compressMore() {
  MOZ_ASSERT(zs.next_out);
  uInt left = inplen - (zs.next_in - inp);
  if (left <= MAX_INPUT_SIZE) {
    zs.avail_in = left;
  } else if (zs.avail_in == 0) {
    zs.avail_in = MAX_INPUT_SIZE;
  }

  // Finish the current chunk if needed.
  bool flush = false;
  MOZ_ASSERT(currentChunkSize <= CHUNK_SIZE);
  if (currentChunkSize + zs.avail_in >= CHUNK_SIZE) {
    // Adjust avail_in, so we don't get chunks that are larger than
    // CHUNK_SIZE.
    zs.avail_in = CHUNK_SIZE - currentChunkSize;
    MOZ_ASSERT(currentChunkSize + zs.avail_in == CHUNK_SIZE);
    flush = true;
  }

  MOZ_ASSERT(zs.avail_in <= left);
  bool done = zs.avail_in == left;

  Bytef* oldin = zs.next_in;
  Bytef* oldout = zs.next_out;
  int ret = deflate(&zs, done ? Z_FINISH : (flush ? Z_FULL_FLUSH : Z_NO_FLUSH));
  outbytes += zs.next_out - oldout;
  currentChunkSize += zs.next_in - oldin;
  MOZ_ASSERT(currentChunkSize <= CHUNK_SIZE);

  if (ret == Z_MEM_ERROR) {
    zs.avail_out = 0;
    return OOM;
  }
  if (ret == Z_BUF_ERROR || (ret == Z_OK && zs.avail_out == 0)) {
    // We have to resize the output buffer. Note that we're not done yet
    // because ret != Z_STREAM_END.
    MOZ_ASSERT(zs.avail_out == 0);
    return MOREOUTPUT;
  }

  if (done || currentChunkSize == CHUNK_SIZE) {
    MOZ_ASSERT_IF(!done, flush);
    MOZ_ASSERT(chunkSize(inplen, chunkOffsets.length()) == currentChunkSize);
    if (!chunkOffsets.append(outbytes)) {
      return OOM;
    }
    currentChunkSize = 0;
    MOZ_ASSERT_IF(done, chunkOffsets.length() == (inplen - 1) / CHUNK_SIZE + 1);
  }

  MOZ_ASSERT_IF(!done, ret == Z_OK);
  MOZ_ASSERT_IF(done, ret == Z_STREAM_END);
  return done ? DONE : CONTINUE;
}

size_t Compressor::totalBytesNeeded() const {
  return AlignBytes(outbytes, sizeof(uint32_t)) + sizeOfChunkOffsets();
}

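// finish() completes the buffer with the following layout:
//
//   | CompressedDataHeader | deflate output | zero padding | uint32_t chunkOffsets[] |
//   ^ dest                                  ^ dest+outbytes ^ dest+outbytesAligned
//
// The header stores the total compressed size (header included), the padding
// keeps the chunk-offset table uint32_t-aligned (and is zeroed so hashing the
// buffer is deterministic), and the offset table is what DecompressStringChunk
// uses to find each chunk's compressed byte range.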
void Compressor::finish(char* dest, size_t destBytes) {
  MOZ_ASSERT(!chunkOffsets.empty());

  CompressedDataHeader* compressedHeader =
      reinterpret_cast<CompressedDataHeader*>(dest);
  compressedHeader->compressedBytes = outbytes;

  size_t outbytesAligned = AlignBytes(outbytes, sizeof(uint32_t));

  // Zero the padding bytes; the ImmutableStringsCache will hash them.
  mozilla::PodZero(dest + outbytes, outbytesAligned - outbytes);

  uint32_t* destArr = reinterpret_cast<uint32_t*>(dest + outbytesAligned);

  MOZ_ASSERT(uintptr_t(dest + destBytes) ==
             uintptr_t(destArr + chunkOffsets.length()));
  mozilla::PodCopy(destArr, chunkOffsets.begin(), chunkOffsets.length());

  finished = true;
}

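// An illustrative sketch (not compiled) of how a caller might drive the
// Compressor API above. The initial capacity, the buffer-growth policy, and
// the `bytes`/`buffer`/`capacity` names are assumptions for illustration, not
// something this file prescribes:
//
//   Compressor comp(bytes, nbytes);
//   if (!comp.init()) {
//     return false;  // input too large, or zlib ran out of memory
//   }
//   comp.setOutput(buffer, capacity);  // capacity > sizeof(CompressedDataHeader)
//   while (true) {
//     Compressor::Status status = comp.compressMore();
//     if (status == Compressor::CONTINUE) {
//       continue;
//     }
//     if (status == Compressor::MOREOUTPUT) {
//       // Grow `buffer` (keeping its contents), e.g. by doubling `capacity`,
//       // then point the compressor at the larger buffer.
//       comp.setOutput(buffer, capacity);
//       continue;
//     }
//     if (status == Compressor::OOM) {
//       return false;
//     }
//     MOZ_ASSERT(status == Compressor::DONE);
//     break;
//   }
//   // Resize `buffer` to exactly totalBytesNeeded() and let finish() write
//   // the header and the chunk-offset table; the deflate output is already
//   // in place.
//   size_t totalBytes = comp.totalBytesNeeded();
//   comp.finish(reinterpret_cast<char*>(buffer), totalBytes);
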
bool js::DecompressString(const unsigned char* inp, size_t inplen,
                          unsigned char* out, size_t outlen) {
  MOZ_ASSERT(inplen <= UINT32_MAX);

  // Mark the memory we pass to zlib as initialized for MSan.
  MOZ_MAKE_MEM_DEFINED(out, outlen);

  z_stream zs;
  zs.zalloc = zlib_alloc;
  zs.zfree = zlib_free;
  zs.opaque = nullptr;
  zs.next_in = (Bytef*)inp;
  zs.avail_in = inplen;
  zs.next_out = out;
  MOZ_ASSERT(outlen);
  zs.avail_out = outlen;
  int ret = inflateInit(&zs);
  if (ret != Z_OK) {
    MOZ_ASSERT(ret == Z_MEM_ERROR);
    return false;
  }
  ret = inflate(&zs, Z_FINISH);
  MOZ_ASSERT(ret == Z_STREAM_END);
  ret = inflateEnd(&zs);
  MOZ_ASSERT(ret == Z_OK);
  return true;
}

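// Inflate a single chunk produced by Compressor. Because each chunk boundary
// was flushed with Z_FULL_FLUSH into a raw deflate stream, chunk `chunk` can
// be inflated on its own from the compressed byte range [start, end), where
//
//   start = (chunk == 0) ? sizeof(CompressedDataHeader) : offsets[chunk - 1]
//   end   = offsets[chunk]
//
// and `offsets` is the uint32_t table finish() placed after the (aligned)
// compressed bytes. Chunk `chunk` decompresses to `outlen` bytes starting at
// byte chunk * CHUNK_SIZE of the original data.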
bool js::DecompressStringChunk(const unsigned char* inp, size_t chunk,
                               unsigned char* out, size_t outlen) {
  MOZ_ASSERT(outlen <= Compressor::CHUNK_SIZE);

  const CompressedDataHeader* header =
      reinterpret_cast<const CompressedDataHeader*>(inp);

  size_t compressedBytes = header->compressedBytes;
  size_t compressedBytesAligned = AlignBytes(compressedBytes, sizeof(uint32_t));

  const unsigned char* offsetBytes = inp + compressedBytesAligned;
  const uint32_t* offsets = reinterpret_cast<const uint32_t*>(offsetBytes);

  uint32_t compressedStart =
      chunk > 0 ? offsets[chunk - 1] : sizeof(CompressedDataHeader);
  uint32_t compressedEnd = offsets[chunk];

  MOZ_ASSERT(compressedStart < compressedEnd);
  MOZ_ASSERT(compressedEnd <= compressedBytes);

  bool lastChunk = compressedEnd == compressedBytes;

  // Mark the memory we pass to zlib as initialized for MSan.
  MOZ_MAKE_MEM_DEFINED(out, outlen);

  z_stream zs;
  zs.zalloc = zlib_alloc;
  zs.zfree = zlib_free;
  zs.opaque = nullptr;
  zs.next_in = (Bytef*)(inp + compressedStart);
  zs.avail_in = compressedEnd - compressedStart;
  zs.next_out = out;
  MOZ_ASSERT(outlen);
  zs.avail_out = outlen;

  // Bug 1505857 - Use 'volatile' so variable is preserved in crashdump
  // when release-asserts below are tripped.
  volatile int ret = inflateInit2(&zs, WindowBits);
  if (ret != Z_OK) {
    MOZ_ASSERT(ret == Z_MEM_ERROR);
    return false;
  }

  auto autoCleanup = mozilla::MakeScopeExit([&] {
    mozilla::DebugOnly<int> ret = inflateEnd(&zs);
    MOZ_ASSERT(ret == Z_OK);
  });

  if (lastChunk) {
    ret = inflate(&zs, Z_FINISH);
    MOZ_RELEASE_ASSERT(ret == Z_STREAM_END);
  } else {
    ret = inflate(&zs, Z_NO_FLUSH);
    if (ret == Z_MEM_ERROR) {
      return false;
    }
    MOZ_RELEASE_ASSERT(ret == Z_OK);
  }

  MOZ_ASSERT(zs.avail_in == 0);
  MOZ_ASSERT(zs.avail_out == 0);
  return true;
}
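
// An illustrative sketch (not compiled) of decompressing chunk `i` of an
// original string of `srcLen` bytes; `compressed`, `srcLen`, `i`, and `buf`
// are hypothetical names supplied by the caller:
//
//   size_t chunkStart = i * Compressor::CHUNK_SIZE;
//   size_t chunkLen =
//       std::min(srcLen - chunkStart, size_t(Compressor::CHUNK_SIZE));
//   if (!js::DecompressStringChunk(compressed, i, buf, chunkLen)) {
//     return false;  // zlib ran out of memory
//   }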