/* mozglue/linker/Mappable.cpp */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
 * You can obtain one at http://mozilla.org/MPL/2.0/. */
#include <fcntl.h>
#include <unistd.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <cstring>
#include <cstdlib>
#include <cstdio>
#include "Mappable.h"
#ifdef ANDROID
#include <linux/ashmem.h>
#endif
#include "ElfLoader.h"
#include "SeekableZStream.h"
#include "Logging.h"
#ifndef PAGE_SIZE
#define PAGE_SIZE 4096
#endif

#ifndef PAGE_MASK
#define PAGE_MASK (~ (PAGE_SIZE - 1))
#endif
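/* Note: with the fallback 4096-byte PAGE_SIZE above, `addr & PAGE_MASK`
 * rounds an address down to its page boundary (e.g. 0x12345 & PAGE_MASK ==
 * 0x12000), and `length & ~PAGE_MASK` is the offset of `length` within its
 * page. Both idioms are used throughout this file. */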
Mappable *
MappableFile::Create(const char *path)
{
  int fd = open(path, O_RDONLY);
  if (fd != -1)
    return new MappableFile(fd);
  return NULL;
}
void *
MappableFile::mmap(const void *addr, size_t length, int prot, int flags,
                   off_t offset)
{
  MOZ_ASSERT(fd != -1);
  MOZ_ASSERT(!(flags & MAP_SHARED));
  flags |= MAP_PRIVATE;

  void *mapped = ::mmap(const_cast<void *>(addr), length, prot, flags,
                        fd, offset);
  if (mapped == MAP_FAILED)
    return mapped;

  /* Fill the remainder of the last page with zeroes when the requested
   * protection includes write bits. */
  if ((prot & PROT_WRITE) && (length & (PAGE_SIZE - 1))) {
    memset(reinterpret_cast<char *>(mapped) + length, 0,
           PAGE_SIZE - (length & ~(PAGE_MASK)));
  }
  return mapped;
}
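/* Why the zero-fill above matters: mappings are page-granular, so the bytes
 * of the last page past `length` would otherwise expose whatever follows in
 * the file, while ELF semantics expect the portion of .bss that shares a
 * page with .data to read as zero. */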
void
MappableFile::finalize()
{
  /* Close the file; equivalent to close(fd.forget()) */
  fd = -1;
}
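/* Note: mappings created from fd survive close(2), so dropping the
 * descriptor here is safe; finalize() appears to be the hook invoked once no
 * further mmap of this Mappable is going to happen. */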
Mappable *
MappableExtractFile::Create(const char *name, Zip *zip, Zip::Stream *stream)
{
  const char *cachePath = getenv("MOZ_LINKER_CACHE");
  if (!cachePath || !*cachePath) {
    log("Warning: MOZ_LINKER_EXTRACT is set, but not MOZ_LINKER_CACHE; "
        "not extracting");
    return NULL;
  }
  AutoDeleteArray<char> path = new char[strlen(cachePath) + strlen(name) + 2];
  sprintf(path, "%s/%s", cachePath, name);
  struct stat cacheStat;
  if (stat(path, &cacheStat) == 0) {
    struct stat zipStat;
    stat(zip->GetName(), &zipStat);
    if (cacheStat.st_mtime > zipStat.st_mtime) {
      debug("Reusing %s", static_cast<char *>(path));
      return MappableFile::Create(path);
    }
  }
  debug("Extracting to %s", static_cast<char *>(path));
  AutoCloseFD fd = open(path, O_TRUNC | O_RDWR | O_CREAT | O_NOATIME,
                        S_IRUSR | S_IWUSR);
  if (fd == -1) {
    log("Couldn't open %s to decompress library", path.get());
    return NULL;
  }
  AutoUnlinkFile file = path.forget();
  if (stream->GetType() == Zip::Stream::DEFLATE) {
    if (ftruncate(fd, stream->GetUncompressedSize()) == -1) {
      log("Couldn't ftruncate %s to decompress library", file.get());
      return NULL;
    }
    /* Map the temporary file for use as inflate buffer */
    MappedPtr buffer(::mmap(NULL, stream->GetUncompressedSize(), PROT_WRITE,
                            MAP_SHARED, fd, 0), stream->GetUncompressedSize());
    if (buffer == MAP_FAILED) {
      log("Couldn't map %s to decompress library", file.get());
      return NULL;
    }

    z_stream zStream = stream->GetZStream(buffer);

    /* Decompress */
    if (inflateInit2(&zStream, -MAX_WBITS) != Z_OK) {
      log("inflateInit failed: %s", zStream.msg);
      return NULL;
    }
    if (inflate(&zStream, Z_FINISH) != Z_STREAM_END) {
      log("inflate failed: %s", zStream.msg);
      return NULL;
    }
    if (inflateEnd(&zStream) != Z_OK) {
      log("inflateEnd failed: %s", zStream.msg);
      return NULL;
    }
    if (zStream.total_out != stream->GetUncompressedSize()) {
      log("File not fully uncompressed! %ld / %d", zStream.total_out,
          static_cast<unsigned int>(stream->GetUncompressedSize()));
      return NULL;
    }
  } else if (stream->GetType() == Zip::Stream::STORE) {
    SeekableZStream zStream;
    if (!zStream.Init(stream->GetBuffer())) {
      log("Couldn't initialize SeekableZStream for %s", name);
      return NULL;
    }
    if (ftruncate(fd, zStream.GetUncompressedSize()) == -1) {
      log("Couldn't ftruncate %s to decompress library", file.get());
      return NULL;
    }
    MappedPtr buffer(::mmap(NULL, zStream.GetUncompressedSize(), PROT_WRITE,
                            MAP_SHARED, fd, 0), zStream.GetUncompressedSize());
    if (buffer == MAP_FAILED) {
      log("Couldn't map %s to decompress library", file.get());
      return NULL;
    }

    if (!zStream.Decompress(buffer, 0, zStream.GetUncompressedSize())) {
      log("%s: failed to decompress", name);
      return NULL;
    }
  } else {
    return NULL;
  }

  return new MappableExtractFile(fd.forget(), file.forget());
}
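/* Usage sketch (cache path is an illustrative value; behavior inferred from
 * the messages above): running with
 *   MOZ_LINKER_EXTRACT=1 MOZ_LINKER_CACHE=/data/local/tmp/linker-cache
 * makes this extract `name` from the zip into $MOZ_LINKER_CACHE/<name>, and
 * reuse the extracted copy on later runs for as long as it remains newer
 * than the zip archive itself. */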
MappableExtractFile::~MappableExtractFile()
{
  /* When destroying from a forked process, we don't want the file to be
   * removed, as the main process is still using it. Although it doesn't
   * really matter much, keeping the file around also helps tools such as
   * valgrind. The string still needs to be delete[]d, though. */
  if (pid != getpid())
    delete [] path.forget();
}
/**
 * _MappableBuffer is a buffer whose content can be mapped at different
 * locations in the virtual address space.
 * On Linux, it uses a (deleted) temporary file on a tmpfs for sharable
 * content. On Android, it uses ashmem.
 */
class _MappableBuffer: public MappedPtr
{
public:
  /**
   * Returns a _MappableBuffer instance with the given name and the given
   * length.
   */
  static _MappableBuffer *Create(const char *name, size_t length)
  {
    AutoCloseFD fd;
#ifdef ANDROID
    /* On Android, initialize an ashmem region with the given length */
    fd = open("/" ASHMEM_NAME_DEF, O_RDWR, 0600);
    if (fd == -1)
      return NULL;
    char str[ASHMEM_NAME_LEN];
    strlcpy(str, name, sizeof(str));
    ioctl(fd, ASHMEM_SET_NAME, str);
    if (ioctl(fd, ASHMEM_SET_SIZE, length))
      return NULL;

    /* The Gecko crash reporter is confused by adjacent memory mappings of
     * the same file. On Android, subsequent mappings grow in memory
     * address, and chances are we're going to map from the same file
     * descriptor right away. Allocate one page more than requested so that
     * there is a gap between this mapping and the subsequent one. */
    void *buf = ::mmap(NULL, length + PAGE_SIZE, PROT_READ | PROT_WRITE,
                       MAP_SHARED, fd, 0);
    if (buf != MAP_FAILED) {
      /* Actually create the gap with anonymous memory */
      ::mmap(reinterpret_cast<char *>(buf) + ((length + PAGE_SIZE) & PAGE_MASK),
             PAGE_SIZE, PROT_NONE, MAP_FIXED | MAP_PRIVATE | MAP_ANONYMOUS,
             -1, 0);
      debug("Decompression buffer of size %zu in ashmem \"%s\", mapped @%p",
            length, str, buf);
      return new _MappableBuffer(fd.forget(), buf, length);
    }
#else
    /* On Linux, use /dev/shm as base directory for temporary files, assuming
     * it's on tmpfs */
    /* TODO: check that /dev/shm is tmpfs */
    char path[256];
    snprintf(path, sizeof(path), "/dev/shm/%s.XXXXXX", name);
    fd = mkstemp(path);
    if (fd == -1)
      return NULL;
    unlink(path);
    if (ftruncate(fd, length) == -1)
      return NULL;

    void *buf = ::mmap(NULL, length, PROT_READ | PROT_WRITE, MAP_SHARED,
                       fd, 0);
    if (buf != MAP_FAILED) {
      debug("Decompression buffer of size %zu in \"%s\", mapped @%p",
            length, path, buf);
      return new _MappableBuffer(fd.forget(), buf, length);
    }
#endif
    return NULL;
  }
  void *mmap(const void *addr, size_t length, int prot, int flags, off_t offset)
  {
    MOZ_ASSERT(fd != -1);
#ifdef ANDROID
    /* Mapping ashmem MAP_PRIVATE is like mapping anonymous memory, even when
     * there is content in the ashmem */
    if (flags & MAP_PRIVATE) {
      flags &= ~MAP_PRIVATE;
      flags |= MAP_SHARED;
    }
#endif
    return ::mmap(const_cast<void *>(addr), length, prot, flags, fd, offset);
  }
#ifdef ANDROID
  ~_MappableBuffer() {
    /* Free the additional page we allocated. See _MappableBuffer::Create.
     * Use the buffer address (*this), not the object address: pointer
     * arithmetic on `this` would scale by sizeof(_MappableBuffer) and
     * unmap the wrong page. */
    ::munmap(*this + ((GetLength() + PAGE_SIZE) & PAGE_MASK), PAGE_SIZE);
  }
#endif

private:
  _MappableBuffer(int fd, void *buf, size_t length)
  : MappedPtr(buf, length), fd(fd) { }

  /* File descriptor for the temporary file or ashmem */
  AutoCloseFD fd;
};
Mappable *
MappableDeflate::Create(const char *name, Zip *zip, Zip::Stream *stream)
{
  MOZ_ASSERT(stream->GetType() == Zip::Stream::DEFLATE);
  _MappableBuffer *buf = _MappableBuffer::Create(name,
                                                 stream->GetUncompressedSize());
  if (buf)
    return new MappableDeflate(buf, zip, stream);
  return NULL;
}

MappableDeflate::MappableDeflate(_MappableBuffer *buf, Zip *zip,
                                 Zip::Stream *stream)
: zip(zip), buffer(buf), zStream(stream->GetZStream(*buf)) { }

MappableDeflate::~MappableDeflate() { }
void *
MappableDeflate::mmap(const void *addr, size_t length, int prot, int flags,
                      off_t offset)
{
  MOZ_ASSERT(buffer);
  MOZ_ASSERT(!(flags & MAP_SHARED));
  flags |= MAP_PRIVATE;

  /* The deflate stream is uncompressed up to the required offset + length,
   * unless it has already been uncompressed that far. */
  ssize_t missing = offset + length + zStream.avail_out - buffer->GetLength();
  if (missing > 0) {
    uInt avail_out = zStream.avail_out;
    zStream.avail_out = missing;
    if ((*buffer == zStream.next_out) &&
        (inflateInit2(&zStream, -MAX_WBITS) != Z_OK)) {
      log("inflateInit failed: %s", zStream.msg);
      return MAP_FAILED;
    }
    int ret = inflate(&zStream, Z_SYNC_FLUSH);
    if (ret < 0) {
      log("inflate failed: %s", zStream.msg);
      return MAP_FAILED;
    }
    if (ret == Z_NEED_DICT) {
      log("zstream requires a dictionary. %s", zStream.msg);
      return MAP_FAILED;
    }
    zStream.avail_out = avail_out - missing + zStream.avail_out;
    if (ret == Z_STREAM_END) {
      if (inflateEnd(&zStream) != Z_OK) {
        log("inflateEnd failed: %s", zStream.msg);
        return MAP_FAILED;
      }
      if (zStream.total_out != buffer->GetLength()) {
        log("File not fully uncompressed! %ld / %d", zStream.total_out,
            static_cast<unsigned int>(buffer->GetLength()));
        return MAP_FAILED;
      }
    }
  }
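  /* Worked example of the avail_out bookkeeping above (illustrative numbers,
   * not from the source): with a 40000-byte buffer and a fresh stream,
   * avail_out starts at 40000. A request for offset 0, length 16384 gives
   * missing = 16384 + 40000 - 40000 = 16384, so exactly 16384 bytes are
   * inflated; avail_out is then restored to 40000 - 16384 + 0 = 23616, so a
   * later request for offset 16384, length 8192 computes missing = 8192. */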
#if defined(ANDROID) && defined(__arm__)
  if (prot & PROT_EXEC) {
    /* We just extracted data that may be executed in the future.
     * We thus need to ensure Instruction and Data cache coherency. */
    debug("cacheflush(%p, %p)", *buffer + offset, *buffer + (offset + length));
    cacheflush(reinterpret_cast<uintptr_t>(*buffer + offset),
               reinterpret_cast<uintptr_t>(*buffer + (offset + length)), 0);
  }
#endif

  return buffer->mmap(addr, length, prot, flags, offset);
}
void
MappableDeflate::finalize()
{
  /* Free decompression buffer */
  buffer = NULL;
  /* Remove reference to Zip archive */
  zip = NULL;
}
Mappable *
MappableSeekableZStream::Create(const char *name, Zip *zip,
                                Zip::Stream *stream)
{
  MOZ_ASSERT(stream->GetType() == Zip::Stream::STORE);
  AutoDeletePtr<MappableSeekableZStream> mappable =
    new MappableSeekableZStream(zip);

  if (pthread_mutex_init(&mappable->mutex, NULL))
    return NULL;

  if (!mappable->zStream.Init(stream->GetBuffer()))
    return NULL;

  mappable->buffer = _MappableBuffer::Create(name,
                       mappable->zStream.GetUncompressedSize());
  if (!mappable->buffer)
    return NULL;

  mappable->chunkAvail = new unsigned char[mappable->zStream.GetChunksNum()];
  memset(mappable->chunkAvail, 0, mappable->zStream.GetChunksNum());

  return mappable.forget();
}
MappableSeekableZStream::MappableSeekableZStream(Zip *zip)
: zip(zip), chunkAvailNum(0) { }

MappableSeekableZStream::~MappableSeekableZStream()
{
  pthread_mutex_destroy(&mutex);
}
void *
MappableSeekableZStream::mmap(const void *addr, size_t length, int prot,
                              int flags, off_t offset)
{
  /* Map with PROT_NONE so that accessing the mapping faults, which
   * brings us into ensure() */
  void *res = buffer->mmap(addr, length, PROT_NONE, flags, offset);
  if (res == MAP_FAILED)
    return MAP_FAILED;

  /* Store the mapping, ordered by offset and length */
  std::vector<LazyMap>::reverse_iterator it;
  for (it = lazyMaps.rbegin(); it < lazyMaps.rend(); ++it) {
    if ((it->offset < offset) ||
        ((it->offset == offset) && (it->length < length)))
      break;
  }
  LazyMap map = { res, length, prot, offset };
  lazyMaps.insert(it.base(), map);
  return res;
}
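/* Design note: the PROT_NONE mapping turns the first access to any of these
 * pages into a segfault. A fault handler elsewhere in this linker (see
 * ElfLoader) is then expected to call ensure() with the faulting address,
 * which decompresses the chunk backing that page and flips its protection
 * to the prot recorded in the LazyMap. */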
void
MappableSeekableZStream::munmap(void *addr, size_t length)
{
  std::vector<LazyMap>::iterator it;
  for (it = lazyMaps.begin(); it < lazyMaps.end(); ++it)
    if ((it->addr == addr) && (it->length == length)) {
      lazyMaps.erase(it);
      ::munmap(addr, length);
      return;
    }
  MOZ_NOT_REACHED("munmap called with unknown mapping");
}
void
MappableSeekableZStream::finalize() { }
/* RAII scoped lock around a pthread mutex */
class AutoLock {
public:
  AutoLock(pthread_mutex_t *mutex): mutex(mutex)
  {
    if (pthread_mutex_lock(mutex))
      MOZ_NOT_REACHED("pthread_mutex_lock failed");
  }
  ~AutoLock()
  {
    if (pthread_mutex_unlock(mutex))
      MOZ_NOT_REACHED("pthread_mutex_unlock failed");
  }
private:
  pthread_mutex_t *mutex;
};
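/* Worked scenario for ensure() below (assumed numbers, for illustration):
 * with a 16384-byte chunk size, 4096-byte pages and one mapping covering
 * offsets [0, 40000), a fault at buffer + 20500 rounds down to page offset
 * 20480, selects chunk 1 ([16384, 32768)), decompresses the whole chunk
 * (the mapping extends past its end), records chunkAvail[1] = 4 pages, and
 * mprotects [16384, 32768) back to the mapping's recorded protection. */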
bool
MappableSeekableZStream::ensure(const void *addr)
{
  debug("ensure @%p", addr);
  void *addrPage = reinterpret_cast<void *>
                   (reinterpret_cast<uintptr_t>(addr) & PAGE_MASK);
  /* Find the mapping corresponding to the given page */
  std::vector<LazyMap>::iterator map;
  for (map = lazyMaps.begin(); map < lazyMaps.end(); ++map) {
    if (map->Contains(addrPage))
      break;
  }
  if (map == lazyMaps.end())
    return false;
  /* Find corresponding chunk */
  off_t mapOffset = map->offsetOf(addrPage);
  size_t chunk = mapOffset / zStream.GetChunkSize();

  /* In the typical case, we just need to decompress the chunk entirely. But
   * when the current mapping ends in the middle of the chunk, we want to
   * stop there. However, if another mapping needs the last part of the
   * chunk, we still need to continue. As mappings are ordered by offset
   * and length, we don't need to scan the entire list of mappings.
   * It is safe to run through lazyMaps here because the linker is never
   * going to call mmap (which adds lazyMaps) while this function is
   * called. */
  size_t length = zStream.GetChunkSize(chunk);
  size_t chunkStart = chunk * zStream.GetChunkSize();
  size_t chunkEnd = chunkStart + length;
  std::vector<LazyMap>::iterator it;
  for (it = map; it < lazyMaps.end(); ++it) {
    if (chunkEnd <= it->endOffset())
      break;
  }
  if ((it == lazyMaps.end()) || (chunkEnd > it->endOffset())) {
    /* The mapping "it" points at now is past the interesting one */
    --it;
    length = it->endOffset() - chunkStart;
  }
  AutoLock lock(&mutex);

  /* The very first page is mapped and accessed separately from the rest, and
   * as such, only the first page of the first chunk is decompressed this way.
   * When we fault in the remaining pages of that chunk, we want to decompress
   * the complete chunk again. Short of doing that, we would end up with
   * no data between PAGE_SIZE and chunkSize, which would effectively corrupt
   * symbol resolution in the underlying library. */
  if (chunkAvail[chunk] < (length + PAGE_SIZE - 1) / PAGE_SIZE) {
    if (!zStream.DecompressChunk(*buffer + chunkStart, chunk, length))
      return false;

#if defined(ANDROID) && defined(__arm__)
    if (map->prot & PROT_EXEC) {
      /* We just extracted data that may be executed in the future.
       * We thus need to ensure Instruction and Data cache coherency. */
      debug("cacheflush(%p, %p)", *buffer + chunkStart,
            *buffer + (chunkStart + length));
      cacheflush(reinterpret_cast<uintptr_t>(*buffer + chunkStart),
                 reinterpret_cast<uintptr_t>(*buffer + (chunkStart + length)),
                 0);
    }
#endif
    /* Only count if we haven't already decompressed parts of the chunk */
    if (chunkAvail[chunk] == 0)
      chunkAvailNum++;

    chunkAvail[chunk] = (length + PAGE_SIZE - 1) / PAGE_SIZE;
  }
  /* Flip the chunk mapping protection to the recorded flags. We could
   * also flip the protection for other mappings of the same chunk,
   * but it's easier to skip that and let further segfaults call
   * ensure again. */
  const void *chunkAddr = reinterpret_cast<const void *>
                          (reinterpret_cast<uintptr_t>(addrPage)
                           - mapOffset % zStream.GetChunkSize());
  const void *chunkEndAddr = reinterpret_cast<const void *>
                             (reinterpret_cast<uintptr_t>(chunkAddr) + length);

  const void *start = std::max(map->addr, chunkAddr);
  const void *end = std::min(map->end(), chunkEndAddr);
  length = reinterpret_cast<uintptr_t>(end)
           - reinterpret_cast<uintptr_t>(start);

  debug("mprotect @%p, 0x%zx, 0x%x", start, length, map->prot);
  if (mprotect(const_cast<void *>(start), length, map->prot) == 0)
    return true;

  log("mprotect failed");
  return false;
}
void
MappableSeekableZStream::stats(const char *when, const char *name) const
{
  size_t nEntries = zStream.GetChunksNum();
  debug("%s: %s; %ld/%ld chunks decompressed",
        name, when, chunkAvailNum, nEntries);

  size_t len = 64;
  AutoDeleteArray<char> map = new char[len + 3];
  map[0] = '[';

  for (size_t i = 0, j = 1; i < nEntries; i++, j++) {
    map[j] = chunkAvail[i] ? '*' : '_';
    if ((j == len) || (i == nEntries - 1)) {
      map[j + 1] = ']';
      map[j + 2] = '\0';
      debug("%s", static_cast<char *>(map));
      j = 0;
    }
  }
}
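/* Example of the resulting log output (illustrative values): for a library
 * with 6 chunks of which the first and third have been decompressed, and
 * with when == "after load", this would print something like:
 *   libfoo.so: after load; 2/6 chunks decompressed
 *   [*_*___]
 */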