1 /* This Source Code Form is subject to the terms of the Mozilla Public
2 * License, v. 2.0. If a copy of the MPL was not distributed with this file,
3 * You can obtain one at http://mozilla.org/MPL/2.0/. */
14 #include <linux/ashmem.h>
17 #include "ElfLoader.h"
18 #include "SeekableZStream.h"
22 #define PAGE_SIZE 4096
26 #define PAGE_MASK (~ (PAGE_SIZE - 1))
30 MappableFile::Create(const char *path
)
32 int fd
= open(path
, O_RDONLY
);
34 return new MappableFile(fd
);
39 MappableFile::mmap(const void *addr
, size_t length
, int prot
, int flags
,
43 MOZ_ASSERT(!(flags
& MAP_SHARED
));
46 void *mapped
= ::mmap(const_cast<void *>(addr
), length
, prot
, flags
,
48 if (mapped
== MAP_FAILED
)
51 /* Fill the remainder of the last page with zeroes when the requested
52 * protection has write bits. */
53 if ((mapped
!= MAP_FAILED
) && (prot
& PROT_WRITE
) &&
54 (length
& (PAGE_SIZE
- 1))) {
55 memset(reinterpret_cast<char *>(mapped
) + length
, 0,
56 PAGE_SIZE
- (length
& ~(PAGE_MASK
)));
62 MappableFile::finalize()
64 /* Close file ; equivalent to close(fd.forget()) */
69 MappableExtractFile::Create(const char *name
, Zip
*zip
, Zip::Stream
*stream
)
71 const char *cachePath
= getenv("MOZ_LINKER_CACHE");
72 if (!cachePath
|| !*cachePath
) {
73 log("Warning: MOZ_LINKER_EXTRACT is set, but not MOZ_LINKER_CACHE; "
77 AutoDeleteArray
<char> path
= new char[strlen(cachePath
) + strlen(name
) + 2];
78 sprintf(path
, "%s/%s", cachePath
, name
);
79 struct stat cacheStat
;
80 if (stat(path
, &cacheStat
) == 0) {
82 stat(zip
->GetName(), &zipStat
);
83 if (cacheStat
.st_mtime
> zipStat
.st_mtime
) {
84 debug("Reusing %s", static_cast<char *>(path
));
85 return MappableFile::Create(path
);
88 debug("Extracting to %s", static_cast<char *>(path
));
89 AutoCloseFD fd
= open(path
, O_TRUNC
| O_RDWR
| O_CREAT
| O_NOATIME
,
92 log("Couldn't open %s to decompress library", path
.get());
95 AutoUnlinkFile file
= path
.forget();
96 if (stream
->GetType() == Zip::Stream::DEFLATE
) {
97 if (ftruncate(fd
, stream
->GetUncompressedSize()) == -1) {
98 log("Couldn't ftruncate %s to decompress library", file
.get());
101 /* Map the temporary file for use as inflate buffer */
102 MappedPtr
buffer(::mmap(NULL
, stream
->GetUncompressedSize(), PROT_WRITE
,
103 MAP_SHARED
, fd
, 0), stream
->GetUncompressedSize());
104 if (buffer
== MAP_FAILED
) {
105 log("Couldn't map %s to decompress library", file
.get());
109 z_stream zStream
= stream
->GetZStream(buffer
);
112 if (inflateInit2(&zStream
, -MAX_WBITS
) != Z_OK
) {
113 log("inflateInit failed: %s", zStream
.msg
);
116 if (inflate(&zStream
, Z_FINISH
) != Z_STREAM_END
) {
117 log("inflate failed: %s", zStream
.msg
);
120 if (inflateEnd(&zStream
) != Z_OK
) {
121 log("inflateEnd failed: %s", zStream
.msg
);
124 if (zStream
.total_out
!= stream
->GetUncompressedSize()) {
125 log("File not fully uncompressed! %ld / %d", zStream
.total_out
,
126 static_cast<unsigned int>(stream
->GetUncompressedSize()));
129 } else if (stream
->GetType() == Zip::Stream::STORE
) {
130 SeekableZStream zStream
;
131 if (!zStream
.Init(stream
->GetBuffer())) {
132 log("Couldn't initialize SeekableZStream for %s", name
);
135 if (ftruncate(fd
, zStream
.GetUncompressedSize()) == -1) {
136 log("Couldn't ftruncate %s to decompress library", file
.get());
139 MappedPtr
buffer(::mmap(NULL
, zStream
.GetUncompressedSize(), PROT_WRITE
,
140 MAP_SHARED
, fd
, 0), zStream
.GetUncompressedSize());
141 if (buffer
== MAP_FAILED
) {
142 log("Couldn't map %s to decompress library", file
.get());
146 if (!zStream
.Decompress(buffer
, 0, zStream
.GetUncompressedSize())) {
147 log("%s: failed to decompress", name
);
154 return new MappableExtractFile(fd
.forget(), file
.forget());
157 MappableExtractFile::~MappableExtractFile()
159 /* When destroying from a forked process, we don't want the file to be
160 * removed, as the main process is still using the file. Although it
161 * doesn't really matter, it helps e.g. valgrind that the file is there.
162 * The string still needs to be delete[]d, though */
164 delete [] path
.forget();
168 * _MappableBuffer is a buffer which content can be mapped at different
169 * locations in the virtual address space.
170 * On Linux, uses a (deleted) temporary file on a tmpfs for sharable content.
171 * On Android, uses ashmem.
173 class _MappableBuffer
: public MappedPtr
177 * Returns a _MappableBuffer instance with the given name and the given
180 static _MappableBuffer
*Create(const char *name
, size_t length
)
184 /* On Android, initialize an ashmem region with the given length */
185 fd
= open("/" ASHMEM_NAME_DEF
, O_RDWR
, 0600);
188 char str
[ASHMEM_NAME_LEN
];
189 strlcpy(str
, name
, sizeof(str
));
190 ioctl(fd
, ASHMEM_SET_NAME
, str
);
191 if (ioctl(fd
, ASHMEM_SET_SIZE
, length
))
194 /* The Gecko crash reporter is confused by adjacent memory mappings of
195 * the same file. On Android, subsequent mappings are growing in memory
196 * address, and chances are we're going to map from the same file
197 * descriptor right away. Allocate one page more than requested so that
198 * there is a gap between this mapping and the subsequent one. */
199 void *buf
= ::mmap(NULL
, length
+ PAGE_SIZE
, PROT_READ
| PROT_WRITE
, MAP_SHARED
, fd
, 0);
200 if (buf
!= MAP_FAILED
) {
201 /* Actually create the gap with anonymous memory */
202 ::mmap(reinterpret_cast<char *>(buf
) + ((length
+ PAGE_SIZE
) & PAGE_MASK
),
203 PAGE_SIZE
, PROT_NONE
, MAP_FIXED
| MAP_PRIVATE
| MAP_ANONYMOUS
,
205 debug("Decompression buffer of size %d in ashmem \"%s\", mapped @%p",
207 return new _MappableBuffer(fd
.forget(), buf
, length
);
210 /* On Linux, use /dev/shm as base directory for temporary files, assuming
212 /* TODO: check that /dev/shm is tmpfs */
214 sprintf(path
, "/dev/shm/%s.XXXXXX", name
);
219 ftruncate(fd
, length
);
221 void *buf
= ::mmap(NULL
, length
, PROT_READ
| PROT_WRITE
, MAP_SHARED
, fd
, 0);
222 if (buf
!= MAP_FAILED
) {
223 debug("Decompression buffer of size %ld in \"%s\", mapped @%p",
225 return new _MappableBuffer(fd
.forget(), buf
, length
);
231 void *mmap(const void *addr
, size_t length
, int prot
, int flags
, off_t offset
)
233 MOZ_ASSERT(fd
!= -1);
235 /* Mapping ashmem MAP_PRIVATE is like mapping anonymous memory, even when
236 * there is content in the ashmem */
237 if (flags
& MAP_PRIVATE
) {
238 flags
&= ~MAP_PRIVATE
;
242 return ::mmap(const_cast<void *>(addr
), length
, prot
, flags
, fd
, offset
);
247 /* Free the additional page we allocated. See _MappableBuffer::Create */
248 ::munmap(this + ((GetLength() + PAGE_SIZE
) & ~(PAGE_SIZE
- 1)), PAGE_SIZE
);
253 _MappableBuffer(int fd
, void *buf
, size_t length
)
254 : MappedPtr(buf
, length
), fd(fd
) { }
256 /* File descriptor for the temporary file or ashmem */
262 MappableDeflate::Create(const char *name
, Zip
*zip
, Zip::Stream
*stream
)
264 MOZ_ASSERT(stream
->GetType() == Zip::Stream::DEFLATE
);
265 _MappableBuffer
*buf
= _MappableBuffer::Create(name
, stream
->GetUncompressedSize());
267 return new MappableDeflate(buf
, zip
, stream
);
271 MappableDeflate::MappableDeflate(_MappableBuffer
*buf
, Zip
*zip
,
273 : zip(zip
), buffer(buf
), zStream(stream
->GetZStream(*buf
)) { }
275 MappableDeflate::~MappableDeflate() { }
/**
 * Returns a private mapping of [offset, offset + length) in the
 * decompression buffer, first inflating more of the deflate stream if that
 * range has not been uncompressed yet. Returns MAP_FAILED on error.
 */
void *
MappableDeflate::mmap(const void *addr, size_t length, int prot, int flags,
                      off_t offset)
{
  MOZ_ASSERT(!(flags & MAP_SHARED));
  flags |= MAP_PRIVATE;

  /* The deflate stream is uncompressed up to the required offset + length, if
   * it hasn't previously been uncompressed */
  ssize_t missing = offset + length + zStream.avail_out - buffer->GetLength();
  if (missing > 0) {
    uInt avail_out = zStream.avail_out;
    zStream.avail_out = missing;
    /* next_out still pointing at the start of the buffer means nothing has
     * been inflated yet, so the zlib stream must be initialized first.
     * -MAX_WBITS selects a raw deflate stream (no zlib header). */
    if ((*buffer == zStream.next_out) &&
        (inflateInit2(&zStream, -MAX_WBITS) != Z_OK)) {
      log("inflateInit failed: %s", zStream.msg);
      return MAP_FAILED;
    }
    int ret = inflate(&zStream, Z_SYNC_FLUSH);
    if (ret < 0) {
      log("inflate failed: %s", zStream.msg);
      return MAP_FAILED;
    }
    if (ret == Z_NEED_DICT) {
      log("zstream requires a dictionary. %s", zStream.msg);
      return MAP_FAILED;
    }
    /* Restore avail_out to account for the part of the buffer that is
     * still to be inflated by later calls. */
    zStream.avail_out = avail_out - missing + zStream.avail_out;
    if (ret == Z_STREAM_END) {
      if (inflateEnd(&zStream) != Z_OK) {
        log("inflateEnd failed: %s", zStream.msg);
        return MAP_FAILED;
      }
      if (zStream.total_out != buffer->GetLength()) {
        log("File not fully uncompressed! %ld / %d", zStream.total_out,
            static_cast<unsigned int>(buffer->GetLength()));
        return MAP_FAILED;
      }
    }
  }
#if defined(ANDROID) && defined(__arm__)
  if (prot & PROT_EXEC) {
    /* We just extracted data that may be executed in the future.
     * We thus need to ensure Instruction and Data cache coherency. */
    debug("cacheflush(%p, %p)", *buffer + offset, *buffer + (offset + length));
    cacheflush(reinterpret_cast<uintptr_t>(*buffer + offset),
               reinterpret_cast<uintptr_t>(*buffer + (offset + length)), 0);
  }
#endif

  return buffer->mmap(addr, length, prot, flags, offset);
}
331 MappableDeflate::finalize()
333 /* Free decompression buffer */
335 /* Remove reference to Zip archive */
340 MappableSeekableZStream::Create(const char *name
, Zip
*zip
,
343 MOZ_ASSERT(stream
->GetType() == Zip::Stream::STORE
);
344 AutoDeletePtr
<MappableSeekableZStream
> mappable
=
345 new MappableSeekableZStream(zip
);
347 if (pthread_mutex_init(&mappable
->mutex
, NULL
))
350 if (!mappable
->zStream
.Init(stream
->GetBuffer()))
353 mappable
->buffer
= _MappableBuffer::Create(name
,
354 mappable
->zStream
.GetUncompressedSize());
355 if (!mappable
->buffer
)
358 mappable
->chunkAvail
= new unsigned char[mappable
->zStream
.GetChunksNum()];
359 memset(mappable
->chunkAvail
, 0, mappable
->zStream
.GetChunksNum());
361 return mappable
.forget();
364 MappableSeekableZStream::MappableSeekableZStream(Zip
*zip
)
365 : zip(zip
), chunkAvailNum(0) { }
367 MappableSeekableZStream::~MappableSeekableZStream()
369 pthread_mutex_destroy(&mutex
);
373 MappableSeekableZStream::mmap(const void *addr
, size_t length
, int prot
,
374 int flags
, off_t offset
)
376 /* Map with PROT_NONE so that accessing the mapping would segfault, and
377 * bring us to ensure() */
378 void *res
= buffer
->mmap(addr
, length
, PROT_NONE
, flags
, offset
);
379 if (res
== MAP_FAILED
)
382 /* Store the mapping, ordered by offset and length */
383 std::vector
<LazyMap
>::reverse_iterator it
;
384 for (it
= lazyMaps
.rbegin(); it
< lazyMaps
.rend(); ++it
) {
385 if ((it
->offset
< offset
) ||
386 ((it
->offset
== offset
) && (it
->length
< length
)))
389 LazyMap map
= { res
, length
, prot
, offset
};
390 lazyMaps
.insert(it
.base(), map
);
395 MappableSeekableZStream::munmap(void *addr
, size_t length
)
397 std::vector
<LazyMap
>::iterator it
;
398 for (it
= lazyMaps
.begin(); it
< lazyMaps
.end(); ++it
)
399 if ((it
->addr
= addr
) && (it
->length
== length
)) {
401 ::munmap(addr
, length
);
404 MOZ_NOT_REACHED("munmap called with unknown mapping");
408 MappableSeekableZStream::finalize() { }
412 AutoLock(pthread_mutex_t
*mutex
): mutex(mutex
)
414 if (pthread_mutex_lock(mutex
))
415 MOZ_NOT_REACHED("pthread_mutex_lock failed");
419 if (pthread_mutex_unlock(mutex
))
420 MOZ_NOT_REACHED("pthread_mutex_unlock failed");
423 pthread_mutex_t
*mutex
;
427 MappableSeekableZStream::ensure(const void *addr
)
429 debug("ensure @%p", addr
);
430 void *addrPage
= reinterpret_cast<void *>
431 (reinterpret_cast<uintptr_t>(addr
) & PAGE_MASK
);
432 /* Find the mapping corresponding to the given page */
433 std::vector
<LazyMap
>::iterator map
;
434 for (map
= lazyMaps
.begin(); map
< lazyMaps
.end(); ++map
) {
435 if (map
->Contains(addrPage
))
438 if (map
== lazyMaps
.end())
441 /* Find corresponding chunk */
442 off_t mapOffset
= map
->offsetOf(addrPage
);
443 size_t chunk
= mapOffset
/ zStream
.GetChunkSize();
445 /* In the typical case, we just need to decompress the chunk entirely. But
446 * when the current mapping ends in the middle of the chunk, we want to
447 * stop there. However, if another mapping needs the last part of the
448 * chunk, we still need to continue. As mappings are ordered by offset
449 * and length, we don't need to scan the entire list of mappings.
450 * It is safe to run through lazyMaps here because the linker is never
451 * going to call mmap (which adds lazyMaps) while this function is
453 size_t length
= zStream
.GetChunkSize(chunk
);
454 size_t chunkStart
= chunk
* zStream
.GetChunkSize();
455 size_t chunkEnd
= chunkStart
+ length
;
456 std::vector
<LazyMap
>::iterator it
;
457 for (it
= map
; it
< lazyMaps
.end(); ++it
) {
458 if (chunkEnd
<= it
->endOffset())
461 if ((it
== lazyMaps
.end()) || (chunkEnd
> it
->endOffset())) {
462 /* The mapping "it" points at now is past the interesting one */
464 length
= it
->endOffset() - chunkStart
;
467 AutoLock
lock(&mutex
);
469 /* The very first page is mapped and accessed separately of the rest, and
470 * as such, only the first page of the first chunk is decompressed this way.
471 * When we fault in the remaining pages of that chunk, we want to decompress
472 * the complete chunk again. Short of doing that, we would end up with
473 * no data between PAGE_SIZE and chunkSize, which would effectively corrupt
474 * symbol resolution in the underlying library. */
475 if (chunkAvail
[chunk
] < (length
+ PAGE_SIZE
- 1) / PAGE_SIZE
) {
476 if (!zStream
.DecompressChunk(*buffer
+ chunkStart
, chunk
, length
))
479 #if defined(ANDROID) && defined(__arm__)
480 if (map
->prot
& PROT_EXEC
) {
481 /* We just extracted data that may be executed in the future.
482 * We thus need to ensure Instruction and Data cache coherency. */
483 debug("cacheflush(%p, %p)", *buffer
+ chunkStart
, *buffer
+ (chunkStart
+ length
));
484 cacheflush(reinterpret_cast<uintptr_t>(*buffer
+ chunkStart
),
485 reinterpret_cast<uintptr_t>(*buffer
+ (chunkStart
+ length
)), 0);
488 /* Only count if we haven't already decompressed parts of the chunk */
489 if (chunkAvail
[chunk
] == 0)
492 chunkAvail
[chunk
] = (length
+ PAGE_SIZE
- 1) / PAGE_SIZE
;
495 /* Flip the chunk mapping protection to the recorded flags. We could
496 * also flip the protection for other mappings of the same chunk,
497 * but it's easier to skip that and let further segfaults call
499 const void *chunkAddr
= reinterpret_cast<const void *>
500 (reinterpret_cast<uintptr_t>(addrPage
)
501 - mapOffset
% zStream
.GetChunkSize());
502 const void *chunkEndAddr
= reinterpret_cast<const void *>
503 (reinterpret_cast<uintptr_t>(chunkAddr
) + length
);
505 const void *start
= std::max(map
->addr
, chunkAddr
);
506 const void *end
= std::min(map
->end(), chunkEndAddr
);
507 length
= reinterpret_cast<uintptr_t>(end
)
508 - reinterpret_cast<uintptr_t>(start
);
510 debug("mprotect @%p, 0x%x, 0x%x", start
, length
, map
->prot
);
511 if (mprotect(const_cast<void *>(start
), length
, map
->prot
) == 0)
514 log("mprotect failed");
519 MappableSeekableZStream::stats(const char *when
, const char *name
) const
521 size_t nEntries
= zStream
.GetChunksNum();
522 debug("%s: %s; %ld/%ld chunks decompressed",
523 name
, when
, chunkAvailNum
, nEntries
);
526 AutoDeleteArray
<char> map
= new char[len
+ 3];
529 for (size_t i
= 0, j
= 1; i
< nEntries
; i
++, j
++) {
530 map
[j
] = chunkAvail
[i
] ? '*' : '_';
531 if ((j
== len
) || (i
== nEntries
- 1)) {
534 debug("%s", static_cast<char *>(map
));