// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/atomicops.h"
#include "base/basictypes.h"
#include "base/memory/scoped_ptr.h"
#include "base/memory/shared_memory.h"
#include "base/process/kill.h"
#include "base/rand_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/sys_info.h"
#include "base/test/multiprocess_test.h"
#include "base/threading/platform_thread.h"
#include "base/time/time.h"
#include "testing/gtest/include/gtest/gtest.h"
#include "testing/multiprocess_func_list.h"
#if defined(OS_POSIX)
#include <errno.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <unistd.h>
#endif

#if defined(OS_WIN)
#include "base/win/scoped_handle.h"
#endif

namespace base {

namespace {
#if !defined(OS_MACOSX)
// Each thread will open the shared memory. Each thread will take a different 4
// byte int pointer, and keep changing it, with some small pauses in between.
// Verify that each thread's value in the shared memory is always correct.
class MultipleThreadMain : public PlatformThread::Delegate {
 public:
  explicit MultipleThreadMain(int16 id) : id_(id) {}
  ~MultipleThreadMain() override {}

  static void CleanUp() {
    SharedMemory memory;
    memory.Delete(s_test_name_);
  }

  // PlatformThread::Delegate interface.
  void ThreadMain() override {
    const uint32 kDataSize = 1024;
    SharedMemory memory;
    bool rv = memory.CreateNamedDeprecated(s_test_name_, true, kDataSize);
    EXPECT_TRUE(rv);
    rv = memory.Map(kDataSize);
    EXPECT_TRUE(rv);
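    // Each thread touches only its own int slot (offset by id_), so the
    // writers never overlap even though they share one segment.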
    int* ptr = static_cast<int*>(memory.memory()) + id_;
    EXPECT_EQ(0, *ptr);

    for (int idx = 0; idx < 100; idx++) {
      *ptr = idx;
      PlatformThread::Sleep(TimeDelta::FromMilliseconds(1));
      EXPECT_EQ(*ptr, idx);
    }
    // Reset back to 0 for the next test that uses the same name.
    *ptr = 0;

    memory.Close();
  }

 private:
  int16 id_;

  static const char* const s_test_name_;

  DISALLOW_COPY_AND_ASSIGN(MultipleThreadMain);
};

const char* const MultipleThreadMain::s_test_name_ =
    "SharedMemoryOpenThreadTest";
#endif  // !defined(OS_MACOSX)

}  // namespace

// Android/Mac doesn't support SharedMemory::Open/Delete/
// CreateNamedDeprecated(openExisting=true)
#if !defined(OS_ANDROID) && !defined(OS_MACOSX)
TEST(SharedMemoryTest, OpenClose) {
  const uint32 kDataSize = 1024;
  std::string test_name = "SharedMemoryOpenCloseTest";

  // Open two handles to a memory segment, confirm that they are mapped
  // separately yet point to the same space.
  SharedMemory memory1;
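  // Delete() is expected to succeed even when no segment with this name
  // exists yet, so two back-to-back deletes should both return true.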
  bool rv = memory1.Delete(test_name);
  EXPECT_TRUE(rv);
  rv = memory1.Delete(test_name);
  EXPECT_TRUE(rv);
  rv = memory1.Open(test_name, false);
  EXPECT_FALSE(rv);
  rv = memory1.CreateNamedDeprecated(test_name, false, kDataSize);
  EXPECT_TRUE(rv);
  rv = memory1.Map(kDataSize);
  EXPECT_TRUE(rv);
  SharedMemory memory2;
  rv = memory2.Open(test_name, false);
  EXPECT_TRUE(rv);
  rv = memory2.Map(kDataSize);
  EXPECT_TRUE(rv);
  EXPECT_NE(memory1.memory(), memory2.memory());  // Compare the pointers.

  // Make sure we don't segfault. (it actually happened!)
  ASSERT_NE(memory1.memory(), static_cast<void*>(NULL));
  ASSERT_NE(memory2.memory(), static_cast<void*>(NULL));

  // Write data to the first memory segment, verify contents of second.
  memset(memory1.memory(), '1', kDataSize);
  EXPECT_EQ(memcmp(memory1.memory(), memory2.memory(), kDataSize), 0);

  // Close the first memory segment, and verify the second has the right data.
  memory1.Close();
  char* start_ptr = static_cast<char*>(memory2.memory());
  char* end_ptr = start_ptr + kDataSize;
  for (char* ptr = start_ptr; ptr < end_ptr; ptr++)
    EXPECT_EQ(*ptr, '1');

  // Close the second memory segment.
  memory2.Close();

  rv = memory1.Delete(test_name);
  EXPECT_TRUE(rv);
  rv = memory2.Delete(test_name);
  EXPECT_TRUE(rv);
}

TEST(SharedMemoryTest, OpenExclusive) {
  const uint32 kDataSize = 1024;
  const uint32 kDataSize2 = 2048;
  std::ostringstream test_name_stream;
  test_name_stream << "SharedMemoryOpenExclusiveTest."
                   << Time::Now().ToDoubleT();
  std::string test_name = test_name_stream.str();
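  // The Time::Now() suffix keeps the name unique across runs, presumably so
  // that a stale segment left behind by an earlier run can't collide.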

  // Open two handles to a memory segment and check that
  // open_existing_deprecated works as expected.
  SharedMemory memory1;
  bool rv = memory1.CreateNamedDeprecated(test_name, false, kDataSize);
  EXPECT_TRUE(rv);

  // Memory1 knows its size because it created it.
  EXPECT_EQ(memory1.requested_size(), kDataSize);

  rv = memory1.Map(kDataSize);
  EXPECT_TRUE(rv);

  // The mapped memory1 must be at least the size we asked for.
  EXPECT_GE(memory1.mapped_size(), kDataSize);

  // The mapped memory1 shouldn't exceed rounding for allocation granularity.
  EXPECT_LT(memory1.mapped_size(),
            kDataSize + SysInfo::VMAllocationGranularity());

  memset(memory1.memory(), 'G', kDataSize);

  SharedMemory memory2;
  // Should not be able to create if openExisting is false.
  rv = memory2.CreateNamedDeprecated(test_name, false, kDataSize2);
  EXPECT_FALSE(rv);

  // Should be able to create with openExisting true.
  rv = memory2.CreateNamedDeprecated(test_name, true, kDataSize2);
  EXPECT_TRUE(rv);

  // Memory2 shouldn't know the size because we didn't create it.
  EXPECT_EQ(memory2.requested_size(), 0U);

  // We should be able to map the original size.
  rv = memory2.Map(kDataSize);
  EXPECT_TRUE(rv);

  // The mapped memory2 must be at least the size of the original.
  EXPECT_GE(memory2.mapped_size(), kDataSize);

  // The mapped memory2 shouldn't exceed rounding for allocation granularity.
  EXPECT_LT(memory2.mapped_size(),
            kDataSize2 + SysInfo::VMAllocationGranularity());

  // Verify that opening memory2 didn't truncate or delete memory1.
  char* start_ptr = static_cast<char*>(memory2.memory());
  char* end_ptr = start_ptr + kDataSize;
  for (char* ptr = start_ptr; ptr < end_ptr; ptr++) {
    EXPECT_EQ(*ptr, 'G');
  }

  memory1.Close();
  memory2.Close();

  rv = memory1.Delete(test_name);
  EXPECT_TRUE(rv);
}
#endif  // !defined(OS_ANDROID) && !defined(OS_MACOSX)

// Check that memory is still mapped after it's closed.
TEST(SharedMemoryTest, CloseNoUnmap) {
  const size_t kDataSize = 4096;

  SharedMemory memory;
  ASSERT_TRUE(memory.CreateAndMapAnonymous(kDataSize));
  char* ptr = static_cast<char*>(memory.memory());
  ASSERT_NE(ptr, static_cast<void*>(NULL));
  memset(ptr, 'G', kDataSize);

  memory.Close();

  EXPECT_EQ(ptr, memory.memory());
  EXPECT_EQ(SharedMemory::NULLHandle(), memory.handle());
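  // Close() released the handle, but the bytes written before Close() should
  // still be readable through the existing mapping until Unmap() is called.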
  for (size_t i = 0; i < kDataSize; i++) {
    EXPECT_EQ('G', ptr[i]);
  }

  memory.Unmap();
  EXPECT_EQ(nullptr, memory.memory());
}

#if !defined(OS_MACOSX)
// Create a set of N threads to each open a shared memory segment and write to
// it. Verify that they are always reading/writing consistent data.
TEST(SharedMemoryTest, MultipleThreads) {
  const int kNumThreads = 5;

  MultipleThreadMain::CleanUp();
  // On POSIX we have a problem when 2 threads try to create the shmem
  // (a file) at exactly the same time, since Create both creates the
  // file and zero-fills it. We solve the problem for this unit test
  // (make it not flaky) by starting with 1 thread and then intentionally
  // not cleaning up its shmem before running with kNumThreads.

  int threadcounts[] = { 1, kNumThreads };
  for (size_t i = 0; i < arraysize(threadcounts); i++) {
    int numthreads = threadcounts[i];
    scoped_ptr<PlatformThreadHandle[]> thread_handles;
    scoped_ptr<MultipleThreadMain*[]> thread_delegates;

    thread_handles.reset(new PlatformThreadHandle[numthreads]);
    thread_delegates.reset(new MultipleThreadMain*[numthreads]);

    // Spawn the threads.
    for (int16 index = 0; index < numthreads; index++) {
      PlatformThreadHandle pth;
      thread_delegates[index] = new MultipleThreadMain(index);
      EXPECT_TRUE(PlatformThread::Create(0, thread_delegates[index], &pth));
      thread_handles[index] = pth;
    }

    // Wait for the threads to finish.
    for (int index = 0; index < numthreads; index++) {
      PlatformThread::Join(thread_handles[index]);
      delete thread_delegates[index];
    }
  }
  MultipleThreadMain::CleanUp();
}
#endif

// Allocate private (unique) shared memory with an empty string for a
// name. Make sure several of them don't point to the same thing as
// we might expect if the names are equal.
TEST(SharedMemoryTest, AnonymousPrivate) {
  int i, j;
  int count = 4;
  bool rv;
  const uint32 kDataSize = 8192;

  scoped_ptr<SharedMemory[]> memories(new SharedMemory[count]);
  scoped_ptr<int*[]> pointers(new int*[count]);
  ASSERT_TRUE(memories.get());
  ASSERT_TRUE(pointers.get());

  for (i = 0; i < count; i++) {
    rv = memories[i].CreateAndMapAnonymous(kDataSize);
    EXPECT_TRUE(rv);
    int* ptr = static_cast<int*>(memories[i].memory());
    EXPECT_TRUE(ptr);
    pointers[i] = ptr;
  }

  for (i = 0; i < count; i++) {
    // Zero out the first int in each segment except for index i; for that
    // one, make it 100.
    for (j = 0; j < count; j++) {
      if (i == j)
        pointers[j][0] = 100;
      else
        pointers[j][0] = 0;
    }
    // Make sure there is no bleeding of the 100 into the other pointers.
    for (j = 0; j < count; j++) {
      if (i == j)
        EXPECT_EQ(100, pointers[j][0]);
      else
        EXPECT_EQ(0, pointers[j][0]);
    }
  }

  for (int i = 0; i < count; i++) {
    memories[i].Close();
  }
}

TEST(SharedMemoryTest, ShareReadOnly) {
  StringPiece contents = "Hello World";

  SharedMemory writable_shmem;
  SharedMemoryCreateOptions options;
  options.size = contents.size();
  options.share_read_only = true;
  ASSERT_TRUE(writable_shmem.Create(options));
  ASSERT_TRUE(writable_shmem.Map(options.size));
  memcpy(writable_shmem.memory(), contents.data(), contents.size());
  EXPECT_TRUE(writable_shmem.Unmap());

  SharedMemoryHandle readonly_handle;
  ASSERT_TRUE(writable_shmem.ShareReadOnlyToProcess(GetCurrentProcessHandle(),
                                                    &readonly_handle));
  SharedMemory readonly_shmem(readonly_handle, /*readonly=*/true);

  ASSERT_TRUE(readonly_shmem.Map(contents.size()));
  EXPECT_EQ(contents,
            StringPiece(static_cast<const char*>(readonly_shmem.memory()),
                        contents.size()));
  EXPECT_TRUE(readonly_shmem.Unmap());

  // Make sure the writable instance is still writable.
  ASSERT_TRUE(writable_shmem.Map(contents.size()));
  StringPiece new_contents = "Goodbye";
  memcpy(writable_shmem.memory(), new_contents.data(), new_contents.size());
  EXPECT_EQ(new_contents,
            StringPiece(static_cast<const char*>(writable_shmem.memory()),
                        new_contents.size()));

  // We'd like to check that if we send the read-only segment to another
  // process, then that other process can't reopen it read/write. (Since that
  // would be a security hole.) Setting up multiple processes is hard in a
  // unittest, so this test checks that the *current* process can't reopen the
  // segment read/write. I think the test here is stronger than we actually
  // care about, but there's a remote possibility that sending a file over a
  // pipe would transform it into read/write.
  SharedMemoryHandle handle = readonly_shmem.handle();

#if defined(OS_ANDROID)
  // The "read-only" handle is still writable on Android:
  // http://crbug.com/320865
  (void)handle;
#elif defined(OS_POSIX)
  int handle_fd = SharedMemory::GetFdFromSharedMemoryHandle(handle);
  EXPECT_EQ(O_RDONLY, fcntl(handle_fd, F_GETFL) & O_ACCMODE)
      << "The descriptor itself should be read-only.";
  errno = 0;
  void* writable = mmap(NULL, contents.size(), PROT_READ | PROT_WRITE,
                        MAP_SHARED, handle_fd, 0);
  int mmap_errno = errno;
  EXPECT_EQ(MAP_FAILED, writable)
      << "It shouldn't be possible to re-mmap the descriptor writable.";
  EXPECT_EQ(EACCES, mmap_errno) << strerror(mmap_errno);
  if (writable != MAP_FAILED)
    EXPECT_EQ(0, munmap(writable, readonly_shmem.mapped_size()));

#elif defined(OS_WIN)
  EXPECT_EQ(NULL, MapViewOfFile(handle, FILE_MAP_WRITE, 0, 0, 0))
      << "Shouldn't be able to map memory writable.";
  HANDLE temp_handle;
  BOOL rv = ::DuplicateHandle(GetCurrentProcess(),
                              handle,
                              GetCurrentProcess(),
                              &temp_handle,
                              FILE_MAP_ALL_ACCESS,
                              false,
                              0);
  EXPECT_EQ(FALSE, rv)
      << "Shouldn't be able to duplicate the handle into a writable one.";
  if (rv)
    win::ScopedHandle writable_handle(temp_handle);
  rv = ::DuplicateHandle(GetCurrentProcess(),
                         handle,
                         GetCurrentProcess(),
                         &temp_handle,
                         FILE_MAP_READ,
                         false,
                         0);
  EXPECT_EQ(TRUE, rv)
      << "Should be able to duplicate the handle into a readable one.";
  if (rv)
    win::ScopedHandle writable_handle(temp_handle);
#else
#error Unexpected platform; write a test that tries to make 'handle' writable.
#endif  // defined(OS_POSIX) || defined(OS_WIN)
}

TEST(SharedMemoryTest, ShareToSelf) {
  StringPiece contents = "Hello World";

  SharedMemory shmem;
  ASSERT_TRUE(shmem.CreateAndMapAnonymous(contents.size()));
  memcpy(shmem.memory(), contents.data(), contents.size());
  EXPECT_TRUE(shmem.Unmap());

  SharedMemoryHandle shared_handle;
  ASSERT_TRUE(shmem.ShareToProcess(GetCurrentProcessHandle(), &shared_handle));
  SharedMemory shared(shared_handle, /*readonly=*/false);

  ASSERT_TRUE(shared.Map(contents.size()));
  EXPECT_EQ(
      contents,
      StringPiece(static_cast<const char*>(shared.memory()), contents.size()));

  ASSERT_TRUE(shmem.ShareToProcess(GetCurrentProcessHandle(), &shared_handle));
  SharedMemory readonly(shared_handle, /*readonly=*/true);

  ASSERT_TRUE(readonly.Map(contents.size()));
  EXPECT_EQ(contents,
            StringPiece(static_cast<const char*>(readonly.memory()),
                        contents.size()));
}

TEST(SharedMemoryTest, MapAt) {
  ASSERT_TRUE(SysInfo::VMAllocationGranularity() >= sizeof(uint32));
  const size_t kCount = SysInfo::VMAllocationGranularity();
  const size_t kDataSize = kCount * sizeof(uint32);

  SharedMemory memory;
  ASSERT_TRUE(memory.CreateAndMapAnonymous(kDataSize));
  uint32* ptr = static_cast<uint32*>(memory.memory());
  ASSERT_NE(ptr, static_cast<void*>(NULL));

  for (size_t i = 0; i < kCount; ++i) {
    ptr[i] = i;
  }

  memory.Unmap();

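  // Remap starting one allocation-granularity unit into the segment: element
  // i of the new view corresponds to element i + offset of the original, so
  // it should still hold the value written there above.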
  off_t offset = SysInfo::VMAllocationGranularity();
  ASSERT_TRUE(memory.MapAt(offset, kDataSize - offset));
  offset /= sizeof(uint32);
  ptr = static_cast<uint32*>(memory.memory());
  ASSERT_NE(ptr, static_cast<void*>(NULL));
  for (size_t i = offset; i < kCount; ++i) {
    EXPECT_EQ(ptr[i - offset], i);
  }
}

TEST(SharedMemoryTest, MapTwice) {
  const uint32 kDataSize = 1024;
  SharedMemory memory;
  bool rv = memory.CreateAndMapAnonymous(kDataSize);
  EXPECT_TRUE(rv);

  void* old_address = memory.memory();
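  // A second Map() on an already-mapped object should fail and must leave the
  // existing mapping untouched.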
  rv = memory.Map(kDataSize);
  EXPECT_FALSE(rv);
  EXPECT_EQ(old_address, memory.memory());
}

#if defined(OS_POSIX)
// This test is not applicable for iOS (crbug.com/399384).
#if !defined(OS_IOS)
// Create a shared memory object, mmap it, and mprotect it to PROT_EXEC.
TEST(SharedMemoryTest, AnonymousExecutable) {
  const uint32 kTestSize = 1 << 16;

  SharedMemory shared_memory;
  SharedMemoryCreateOptions options;
  options.size = kTestSize;
  options.executable = true;

  EXPECT_TRUE(shared_memory.Create(options));
  EXPECT_TRUE(shared_memory.Map(shared_memory.requested_size()));
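  // Because the segment was created with options.executable set, changing the
  // protection to PROT_READ | PROT_EXEC is expected to succeed.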
  EXPECT_EQ(0, mprotect(shared_memory.memory(), shared_memory.requested_size(),
                        PROT_READ | PROT_EXEC));
}
#endif  // !defined(OS_IOS)

// Android supports a different permission model than POSIX for its "ashmem"
// shared memory implementation. So the tests about file permissions are not
// included on Android.
#if !defined(OS_ANDROID)

// Set a umask and restore the old mask on destruction.
class ScopedUmaskSetter {
 public:
  explicit ScopedUmaskSetter(mode_t target_mask) {
    old_umask_ = umask(target_mask);
  }
  ~ScopedUmaskSetter() { umask(old_umask_); }
 private:
  mode_t old_umask_;
  DISALLOW_IMPLICIT_CONSTRUCTORS(ScopedUmaskSetter);
};

// Create a shared memory object and check its permissions.
TEST(SharedMemoryTest, FilePermissionsAnonymous) {
  const uint32 kTestSize = 1 << 8;

  SharedMemory shared_memory;
  SharedMemoryCreateOptions options;
  options.size = kTestSize;
  // Set a permissive file mode creation mask, so any restrictions observed
  // below come from SharedMemory itself rather than the umask.
  ScopedUmaskSetter permissive_mask(S_IWGRP | S_IWOTH);

  EXPECT_TRUE(shared_memory.Create(options));

  int shm_fd =
      SharedMemory::GetFdFromSharedMemoryHandle(shared_memory.handle());
  struct stat shm_stat;
  EXPECT_EQ(0, fstat(shm_fd, &shm_stat));
  // Neither the group nor others should be able to read the shared memory
  // file.
  EXPECT_FALSE(shm_stat.st_mode & S_IRWXO);
  EXPECT_FALSE(shm_stat.st_mode & S_IRWXG);
}

// Create a shared memory object and check its permissions.
TEST(SharedMemoryTest, FilePermissionsNamed) {
  const uint32_t kTestSize = 1 << 8;

  SharedMemory shared_memory;
  SharedMemoryCreateOptions options;
  options.size = kTestSize;

  // Set a permissive file mode creation mask, so any restrictions observed
  // below come from SharedMemory itself rather than the umask.
  ScopedUmaskSetter permissive_mask(S_IWGRP | S_IWOTH);

  EXPECT_TRUE(shared_memory.Create(options));

  int fd = SharedMemory::GetFdFromSharedMemoryHandle(shared_memory.handle());
  struct stat shm_stat;
  EXPECT_EQ(0, fstat(fd, &shm_stat));
  // Neither the group nor others should have been able to open the shared
  // memory file while its name existed.
  EXPECT_FALSE(shm_stat.st_mode & S_IRWXO);
  EXPECT_FALSE(shm_stat.st_mode & S_IRWXG);
}
#endif  // !defined(OS_ANDROID)

#endif  // defined(OS_POSIX)

// Map() will return addresses which are aligned to the platform page size;
// this varies from platform to platform, though. Since we'd like to advertise
// a minimum alignment that callers can count on, test for it here.
TEST(SharedMemoryTest, MapMinimumAlignment) {
  static const int kDataSize = 8192;

  SharedMemory shared_memory;
  ASSERT_TRUE(shared_memory.CreateAndMapAnonymous(kDataSize));
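  // The low bits of the mapped address, up to MAP_MINIMUM_ALIGNMENT, must all
  // be zero.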
  EXPECT_EQ(0U, reinterpret_cast<uintptr_t>(
      shared_memory.memory()) & (SharedMemory::MAP_MINIMUM_ALIGNMENT - 1));
  shared_memory.Close();
}

// iOS does not allow multiple processes.
// Android ashmem does not support named shared memory.
// Mac SharedMemory does not support named shared memory. crbug.com/345734
#if !defined(OS_IOS) && !defined(OS_ANDROID) && !defined(OS_MACOSX)
// On POSIX it is especially important that we test shmem across processes,
// not just across threads. But the test is enabled on all platforms.
class SharedMemoryProcessTest : public MultiProcessTest {
 public:
  static void CleanUp() {
    SharedMemory memory;
    memory.Delete(s_test_name_);
  }

  static int TaskTestMain() {
    int errors = 0;
    SharedMemory memory;
    bool rv = memory.CreateNamedDeprecated(s_test_name_, true, s_data_size_);
    EXPECT_TRUE(rv);
    if (rv != true)
      errors++;
    rv = memory.Map(s_data_size_);
    EXPECT_TRUE(rv);
    if (rv != true)
      errors++;
    int* ptr = static_cast<int*>(memory.memory());

    // This runs concurrently in multiple processes. Writes need to be atomic.
    subtle::Barrier_AtomicIncrement(ptr, 1);
    memory.Close();
    return errors;
  }

  static const char* const s_test_name_;
  static const uint32 s_data_size_;
};

const char* const SharedMemoryProcessTest::s_test_name_ = "MPMem";
const uint32 SharedMemoryProcessTest::s_data_size_ = 1024;

TEST_F(SharedMemoryProcessTest, SharedMemoryAcrossProcesses) {
  const int kNumTasks = 5;

  SharedMemoryProcessTest::CleanUp();

  // Create a shared memory region. Set the first word to 0.
  SharedMemory memory;
  bool rv = memory.CreateNamedDeprecated(s_test_name_, true, s_data_size_);
  ASSERT_TRUE(rv);
  rv = memory.Map(s_data_size_);
  ASSERT_TRUE(rv);
  int* ptr = static_cast<int*>(memory.memory());
  *ptr = 0;
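  // Note: resetting *ptr non-atomically is safe only because no child
  // processes have been spawned yet at this point.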

  // Start |kNumTasks| processes, each of which atomically increments the first
  // word by 1.
  Process processes[kNumTasks];
  for (int index = 0; index < kNumTasks; ++index) {
    processes[index] = SpawnChild("SharedMemoryTestMain");
    ASSERT_TRUE(processes[index].IsValid());
  }

  // Check that each process exited correctly.
  int exit_code = 0;
  for (int index = 0; index < kNumTasks; ++index) {
    EXPECT_TRUE(processes[index].WaitForExit(&exit_code));
    EXPECT_EQ(0, exit_code);
  }

  // Check that the shared memory region reflects |kNumTasks| increments.
  ASSERT_EQ(kNumTasks, *ptr);

  memory.Close();
  SharedMemoryProcessTest::CleanUp();
}

MULTIPROCESS_TEST_MAIN(SharedMemoryTestMain) {
  return SharedMemoryProcessTest::TaskTestMain();
}
#endif  // !defined(OS_IOS) && !defined(OS_ANDROID) && !defined(OS_MACOSX)

}  // namespace base