/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "IpcResourceUpdateQueue.h"
#include <string.h>
#include <algorithm>
#include "mozilla/Maybe.h"
#include "mozilla/ipc/SharedMemory.h"
#include "mozilla/layers/PTextureChild.h"
#include "mozilla/layers/WebRenderBridgeChild.h"

namespace mozilla {
namespace wr {

using namespace mozilla::layers;

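// ShmSegmentsWriter serializes resource payloads into shared memory so the
// bytes can travel alongside the WebRenderBridge IPC messages instead of
// inside them. Small payloads are packed back-to-back into fixed-size,
// refcounted shmem chunks; oversized payloads get a dedicated shmem (see
// Write() below).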
ShmSegmentsWriter::ShmSegmentsWriter(layers::WebRenderBridgeChild* aAllocator,
                                     size_t aChunkSize)
    : mShmAllocator(aAllocator), mCursor(0), mChunkSize(aChunkSize) {
  MOZ_ASSERT(mShmAllocator);
}

ShmSegmentsWriter::~ShmSegmentsWriter() { Clear(); }

ShmSegmentsWriter::ShmSegmentsWriter(ShmSegmentsWriter&& aOther) noexcept
    : mSmallAllocs(std::move(aOther.mSmallAllocs)),
      mLargeAllocs(std::move(aOther.mLargeAllocs)),
      mShmAllocator(aOther.mShmAllocator),
      mCursor(aOther.mCursor),
      mChunkSize(aOther.mChunkSize) {
  aOther.mCursor = 0;
}

ShmSegmentsWriter& ShmSegmentsWriter::operator=(
    ShmSegmentsWriter&& aOther) noexcept {
  MOZ_ASSERT(IsEmpty(), "Will forget existing updates!");
  Clear();
  mSmallAllocs = std::move(aOther.mSmallAllocs);
  mLargeAllocs = std::move(aOther.mLargeAllocs);
  mShmAllocator = aOther.mShmAllocator;
  mCursor = aOther.mCursor;
  mChunkSize = aOther.mChunkSize;
  aOther.mCursor = 0;
  return *this;
}

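// Writes aBytes into shared memory and returns an OffsetRange describing
// where the bytes ended up. Payloads of at least four chunks' worth of data
// bypass the small-chunk stream and get a dedicated large allocation. A
// zero-length range signals an allocation failure.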
layers::OffsetRange ShmSegmentsWriter::Write(Range<uint8_t> aBytes) {
  const size_t start = mCursor;
  const size_t length = aBytes.length();

  if (length >= mChunkSize * 4) {
    auto range = AllocLargeChunk(length);
    if (range.length()) {
      // Allocation was successful
      uint8_t* dstPtr = mLargeAllocs.LastElement().get<uint8_t>();
      memcpy(dstPtr, aBytes.begin().get(), length);
    }
    return range;
  }

  int remainingBytesToCopy = length;

  size_t srcCursor = 0;
  size_t dstCursor = mCursor;
  size_t currAllocLen = mSmallAllocs.Length();

  while (remainingBytesToCopy > 0) {
    if (dstCursor >= mSmallAllocs.Length() * mChunkSize) {
      if (!AllocChunk()) {
        // Allocation failed, so roll back to the state at the start of this
        // Write() call and abort.
        for (size_t i = mSmallAllocs.Length(); currAllocLen < i; i--) {
          MOZ_ASSERT(i > 0);
          RefCountedShmem& shm = mSmallAllocs.ElementAt(i - 1);
          RefCountedShm::Dealloc(mShmAllocator, shm);
          mSmallAllocs.RemoveElementAt(i - 1);
        }
        MOZ_ASSERT(mSmallAllocs.Length() == currAllocLen);
        return layers::OffsetRange(0, start, 0);
      }
      // Allocation succeeded, so dstCursor should now be pointing to
      // something inside the allocation buffer
      MOZ_ASSERT(dstCursor < (mSmallAllocs.Length() * mChunkSize));
    }

    const size_t dstMaxOffset = mChunkSize * mSmallAllocs.Length();
    const size_t dstBaseOffset = mChunkSize * (mSmallAllocs.Length() - 1);

    MOZ_ASSERT(dstCursor >= dstBaseOffset);
    MOZ_ASSERT(dstCursor <= dstMaxOffset);

    size_t availableRange = dstMaxOffset - dstCursor;
    size_t copyRange = std::min<int>(availableRange, remainingBytesToCopy);

    uint8_t* srcPtr = &aBytes[srcCursor];
    uint8_t* dstPtr = RefCountedShm::GetBytes(mSmallAllocs.LastElement()) +
                      (dstCursor - dstBaseOffset);

    memcpy(dstPtr, srcPtr, copyRange);

    srcCursor += copyRange;
    dstCursor += copyRange;
    remainingBytesToCopy -= copyRange;

    // sanity check
    MOZ_ASSERT(remainingBytesToCopy >= 0);
  }

  mCursor += length;

  return layers::OffsetRange(0, start, length);
}

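// Small chunks are refcounted (see RefCountedShm) and only deallocated once
// their refcount drops to zero in ReleaseShmems(); large chunks are plain,
// one-off ipc::Shmem allocations.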
bool ShmSegmentsWriter::AllocChunk() {
  RefCountedShmem shm;
  if (!mShmAllocator->AllocResourceShmem(mChunkSize, shm)) {
    gfxCriticalNote << "ShmSegmentsWriter failed to allocate chunk #"
                    << mSmallAllocs.Length();
    MOZ_ASSERT(false, "ShmSegmentsWriter fails to allocate chunk");
    return false;
  }
  RefCountedShm::AddRef(shm);
  mSmallAllocs.AppendElement(shm);
  return true;
}

layers::OffsetRange ShmSegmentsWriter::AllocLargeChunk(size_t aSize) {
  ipc::Shmem shm;
  auto shmType = ipc::SharedMemory::SharedMemoryType::TYPE_BASIC;
  if (!mShmAllocator->AllocShmem(aSize, shmType, &shm)) {
    gfxCriticalNote
        << "ShmSegmentsWriter failed to allocate large chunk of size " << aSize;
    MOZ_ASSERT(false, "ShmSegmentsWriter fails to allocate large chunk");
    return layers::OffsetRange(0, 0, 0);
  }
  mLargeAllocs.AppendElement(shm);

  return layers::OffsetRange(mLargeAllocs.Length(), 0, aSize);
}

void ShmSegmentsWriter::Flush(nsTArray<RefCountedShmem>& aSmallAllocs,
                              nsTArray<ipc::Shmem>& aLargeAllocs) {
  MOZ_ASSERT(aSmallAllocs.IsEmpty());
  MOZ_ASSERT(aLargeAllocs.IsEmpty());
  mSmallAllocs.SwapElements(aSmallAllocs);
  mLargeAllocs.SwapElements(aLargeAllocs);
  mCursor = 0;
}

bool ShmSegmentsWriter::IsEmpty() const { return mCursor == 0; }

void ShmSegmentsWriter::Clear() {
  if (mShmAllocator) {
    IpcResourceUpdateQueue::ReleaseShmems(mShmAllocator, mSmallAllocs);
    IpcResourceUpdateQueue::ReleaseShmems(mShmAllocator, mLargeAllocs);
  }
  mCursor = 0;
}

ShmSegmentsReader::ShmSegmentsReader(
    const nsTArray<RefCountedShmem>& aSmallShmems,
    const nsTArray<ipc::Shmem>& aLargeShmems)
    : mSmallAllocs(aSmallShmems), mLargeAllocs(aLargeShmems), mChunkSize(0) {
  if (mSmallAllocs.IsEmpty()) {
    return;
  }

  mChunkSize = RefCountedShm::GetSize(mSmallAllocs[0]);

  // Check that all shmems are readable and have the same size. If anything
  // isn't right, set mChunkSize to zero, which signifies that the reader is
  // in an invalid state and Read calls will return false.
  for (const auto& shm : mSmallAllocs) {
    if (!RefCountedShm::IsValid(shm) ||
        RefCountedShm::GetSize(shm) != mChunkSize ||
        RefCountedShm::GetBytes(shm) == nullptr) {
      mChunkSize = 0;
      return;
    }
  }

  for (const auto& shm : mLargeAllocs) {
    if (!shm.IsReadable() || shm.get<uint8_t>() == nullptr) {
      mChunkSize = 0;
      return;
    }
  }
}

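// OffsetRange::source() encodes which backing store holds the bytes: zero
// means the small-chunk stream, and a non-zero value N refers to
// mLargeAllocs[N - 1].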
bool ShmSegmentsReader::ReadLarge(const layers::OffsetRange& aRange,
                                  wr::Vec<uint8_t>& aInto) {
  // source = zero is for small allocs.
  MOZ_RELEASE_ASSERT(aRange.source() != 0);
  if (aRange.source() > mLargeAllocs.Length()) {
    return false;
  }
  size_t id = aRange.source() - 1;
  const ipc::Shmem& shm = mLargeAllocs[id];
  if (shm.Size<uint8_t>() < aRange.length()) {
    return false;
  }

  uint8_t* srcPtr = shm.get<uint8_t>();
  aInto.PushBytes(Range<uint8_t>(srcPtr, aRange.length()));

  return true;
}

bool ShmSegmentsReader::Read(const layers::OffsetRange& aRange,
                             wr::Vec<uint8_t>& aInto) {
  if (aRange.length() == 0) {
    return true;
  }

  if (aRange.source() != 0) {
    return ReadLarge(aRange, aInto);
  }

  if (mChunkSize == 0) {
    return false;
  }

  if (aRange.start() + aRange.length() > mChunkSize * mSmallAllocs.Length()) {
    return false;
  }

  size_t initialLength = aInto.Length();

  size_t srcCursor = aRange.start();
  int remainingBytesToCopy = aRange.length();
  while (remainingBytesToCopy > 0) {
    const size_t shm_idx = srcCursor / mChunkSize;
    const size_t ptrOffset = srcCursor % mChunkSize;
    const size_t copyRange =
        std::min<int>(remainingBytesToCopy, mChunkSize - ptrOffset);
    uint8_t* srcPtr =
        RefCountedShm::GetBytes(mSmallAllocs[shm_idx]) + ptrOffset;

    aInto.PushBytes(Range<uint8_t>(srcPtr, copyRange));

    srcCursor += copyRange;
    remainingBytesToCopy -= copyRange;
  }

  return aInto.Length() - initialLength == aRange.length();
}

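// IpcResourceUpdateQueue pairs the list of resource update ops (mUpdates)
// with the ShmSegmentsWriter that holds their payload bytes; Flush() hands
// both off to the caller for the IPC transaction.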
IpcResourceUpdateQueue::IpcResourceUpdateQueue(
    layers::WebRenderBridgeChild* aAllocator, wr::RenderRoot aRenderRoot,
    size_t aChunkSize)
    : mWriter(aAllocator, aChunkSize), mRenderRoot(aRenderRoot) {}

IpcResourceUpdateQueue::IpcResourceUpdateQueue(
    IpcResourceUpdateQueue&& aOther) noexcept
    : mWriter(std::move(aOther.mWriter)),
      mUpdates(std::move(aOther.mUpdates)),
      mRenderRoot(aOther.mRenderRoot) {}

IpcResourceUpdateQueue& IpcResourceUpdateQueue::operator=(
    IpcResourceUpdateQueue&& aOther) noexcept {
  MOZ_ASSERT(IsEmpty(), "Will forget existing updates!");
  mWriter = std::move(aOther.mWriter);
  mUpdates = std::move(aOther.mUpdates);
  mRenderRoot = aOther.mRenderRoot;
  return *this;
}

void IpcResourceUpdateQueue::ReplaceResources(IpcResourceUpdateQueue&& aOther) {
  MOZ_ASSERT(IsEmpty(), "Will forget existing updates!");
  MOZ_ASSERT(!aOther.HasAnySubQueue(), "Subqueues will be lost!");
  MOZ_ASSERT(mRenderRoot == aOther.mRenderRoot);
  mWriter = std::move(aOther.mWriter);
  mUpdates = std::move(aOther.mUpdates);
  mRenderRoot = aOther.mRenderRoot;
}

bool IpcResourceUpdateQueue::AddImage(ImageKey key,
                                      const ImageDescriptor& aDescriptor,
                                      Range<uint8_t> aBytes) {
  auto bytes = mWriter.Write(aBytes);
  if (!bytes.length()) {
    return false;
  }
  mUpdates.AppendElement(layers::OpAddImage(aDescriptor, bytes, 0, key));
  return true;
}

bool IpcResourceUpdateQueue::AddBlobImage(BlobImageKey key,
                                          const ImageDescriptor& aDescriptor,
                                          Range<uint8_t> aBytes,
                                          ImageIntRect aVisibleRect) {
  MOZ_RELEASE_ASSERT(aDescriptor.width > 0 && aDescriptor.height > 0);
  auto bytes = mWriter.Write(aBytes);
  if (!bytes.length()) {
    return false;
  }
  mUpdates.AppendElement(
      layers::OpAddBlobImage(aDescriptor, bytes, aVisibleRect, 0, key));
  return true;
}

void IpcResourceUpdateQueue::AddPrivateExternalImage(
    wr::ExternalImageId aExtId, wr::ImageKey aKey, wr::ImageDescriptor aDesc) {
  mUpdates.AppendElement(
      layers::OpAddPrivateExternalImage(aExtId, aKey, aDesc));
}

void IpcResourceUpdateQueue::AddSharedExternalImage(wr::ExternalImageId aExtId,
                                                    wr::ImageKey aKey) {
  mUpdates.AppendElement(layers::OpAddSharedExternalImage(aExtId, aKey));
}

void IpcResourceUpdateQueue::PushExternalImageForTexture(
    wr::ExternalImageId aExtId, wr::ImageKey aKey,
    layers::TextureClient* aTexture, bool aIsUpdate) {
  MOZ_ASSERT(aTexture);
  MOZ_ASSERT(aTexture->GetIPDLActor());
  MOZ_RELEASE_ASSERT(aTexture->GetIPDLActor()->GetIPCChannel() ==
                     mWriter.WrBridge()->GetIPCChannel());
  mUpdates.AppendElement(layers::OpPushExternalImageForTexture(
      aExtId, aKey, nullptr, aTexture->GetIPDLActor(), aIsUpdate));
}

bool IpcResourceUpdateQueue::UpdateImageBuffer(
    ImageKey aKey, const ImageDescriptor& aDescriptor, Range<uint8_t> aBytes) {
  auto bytes = mWriter.Write(aBytes);
  if (!bytes.length()) {
    return false;
  }
  mUpdates.AppendElement(layers::OpUpdateImage(aDescriptor, bytes, aKey));
  return true;
}

bool IpcResourceUpdateQueue::UpdateBlobImage(BlobImageKey aKey,
                                             const ImageDescriptor& aDescriptor,
                                             Range<uint8_t> aBytes,
                                             ImageIntRect aVisibleRect,
                                             ImageIntRect aDirtyRect) {
  MOZ_ASSERT(aVisibleRect.width > 0 && aVisibleRect.height > 0);

  auto bytes = mWriter.Write(aBytes);
  if (!bytes.length()) {
    return false;
  }
  mUpdates.AppendElement(layers::OpUpdateBlobImage(aDescriptor, bytes, aKey,
                                                   aVisibleRect, aDirtyRect));
  return true;
}

void IpcResourceUpdateQueue::UpdatePrivateExternalImage(
    wr::ExternalImageId aExtId, wr::ImageKey aKey,
    const wr::ImageDescriptor& aDesc, ImageIntRect aDirtyRect) {
  mUpdates.AppendElement(
      layers::OpUpdatePrivateExternalImage(aExtId, aKey, aDesc, aDirtyRect));
}

void IpcResourceUpdateQueue::UpdateSharedExternalImage(
    wr::ExternalImageId aExtId, wr::ImageKey aKey, ImageIntRect aDirtyRect) {
  mUpdates.AppendElement(
      layers::OpUpdateSharedExternalImage(aExtId, aKey, aDirtyRect));
}

void IpcResourceUpdateQueue::SetBlobImageVisibleArea(
    wr::BlobImageKey aKey, const ImageIntRect& aArea) {
  mUpdates.AppendElement(layers::OpSetBlobImageVisibleArea(aArea, aKey));
}

void IpcResourceUpdateQueue::DeleteImage(ImageKey aKey) {
  mUpdates.AppendElement(layers::OpDeleteImage(aKey));
}

void IpcResourceUpdateQueue::DeleteBlobImage(BlobImageKey aKey) {
  mUpdates.AppendElement(layers::OpDeleteBlobImage(aKey));
}

bool IpcResourceUpdateQueue::AddRawFont(wr::FontKey aKey, Range<uint8_t> aBytes,
                                        uint32_t aIndex) {
  auto bytes = mWriter.Write(aBytes);
  if (!bytes.length()) {
    return false;
  }
  mUpdates.AppendElement(layers::OpAddRawFont(bytes, aIndex, aKey));
  return true;
}

bool IpcResourceUpdateQueue::AddFontDescriptor(wr::FontKey aKey,
                                               Range<uint8_t> aBytes,
                                               uint32_t aIndex) {
  auto bytes = mWriter.Write(aBytes);
  if (!bytes.length()) {
    return false;
  }
  mUpdates.AppendElement(layers::OpAddFontDescriptor(bytes, aIndex, aKey));
  return true;
}

void IpcResourceUpdateQueue::DeleteFont(wr::FontKey aKey) {
  mUpdates.AppendElement(layers::OpDeleteFont(aKey));
}

void IpcResourceUpdateQueue::AddFontInstance(
    wr::FontInstanceKey aKey, wr::FontKey aFontKey, float aGlyphSize,
    const wr::FontInstanceOptions* aOptions,
    const wr::FontInstancePlatformOptions* aPlatformOptions,
    Range<const gfx::FontVariation> aVariations) {
  auto bytes = mWriter.WriteAsBytes(aVariations);
  mUpdates.AppendElement(layers::OpAddFontInstance(
      aOptions ? Some(*aOptions) : Nothing(),
      aPlatformOptions ? Some(*aPlatformOptions) : Nothing(), bytes, aKey,
      aFontKey, aGlyphSize));
}

void IpcResourceUpdateQueue::DeleteFontInstance(wr::FontInstanceKey aKey) {
  mUpdates.AppendElement(layers::OpDeleteFontInstance(aKey));
}

void IpcResourceUpdateQueue::Flush(
    nsTArray<layers::OpUpdateResource>& aUpdates,
    nsTArray<layers::RefCountedShmem>& aSmallAllocs,
    nsTArray<ipc::Shmem>& aLargeAllocs) {
  aUpdates.Clear();
  mUpdates.SwapElements(aUpdates);
  mWriter.Flush(aSmallAllocs, aLargeAllocs);
}

bool IpcResourceUpdateQueue::IsEmpty() const {
  if (mUpdates.Length() == 0) {
    MOZ_ASSERT(mWriter.IsEmpty());
    return true;
  }
  return false;
}

void IpcResourceUpdateQueue::Clear() {
  mWriter.Clear();
  mUpdates.Clear();
}

// static
void IpcResourceUpdateQueue::ReleaseShmems(
    ipc::IProtocol* aShmAllocator, nsTArray<layers::RefCountedShmem>& aShms) {
  for (auto& shm : aShms) {
    if (RefCountedShm::IsValid(shm) && RefCountedShm::Release(shm) == 0) {
      RefCountedShm::Dealloc(aShmAllocator, shm);
    }
  }
  aShms.Clear();
}

// static
void IpcResourceUpdateQueue::ReleaseShmems(ipc::IProtocol* aShmAllocator,
                                           nsTArray<ipc::Shmem>& aShms) {
  for (auto& shm : aShms) {
    aShmAllocator->DeallocShmem(shm);
  }
  aShms.Clear();
}

}  // namespace wr
}  // namespace mozilla