Bug 1632310 [wpt PR 23186] - Add test for computed versus resolved style., a=testonly
[gecko.git] / gfx / layers / mlgpu / SharedBufferMLGPU.cpp
blobb6c9978c805412bc3ef794f1c32243f85bdcea65
1 /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* vim: set ts=8 sts=2 et sw=2 tw=80: */
3 /* This Source Code Form is subject to the terms of the Mozilla Public
4 * License, v. 2.0. If a copy of the MPL was not distributed with this
5 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
7 #include "SharedBufferMLGPU.h"
8 #include "BufferCache.h"
9 #include "MLGDevice.h"
11 namespace mozilla {
12 namespace layers {
14 SharedBufferMLGPU::SharedBufferMLGPU(MLGDevice* aDevice, MLGBufferType aType,
15 size_t aDefaultSize)
16 : mDevice(aDevice),
17 mType(aType),
18 mDefaultSize(aDefaultSize),
19 mCanUseOffsetAllocation(true),
20 mCurrentPosition(0),
21 mMaxSize(0),
22 mMap(),
23 mMapped(false),
24 mBytesUsedThisFrame(0),
25 mNumSmallFrames(0) {
26 MOZ_COUNT_CTOR(SharedBufferMLGPU);
29 SharedBufferMLGPU::~SharedBufferMLGPU() {
30 MOZ_COUNT_DTOR(SharedBufferMLGPU);
31 Unmap();
34 bool SharedBufferMLGPU::Init() {
35 // If we can't use buffer offset binding, we never allocated shared buffers.
36 if (!mCanUseOffsetAllocation) {
37 return true;
40 // If we can use offset binding, allocate an initial shared buffer now.
41 if (!GrowBuffer(mDefaultSize)) {
42 return false;
44 return true;
47 void SharedBufferMLGPU::Reset() {
48 // We shouldn't be mapped here, but just in case, unmap now.
49 Unmap();
50 mBytesUsedThisFrame = 0;
52 // If we allocated a large buffer for a particularly heavy layer tree,
53 // but have not used most of the buffer again for many frames, we
54 // discard the buffer. This is to prevent having to perform large
55 // pointless uploads after visiting a single havy page - it also
56 // lessens ping-ponging between large and small buffers.
57 if (mBuffer && (mBuffer->GetSize() > mDefaultSize * 4) &&
58 mNumSmallFrames >= 10) {
59 mBuffer = nullptr;
62 // Note that we do not aggressively map a new buffer. There's no reason to,
63 // and it'd cause unnecessary uploads when painting empty frames.
66 bool SharedBufferMLGPU::EnsureMappedBuffer(size_t aBytes) {
67 if (!mBuffer || (mMaxSize - mCurrentPosition < aBytes)) {
68 if (!GrowBuffer(aBytes)) {
69 return false;
72 if (!mMapped && !Map()) {
73 return false;
75 return true;
// We don't want to cache large buffers, since it results in larger uploads
// that might not be needed.
// (constexpr rather than const: guaranteed compile-time constant, usable in
// constant expressions, and consistent with modern C++ idiom.)
static constexpr size_t kMaxCachedBufferSize = 128 * 1024;
82 bool SharedBufferMLGPU::GrowBuffer(size_t aBytes) {
83 // We only pre-allocate buffers if we can use offset allocation.
84 MOZ_ASSERT(mCanUseOffsetAllocation);
86 // Unmap the previous buffer. This will retain mBuffer, but free up the
87 // address space used by its mapping.
88 Unmap();
90 size_t maybeSize = mDefaultSize;
91 if (mBuffer) {
92 // Try to first grow the previous allocation size.
93 maybeSize = std::min(kMaxCachedBufferSize, mBuffer->GetSize() * 2);
96 size_t bytes = std::max(aBytes, maybeSize);
97 mBuffer = mDevice->CreateBuffer(mType, bytes, MLGUsage::Dynamic);
98 if (!mBuffer) {
99 return false;
102 mCurrentPosition = 0;
103 mMaxSize = mBuffer->GetSize();
104 return true;
107 void SharedBufferMLGPU::PrepareForUsage() {
108 Unmap();
110 if (mBytesUsedThisFrame <= mDefaultSize) {
111 mNumSmallFrames++;
112 } else {
113 mNumSmallFrames = 0;
117 bool SharedBufferMLGPU::Map() {
118 MOZ_ASSERT(mBuffer);
119 MOZ_ASSERT(!mMapped);
121 if (!mDevice->Map(mBuffer, MLGMapType::WRITE_DISCARD, &mMap)) {
122 // Don't retain the buffer, it's useless if we can't map it.
123 mBuffer = nullptr;
124 return false;
127 mCurrentPosition = 0;
128 mMapped = true;
129 return true;
132 void SharedBufferMLGPU::Unmap() {
133 if (!mMapped) {
134 return;
137 mBytesUsedThisFrame += mCurrentPosition;
139 mDevice->Unmap(mBuffer);
140 mMap = MLGMappedResource();
141 mMapped = false;
144 uint8_t* SharedBufferMLGPU::GetBufferPointer(size_t aBytes,
145 ptrdiff_t* aOutOffset,
146 RefPtr<MLGBuffer>* aOutBuffer) {
147 if (!EnsureMappedBuffer(aBytes)) {
148 return nullptr;
151 ptrdiff_t newPos = mCurrentPosition + aBytes;
152 MOZ_ASSERT(size_t(newPos) <= mMaxSize);
154 *aOutOffset = mCurrentPosition;
155 *aOutBuffer = mBuffer;
157 uint8_t* ptr = reinterpret_cast<uint8_t*>(mMap.mData) + mCurrentPosition;
158 mCurrentPosition = newPos;
159 return ptr;
162 VertexBufferSection::VertexBufferSection()
163 : mOffset(-1), mNumVertices(0), mStride(0) {}
165 void VertexBufferSection::Init(MLGBuffer* aBuffer, ptrdiff_t aOffset,
166 size_t aNumVertices, size_t aStride) {
167 mBuffer = aBuffer;
168 mOffset = aOffset;
169 mNumVertices = aNumVertices;
170 mStride = aStride;
173 ConstantBufferSection::ConstantBufferSection()
174 : mOffset(-1), mNumBytes(0), mNumItems(0) {}
176 void ConstantBufferSection::Init(MLGBuffer* aBuffer, ptrdiff_t aOffset,
177 size_t aBytes, size_t aNumItems) {
178 mBuffer = aBuffer;
179 mOffset = aOffset;
180 mNumBytes = aBytes;
181 mNumItems = aNumItems;
184 SharedVertexBuffer::SharedVertexBuffer(MLGDevice* aDevice, size_t aDefaultSize)
185 : SharedBufferMLGPU(aDevice, MLGBufferType::Vertex, aDefaultSize) {}
187 bool SharedVertexBuffer::Allocate(VertexBufferSection* aHolder,
188 size_t aNumItems, size_t aSizeOfItem,
189 const void* aData) {
190 RefPtr<MLGBuffer> buffer;
191 ptrdiff_t offset;
192 size_t bytes = aSizeOfItem * aNumItems;
193 uint8_t* ptr = GetBufferPointer(bytes, &offset, &buffer);
194 if (!ptr) {
195 return false;
198 memcpy(ptr, aData, bytes);
199 aHolder->Init(buffer, offset, aNumItems, aSizeOfItem);
200 return true;
203 AutoBufferUploadBase::AutoBufferUploadBase() : mPtr(nullptr) {}
205 AutoBufferUploadBase::~AutoBufferUploadBase() {
206 if (mBuffer) {
207 UnmapBuffer();
211 void AutoBufferUploadBase::Init(void* aPtr, MLGDevice* aDevice,
212 MLGBuffer* aBuffer) {
213 MOZ_ASSERT(!mPtr && aPtr);
214 mPtr = aPtr;
215 mDevice = aDevice;
216 mBuffer = aBuffer;
219 SharedConstantBuffer::SharedConstantBuffer(MLGDevice* aDevice,
220 size_t aDefaultSize)
221 : SharedBufferMLGPU(aDevice, MLGBufferType::Constant, aDefaultSize) {
222 mMaxConstantBufferBindSize = aDevice->GetMaxConstantBufferBindSize();
223 mCanUseOffsetAllocation = aDevice->CanUseConstantBufferOffsetBinding();
226 bool SharedConstantBuffer::Allocate(ConstantBufferSection* aHolder,
227 AutoBufferUploadBase* aPtr,
228 size_t aNumItems, size_t aSizeOfItem) {
229 MOZ_ASSERT(aSizeOfItem % 16 == 0, "Items must be padded to 16 bytes");
231 size_t bytes = aNumItems * aSizeOfItem;
232 if (bytes > mMaxConstantBufferBindSize) {
233 gfxWarning()
234 << "Attempted to allocate too many bytes into a constant buffer";
235 return false;
238 RefPtr<MLGBuffer> buffer;
239 ptrdiff_t offset;
240 if (!GetBufferPointer(aPtr, bytes, &offset, &buffer)) {
241 return false;
244 aHolder->Init(buffer, offset, bytes, aNumItems);
245 return true;
248 uint8_t* SharedConstantBuffer::AllocateNewBuffer(
249 size_t aBytes, ptrdiff_t* aOutOffset, RefPtr<MLGBuffer>* aOutBuffer) {
250 RefPtr<MLGBuffer> buffer;
251 if (BufferCache* cache = mDevice->GetConstantBufferCache()) {
252 buffer = cache->GetOrCreateBuffer(aBytes);
253 } else {
254 buffer = mDevice->CreateBuffer(MLGBufferType::Constant, aBytes,
255 MLGUsage::Dynamic);
257 if (!buffer) {
258 return nullptr;
261 MLGMappedResource map;
262 if (!mDevice->Map(buffer, MLGMapType::WRITE_DISCARD, &map)) {
263 return nullptr;
266 // Signal that offsetting is not supported.
267 *aOutOffset = -1;
268 *aOutBuffer = buffer;
269 return reinterpret_cast<uint8_t*>(map.mData);
272 void AutoBufferUploadBase::UnmapBuffer() { mDevice->Unmap(mBuffer); }
274 } // namespace layers
275 } // namespace mozilla