/* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* vim: set ts=8 sts=2 et sw=2 tw=80: */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "SharedBufferMLGPU.h"

#include "BufferCache.h"
// NOTE(review): mDevice->Map/Unmap/CreateBuffer calls below need the complete
// MLGDevice type; the include was presumably dropped by the mangled paste.
#include "MLGDevice.h"

namespace mozilla {
namespace layers {
14 SharedBufferMLGPU::SharedBufferMLGPU(MLGDevice
* aDevice
, MLGBufferType aType
,
18 mDefaultSize(aDefaultSize
),
19 mCanUseOffsetAllocation(true),
24 mBytesUsedThisFrame(0),
26 MOZ_COUNT_CTOR(SharedBufferMLGPU
);
29 SharedBufferMLGPU::~SharedBufferMLGPU() {
30 MOZ_COUNT_DTOR(SharedBufferMLGPU
);
34 bool SharedBufferMLGPU::Init() {
35 // If we can't use buffer offset binding, we never allocated shared buffers.
36 if (!mCanUseOffsetAllocation
) {
40 // If we can use offset binding, allocate an initial shared buffer now.
41 if (!GrowBuffer(mDefaultSize
)) {
47 void SharedBufferMLGPU::Reset() {
48 // We shouldn't be mapped here, but just in case, unmap now.
50 mBytesUsedThisFrame
= 0;
52 // If we allocated a large buffer for a particularly heavy layer tree,
53 // but have not used most of the buffer again for many frames, we
54 // discard the buffer. This is to prevent having to perform large
55 // pointless uploads after visiting a single havy page - it also
56 // lessens ping-ponging between large and small buffers.
57 if (mBuffer
&& (mBuffer
->GetSize() > mDefaultSize
* 4) &&
58 mNumSmallFrames
>= 10) {
62 // Note that we do not aggressively map a new buffer. There's no reason to,
63 // and it'd cause unnecessary uploads when painting empty frames.
66 bool SharedBufferMLGPU::EnsureMappedBuffer(size_t aBytes
) {
67 if (!mBuffer
|| (mMaxSize
- mCurrentPosition
< aBytes
)) {
68 if (!GrowBuffer(aBytes
)) {
72 if (!mMapped
&& !Map()) {
// We don't want to cache large buffers, since it results in larger uploads
// that might not be needed. This caps the doubling growth in GrowBuffer().
static const size_t kMaxCachedBufferSize = 128 * 1024;
82 bool SharedBufferMLGPU::GrowBuffer(size_t aBytes
) {
83 // We only pre-allocate buffers if we can use offset allocation.
84 MOZ_ASSERT(mCanUseOffsetAllocation
);
86 // Unmap the previous buffer. This will retain mBuffer, but free up the
87 // address space used by its mapping.
90 size_t maybeSize
= mDefaultSize
;
92 // Try to first grow the previous allocation size.
93 maybeSize
= std::min(kMaxCachedBufferSize
, mBuffer
->GetSize() * 2);
96 size_t bytes
= std::max(aBytes
, maybeSize
);
97 mBuffer
= mDevice
->CreateBuffer(mType
, bytes
, MLGUsage::Dynamic
);
102 mCurrentPosition
= 0;
103 mMaxSize
= mBuffer
->GetSize();
107 void SharedBufferMLGPU::PrepareForUsage() {
110 if (mBytesUsedThisFrame
<= mDefaultSize
) {
117 bool SharedBufferMLGPU::Map() {
119 MOZ_ASSERT(!mMapped
);
121 if (!mDevice
->Map(mBuffer
, MLGMapType::WRITE_DISCARD
, &mMap
)) {
122 // Don't retain the buffer, it's useless if we can't map it.
127 mCurrentPosition
= 0;
132 void SharedBufferMLGPU::Unmap() {
137 mBytesUsedThisFrame
+= mCurrentPosition
;
139 mDevice
->Unmap(mBuffer
);
140 mMap
= MLGMappedResource();
144 uint8_t* SharedBufferMLGPU::GetBufferPointer(size_t aBytes
,
145 ptrdiff_t* aOutOffset
,
146 RefPtr
<MLGBuffer
>* aOutBuffer
) {
147 if (!EnsureMappedBuffer(aBytes
)) {
151 ptrdiff_t newPos
= mCurrentPosition
+ aBytes
;
152 MOZ_ASSERT(size_t(newPos
) <= mMaxSize
);
154 *aOutOffset
= mCurrentPosition
;
155 *aOutBuffer
= mBuffer
;
157 uint8_t* ptr
= reinterpret_cast<uint8_t*>(mMap
.mData
) + mCurrentPosition
;
158 mCurrentPosition
= newPos
;
162 VertexBufferSection::VertexBufferSection()
163 : mOffset(-1), mNumVertices(0), mStride(0) {}
165 void VertexBufferSection::Init(MLGBuffer
* aBuffer
, ptrdiff_t aOffset
,
166 size_t aNumVertices
, size_t aStride
) {
169 mNumVertices
= aNumVertices
;
173 ConstantBufferSection::ConstantBufferSection()
174 : mOffset(-1), mNumBytes(0), mNumItems(0) {}
176 void ConstantBufferSection::Init(MLGBuffer
* aBuffer
, ptrdiff_t aOffset
,
177 size_t aBytes
, size_t aNumItems
) {
181 mNumItems
= aNumItems
;
184 SharedVertexBuffer::SharedVertexBuffer(MLGDevice
* aDevice
, size_t aDefaultSize
)
185 : SharedBufferMLGPU(aDevice
, MLGBufferType::Vertex
, aDefaultSize
) {}
187 bool SharedVertexBuffer::Allocate(VertexBufferSection
* aHolder
,
188 size_t aNumItems
, size_t aSizeOfItem
,
190 RefPtr
<MLGBuffer
> buffer
;
192 size_t bytes
= aSizeOfItem
* aNumItems
;
193 uint8_t* ptr
= GetBufferPointer(bytes
, &offset
, &buffer
);
198 memcpy(ptr
, aData
, bytes
);
199 aHolder
->Init(buffer
, offset
, aNumItems
, aSizeOfItem
);
203 AutoBufferUploadBase::AutoBufferUploadBase() : mPtr(nullptr) {}
205 AutoBufferUploadBase::~AutoBufferUploadBase() {
211 void AutoBufferUploadBase::Init(void* aPtr
, MLGDevice
* aDevice
,
212 MLGBuffer
* aBuffer
) {
213 MOZ_ASSERT(!mPtr
&& aPtr
);
219 SharedConstantBuffer::SharedConstantBuffer(MLGDevice
* aDevice
,
221 : SharedBufferMLGPU(aDevice
, MLGBufferType::Constant
, aDefaultSize
) {
222 mMaxConstantBufferBindSize
= aDevice
->GetMaxConstantBufferBindSize();
223 mCanUseOffsetAllocation
= aDevice
->CanUseConstantBufferOffsetBinding();
226 bool SharedConstantBuffer::Allocate(ConstantBufferSection
* aHolder
,
227 AutoBufferUploadBase
* aPtr
,
228 size_t aNumItems
, size_t aSizeOfItem
) {
229 MOZ_ASSERT(aSizeOfItem
% 16 == 0, "Items must be padded to 16 bytes");
231 size_t bytes
= aNumItems
* aSizeOfItem
;
232 if (bytes
> mMaxConstantBufferBindSize
) {
234 << "Attempted to allocate too many bytes into a constant buffer";
238 RefPtr
<MLGBuffer
> buffer
;
240 if (!GetBufferPointer(aPtr
, bytes
, &offset
, &buffer
)) {
244 aHolder
->Init(buffer
, offset
, bytes
, aNumItems
);
248 uint8_t* SharedConstantBuffer::AllocateNewBuffer(
249 size_t aBytes
, ptrdiff_t* aOutOffset
, RefPtr
<MLGBuffer
>* aOutBuffer
) {
250 RefPtr
<MLGBuffer
> buffer
;
251 if (BufferCache
* cache
= mDevice
->GetConstantBufferCache()) {
252 buffer
= cache
->GetOrCreateBuffer(aBytes
);
254 buffer
= mDevice
->CreateBuffer(MLGBufferType::Constant
, aBytes
,
261 MLGMappedResource map
;
262 if (!mDevice
->Map(buffer
, MLGMapType::WRITE_DISCARD
, &map
)) {
266 // Signal that offsetting is not supported.
268 *aOutBuffer
= buffer
;
269 return reinterpret_cast<uint8_t*>(map
.mData
);
272 void AutoBufferUploadBase::UnmapBuffer() { mDevice
->Unmap(mBuffer
); }
}  // namespace layers
}  // namespace mozilla