1 /* Copyright (C) 2022 Wildfire Games.
2 * This file is part of 0 A.D.
4 * 0 A.D. is free software: you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation, either version 2 of the License, or
7 * (at your option) any later version.
9 * 0 A.D. is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with 0 A.D. If not, see <http://www.gnu.org/licenses/>.
18 #include "precompiled.h"
20 #include "VertexBuffer.h"
22 #include "lib/sysdep/cpu.h"
23 #include "ps/CLogger.h"
24 #include "ps/Errors.h"
25 #include "ps/VideoMode.h"
26 #include "renderer/backend/IDevice.h"
27 #include "renderer/Renderer.h"
// Absolute maximum (bytewise) size of each GL vertex buffer object.
// Make it large enough for the maximum feasible mesh size (64K vertexes,
// 64 bytes per vertex in InstancingModelRenderer).
// TODO: measure what influence this has on performance
constexpr std::size_t MAX_VB_SIZE_BYTES = 4 * 1024 * 1024;
39 CVertexBuffer::CVertexBuffer(
40 const char* name
, const size_t vertexSize
,
41 const Renderer::Backend::IBuffer::Type type
, const bool dynamic
)
42 : CVertexBuffer(name
, vertexSize
, type
, dynamic
, MAX_VB_SIZE_BYTES
)
46 CVertexBuffer::CVertexBuffer(
47 const char* name
, const size_t vertexSize
,
48 const Renderer::Backend::IBuffer::Type type
, const bool dynamic
,
49 const size_t maximumBufferSize
)
50 : m_VertexSize(vertexSize
), m_HasNeededChunks(false)
52 size_t size
= maximumBufferSize
;
54 if (type
== Renderer::Backend::IBuffer::Type::VERTEX
)
56 // We want to store 16-bit indices to any vertex in a buffer, so the
57 // buffer must never be bigger than vertexSize*64K bytes since we can
58 // address at most 64K of them with 16-bit indices
59 size
= std::min(size
, vertexSize
* 65536);
61 else if (type
== Renderer::Backend::IBuffer::Type::INDEX
)
63 ENSURE(vertexSize
== sizeof(u16
));
66 // store max/free vertex counts
67 m_MaxVertices
= m_FreeVertices
= size
/ vertexSize
;
69 m_Buffer
= g_VideoMode
.GetBackendDevice()->CreateBuffer(
70 name
, type
, m_MaxVertices
* m_VertexSize
, dynamic
);
72 // create sole free chunk
73 VBChunk
* chunk
= new VBChunk
;
74 chunk
->m_Owner
= this;
75 chunk
->m_Count
= m_FreeVertices
;
77 m_FreeList
.emplace_back(chunk
);
80 CVertexBuffer::~CVertexBuffer()
82 // Must have released all chunks before destroying the buffer
83 ENSURE(m_AllocList
.empty());
87 for (VBChunk
* const& chunk
: m_FreeList
)
91 bool CVertexBuffer::CompatibleVertexType(
92 const size_t vertexSize
, const Renderer::Backend::IBuffer::Type type
,
93 const bool dynamic
) const
96 return type
== m_Buffer
->GetType() && dynamic
== m_Buffer
->IsDynamic() && vertexSize
== m_VertexSize
;
99 ///////////////////////////////////////////////////////////////////////////////
100 // Allocate: try to allocate a buffer of given number of vertices (each of
101 // given size), with the given type, and using the given texture - return null
102 // if no free chunks available
103 CVertexBuffer::VBChunk
* CVertexBuffer::Allocate(
104 const size_t vertexSize
, const size_t numberOfVertices
,
105 const Renderer::Backend::IBuffer::Type type
, const bool dynamic
,
108 // check this is the right kind of buffer
109 if (!CompatibleVertexType(vertexSize
, type
, dynamic
))
112 if (UseStreaming(dynamic
))
113 ENSURE(backingStore
!= nullptr);
115 // quick check there's enough vertices spare to allocate
116 if (numberOfVertices
> m_FreeVertices
)
119 // trawl free list looking for first free chunk with enough space
120 std::vector
<VBChunk
*>::iterator best_iter
= m_FreeList
.end();
121 for (std::vector
<VBChunk
*>::iterator iter
= m_FreeList
.begin(); iter
!= m_FreeList
.end(); ++iter
)
123 if (numberOfVertices
== (*iter
)->m_Count
)
128 else if (numberOfVertices
< (*iter
)->m_Count
&& (best_iter
== m_FreeList
.end() || (*best_iter
)->m_Count
< (*iter
)->m_Count
))
132 // We could not find a large enough chunk.
133 if (best_iter
== m_FreeList
.end())
136 VBChunk
* chunk
= *best_iter
;
137 m_FreeList
.erase(best_iter
);
138 m_FreeVertices
-= chunk
->m_Count
;
140 chunk
->m_BackingStore
= backingStore
;
141 chunk
->m_Dirty
= false;
142 chunk
->m_Needed
= false;
144 // split chunk into two; - allocate a new chunk using all unused vertices in the
145 // found chunk, and add it to the free list
146 if (chunk
->m_Count
> numberOfVertices
)
148 VBChunk
* newchunk
= new VBChunk
;
149 newchunk
->m_Owner
= this;
150 newchunk
->m_Count
= chunk
->m_Count
- numberOfVertices
;
151 newchunk
->m_Index
= chunk
->m_Index
+ numberOfVertices
;
152 m_FreeList
.emplace_back(newchunk
);
153 m_FreeVertices
+= newchunk
->m_Count
;
155 // resize given chunk
156 chunk
->m_Count
= numberOfVertices
;
159 // return found chunk
160 m_AllocList
.push_back(chunk
);
164 ///////////////////////////////////////////////////////////////////////////////
165 // Release: return given chunk to this buffer
166 void CVertexBuffer::Release(VBChunk
* chunk
)
168 // Update total free count before potentially modifying this chunk's count
169 m_FreeVertices
+= chunk
->m_Count
;
171 m_AllocList
.erase(std::find(m_AllocList
.begin(), m_AllocList
.end(), chunk
));
173 // Sorting O(nlogn) shouldn't be too far from O(n) by performance, because
174 // the container is partly sorted already.
176 m_FreeList
.begin(), m_FreeList
.end(),
177 [](const VBChunk
* chunk1
, const VBChunk
* chunk2
) -> bool
179 return chunk1
->m_Index
< chunk2
->m_Index
;
182 // Coalesce with any free-list items that are adjacent to this chunk;
183 // merge the found chunk with the new one, and remove the old one
185 for (std::vector
<VBChunk
*>::iterator iter
= m_FreeList
.begin(); iter
!= m_FreeList
.end();)
187 if ((*iter
)->m_Index
== chunk
->m_Index
+ chunk
->m_Count
188 || (*iter
)->m_Index
+ (*iter
)->m_Count
== chunk
->m_Index
)
190 chunk
->m_Index
= std::min(chunk
->m_Index
, (*iter
)->m_Index
);
191 chunk
->m_Count
+= (*iter
)->m_Count
;
193 iter
= m_FreeList
.erase(iter
);
194 if (!m_FreeList
.empty() && iter
!= m_FreeList
.begin())
195 iter
= std::prev(iter
);
203 m_FreeList
.emplace_back(chunk
);
206 ///////////////////////////////////////////////////////////////////////////////
207 // UpdateChunkVertices: update vertex data for given chunk
208 void CVertexBuffer::UpdateChunkVertices(VBChunk
* chunk
, void* data
)
211 if (UseStreaming(m_Buffer
->IsDynamic()))
213 // The backend buffer is now out of sync with the backing store.
214 chunk
->m_Dirty
= true;
216 // Sanity check: Make sure the caller hasn't tried to reallocate
217 // their backing store.
218 ENSURE(data
== chunk
->m_BackingStore
);
223 g_Renderer
.GetDeviceCommandContext()->UploadBufferRegion(
224 m_Buffer
.get(), data
, chunk
->m_Index
* m_VertexSize
, chunk
->m_Count
* m_VertexSize
);
228 void CVertexBuffer::UploadIfNeeded(
229 Renderer::Backend::IDeviceCommandContext
* deviceCommandContext
)
231 if (UseStreaming(m_Buffer
->IsDynamic()))
233 if (!m_HasNeededChunks
)
236 // If any chunks are out of sync with the current backend buffer, and are
237 // needed for rendering this frame, we'll need to re-upload the backend buffer.
238 bool needUpload
= false;
239 for (VBChunk
* const& chunk
: m_AllocList
)
241 if (chunk
->m_Dirty
&& chunk
->m_Needed
)
250 deviceCommandContext
->UploadBuffer(m_Buffer
.get(), [&](u8
* mappedData
)
253 // To help detect bugs where PrepareForRendering() was not called,
254 // force all not-needed data to 0, so things won't get rendered
255 // with undefined (but possibly still correct-looking) data.
256 memset(mappedData
, 0, m_MaxVertices
* m_VertexSize
);
259 // Copy only the chunks we need. (This condition is helpful when
260 // the backend buffer contains data for every unit in the world,
261 // but only a handful are visible on screen and we don't need to
262 // bother copying the rest.)
263 for (VBChunk
* const& chunk
: m_AllocList
)
265 std::memcpy(mappedData
+ chunk
->m_Index
* m_VertexSize
, chunk
->m_BackingStore
, chunk
->m_Count
* m_VertexSize
);
268 // Anything we just uploaded is clean; anything else is dirty
269 // since the rest of the backend buffer content is now undefined
270 for (VBChunk
* const& chunk
: m_AllocList
)
274 chunk
->m_Dirty
= false;
275 chunk
->m_Needed
= false;
278 chunk
->m_Dirty
= true;
283 // Reset the flags for the next phase.
284 for (VBChunk
* const& chunk
: m_AllocList
)
285 chunk
->m_Needed
= false;
288 m_HasNeededChunks
= false;
292 size_t CVertexBuffer::GetBytesReserved() const
294 return MAX_VB_SIZE_BYTES
;
297 size_t CVertexBuffer::GetBytesAllocated() const
299 return (m_MaxVertices
- m_FreeVertices
) * m_VertexSize
;
302 void CVertexBuffer::DumpStatus() const
304 debug_printf("freeverts = %d\n", static_cast<int>(m_FreeVertices
));
307 for (VBChunk
* const& chunk
: m_FreeList
)
309 debug_printf("free chunk %p: size=%d\n", static_cast<void *>(chunk
), static_cast<int>(chunk
->m_Count
));
310 maxSize
= std::max(chunk
->m_Count
, maxSize
);
312 debug_printf("max size = %d\n", static_cast<int>(maxSize
));
315 bool CVertexBuffer::UseStreaming(const bool dynamic
)
320 void CVertexBuffer::PrepareForRendering(VBChunk
* chunk
)
322 chunk
->m_Needed
= true;
323 m_HasNeededChunks
= true;