Merge 'remotes/trunk'
[0ad.git] / source / renderer / VertexBuffer.cpp
blobb724c36b7e86df3c4595a2ce7fc0ca7e2103aa14
1 /* Copyright (C) 2022 Wildfire Games.
2 * This file is part of 0 A.D.
4 * 0 A.D. is free software: you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation, either version 2 of the License, or
7 * (at your option) any later version.
9 * 0 A.D. is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * You should have received a copy of the GNU General Public License
15 * along with 0 A.D. If not, see <http://www.gnu.org/licenses/>.
18 #include "precompiled.h"
20 #include "VertexBuffer.h"
22 #include "lib/sysdep/cpu.h"
23 #include "ps/CLogger.h"
24 #include "ps/Errors.h"
25 #include "ps/VideoMode.h"
26 #include "renderer/backend/IDevice.h"
27 #include "renderer/Renderer.h"
29 #include <algorithm>
30 #include <cstring>
31 #include <iterator>
// Absolute maximum (bytewise) size of each backend vertex buffer object.
34 // Make it large enough for the maximum feasible mesh size (64K vertexes,
35 // 64 bytes per vertex in InstancingModelRenderer).
36 // TODO: measure what influence this has on performance
37 constexpr std::size_t MAX_VB_SIZE_BYTES = 4 * 1024 * 1024;
39 CVertexBuffer::CVertexBuffer(
40 const char* name, const size_t vertexSize,
41 const Renderer::Backend::IBuffer::Type type, const bool dynamic)
42 : CVertexBuffer(name, vertexSize, type, dynamic, MAX_VB_SIZE_BYTES)
46 CVertexBuffer::CVertexBuffer(
47 const char* name, const size_t vertexSize,
48 const Renderer::Backend::IBuffer::Type type, const bool dynamic,
49 const size_t maximumBufferSize)
50 : m_VertexSize(vertexSize), m_HasNeededChunks(false)
52 size_t size = maximumBufferSize;
54 if (type == Renderer::Backend::IBuffer::Type::VERTEX)
56 // We want to store 16-bit indices to any vertex in a buffer, so the
57 // buffer must never be bigger than vertexSize*64K bytes since we can
58 // address at most 64K of them with 16-bit indices
59 size = std::min(size, vertexSize * 65536);
61 else if (type == Renderer::Backend::IBuffer::Type::INDEX)
63 ENSURE(vertexSize == sizeof(u16));
66 // store max/free vertex counts
67 m_MaxVertices = m_FreeVertices = size / vertexSize;
69 m_Buffer = g_VideoMode.GetBackendDevice()->CreateBuffer(
70 name, type, m_MaxVertices * m_VertexSize, dynamic);
72 // create sole free chunk
73 VBChunk* chunk = new VBChunk;
74 chunk->m_Owner = this;
75 chunk->m_Count = m_FreeVertices;
76 chunk->m_Index = 0;
77 m_FreeList.emplace_back(chunk);
80 CVertexBuffer::~CVertexBuffer()
82 // Must have released all chunks before destroying the buffer
83 ENSURE(m_AllocList.empty());
85 m_Buffer.reset();
87 for (VBChunk* const& chunk : m_FreeList)
88 delete chunk;
91 bool CVertexBuffer::CompatibleVertexType(
92 const size_t vertexSize, const Renderer::Backend::IBuffer::Type type,
93 const bool dynamic) const
95 ENSURE(m_Buffer);
96 return type == m_Buffer->GetType() && dynamic == m_Buffer->IsDynamic() && vertexSize == m_VertexSize;
99 ///////////////////////////////////////////////////////////////////////////////
// Allocate: try to allocate a chunk of the given number of vertices (each of
// the given size), matching the given type/dynamic flags - returns null
// if no sufficiently large free chunk is available
103 CVertexBuffer::VBChunk* CVertexBuffer::Allocate(
104 const size_t vertexSize, const size_t numberOfVertices,
105 const Renderer::Backend::IBuffer::Type type, const bool dynamic,
106 void* backingStore)
108 // check this is the right kind of buffer
109 if (!CompatibleVertexType(vertexSize, type, dynamic))
110 return nullptr;
112 if (UseStreaming(dynamic))
113 ENSURE(backingStore != nullptr);
115 // quick check there's enough vertices spare to allocate
116 if (numberOfVertices > m_FreeVertices)
117 return nullptr;
119 // trawl free list looking for first free chunk with enough space
120 std::vector<VBChunk*>::iterator best_iter = m_FreeList.end();
121 for (std::vector<VBChunk*>::iterator iter = m_FreeList.begin(); iter != m_FreeList.end(); ++iter)
123 if (numberOfVertices == (*iter)->m_Count)
125 best_iter = iter;
126 break;
128 else if (numberOfVertices < (*iter)->m_Count && (best_iter == m_FreeList.end() || (*best_iter)->m_Count < (*iter)->m_Count))
129 best_iter = iter;
132 // We could not find a large enough chunk.
133 if (best_iter == m_FreeList.end())
134 return nullptr;
136 VBChunk* chunk = *best_iter;
137 m_FreeList.erase(best_iter);
138 m_FreeVertices -= chunk->m_Count;
140 chunk->m_BackingStore = backingStore;
141 chunk->m_Dirty = false;
142 chunk->m_Needed = false;
144 // split chunk into two; - allocate a new chunk using all unused vertices in the
145 // found chunk, and add it to the free list
146 if (chunk->m_Count > numberOfVertices)
148 VBChunk* newchunk = new VBChunk;
149 newchunk->m_Owner = this;
150 newchunk->m_Count = chunk->m_Count - numberOfVertices;
151 newchunk->m_Index = chunk->m_Index + numberOfVertices;
152 m_FreeList.emplace_back(newchunk);
153 m_FreeVertices += newchunk->m_Count;
155 // resize given chunk
156 chunk->m_Count = numberOfVertices;
159 // return found chunk
160 m_AllocList.push_back(chunk);
161 return chunk;
164 ///////////////////////////////////////////////////////////////////////////////
165 // Release: return given chunk to this buffer
166 void CVertexBuffer::Release(VBChunk* chunk)
168 // Update total free count before potentially modifying this chunk's count
169 m_FreeVertices += chunk->m_Count;
171 m_AllocList.erase(std::find(m_AllocList.begin(), m_AllocList.end(), chunk));
173 // Sorting O(nlogn) shouldn't be too far from O(n) by performance, because
174 // the container is partly sorted already.
175 std::sort(
176 m_FreeList.begin(), m_FreeList.end(),
177 [](const VBChunk* chunk1, const VBChunk* chunk2) -> bool
179 return chunk1->m_Index < chunk2->m_Index;
182 // Coalesce with any free-list items that are adjacent to this chunk;
183 // merge the found chunk with the new one, and remove the old one
184 // from the list.
185 for (std::vector<VBChunk*>::iterator iter = m_FreeList.begin(); iter != m_FreeList.end();)
187 if ((*iter)->m_Index == chunk->m_Index + chunk->m_Count
188 || (*iter)->m_Index + (*iter)->m_Count == chunk->m_Index)
190 chunk->m_Index = std::min(chunk->m_Index, (*iter)->m_Index);
191 chunk->m_Count += (*iter)->m_Count;
192 delete *iter;
193 iter = m_FreeList.erase(iter);
194 if (!m_FreeList.empty() && iter != m_FreeList.begin())
195 iter = std::prev(iter);
197 else
199 ++iter;
203 m_FreeList.emplace_back(chunk);
206 ///////////////////////////////////////////////////////////////////////////////
207 // UpdateChunkVertices: update vertex data for given chunk
208 void CVertexBuffer::UpdateChunkVertices(VBChunk* chunk, void* data)
210 ENSURE(m_Buffer);
211 if (UseStreaming(m_Buffer->IsDynamic()))
213 // The backend buffer is now out of sync with the backing store.
214 chunk->m_Dirty = true;
216 // Sanity check: Make sure the caller hasn't tried to reallocate
217 // their backing store.
218 ENSURE(data == chunk->m_BackingStore);
220 else
222 ENSURE(data);
223 g_Renderer.GetDeviceCommandContext()->UploadBufferRegion(
224 m_Buffer.get(), data, chunk->m_Index * m_VertexSize, chunk->m_Count * m_VertexSize);
228 void CVertexBuffer::UploadIfNeeded(
229 Renderer::Backend::IDeviceCommandContext* deviceCommandContext)
231 if (UseStreaming(m_Buffer->IsDynamic()))
233 if (!m_HasNeededChunks)
234 return;
236 // If any chunks are out of sync with the current backend buffer, and are
237 // needed for rendering this frame, we'll need to re-upload the backend buffer.
238 bool needUpload = false;
239 for (VBChunk* const& chunk : m_AllocList)
241 if (chunk->m_Dirty && chunk->m_Needed)
243 needUpload = true;
244 break;
248 if (needUpload)
250 deviceCommandContext->UploadBuffer(m_Buffer.get(), [&](u8* mappedData)
252 #ifndef NDEBUG
253 // To help detect bugs where PrepareForRendering() was not called,
254 // force all not-needed data to 0, so things won't get rendered
255 // with undefined (but possibly still correct-looking) data.
256 memset(mappedData, 0, m_MaxVertices * m_VertexSize);
257 #endif
259 // Copy only the chunks we need. (This condition is helpful when
260 // the backend buffer contains data for every unit in the world,
261 // but only a handful are visible on screen and we don't need to
262 // bother copying the rest.)
263 for (VBChunk* const& chunk : m_AllocList)
264 if (chunk->m_Needed)
265 std::memcpy(mappedData + chunk->m_Index * m_VertexSize, chunk->m_BackingStore, chunk->m_Count * m_VertexSize);
268 // Anything we just uploaded is clean; anything else is dirty
269 // since the rest of the backend buffer content is now undefined
270 for (VBChunk* const& chunk : m_AllocList)
272 if (chunk->m_Needed)
274 chunk->m_Dirty = false;
275 chunk->m_Needed = false;
277 else
278 chunk->m_Dirty = true;
281 else
283 // Reset the flags for the next phase.
284 for (VBChunk* const& chunk : m_AllocList)
285 chunk->m_Needed = false;
288 m_HasNeededChunks = false;
292 size_t CVertexBuffer::GetBytesReserved() const
294 return MAX_VB_SIZE_BYTES;
297 size_t CVertexBuffer::GetBytesAllocated() const
299 return (m_MaxVertices - m_FreeVertices) * m_VertexSize;
302 void CVertexBuffer::DumpStatus() const
304 debug_printf("freeverts = %d\n", static_cast<int>(m_FreeVertices));
306 size_t maxSize = 0;
307 for (VBChunk* const& chunk : m_FreeList)
309 debug_printf("free chunk %p: size=%d\n", static_cast<void *>(chunk), static_cast<int>(chunk->m_Count));
310 maxSize = std::max(chunk->m_Count, maxSize);
312 debug_printf("max size = %d\n", static_cast<int>(maxSize));
315 bool CVertexBuffer::UseStreaming(const bool dynamic)
317 return dynamic;
320 void CVertexBuffer::PrepareForRendering(VBChunk* chunk)
322 chunk->m_Needed = true;
323 m_HasNeededChunks = true;