Big buffer refactor
Replaced the RenderBuffer class, replaced AbstractBuffer with Buffer
parent 754a0016c7
commit 29786765c6
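At a glance, buffer creation moves from a two-step Instantiate/Initialize pattern to a single call taking the size, the usage flags and optional initial data. A minimal sketch distilled from the hunks below; `device`, `size` and `data` are placeholders, and the examples add Nz::BufferUsage::Write when passing initial data:

// Old pattern (removed): InstantiateBuffer(type) + Initialize(size, usage) + Fill(data, 0, size)
// New pattern (sketch):
std::shared_ptr<Nz::RenderBuffer> ubo = device->InstantiateBuffer(
	Nz::BufferType::Uniform, size,
	Nz::BufferUsage::DeviceLocal | Nz::BufferUsage::Dynamic | Nz::BufferUsage::Write,
	data);
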
@@ -80,7 +80,6 @@ int main()
Nz::RenderWindow window;

Nz::MeshParams meshParams;
meshParams.storage = Nz::DataStorage::Software;
meshParams.center = true;
meshParams.matrix = Nz::Matrix4f::Rotate(Nz::EulerAnglesf(0.f, 90.f, 0.f)) * Nz::Matrix4f::Scale(Nz::Vector3f(0.002f));
meshParams.vertexDeclaration = Nz::VertexDeclaration::Get(Nz::VertexLayout::XYZ_Normal_UV);
@@ -110,7 +109,6 @@ int main()

// Plane
Nz::MeshParams meshPrimitiveParams;
meshPrimitiveParams.storage = Nz::DataStorage::Software;
meshPrimitiveParams.vertexDeclaration = Nz::VertexDeclaration::Get(Nz::VertexLayout::XYZ_Normal_UV);

std::shared_ptr<Nz::Mesh> planeMesh = std::make_shared<Nz::Mesh>();
@@ -320,9 +318,7 @@ int main()

constexpr std::size_t MaxPointLight = 2000;

std::shared_ptr<Nz::AbstractBuffer> lightUbo = device->InstantiateBuffer(Nz::BufferType::Uniform);
if (!lightUbo->Initialize(MaxPointLight * alignedSpotLightSize, Nz::BufferUsage::DeviceLocal | Nz::BufferUsage::Dynamic))
return __LINE__;
std::shared_ptr<Nz::RenderBuffer> lightUbo = device->InstantiateBuffer(Nz::BufferType::Uniform, MaxPointLight * alignedSpotLightSize, Nz::BufferUsage::DeviceLocal | Nz::BufferUsage::Dynamic | Nz::BufferUsage::Write);

std::vector<SpotLight> spotLights;
/*auto& firstSpot = spotLights.emplace_back();
@@ -403,7 +399,7 @@ int main()
std::vector<std::shared_ptr<Nz::ShaderBinding>> gaussianBlurShaderBinding(BloomSubdivisionCount * 2);

std::vector<Nz::UInt8> gaussianBlurData(gaussianBlurDataOffsets.GetSize());
std::vector<std::shared_ptr<Nz::AbstractBuffer>> gaussianBlurUbos;
std::vector<std::shared_ptr<Nz::RenderBuffer>> gaussianBlurUbos;

float sizeFactor = 2.f;
for (std::size_t i = 0; i < BloomSubdivisionCount; ++i)
@@ -411,19 +407,11 @@ int main()
Nz::AccessByOffset<Nz::Vector2f&>(gaussianBlurData.data(), gaussianBlurDataDirection) = Nz::Vector2f(1.f, 0.f);
Nz::AccessByOffset<float&>(gaussianBlurData.data(), gaussianBlurDataSize) = sizeFactor;

std::shared_ptr<Nz::AbstractBuffer> horizontalBlurData = device->InstantiateBuffer(Nz::BufferType::Uniform);
if (!horizontalBlurData->Initialize(gaussianBlurDataOffsets.GetSize(), Nz::BufferUsage::DeviceLocal | Nz::BufferUsage::Dynamic))
return __LINE__;

horizontalBlurData->Fill(gaussianBlurData.data(), 0, gaussianBlurDataOffsets.GetSize());
std::shared_ptr<Nz::RenderBuffer> horizontalBlurData = device->InstantiateBuffer(Nz::BufferType::Uniform, gaussianBlurDataOffsets.GetSize(), Nz::BufferUsage::DeviceLocal | Nz::BufferUsage::Dynamic | Nz::BufferUsage::Write, gaussianBlurData.data());

Nz::AccessByOffset<Nz::Vector2f&>(gaussianBlurData.data(), gaussianBlurDataDirection) = Nz::Vector2f(0.f, 1.f);

std::shared_ptr<Nz::AbstractBuffer> verticalBlurData = device->InstantiateBuffer(Nz::BufferType::Uniform);
if (!verticalBlurData->Initialize(gaussianBlurDataOffsets.GetSize(), Nz::BufferUsage::DeviceLocal | Nz::BufferUsage::Dynamic))
return __LINE__;

verticalBlurData->Fill(gaussianBlurData.data(), 0, gaussianBlurDataOffsets.GetSize());
std::shared_ptr<Nz::RenderBuffer> verticalBlurData = device->InstantiateBuffer(Nz::BufferType::Uniform, gaussianBlurDataOffsets.GetSize(), Nz::BufferUsage::DeviceLocal | Nz::BufferUsage::Dynamic | Nz::BufferUsage::Write, gaussianBlurData.data());

sizeFactor *= 2.f;

@@ -555,9 +543,7 @@ int main()
Nz::AccessByOffset<float&>(godRaysData.data(), gr_weightOffset) = 5.65f;
Nz::AccessByOffset<Nz::Vector2f&>(godRaysData.data(), gr_lightPositionOffset) = Nz::Vector2f(0.5f, 0.1f);

std::shared_ptr<Nz::AbstractBuffer> godRaysUBO = device->InstantiateBuffer(Nz::BufferType::Uniform);
godRaysUBO->Initialize(godRaysData.size(), Nz::BufferUsage::DeviceLocal | Nz::BufferUsage::Dynamic);
godRaysUBO->Fill(godRaysData.data(), 0, godRaysData.size());
std::shared_ptr<Nz::RenderBuffer> godRaysUBO = device->InstantiateBuffer(Nz::BufferType::Uniform, godRaysData.size(), Nz::BufferUsage::DeviceLocal | Nz::BufferUsage::Dynamic | Nz::BufferUsage::Write, godRaysData.data());

std::shared_ptr<Nz::ShaderBinding> godRaysBlitShaderBinding;

@@ -652,12 +638,7 @@ int main()
}
};*/

std::shared_ptr<Nz::AbstractBuffer> fullscreenVertexBuffer = device->InstantiateBuffer(Nz::BufferType::Vertex);
if (!fullscreenVertexBuffer->Initialize(fullscreenVertexDeclaration->GetStride() * vertexData.size(), Nz::BufferUsage::DeviceLocal))
return __LINE__;

if (!fullscreenVertexBuffer->Fill(vertexData.data(), 0, fullscreenVertexBuffer->GetSize()))
return __LINE__;
std::shared_ptr<Nz::RenderBuffer> fullscreenVertexBuffer = device->InstantiateBuffer(Nz::BufferType::Vertex, fullscreenVertexDeclaration->GetStride() * vertexData.size(), Nz::BufferUsage::DeviceLocal | Nz::BufferUsage::Write, vertexData.data());

std::shared_ptr<Nz::ShaderBinding> bloomSkipBlit;
std::shared_ptr<Nz::ShaderBinding> finalBlitBinding;

@@ -28,7 +28,6 @@ int main()

Nz::MeshParams meshParams;
meshParams.center = true;
meshParams.storage = Nz::DataStorage::Software;
meshParams.matrix = Nz::Matrix4f::Rotate(Nz::EulerAnglesf(0.f, -90.f, 0.f)) * Nz::Matrix4f::Scale(Nz::Vector3f(0.002f));
meshParams.vertexDeclaration = Nz::VertexDeclaration::Get(Nz::VertexLayout::XYZ_UV);

@@ -76,12 +75,14 @@ int main()
basicMat.SetDiffuseMap(Nz::Texture::LoadFromFile(resourceDir / "Spaceship/Texture/diffuse.png", texParams));

Nz::Model model(std::move(gfxMesh), spaceshipMesh->GetAABB());
model.UpdateScissorBox(Nz::Recti(0, 0, 1920, 1080));
for (std::size_t i = 0; i < model.GetSubMeshCount(); ++i)
model.SetMaterial(i, material);

Nz::Vector2ui windowSize = window.GetSize();

Nz::Camera camera(window.GetRenderTarget());
//camera.UpdateClearColor(Nz::Color::Gray);

Nz::ViewerInstance& viewerInstance = camera.GetViewerInstance();
viewerInstance.UpdateTargetSize(Nz::Vector2f(window.GetSize()));

@@ -53,7 +53,6 @@ int main()

Nz::MeshParams meshParams;
meshParams.center = true;
meshParams.storage = Nz::DataStorage::Software;
meshParams.matrix = Nz::Matrix4f::Rotate(Nz::EulerAnglesf(0.f, 90.f, 0.f)) * Nz::Matrix4f::Scale(Nz::Vector3f(0.002f));
meshParams.vertexDeclaration = Nz::VertexDeclaration::Get(Nz::VertexLayout::XYZ_UV);

@@ -136,7 +135,7 @@ int main()

Nz::Vector2ui windowSize = window.GetSize();

Nz::VertexMapper vertexMapper(*spaceshipMesh->GetSubMesh(0), Nz::BufferAccess::ReadOnly);
Nz::VertexMapper vertexMapper(*spaceshipMesh->GetSubMesh(0));
Nz::SparsePtr<Nz::Vector3f> vertices = vertexMapper.GetComponentPtr<Nz::Vector3f>(Nz::VertexComponent::Position);

entt::registry registry;

@@ -165,6 +164,7 @@ int main()
{
registry.emplace<Nz::NodeComponent>(viewer2D);
auto& cameraComponent = registry.emplace<Nz::CameraComponent>(viewer2D, window.GetRenderTarget(), Nz::ProjectionType::Orthographic);
cameraComponent.UpdateClearColor(Nz::Color(0, 0, 0, 0));
cameraComponent.UpdateRenderOrder(1);
cameraComponent.UpdateRenderMask(2);
}

@@ -92,15 +92,10 @@ int main()

Nz::Modules<Nz::Renderer> nazara(rendererConfig);

Nz::RenderWindow window;

Nz::MeshParams meshParams;
meshParams.center = true;
meshParams.matrix = Nz::Matrix4f::Rotate(Nz::EulerAnglesf(0.f, -90.f, 0.f)) * Nz::Matrix4f::Scale(Nz::Vector3f(0.002f));
meshParams.vertexDeclaration = Nz::VertexDeclaration::Get(Nz::VertexLayout::XYZ_Normal_UV);

std::shared_ptr<Nz::RenderDevice> device = Nz::Renderer::Instance()->InstanciateRenderDevice(0);

Nz::RenderWindow window;

std::string windowTitle = "Render Test";
if (!window.Create(device, Nz::VideoMode(800, 600, 32), windowTitle))
{
@@ -118,6 +113,12 @@ int main()
return __LINE__;
}

Nz::MeshParams meshParams;
meshParams.bufferFactory = GetRenderBufferFactory(device);
meshParams.center = true;
meshParams.matrix = Nz::Matrix4f::Rotate(Nz::EulerAnglesf(0.f, -90.f, 0.f)) * Nz::Matrix4f::Scale(Nz::Vector3f(0.002f));
meshParams.vertexDeclaration = Nz::VertexDeclaration::Get(Nz::VertexLayout::XYZ_Normal_UV);

std::shared_ptr<Nz::Mesh> drfreak = Nz::Mesh::LoadFromFile(resourceDir / "Spaceship/spaceship.obj", meshParams);
if (!drfreak)
{
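Mesh loading now selects its buffer backend through MeshParams::bufferFactory instead of a DataStorage flag. A minimal sketch of the new pattern, based on the hunk above; the asset path is illustrative:

// Sketch: GPU-backed mesh buffers created through the render device.
Nz::MeshParams meshParams;
meshParams.bufferFactory = Nz::GetRenderBufferFactory(device);
std::shared_ptr<Nz::Mesh> mesh = Nz::Mesh::LoadFromFile("assets/model.obj", meshParams);
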
@@ -180,12 +181,7 @@ int main()
Nz::ShaderBindingPtr viewerShaderBinding = basePipelineLayout->AllocateShaderBinding(0);
Nz::ShaderBindingPtr textureShaderBinding = renderPipelineLayout->AllocateShaderBinding(1);

std::shared_ptr<Nz::AbstractBuffer> uniformBuffer = device->InstantiateBuffer(Nz::BufferType::Uniform);
if (!uniformBuffer->Initialize(uniformSize, Nz::BufferUsage::DeviceLocal | Nz::BufferUsage::Dynamic))
{
NazaraError("Failed to create uniform buffer");
return __LINE__;
}
std::shared_ptr<Nz::RenderBuffer> uniformBuffer = device->InstantiateBuffer(Nz::BufferType::Uniform, uniformSize, Nz::BufferUsage::DeviceLocal | Nz::BufferUsage::Dynamic);

viewerShaderBinding->Update({
{
@@ -212,33 +208,18 @@ int main()
pipelineInfo.depthBuffer = true;
pipelineInfo.shaderModules.emplace_back(fragVertShader);

auto& vertexBuffer = pipelineInfo.vertexBuffers.emplace_back();
vertexBuffer.binding = 0;
vertexBuffer.declaration = meshVB->GetVertexDeclaration();
auto& pipelineVertexBuffer = pipelineInfo.vertexBuffers.emplace_back();
pipelineVertexBuffer.binding = 0;
pipelineVertexBuffer.declaration = meshVB->GetVertexDeclaration();

std::shared_ptr<Nz::RenderPipeline> pipeline = device->InstantiateRenderPipeline(pipelineInfo);

Nz::RenderDevice* renderDevice = window.GetRenderDevice().get();
const std::shared_ptr<Nz::RenderDevice>& renderDevice = window.GetRenderDevice();

std::shared_ptr<Nz::CommandPool> commandPool = renderDevice->InstantiateCommandPool(Nz::QueueType::Graphics);

Nz::RenderBuffer* renderBufferIB = static_cast<Nz::RenderBuffer*>(meshIB->GetBuffer()->GetImpl());
Nz::RenderBuffer* renderBufferVB = static_cast<Nz::RenderBuffer*>(meshVB->GetBuffer()->GetImpl());

if (!renderBufferIB->Synchronize(renderDevice))
{
NazaraError("Failed to synchronize render buffer");
return __LINE__;
}

if (!renderBufferVB->Synchronize(renderDevice))
{
NazaraError("Failed to synchronize render buffer");
return __LINE__;
}

Nz::AbstractBuffer* indexBufferImpl = renderBufferIB->GetHardwareBuffer(renderDevice);
Nz::AbstractBuffer* vertexBufferImpl = renderBufferVB->GetHardwareBuffer(renderDevice);
Nz::RenderBuffer& renderBufferIB = static_cast<Nz::RenderBuffer&>(*meshIB->GetBuffer());
Nz::RenderBuffer& renderBufferVB = static_cast<Nz::RenderBuffer&>(*meshVB->GetBuffer());

Nz::Vector3f viewerPos = Nz::Vector3f::Zero();

@@ -367,9 +348,9 @@ int main()
{
builder.BeginRenderPass(windowRT->GetFramebuffer(frame.GetFramebufferIndex()), windowRT->GetRenderPass(), renderRect, { clearValues[0], clearValues[1] });
{
builder.BindIndexBuffer(*indexBufferImpl);
builder.BindIndexBuffer(renderBufferIB);
builder.BindPipeline(*pipeline);
builder.BindVertexBuffer(0, *vertexBufferImpl);
builder.BindVertexBuffer(0, renderBufferVB);
builder.BindShaderBinding(0, *viewerShaderBinding);
builder.BindShaderBinding(1, *textureShaderBinding);

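Note how the per-device synchronization dance disappears: the mesh's buffer now is a RenderBuffer and binds directly. A condensed sketch of the new flow, using the names from the hunks above:

// Condensed from the hunks above.
Nz::RenderBuffer& indexBuffer = static_cast<Nz::RenderBuffer&>(*meshIB->GetBuffer());
Nz::RenderBuffer& vertexBuffer = static_cast<Nz::RenderBuffer&>(*meshVB->GetBuffer());
builder.BindIndexBuffer(indexBuffer);
builder.BindVertexBuffer(0, vertexBuffer);
// Previously this required GetImpl(), Synchronize(renderDevice) and GetHardwareBuffer(renderDevice) first.
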
@@ -50,14 +50,14 @@ namespace Nz

SparsePtr& operator=(const SparsePtr& ptr) = default;

SparsePtr operator+(int count) const;
SparsePtr operator+(unsigned int count) const;
SparsePtr operator-(int count) const;
SparsePtr operator-(unsigned int count) const;
SparsePtr operator+(Int64 count) const;
SparsePtr operator+(UInt64 count) const;
SparsePtr operator-(Int64 count) const;
SparsePtr operator-(UInt64 count) const;
std::ptrdiff_t operator-(const SparsePtr& ptr) const;

SparsePtr& operator+=(int count);
SparsePtr& operator-=(int count);
SparsePtr& operator+=(Int64 count);
SparsePtr& operator-=(Int64 count);

SparsePtr& operator++();
SparsePtr operator++(int);

@@ -260,7 +260,7 @@ namespace Nz
*/

template<typename T>
SparsePtr<T> SparsePtr<T>::operator+(int count) const
SparsePtr<T> SparsePtr<T>::operator+(Int64 count) const
{
return SparsePtr(m_ptr + count * m_stride, m_stride);
}
@@ -273,7 +273,7 @@ namespace Nz
*/

template<typename T>
SparsePtr<T> SparsePtr<T>::operator+(unsigned int count) const
SparsePtr<T> SparsePtr<T>::operator+(UInt64 count) const
{
return SparsePtr(m_ptr + count * m_stride, m_stride);
}
@@ -286,7 +286,7 @@ namespace Nz
*/

template<typename T>
SparsePtr<T> SparsePtr<T>::operator-(int count) const
SparsePtr<T> SparsePtr<T>::operator-(Int64 count) const
{
return SparsePtr(m_ptr - count * m_stride, m_stride);
}
@@ -299,7 +299,7 @@ namespace Nz
*/

template<typename T>
SparsePtr<T> SparsePtr<T>::operator-(unsigned int count) const
SparsePtr<T> SparsePtr<T>::operator-(UInt64 count) const
{
return SparsePtr(m_ptr - count * m_stride, m_stride);
}
@@ -325,7 +325,7 @@ namespace Nz
*/

template<typename T>
SparsePtr<T>& SparsePtr<T>::operator+=(int count)
SparsePtr<T>& SparsePtr<T>::operator+=(Int64 count)
{
m_ptr += count * m_stride;

@@ -340,7 +340,7 @@ namespace Nz
*/

template<typename T>
SparsePtr<T>& SparsePtr<T>::operator-=(int count)
SparsePtr<T>& SparsePtr<T>::operator-=(Int64 count)
{
m_ptr -= count * m_stride;

@@ -47,6 +47,7 @@
#include <Nazara/Graphics/Graphics.hpp>
#include <Nazara/Graphics/GuillotineTextureAtlas.hpp>
#include <Nazara/Graphics/InstancedRenderable.hpp>
#include <Nazara/Graphics/Light.hpp>
#include <Nazara/Graphics/Material.hpp>
#include <Nazara/Graphics/MaterialPass.hpp>
#include <Nazara/Graphics/MaterialPassRegistry.hpp>

@@ -9,6 +9,7 @@

#include <Nazara/Prerequisites.hpp>
#include <Nazara/Graphics/Config.hpp>
#include <Nazara/Renderer/RenderBuffer.hpp>
#include <Nazara/Utility/Mesh.hpp>
#include <Nazara/Utility/VertexDeclaration.hpp>
#include <memory>
@@ -23,9 +24,9 @@ namespace Nz
GraphicalMesh(GraphicalMesh&&) noexcept = default;
~GraphicalMesh() = default;

inline const std::shared_ptr<AbstractBuffer>& GetIndexBuffer(std::size_t subMesh) const;
inline const std::shared_ptr<RenderBuffer>& GetIndexBuffer(std::size_t subMesh) const;
inline std::size_t GetIndexCount(std::size_t subMesh) const;
inline const std::shared_ptr<AbstractBuffer>& GetVertexBuffer(std::size_t subMesh) const;
inline const std::shared_ptr<RenderBuffer>& GetVertexBuffer(std::size_t subMesh) const;
inline const std::shared_ptr<const VertexDeclaration>& GetVertexDeclaration(std::size_t subMesh) const;
inline std::size_t GetSubMeshCount() const;

@@ -35,8 +36,8 @@ namespace Nz
private:
struct GraphicalSubMesh
{
std::shared_ptr<AbstractBuffer> indexBuffer;
std::shared_ptr<AbstractBuffer> vertexBuffer;
std::shared_ptr<RenderBuffer> indexBuffer;
std::shared_ptr<RenderBuffer> vertexBuffer;
std::size_t indexCount;
std::shared_ptr<const VertexDeclaration> vertexDeclaration;
};

@@ -8,7 +8,7 @@

namespace Nz
{
inline const std::shared_ptr<AbstractBuffer>& GraphicalMesh::GetIndexBuffer(std::size_t subMesh) const
inline const std::shared_ptr<RenderBuffer>& GraphicalMesh::GetIndexBuffer(std::size_t subMesh) const
{
assert(subMesh < m_subMeshes.size());
return m_subMeshes[subMesh].indexBuffer;
@@ -20,7 +20,7 @@ namespace Nz
return m_subMeshes[subMesh].indexCount;
}

inline const std::shared_ptr<AbstractBuffer>& GraphicalMesh::GetVertexBuffer(std::size_t subMesh) const
inline const std::shared_ptr<RenderBuffer>& GraphicalMesh::GetVertexBuffer(std::size_t subMesh) const
{
assert(subMesh < m_subMeshes.size());
return m_subMeshes[subMesh].vertexBuffer;

@@ -37,7 +37,7 @@ namespace Nz
inline const std::shared_ptr<RenderPipeline>& GetBlitPipeline(bool transparent) const;
inline const std::shared_ptr<RenderPipelineLayout>& GetBlitPipelineLayout() const;
inline const DefaultTextures& GetDefaultTextures() const;
inline const std::shared_ptr<AbstractBuffer>& GetFullscreenVertexBuffer() const;
inline const std::shared_ptr<RenderBuffer>& GetFullscreenVertexBuffer() const;
inline const std::shared_ptr<VertexDeclaration>& GetFullscreenVertexDeclaration() const;
inline MaterialPassRegistry& GetMaterialPassRegistry();
inline const MaterialPassRegistry& GetMaterialPassRegistry() const;
@@ -70,7 +70,7 @@ namespace Nz

std::optional<RenderPassCache> m_renderPassCache;
std::optional<TextureSamplerCache> m_samplerCache;
std::shared_ptr<AbstractBuffer> m_fullscreenVertexBuffer;
std::shared_ptr<RenderBuffer> m_fullscreenVertexBuffer;
std::shared_ptr<RenderDevice> m_renderDevice;
std::shared_ptr<RenderPipeline> m_blitPipeline;
std::shared_ptr<RenderPipeline> m_blitPipelineTransparent;

@@ -22,7 +22,7 @@ namespace Nz
return m_defaultTextures;
}

inline const std::shared_ptr<AbstractBuffer>& Graphics::GetFullscreenVertexBuffer() const
inline const std::shared_ptr<RenderBuffer>& Graphics::GetFullscreenVertexBuffer() const
{
return m_fullscreenVertexBuffer;
}

@@ -74,7 +74,7 @@ namespace Nz
inline const std::shared_ptr<UberShader>& GetShader(ShaderStageType shaderStage) const;
inline const std::shared_ptr<Texture>& GetTexture(std::size_t textureIndex) const;
inline const TextureSamplerInfo& GetTextureSampler(std::size_t textureIndex) const;
inline const std::shared_ptr<AbstractBuffer>& GetUniformBuffer(std::size_t bufferIndex) const;
inline const std::shared_ptr<RenderBuffer>& GetUniformBuffer(std::size_t bufferIndex) const;
inline const std::vector<UInt8>& GetUniformBufferConstData(std::size_t bufferIndex);
inline std::vector<UInt8>& GetUniformBufferData(std::size_t bufferIndex);

@@ -101,7 +101,7 @@ namespace Nz
inline void SetPrimitiveMode(PrimitiveMode mode);
inline void SetTexture(std::size_t textureIndex, std::shared_ptr<Texture> texture);
inline void SetTextureSampler(std::size_t textureIndex, TextureSamplerInfo samplerInfo);
inline void SetUniformBuffer(std::size_t bufferIndex, std::shared_ptr<AbstractBuffer> uniformBuffer);
inline void SetUniformBuffer(std::size_t bufferIndex, std::shared_ptr<RenderBuffer> uniformBuffer);

bool Update(RenderFrame& renderFrame, CommandBufferBuilder& builder);

@@ -125,7 +125,7 @@ namespace Nz

struct UniformBuffer
{
std::shared_ptr<AbstractBuffer> buffer;
std::shared_ptr<RenderBuffer> buffer;
std::vector<UInt8> data;
bool dataInvalidated = true;
};

@@ -385,7 +385,7 @@ namespace Nz
return m_textures[textureIndex].samplerInfo;
}

inline const std::shared_ptr<AbstractBuffer>& MaterialPass::GetUniformBuffer(std::size_t bufferIndex) const
inline const std::shared_ptr<RenderBuffer>& MaterialPass::GetUniformBuffer(std::size_t bufferIndex) const
{
NazaraAssert(bufferIndex < m_uniformBuffers.size(), "Invalid uniform buffer index");
return m_uniformBuffers[bufferIndex].buffer;
@@ -624,7 +624,7 @@ namespace Nz
}
}

inline void MaterialPass::SetUniformBuffer(std::size_t bufferIndex, std::shared_ptr<AbstractBuffer> uniformBuffer)
inline void MaterialPass::SetUniformBuffer(std::size_t bufferIndex, std::shared_ptr<RenderBuffer> uniformBuffer)
{
NazaraAssert(bufferIndex < m_uniformBuffers.size(), "Invalid shared uniform buffer index");
if (m_uniformBuffers[bufferIndex].buffer != uniformBuffer)

@@ -30,13 +30,13 @@ namespace Nz

void BuildElement(std::size_t passIndex, const WorldInstance& worldInstance, std::vector<std::unique_ptr<RenderElement>>& elements) const override;

const std::shared_ptr<AbstractBuffer>& GetIndexBuffer(std::size_t subMeshIndex) const;
const std::shared_ptr<RenderBuffer>& GetIndexBuffer(std::size_t subMeshIndex) const;
std::size_t GetIndexCount(std::size_t subMeshIndex) const;
const std::shared_ptr<Material>& GetMaterial(std::size_t subMeshIndex) const override;
std::size_t GetMaterialCount() const override;
inline std::size_t GetSubMeshCount() const;
const std::vector<RenderPipelineInfo::VertexBufferData>& GetVertexBufferData(std::size_t subMeshIndex) const;
const std::shared_ptr<AbstractBuffer>& GetVertexBuffer(std::size_t subMeshIndex) const;
const std::shared_ptr<RenderBuffer>& GetVertexBuffer(std::size_t subMeshIndex) const;

inline void SetMaterial(std::size_t subMeshIndex, std::shared_ptr<Material> material);

@@ -29,7 +29,7 @@ namespace Nz
inline std::size_t FetchLayerIndex(int renderLayer) const;
inline std::size_t FetchMaterialPassIndex(const MaterialPass* materialPass) const;
inline std::size_t FetchPipelineIndex(const RenderPipeline* pipeline) const;
inline std::size_t FetchVertexBuffer(const AbstractBuffer* vertexBuffer) const;
inline std::size_t FetchVertexBuffer(const RenderBuffer* vertexBuffer) const;
inline std::size_t FetchVertexDeclaration(const VertexDeclaration* vertexDeclaration) const;

inline void Finalize();
@@ -37,7 +37,7 @@ namespace Nz
inline void RegisterLayer(int renderLayer);
inline void RegisterMaterialPass(const MaterialPass* materialPass);
inline void RegisterPipeline(const RenderPipeline* pipeline);
inline void RegisterVertexBuffer(const AbstractBuffer* vertexBuffer);
inline void RegisterVertexBuffer(const RenderBuffer* vertexBuffer);
inline void RegisterVertexDeclaration(const VertexDeclaration* vertexDeclaration);

private:
@@ -45,7 +45,7 @@ namespace Nz
robin_hood::unordered_map<int, std::size_t> m_renderLayerRegistry;
robin_hood::unordered_map<const MaterialPass*, std::size_t> m_materialPassRegistry;
robin_hood::unordered_map<const RenderPipeline*, std::size_t> m_pipelineRegistry;
robin_hood::unordered_map<const AbstractBuffer*, std::size_t> m_vertexBufferRegistry;
robin_hood::unordered_map<const RenderBuffer*, std::size_t> m_vertexBufferRegistry;
robin_hood::unordered_map<const VertexDeclaration*, std::size_t> m_vertexDeclarationRegistry;
};
}

@@ -42,7 +42,7 @@ namespace Nz
return it->second;
}

inline std::size_t RenderQueueRegistry::FetchVertexBuffer(const AbstractBuffer* vertexBuffer) const
inline std::size_t RenderQueueRegistry::FetchVertexBuffer(const RenderBuffer* vertexBuffer) const
{
auto it = m_vertexBufferRegistry.find(vertexBuffer);
assert(it != m_vertexBufferRegistry.end());
@@ -80,7 +80,7 @@ namespace Nz
m_pipelineRegistry.try_emplace(pipeline, m_pipelineRegistry.size());
}

inline void RenderQueueRegistry::RegisterVertexBuffer(const AbstractBuffer* vertexBuffer)
inline void RenderQueueRegistry::RegisterVertexBuffer(const RenderBuffer* vertexBuffer)
{
m_vertexBufferRegistry.try_emplace(vertexBuffer, m_vertexBufferRegistry.size());
}

@@ -24,24 +24,24 @@ namespace Nz
class RenderSubmesh : public RenderElement
{
public:
inline RenderSubmesh(int renderLayer, std::shared_ptr<MaterialPass> materialPass, std::shared_ptr<RenderPipeline> renderPipeline, const WorldInstance& worldInstance, std::size_t indexCount, std::shared_ptr<AbstractBuffer> indexBuffer, std::shared_ptr<AbstractBuffer> vertexBuffer, const Recti& scissorBox);
inline RenderSubmesh(int renderLayer, std::shared_ptr<MaterialPass> materialPass, std::shared_ptr<RenderPipeline> renderPipeline, const WorldInstance& worldInstance, std::size_t indexCount, std::shared_ptr<RenderBuffer> indexBuffer, std::shared_ptr<RenderBuffer> vertexBuffer, const Recti& scissorBox);
~RenderSubmesh() = default;

inline UInt64 ComputeSortingScore(const Frustumf& frustum, const RenderQueueRegistry& registry) const override;

inline const AbstractBuffer* GetIndexBuffer() const;
inline const RenderBuffer* GetIndexBuffer() const;
inline std::size_t GetIndexCount() const;
inline const MaterialPass& GetMaterialPass() const;
inline const RenderPipeline* GetRenderPipeline() const;
inline const Recti& GetScissorBox() const;
inline const AbstractBuffer* GetVertexBuffer() const;
inline const RenderBuffer* GetVertexBuffer() const;
inline const WorldInstance& GetWorldInstance() const;

inline void Register(RenderQueueRegistry& registry) const override;

private:
std::shared_ptr<AbstractBuffer> m_indexBuffer;
std::shared_ptr<AbstractBuffer> m_vertexBuffer;
std::shared_ptr<RenderBuffer> m_indexBuffer;
std::shared_ptr<RenderBuffer> m_vertexBuffer;
std::shared_ptr<MaterialPass> m_materialPass;
std::shared_ptr<RenderPipeline> m_renderPipeline;
std::size_t m_indexCount;

@@ -9,7 +9,7 @@

namespace Nz
{
inline RenderSubmesh::RenderSubmesh(int renderLayer, std::shared_ptr<MaterialPass> materialPass, std::shared_ptr<RenderPipeline> renderPipeline, const WorldInstance& worldInstance, std::size_t indexCount, std::shared_ptr<AbstractBuffer> indexBuffer, std::shared_ptr<AbstractBuffer> vertexBuffer, const Recti& scissorBox) :
inline RenderSubmesh::RenderSubmesh(int renderLayer, std::shared_ptr<MaterialPass> materialPass, std::shared_ptr<RenderPipeline> renderPipeline, const WorldInstance& worldInstance, std::size_t indexCount, std::shared_ptr<RenderBuffer> indexBuffer, std::shared_ptr<RenderBuffer> vertexBuffer, const Recti& scissorBox) :
RenderElement(BasicRenderElement::Submesh),
m_indexBuffer(std::move(indexBuffer)),
m_vertexBuffer(std::move(vertexBuffer)),
@@ -70,7 +70,7 @@ namespace Nz
}
}

inline const AbstractBuffer* RenderSubmesh::GetIndexBuffer() const
inline const RenderBuffer* RenderSubmesh::GetIndexBuffer() const
{
return m_indexBuffer.get();
}
@@ -95,7 +95,7 @@ namespace Nz
return m_scissorBox;
}

inline const AbstractBuffer* RenderSubmesh::GetVertexBuffer() const
inline const RenderBuffer* RenderSubmesh::GetVertexBuffer() const
{
return m_vertexBuffer.get();
}

@@ -38,17 +38,17 @@ namespace Nz
private:
struct BufferCopy
{
AbstractBuffer* targetBuffer;
RenderBuffer* targetBuffer;
UploadPool::Allocation* allocation;
std::size_t size;
};

struct VertexBufferPool
{
std::vector<std::shared_ptr<AbstractBuffer>> vertexBuffers;
std::vector<std::shared_ptr<RenderBuffer>> vertexBuffers;
};

std::shared_ptr<AbstractBuffer> m_indexBuffer;
std::shared_ptr<RenderBuffer> m_indexBuffer;
std::shared_ptr<VertexBufferPool> m_vertexBufferPool;
std::size_t m_maxVertexBufferSize;
std::size_t m_maxVertexCount;
@@ -61,7 +61,7 @@ namespace Nz
{
struct DrawCall
{
const AbstractBuffer* vertexBuffer;
const RenderBuffer* vertexBuffer;
const RenderPipeline* renderPipeline;
const ShaderBinding* shaderBinding;
std::size_t firstIndex;
@@ -77,7 +77,7 @@ namespace Nz

std::unordered_map<const RenderSpriteChain*, DrawCallIndices> drawCallPerElement;
std::vector<DrawCall> drawCalls;
std::vector<std::shared_ptr<AbstractBuffer>> vertexBuffers;
std::vector<std::shared_ptr<RenderBuffer>> vertexBuffers;
std::vector<ShaderBindingPtr> shaderBindings;
};
}

@@ -37,8 +37,8 @@ namespace Nz
{
struct DrawCall
{
const AbstractBuffer* indexBuffer;
const AbstractBuffer* vertexBuffer;
const RenderBuffer* indexBuffer;
const RenderBuffer* vertexBuffer;
const RenderPipeline* renderPipeline;
const ShaderBinding* shaderBinding;
std::size_t indexCount;

@@ -35,8 +35,8 @@ namespace Nz
inline const Vector2f& GetTargetSize() const;
inline const Matrix4f& GetViewMatrix() const;
inline const Matrix4f& GetViewProjMatrix() const;
inline std::shared_ptr<AbstractBuffer>& GetViewerBuffer();
inline const std::shared_ptr<AbstractBuffer>& GetViewerBuffer() const;
inline std::shared_ptr<RenderBuffer>& GetViewerBuffer();
inline const std::shared_ptr<RenderBuffer>& GetViewerBuffer() const;

void UpdateBuffers(UploadPool& uploadPool, CommandBufferBuilder& builder);
inline void UpdateProjectionMatrix(const Matrix4f& projectionMatrix);
@@ -52,7 +52,7 @@ namespace Nz
ViewerInstance& operator=(ViewerInstance&&) noexcept = default;

private:
std::shared_ptr<AbstractBuffer> m_viewerDataBuffer;
std::shared_ptr<RenderBuffer> m_viewerDataBuffer;
Matrix4f m_invProjectionMatrix;
Matrix4f m_invViewProjMatrix;
Matrix4f m_invViewMatrix;

@@ -43,12 +43,12 @@ namespace Nz
return m_viewProjMatrix;
}

inline std::shared_ptr<AbstractBuffer>& ViewerInstance::GetViewerBuffer()
inline std::shared_ptr<RenderBuffer>& ViewerInstance::GetViewerBuffer()
{
return m_viewerDataBuffer;
}

inline const std::shared_ptr<AbstractBuffer>& ViewerInstance::GetViewerBuffer() const
inline const std::shared_ptr<RenderBuffer>& ViewerInstance::GetViewerBuffer() const
{
return m_viewerDataBuffer;
}

@@ -31,8 +31,8 @@ namespace Nz
WorldInstance(WorldInstance&&) noexcept = default;
~WorldInstance() = default;

inline std::shared_ptr<AbstractBuffer>& GetInstanceBuffer();
inline const std::shared_ptr<AbstractBuffer>& GetInstanceBuffer() const;
inline std::shared_ptr<RenderBuffer>& GetInstanceBuffer();
inline const std::shared_ptr<RenderBuffer>& GetInstanceBuffer() const;
inline const Matrix4f& GetInvWorldMatrix() const;
inline const Matrix4f& GetWorldMatrix() const;

@@ -44,7 +44,7 @@ namespace Nz
WorldInstance& operator=(WorldInstance&&) noexcept = default;

private:
std::shared_ptr<AbstractBuffer> m_instanceDataBuffer;
std::shared_ptr<RenderBuffer> m_instanceDataBuffer;
Matrix4f m_invWorldMatrix;
Matrix4f m_worldMatrix;
bool m_dataInvalided;

@@ -8,12 +8,12 @@

namespace Nz
{
inline std::shared_ptr<AbstractBuffer>& WorldInstance::GetInstanceBuffer()
inline std::shared_ptr<RenderBuffer>& WorldInstance::GetInstanceBuffer()
{
return m_instanceDataBuffer;
}

inline const std::shared_ptr<AbstractBuffer>& WorldInstance::GetInstanceBuffer() const
inline const std::shared_ptr<RenderBuffer>& WorldInstance::GetInstanceBuffer() const
{
return m_instanceDataBuffer;
}

@@ -11,30 +11,25 @@
#include <Nazara/OpenGLRenderer/Config.hpp>
#include <Nazara/OpenGLRenderer/OpenGLDevice.hpp>
#include <Nazara/OpenGLRenderer/Wrapper/Buffer.hpp>
#include <Nazara/Utility/AbstractBuffer.hpp>
#include <Nazara/Renderer/RenderBuffer.hpp>
#include <memory>
#include <vector>

namespace Nz
{
class NAZARA_OPENGLRENDERER_API OpenGLBuffer : public AbstractBuffer
class NAZARA_OPENGLRENDERER_API OpenGLBuffer : public RenderBuffer
{
public:
OpenGLBuffer(OpenGLDevice& device, BufferType type);
OpenGLBuffer(OpenGLDevice& device, BufferType type, UInt64 size, BufferUsageFlags usage, const void* initialData = nullptr);
OpenGLBuffer(const OpenGLBuffer&) = delete;
OpenGLBuffer(OpenGLBuffer&&) = delete;
~OpenGLBuffer() = default;

bool Fill(const void* data, UInt64 offset, UInt64 size) override;

bool Initialize(UInt64 size, BufferUsageFlags usage) override;

inline const GL::Buffer& GetBuffer() const;
UInt64 GetSize() const override;
DataStorage GetStorage() const override;
inline BufferType GetType() const;

void* Map(BufferAccess access, UInt64 offset, UInt64 size) override;
void* Map(UInt64 offset, UInt64 size) override;
bool Unmap() override;

OpenGLBuffer& operator=(const OpenGLBuffer&) = delete;
@@ -42,9 +37,6 @@ namespace Nz

private:
GL::Buffer m_buffer;
BufferType m_type;
BufferUsageFlags m_usage;
UInt64 m_size;
};
}

@@ -11,11 +11,6 @@ namespace Nz
{
return m_buffer;
}

inline BufferType OpenGLBuffer::GetType() const
{
return m_type;
}
}

#include <Nazara/OpenGLRenderer/DebugOff.hpp>

@@ -26,11 +26,11 @@ namespace Nz
void BeginDebugRegion(const std::string_view& regionName, const Color& color) override;
void BeginRenderPass(const Framebuffer& framebuffer, const RenderPass& renderPass, const Recti& renderRect, const ClearValues* clearValues, std::size_t clearValueCount) override;

void BindIndexBuffer(const AbstractBuffer& indexBuffer, UInt64 offset = 0) override;
void BindIndexBuffer(const RenderBuffer& indexBuffer, UInt64 offset = 0) override;
void BindPipeline(const RenderPipeline& pipeline) override;
void BindShaderBinding(UInt32 set, const ShaderBinding& binding) override;
void BindShaderBinding(const RenderPipelineLayout& pipelineLayout, UInt32 set, const ShaderBinding& binding) override;
void BindVertexBuffer(UInt32 binding, const AbstractBuffer& vertexBuffer, UInt64 offset = 0) override;
void BindVertexBuffer(UInt32 binding, const RenderBuffer& vertexBuffer, UInt64 offset = 0) override;

void BlitTexture(const Texture& fromTexture, const Boxui& fromBox, TextureLayout fromLayout, const Texture& toTexture, const Boxui& toBox, TextureLayout toLayout, SamplerFilter filter) override;

@@ -35,7 +35,7 @@ namespace Nz
const RenderDeviceFeatures& GetEnabledFeatures() const override;
inline const GL::Context& GetReferenceContext() const;

std::shared_ptr<AbstractBuffer> InstantiateBuffer(BufferType type) override;
std::shared_ptr<RenderBuffer> InstantiateBuffer(BufferType type, UInt64 size, BufferUsageFlags usageFlags, const void* initialData = nullptr) override;
std::shared_ptr<CommandPool> InstantiateCommandPool(QueueType queueType) override;
std::shared_ptr<Framebuffer> InstantiateFramebuffer(unsigned int width, unsigned int height, const std::shared_ptr<RenderPass>& renderPass, const std::vector<std::shared_ptr<Texture>>& attachments) override;
std::shared_ptr<RenderPass> InstantiateRenderPass(std::vector<RenderPass::Attachment> attachments, std::vector<RenderPass::SubpassDescription> subpassDescriptions, std::vector<RenderPass::SubpassDependency> subpassDependencies) override;

@@ -42,11 +42,11 @@ namespace Nz
inline void BeginRenderPass(const Framebuffer& framebuffer, const RenderPass& renderPass, const Recti& renderRect);
inline void BeginRenderPass(const Framebuffer& framebuffer, const RenderPass& renderPass, const Recti& renderRect, std::initializer_list<ClearValues> clearValues);

virtual void BindIndexBuffer(const AbstractBuffer& indexBuffer, UInt64 offset = 0) = 0;
virtual void BindIndexBuffer(const RenderBuffer& indexBuffer, UInt64 offset = 0) = 0;
virtual void BindPipeline(const RenderPipeline& pipeline) = 0;
virtual void BindShaderBinding(UInt32 set, const ShaderBinding& binding) = 0;
virtual void BindShaderBinding(const RenderPipelineLayout& pipelineLayout, UInt32 set, const ShaderBinding& binding) = 0;
virtual void BindVertexBuffer(UInt32 binding, const AbstractBuffer& vertexBuffer, UInt64 offset = 0) = 0;
virtual void BindVertexBuffer(UInt32 binding, const RenderBuffer& vertexBuffer, UInt64 offset = 0) = 0;

virtual void BlitTexture(const Texture& fromTexture, const Boxui& fromBox, TextureLayout fromLayout, const Texture& toTexture, const Boxui& toBox, TextureLayout toLayout, SamplerFilter filter) = 0;

@@ -7,61 +7,34 @@
#ifndef NAZARA_RENDERER_RENDERBUFFER_HPP
#define NAZARA_RENDERER_RENDERBUFFER_HPP

#include <Nazara/Core/MovablePtr.hpp>
#include <Nazara/Prerequisites.hpp>
#include <Nazara/Renderer/Config.hpp>
#include <Nazara/Renderer/RenderDevice.hpp>
#include <Nazara/Utility/AbstractBuffer.hpp>
#include <Nazara/Utility/SoftwareBuffer.hpp>
#include <Nazara/Utility/Buffer.hpp>
#include <memory>
#include <unordered_map>

namespace Nz
{
class RenderDevice;

class NAZARA_RENDERER_API RenderBuffer : public AbstractBuffer
class NAZARA_RENDERER_API RenderBuffer : public Buffer
{
public:
inline RenderBuffer(Buffer* parent, BufferType type);
inline RenderBuffer(RenderDevice& renderDevice, BufferType type, UInt64 size, BufferUsageFlags usage);
RenderBuffer(const RenderBuffer&) = delete;
RenderBuffer(RenderBuffer&&) = default;
~RenderBuffer() = default;
RenderBuffer(RenderBuffer&&) = delete;
~RenderBuffer();

bool Fill(const void* data, UInt64 offset, UInt64 size) final;

bool Initialize(UInt64 size, BufferUsageFlags usage) override;

AbstractBuffer* GetHardwareBuffer(RenderDevice* device);
UInt64 GetSize() const override;
DataStorage GetStorage() const override;

void* Map(BufferAccess access, UInt64 offset = 0, UInt64 size = 0) final;
bool Unmap() final;
inline RenderDevice& GetRenderDevice();
inline const RenderDevice& GetRenderDevice() const;

RenderBuffer& operator=(const RenderBuffer&) = delete;
RenderBuffer& operator=(RenderBuffer&&) = default;

public: //< temp
bool Synchronize(RenderDevice* device);
RenderBuffer& operator=(RenderBuffer&&) = delete;

private:
struct HardwareBuffer;

HardwareBuffer* GetHardwareBufferData(RenderDevice* device);

struct HardwareBuffer
{
std::shared_ptr<AbstractBuffer> buffer;
bool synchronized = false;
RenderDevice& m_renderDevice;
};

BufferUsageFlags m_usage;
SoftwareBuffer m_softwareBuffer;
Buffer* m_parent;
BufferType m_type;
std::size_t m_size;
std::unordered_map<RenderDevice*, HardwareBuffer> m_hardwareBuffers;
};
NAZARA_RENDERER_API BufferFactory GetRenderBufferFactory(std::shared_ptr<RenderDevice> device);
}

#include <Nazara/Renderer/RenderBuffer.inl>

@@ -3,17 +3,25 @@
// For conditions of distribution and use, see copyright notice in Config.hpp

#include <Nazara/Renderer/RenderBuffer.hpp>
#include <memory>
#include <Nazara/Renderer/Debug.hpp>

namespace Nz
{
inline RenderBuffer::RenderBuffer(Buffer* parent, BufferType type) :
m_softwareBuffer(parent, type),
m_parent(parent),
m_type(type)
inline RenderBuffer::RenderBuffer(RenderDevice& renderDevice, BufferType type, UInt64 size, BufferUsageFlags usage) :
Buffer(DataStorage::Hardware, type, size, usage),
m_renderDevice(renderDevice)
{
}

inline RenderDevice& RenderBuffer::GetRenderDevice()
{
return m_renderDevice;
}

inline const RenderDevice& RenderBuffer::GetRenderDevice() const
{
return m_renderDevice;
}
}

#include <Nazara/Renderer/DebugOff.hpp>

@@ -9,20 +9,20 @@

#include <Nazara/Prerequisites.hpp>
#include <Nazara/Core/MovablePtr.hpp>
#include <Nazara/Utility/AbstractBuffer.hpp>
#include <Nazara/Renderer/RenderBuffer.hpp>

namespace Nz
{
class RenderBufferView
{
public:
inline RenderBufferView(AbstractBuffer* buffer);
inline RenderBufferView(AbstractBuffer* buffer, UInt64 offset, UInt64 size);
inline RenderBufferView(RenderBuffer* buffer);
inline RenderBufferView(RenderBuffer* buffer, UInt64 offset, UInt64 size);
RenderBufferView(const RenderBufferView&) = default;
RenderBufferView(RenderBufferView&&) = default;
~RenderBufferView() = default;

inline AbstractBuffer* GetBuffer() const;
inline RenderBuffer* GetBuffer() const;
inline UInt64 GetOffset() const;
inline UInt64 GetSize() const;

@@ -32,7 +32,7 @@ namespace Nz
private:
UInt64 m_offset;
UInt64 m_size;
AbstractBuffer* m_buffer;
RenderBuffer* m_buffer;
};
}

@@ -8,19 +8,19 @@

namespace Nz
{
inline RenderBufferView::RenderBufferView(AbstractBuffer* buffer) :
inline RenderBufferView::RenderBufferView(RenderBuffer* buffer) :
RenderBufferView(buffer, 0, buffer->GetSize())
{
}

inline RenderBufferView::RenderBufferView(AbstractBuffer* buffer, UInt64 offset, UInt64 size) :
inline RenderBufferView::RenderBufferView(RenderBuffer* buffer, UInt64 offset, UInt64 size) :
m_offset(offset),
m_size(size),
m_buffer(buffer)
{
}

inline AbstractBuffer* RenderBufferView::GetBuffer() const
inline RenderBuffer* RenderBufferView::GetBuffer() const
{
return m_buffer;
}

@@ -11,6 +11,7 @@
#include <Nazara/Renderer/Config.hpp>
#include <Nazara/Renderer/Enums.hpp>
#include <Nazara/Renderer/Framebuffer.hpp>
#include <Nazara/Renderer/RenderBuffer.hpp>
#include <Nazara/Renderer/RenderDeviceInfo.hpp>
#include <Nazara/Renderer/RenderPass.hpp>
#include <Nazara/Renderer/RenderPipeline.hpp>
@@ -19,7 +20,6 @@
#include <Nazara/Renderer/TextureSampler.hpp>
#include <Nazara/Shader/ShaderWriter.hpp>
#include <Nazara/Shader/Ast/Nodes.hpp>
#include <Nazara/Utility/AbstractBuffer.hpp>
#include <Nazara/Utility/PixelFormat.hpp>
#include <memory>
#include <string>
@@ -38,7 +38,7 @@ namespace Nz
virtual const RenderDeviceInfo& GetDeviceInfo() const = 0;
virtual const RenderDeviceFeatures& GetEnabledFeatures() const = 0;

virtual std::shared_ptr<AbstractBuffer> InstantiateBuffer(BufferType type) = 0;
virtual std::shared_ptr<RenderBuffer> InstantiateBuffer(BufferType type, UInt64 size, BufferUsageFlags usageFlags, const void* initialData = nullptr) = 0;
virtual std::shared_ptr<CommandPool> InstantiateCommandPool(QueueType queueType) = 0;
virtual std::shared_ptr<Framebuffer> InstantiateFramebuffer(unsigned int width, unsigned int height, const std::shared_ptr<RenderPass>& renderPass, const std::vector<std::shared_ptr<Texture>>& attachments) = 0;
virtual std::shared_ptr<RenderPass> InstantiateRenderPass(std::vector<RenderPass::Attachment> attachments, std::vector<RenderPass::SubpassDescription> subpassDescriptions, std::vector<RenderPass::SubpassDependency> subpassDependencies) = 0;

@@ -12,7 +12,6 @@
#include <Nazara/Renderer/Config.hpp>
#include <Nazara/Renderer/Enums.hpp>
#include <Nazara/Renderer/RenderDeviceInfo.hpp>
#include <Nazara/Utility/AbstractBuffer.hpp>
#include <Nazara/Utility/Enums.hpp>
#include <string>
#include <vector>

@@ -14,7 +14,7 @@

namespace Nz
{
class AbstractBuffer;
class RenderBuffer;
class ShaderBinding;
class ShaderBindingDeleter;
class Texture;

@@ -48,7 +48,7 @@ namespace Nz

struct UniformBufferBinding
{
AbstractBuffer* buffer;
RenderBuffer* buffer;
UInt64 offset;
UInt64 range;
};

@@ -30,7 +30,6 @@
#define NAZARA_GLOBAL_UTILITY_HPP

#include <Nazara/Utility/AbstractAtlas.hpp>
#include <Nazara/Utility/AbstractBuffer.hpp>
#include <Nazara/Utility/AbstractImage.hpp>
#include <Nazara/Utility/AbstractTextDrawer.hpp>
#include <Nazara/Utility/Algorithm.hpp>

@@ -1,33 +0,0 @@
// Copyright (C) 2022 Jérôme "Lynix" Leclercq (lynix680@gmail.com)
// This file is part of the "Nazara Engine - Utility module"
// For conditions of distribution and use, see copyright notice in Config.hpp

#pragma once

#ifndef NAZARA_UTILITY_ABSTRACTBUFFER_HPP
#define NAZARA_UTILITY_ABSTRACTBUFFER_HPP

#include <Nazara/Utility/Config.hpp>
#include <Nazara/Utility/Enums.hpp>

namespace Nz
{
class NAZARA_UTILITY_API AbstractBuffer
{
public:
AbstractBuffer() = default;
virtual ~AbstractBuffer();

virtual bool Fill(const void* data, UInt64 offset, UInt64 size) = 0;

virtual bool Initialize(UInt64 size, BufferUsageFlags usage) = 0;

virtual UInt64 GetSize() const = 0;
virtual DataStorage GetStorage() const = 0;

virtual void* Map(BufferAccess access, UInt64 offset = 0, UInt64 size = 0) = 0;
virtual bool Unmap() = 0;
};
}

#endif // NAZARA_UTILITY_ABSTRACTBUFFER_HPP

@@ -44,7 +44,7 @@ namespace Nz

NAZARA_UTILITY_API Boxf ComputeAABB(SparsePtr<const Vector3f> positionPtr, std::size_t vertexCount);
NAZARA_UTILITY_API void ComputeBoxIndexVertexCount(const Vector3ui& subdivision, std::size_t* indexCount, std::size_t* vertexCount);
NAZARA_UTILITY_API unsigned int ComputeCacheMissCount(IndexIterator indices, std::size_t indexCount);
NAZARA_UTILITY_API UInt64 ComputeCacheMissCount(IndexIterator indices, std::size_t indexCount);
NAZARA_UTILITY_API void ComputeConeIndexVertexCount(unsigned int subdivision, std::size_t* indexCount, std::size_t* vertexCount);
NAZARA_UTILITY_API void ComputeCubicSphereIndexVertexCount(unsigned int subdivision, std::size_t* indexCount, std::size_t* vertexCount);
NAZARA_UTILITY_API void ComputeIcoSphereIndexVertexCount(unsigned int recursionLevel, std::size_t* indexCount, std::size_t* vertexCount);

@@ -8,68 +8,45 @@
#define NAZARA_UTILITY_BUFFER_HPP

#include <Nazara/Prerequisites.hpp>
#include <Nazara/Utility/AbstractBuffer.hpp>
#include <Nazara/Utility/Config.hpp>
#include <Nazara/Utility/Enums.hpp>
#include <array>
#include <functional>
#include <memory>

namespace Nz
{
class Buffer;

using BufferFactory = std::function<std::shared_ptr<Buffer>(BufferType type, UInt64 size, BufferUsageFlags usage, const void* initialData)>;

class NAZARA_UTILITY_API Buffer
{
friend class Utility;

public:
using BufferFactory = std::function<std::unique_ptr<AbstractBuffer>(Buffer* parent, BufferType type)>;

Buffer(BufferType type);
Buffer(BufferType type, UInt32 size, DataStorage storage = DataStorage::Software, BufferUsageFlags usage = 0);
inline Buffer(DataStorage storage, BufferType type, UInt64 size, BufferUsageFlags usage);
Buffer(const Buffer&) = delete;
Buffer(Buffer&&) = delete;
~Buffer() = default;
virtual ~Buffer();

bool CopyContent(const Buffer& buffer);
std::shared_ptr<Buffer> CopyContent(const BufferFactory& bufferFactory);

bool Create(UInt32 size, DataStorage storage = DataStorage::Software, BufferUsageFlags usage = 0);
void Destroy();
virtual bool Fill(const void* data, UInt64 offset, UInt64 size) = 0;

bool Fill(const void* data, UInt32 offset, UInt32 size);

inline AbstractBuffer* GetImpl() const;
inline UInt32 GetSize() const;
inline UInt64 GetSize() const;
inline DataStorage GetStorage() const;
inline BufferType GetType() const;
inline BufferUsageFlags GetUsage() const;
inline BufferUsageFlags GetUsageFlags() const;

inline bool HasStorage(DataStorage storage) const;

inline bool IsValid() const;

void* Map(BufferAccess access, UInt32 offset = 0, UInt32 size = 0);
void* Map(BufferAccess access, UInt32 offset = 0, UInt32 size = 0) const;

bool SetStorage(DataStorage storage);

void Unmap() const;
virtual void* Map(UInt64 offset, UInt64 size) = 0;
virtual bool Unmap() = 0;

Buffer& operator=(const Buffer&) = delete;
Buffer& operator=(Buffer&&) = delete;

static bool IsStorageSupported(DataStorage storage);
static void SetBufferFactory(DataStorage storage, BufferFactory func);

private:
static bool Initialize();
static void Uninitialize();

std::unique_ptr<AbstractBuffer> m_impl;
BufferType m_type;
BufferUsageFlags m_usage;
UInt32 m_size;

static std::array<BufferFactory, DataStorageCount> s_bufferFactories;
DataStorage m_storage;
UInt64 m_size;
};
}

@@ -8,19 +8,22 @@

namespace Nz
{
inline AbstractBuffer* Buffer::GetImpl() const
inline Buffer::Buffer(DataStorage storage, BufferType type, UInt64 size, BufferUsageFlags usage) :
m_type(type),
m_usage(usage),
m_storage(storage),
m_size(size)
{
return m_impl.get();
}

inline UInt32 Buffer::GetSize() const
inline UInt64 Nz::Buffer::GetSize() const
{
return m_size;
}

inline DataStorage Buffer::GetStorage() const
{
return m_impl->GetStorage();
return m_storage;
}

inline BufferType Buffer::GetType() const
@@ -28,20 +31,10 @@ namespace Nz
return m_type;
}

inline BufferUsageFlags Buffer::GetUsage() const
inline BufferUsageFlags Buffer::GetUsageFlags() const
{
return m_usage;
}

inline bool Buffer::HasStorage(DataStorage storage) const
{
return GetStorage() == storage;
}

inline bool Buffer::IsValid() const
{
return m_impl != nullptr;
}
}

#include <Nazara/Utility/DebugOff.hpp>

@@ -16,16 +16,10 @@ namespace Nz
{
public:
BufferMapper();
BufferMapper(T* buffer, BufferAccess access, unsigned int offset = 0, unsigned int length = 0);
BufferMapper(T& buffer, BufferAccess access, unsigned int offset = 0, unsigned int length = 0);
BufferMapper(const T* buffer, BufferAccess access, unsigned int offset = 0, unsigned int length = 0);
BufferMapper(const T& buffer, BufferAccess access, unsigned int offset = 0, unsigned int length = 0);
BufferMapper(T& buffer, UInt64 offset, UInt64 length);
~BufferMapper();

bool Map(T* buffer, BufferAccess access, unsigned int offset = 0, unsigned int length = 0);
bool Map(T& buffer, BufferAccess access, unsigned int offset = 0, unsigned int length = 0);
bool Map(const T* buffer, BufferAccess access, unsigned int offset = 0, unsigned int length = 0);
bool Map(const T& buffer, BufferAccess access, unsigned int offset = 0, unsigned int length = 0);
bool Map(T& buffer, UInt64 offset, UInt64 length);

const T* GetBuffer() const;
void* GetPointer() const;
@@ -33,7 +27,7 @@ namespace Nz
void Unmap();

private:
const T* m_buffer;
T* m_buffer;
void* m_ptr;
};
}

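BufferMapper drops its BufferAccess parameter and its const overloads; mapping is now expressed as offset and length only. A usage sketch under that assumption (T = VertexBuffer and the offset/length units are illustrative):

// Old form: Nz::BufferMapper<Nz::VertexBuffer> mapper(buffer, Nz::BufferAccess::WriteOnly, 0, count);
// New form (sketch):
Nz::BufferMapper<Nz::VertexBuffer> mapper(vertexBuffer, 0, vertexCount);
void* ptr = mapper.GetPointer(); // write data through ptr
mapper.Unmap();
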
@ -17,33 +17,13 @@ namespace Nz
|
|||
}
|
||||
|
||||
template<typename T>
|
||||
BufferMapper<T>::BufferMapper(T* buffer, BufferAccess access, unsigned int offset, unsigned int length) :
|
||||
BufferMapper<T>::BufferMapper(T& buffer, UInt64 offset, UInt64 length) :
|
||||
m_buffer(nullptr)
|
||||
{
|
||||
if (!Map(buffer, access, offset, length))
|
||||
if (!Map(buffer, offset, length))
|
||||
NazaraError("Failed to map buffer"); ///TODO: Unexpected
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
BufferMapper<T>::BufferMapper(T& buffer, BufferAccess access, unsigned int offset, unsigned int length) :
|
||||
BufferMapper(&buffer, access, offset, length)
|
||||
{
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
BufferMapper<T>::BufferMapper(const T* buffer, BufferAccess access, unsigned int offset, unsigned int length) :
|
||||
m_buffer(nullptr)
|
||||
{
|
||||
if (!Map(buffer, access, offset, length))
|
||||
NazaraError("Failed to map buffer"); ///TODO: Unexpected
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
BufferMapper<T>::BufferMapper(const T& buffer, BufferAccess access, unsigned int offset, unsigned int length) :
|
||||
BufferMapper(&buffer, access, offset, length)
|
||||
{
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
BufferMapper<T>::~BufferMapper()
|
||||
{
|
||||
|
|
@ -64,63 +44,21 @@ namespace Nz
|
|||
}
|
||||
|
||||
template<typename T>
|
||||
bool BufferMapper<T>::Map(T* buffer, BufferAccess access, unsigned int offset, unsigned int length)
|
||||
bool BufferMapper<T>::Map(T& buffer, UInt64 offset, UInt64 length)
|
||||
{
|
||||
Unmap();
|
||||
|
||||
#if NAZARA_UTILITY_SAFE
|
||||
if (!buffer)
|
||||
m_buffer = &buffer;
|
||||
m_ptr = buffer.Map(offset, length);
|
||||
if (!m_ptr)
|
||||
{
|
||||
NazaraError("Buffer must be valid");
|
||||
m_ptr = nullptr;
|
||||
|
||||
NazaraError("Failed to map buffer"); ///TODO: Unexpected
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
|
||||
m_buffer = buffer;
|
||||
m_ptr = buffer->Map(access, offset, length);
|
||||
if (!m_ptr)
|
||||
NazaraError("Failed to map buffer"); ///TODO: Unexpected
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
bool BufferMapper<T>::Map(T& buffer, BufferAccess access, unsigned int offset, unsigned int length)
|
||||
{
|
||||
return Map(&buffer, access, offset, length);
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
bool BufferMapper<T>::Map(const T* buffer, BufferAccess access, unsigned int offset, unsigned int length)
|
||||
{
|
||||
Unmap();
|
||||
|
||||
#if NAZARA_UTILITY_SAFE
|
||||
if (!buffer)
|
||||
{
|
||||
NazaraError("Buffer must be valid");
|
||||
m_ptr = nullptr;
|
||||
|
||||
return false;
|
||||
}
|
||||
#endif
|
||||
|
||||
m_buffer = buffer;
|
||||
m_ptr = buffer->Map(access, offset, length);
|
||||
if (!m_ptr)
|
||||
NazaraError("Failed to map buffer"); ///TODO: Unexpected
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
bool BufferMapper<T>::Map(const T& buffer, BufferAccess access, unsigned int offset, unsigned int length)
|
||||
{
|
||||
return Map(&buffer, access, offset, length);
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
void BufferMapper<T>::Unmap()
|
||||
{
|
||||
|
|
|
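For reference, BufferMapper now takes the element range directly (offset + length) instead of a BufferAccess value; the intended CPU access is expressed through the usage flags the buffer was created with. A minimal sketch, not part of this commit, assuming a valid vertexBuffer built elsewhere:

// Hypothetical usage of the new mapping API (offset + length, no BufferAccess)
Nz::BufferMapper<Nz::VertexBuffer> mapper(*vertexBuffer, 0, vertexBuffer->GetVertexCount());
Nz::UInt8* data = static_cast<Nz::UInt8*>(mapper.GetPointer());
// ... write vertex data through `data` ...
// the destructor unmaps the buffer when `mapper` goes out of scope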
|||
|
|
@ -70,7 +70,9 @@ namespace Nz
|
|||
DeviceLocal,
|
||||
DirectMapping,
|
||||
Dynamic,
|
||||
Read,
|
||||
PersistentMapping,
|
||||
Write,
|
||||
|
||||
Max = DirectMapping
|
||||
};
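The enum now spells out CPU intent (Read, Write) and adds PersistentMapping, so buffers declare up front how they will be accessed. A hedged sketch of the two masks that recur throughout this commit:

// GPU-resident buffer the CPU only uploads to
Nz::BufferUsageFlags gpuUsage = Nz::BufferUsage::DeviceLocal | Nz::BufferUsage::Dynamic | Nz::BufferUsage::Write;

// CPU-side buffer meant to be mapped directly for both reads and writes
Nz::BufferUsageFlags cpuUsage = Nz::BufferUsage::DirectMapping | Nz::BufferUsage::Read | Nz::BufferUsage::Write;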
|
||||
|
|
|
|||
|
|
@ -17,40 +17,34 @@ namespace Nz
|
|||
public:
|
||||
IndexBuffer() = default;
|
||||
IndexBuffer(bool largeIndices, std::shared_ptr<Buffer> buffer);
|
||||
IndexBuffer(bool largeIndices, std::shared_ptr<Buffer> buffer, std::size_t offset, std::size_t size);
|
||||
IndexBuffer(bool largeIndices, std::size_t length, DataStorage storage, BufferUsageFlags usage);
|
||||
IndexBuffer(bool largeIndices, std::shared_ptr<Buffer> buffer, UInt64 offset, UInt64 size);
|
||||
IndexBuffer(bool largeIndices, UInt64 indexCount, BufferUsageFlags usage, const BufferFactory& bufferFactory, const void* initialData = nullptr);
|
||||
IndexBuffer(const IndexBuffer&) = default;
|
||||
IndexBuffer(IndexBuffer&&) noexcept = default;
|
||||
~IndexBuffer() = default;
|
||||
|
||||
unsigned int ComputeCacheMissCount() const;
|
||||
unsigned int ComputeCacheMissCount();
|
||||
|
||||
bool Fill(const void* data, std::size_t startIndex, std::size_t length);
|
||||
bool FillRaw(const void* data, std::size_t offset, std::size_t size);
|
||||
bool Fill(const void* data, UInt64 startIndex, UInt64 length);
|
||||
bool FillRaw(const void* data, UInt64 offset, UInt64 size);
|
||||
|
||||
inline const std::shared_ptr<Buffer>& GetBuffer() const;
|
||||
inline std::size_t GetEndOffset() const;
|
||||
inline std::size_t GetIndexCount() const;
|
||||
inline std::size_t GetStride() const;
|
||||
inline std::size_t GetStartOffset() const;
|
||||
inline UInt64 GetEndOffset() const;
|
||||
inline UInt64 GetIndexCount() const;
|
||||
inline UInt64 GetStride() const;
|
||||
inline UInt64 GetStartOffset() const;
|
||||
|
||||
inline bool HasLargeIndices() const;
|
||||
|
||||
inline bool IsValid() const;
|
||||
|
||||
inline void* Map(BufferAccess access, std::size_t startVertex = 0, std::size_t length = 0);
|
||||
inline void* Map(BufferAccess access, std::size_t startVertex = 0, std::size_t length = 0) const;
|
||||
void* MapRaw(BufferAccess access, std::size_t offset = 0, std::size_t size = 0);
|
||||
void* MapRaw(BufferAccess access, std::size_t offset = 0, std::size_t size = 0) const;
|
||||
inline void* Map(UInt64 startIndex, UInt64 length);
|
||||
inline void* Map(UInt64 startIndex, UInt64 length) const;
|
||||
void* MapRaw(UInt64 offset, UInt64 size);
|
||||
void* MapRaw(UInt64 offset, UInt64 size) const;
|
||||
|
||||
void Optimize();
|
||||
|
||||
void Reset();
|
||||
void Reset(bool largeIndices, std::shared_ptr<Buffer> buffer);
|
||||
void Reset(bool largeIndices, std::shared_ptr<Buffer> buffer, std::size_t offset, std::size_t size);
|
||||
void Reset(bool largeIndices, std::size_t length, DataStorage storage, BufferUsageFlags usage);
|
||||
void Reset(const IndexBuffer& indexBuffer);
|
||||
|
||||
void Unmap() const;
|
||||
|
||||
IndexBuffer& operator=(const IndexBuffer&) = default;
|
||||
|
|
@ -58,9 +52,9 @@ namespace Nz
|
|||
|
||||
private:
|
||||
std::shared_ptr<Buffer> m_buffer;
|
||||
std::size_t m_endOffset;
|
||||
std::size_t m_indexCount;
|
||||
std::size_t m_startOffset;
|
||||
UInt64 m_endOffset;
|
||||
UInt64 m_indexCount;
|
||||
UInt64 m_startOffset;
|
||||
bool m_largeIndices;
|
||||
};
|
||||
}
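IndexBuffer no longer takes a DataStorage: allocation goes through a BufferFactory and the buffer can be pre-filled at construction. A sketch using the software factory declared later in this commit; the quadIndices array is illustrative:

// Hypothetical: 36 16-bit indices kept in system memory, pre-filled from quadIndices
const std::array<Nz::UInt16, 36> quadIndices = { /* filled elsewhere */ };
std::shared_ptr<Nz::IndexBuffer> indexBuffer = std::make_shared<Nz::IndexBuffer>(
    false,                                                                           // largeIndices: UInt16 indices
    36,                                                                              // index count
    Nz::BufferUsage::DirectMapping | Nz::BufferUsage::Read | Nz::BufferUsage::Write, // CPU-accessible
    Nz::SoftwareBufferFactory,                                                       // allocate in RAM
    quadIndices.data());                                                             // optional initial data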
|
||||
|
|
|
|||
|
|
@ -13,22 +13,22 @@ namespace Nz
|
|||
return m_buffer;
|
||||
}
|
||||
|
||||
inline std::size_t IndexBuffer::GetEndOffset() const
|
||||
inline UInt64 IndexBuffer::GetEndOffset() const
|
||||
{
|
||||
return m_endOffset;
|
||||
}
|
||||
|
||||
inline std::size_t IndexBuffer::GetIndexCount() const
|
||||
inline UInt64 IndexBuffer::GetIndexCount() const
|
||||
{
|
||||
return m_indexCount;
|
||||
}
|
||||
|
||||
inline std::size_t IndexBuffer::GetStride() const
|
||||
inline UInt64 IndexBuffer::GetStride() const
|
||||
{
|
||||
return static_cast<std::size_t>((m_largeIndices) ? sizeof(UInt32) : sizeof(UInt16));
|
||||
return (m_largeIndices) ? sizeof(UInt32) : sizeof(UInt16);
|
||||
}
|
||||
|
||||
inline std::size_t IndexBuffer::GetStartOffset() const
|
||||
inline UInt64 IndexBuffer::GetStartOffset() const
|
||||
{
|
||||
return m_startOffset;
|
||||
}
|
||||
|
|
@ -43,16 +43,16 @@ namespace Nz
|
|||
return m_buffer != nullptr;
|
||||
}
|
||||
|
||||
inline void* IndexBuffer::Map(BufferAccess access, std::size_t startIndex, std::size_t length)
|
||||
inline void* IndexBuffer::Map(UInt64 startIndex, UInt64 length)
|
||||
{
|
||||
std::size_t stride = GetStride();
|
||||
return MapRaw(access, startIndex*stride, length*stride);
|
||||
UInt64 stride = GetStride();
|
||||
return MapRaw(startIndex * stride, length * stride);
|
||||
}
|
||||
|
||||
inline void* IndexBuffer::Map(BufferAccess access, std::size_t startIndex, std::size_t length) const
|
||||
inline void* IndexBuffer::Map(UInt64 startIndex, UInt64 length) const
|
||||
{
|
||||
std::size_t stride = GetStride();
|
||||
return MapRaw(access, startIndex*stride, length*stride);
|
||||
UInt64 stride = GetStride();
|
||||
return MapRaw(startIndex * stride, length * stride);
|
||||
}
|
||||
}
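Map converts the index range into a byte range through the stride before forwarding to MapRaw. A quick worked example, assuming an indexBuffer with 16-bit indices (stride of 2 bytes):

// Map(10, 20) forwards to MapRaw(10 * 2, 20 * 2) = MapRaw(20, 40),
// i.e. bytes [20, 60) of the underlying buffer
void* ptr = indexBuffer->Map(10, 20);
// ... read or write 20 indices ...
indexBuffer->Unmap();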
|
||||
|
||||
|
|
|
|||
|
|
@ -19,10 +19,8 @@ namespace Nz
|
|||
class NAZARA_UTILITY_API IndexMapper
|
||||
{
|
||||
public:
|
||||
IndexMapper(IndexBuffer& indexBuffer, BufferAccess access = BufferAccess::ReadWrite, std::size_t indexCount = 0);
|
||||
IndexMapper(SubMesh& subMesh, BufferAccess access = BufferAccess::ReadWrite);
|
||||
IndexMapper(const IndexBuffer& indexBuffer, BufferAccess access = BufferAccess::ReadOnly, std::size_t indexCount = 0);
|
||||
IndexMapper(const SubMesh& subMesh, BufferAccess access = BufferAccess::ReadOnly);
|
||||
IndexMapper(IndexBuffer& indexBuffer, std::size_t indexCount = 0);
|
||||
IndexMapper(SubMesh& subMesh);

|
||||
~IndexMapper() = default;
|
||||
|
||||
UInt32 Get(std::size_t i) const;
|
||||
|
|
|
|||
|
|
@ -18,6 +18,7 @@
|
|||
#include <Nazara/Utility/Config.hpp>
|
||||
#include <Nazara/Utility/Enums.hpp>
|
||||
#include <Nazara/Utility/Skeleton.hpp>
|
||||
#include <Nazara/Utility/SoftwareBuffer.hpp>
|
||||
#include <Nazara/Utility/SubMesh.hpp>
|
||||
#include <Nazara/Utility/VertexDeclaration.hpp>
|
||||
#include <Nazara/Utility/VertexStruct.hpp>
|
||||
|
|
@ -28,20 +29,35 @@ namespace Nz
|
|||
{
|
||||
struct NAZARA_UTILITY_API MeshParams : ResourceParameters
|
||||
{
|
||||
MeshParams();
|
||||
// How buffers will be allocated (by default in RAM)
|
||||
BufferFactory bufferFactory = &SoftwareBufferFactory;
|
||||
|
||||
BufferUsageFlags indexBufferFlags = 0; ///< Buffer usage flags used to build the index buffers
|
||||
BufferUsageFlags vertexBufferFlags = 0; ///< Buffer usage flags used to build the vertex buffers
|
||||
Matrix4f matrix = Matrix4f::Identity(); ///< A matrix which will transform every vertex position
|
||||
DataStorage storage = DataStorage::Hardware; ///< The place where the buffers will be allocated
|
||||
Vector2f texCoordOffset = {0.f, 0.f}; ///< Offset to apply on the texture coordinates (not scaled)
|
||||
Vector2f texCoordScale = {1.f, 1.f}; ///< Scale to apply on the texture coordinates
|
||||
bool animated = true; ///< If true, will load an animated version of the model if possible
|
||||
bool center = false; ///< If true, will center the mesh vertices around the origin
|
||||
// Buffer usage flags used to build the index buffers
|
||||
BufferUsageFlags indexBufferFlags = BufferUsage::DirectMapping | BufferUsage::Read | BufferUsage::Write;
|
||||
|
||||
// Buffer usage flags used to build the vertex buffers
|
||||
BufferUsageFlags vertexBufferFlags = BufferUsage::DirectMapping | BufferUsage::Read | BufferUsage::Write;
|
||||
|
||||
// A matrix which will transform every vertex position
|
||||
Matrix4f matrix = Matrix4f::Identity();
|
||||
|
||||
// Offset to apply on the texture coordinates (not scaled)
|
||||
Vector2f texCoordOffset = {0.f, 0.f};
|
||||
|
||||
// Scale to apply on the texture coordinates
|
||||
Vector2f texCoordScale = {1.f, 1.f};
|
||||
|
||||
// If true, will load an animated version of the model if possible
|
||||
bool animated = true;
|
||||
|
||||
// If true, will center the mesh vertices around the origin
|
||||
bool center = false;
|
||||
|
||||
// Optimize the index buffers after loading; improves cache locality (and thus rendering speed) but increases loading time.
|
||||
#ifndef NAZARA_DEBUG
|
||||
bool optimizeIndexBuffers = true; ///< Optimize the index buffers after loading, improve cache locality (and thus rendering speed) but increase loading time.
|
||||
bool optimizeIndexBuffers = true;
|
||||
#else
|
||||
bool optimizeIndexBuffers = false; ///< Since this optimization take a lot of time, especially in debug mode, don't enable it by default in debug.
|
||||
bool optimizeIndexBuffers = false;
|
||||
#endif
|
||||
|
||||
/* The declaration must have a Vector3f position component enabled
|
||||
|
|
|
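With the storage member gone, the allocation strategy of a loaded mesh is chosen through bufferFactory plus the per-buffer usage flags. A sketch of loading straight into device buffers; the renderDevice variable is assumed and this particular pairing is illustrative, not mandated by the commit:

Nz::MeshParams params;
params.bufferFactory     = Nz::GetRenderBufferFactory(renderDevice);              // GPU-side allocation
params.indexBufferFlags  = Nz::BufferUsage::DeviceLocal | Nz::BufferUsage::Write;
params.vertexBufferFlags = Nz::BufferUsage::DeviceLocal | Nz::BufferUsage::Write;
// leaving bufferFactory untouched keeps the default SoftwareBufferFactory (RAM)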
|||
|
|
@ -17,12 +17,12 @@ namespace Nz
|
|||
class NAZARA_UTILITY_API SkeletalMesh final : public SubMesh
|
||||
{
|
||||
public:
|
||||
SkeletalMesh(std::shared_ptr<VertexBuffer> vertexBuffer, std::shared_ptr<const IndexBuffer> indexBuffer);
|
||||
SkeletalMesh(std::shared_ptr<VertexBuffer> vertexBuffer, std::shared_ptr<IndexBuffer> indexBuffer);
|
||||
~SkeletalMesh() = default;
|
||||
|
||||
const Boxf& GetAABB() const override;
|
||||
AnimationType GetAnimationType() const final;
|
||||
const std::shared_ptr<const IndexBuffer>& GetIndexBuffer() const override;
|
||||
const std::shared_ptr<IndexBuffer>& GetIndexBuffer() const override;
|
||||
const std::shared_ptr<VertexBuffer>& GetVertexBuffer() const;
|
||||
std::size_t GetVertexCount() const override;
|
||||
|
||||
|
|
@ -30,11 +30,11 @@ namespace Nz
|
|||
bool IsValid() const;
|
||||
|
||||
void SetAABB(const Boxf& aabb);
|
||||
void SetIndexBuffer(std::shared_ptr<const IndexBuffer> indexBuffer);
|
||||
void SetIndexBuffer(std::shared_ptr<IndexBuffer> indexBuffer);
|
||||
|
||||
private:
|
||||
Boxf m_aabb;
|
||||
std::shared_ptr<const IndexBuffer> m_indexBuffer;
|
||||
std::shared_ptr<IndexBuffer> m_indexBuffer;
|
||||
std::shared_ptr<VertexBuffer> m_vertexBuffer;
|
||||
};
|
||||
}
|
||||
|
|
|
|||
|
|
@ -8,34 +8,30 @@
|
|||
#define NAZARA_UTILITY_SOFTWAREBUFFER_HPP
|
||||
|
||||
#include <Nazara/Prerequisites.hpp>
|
||||
#include <Nazara/Utility/AbstractBuffer.hpp>
|
||||
#include <Nazara/Utility/Buffer.hpp>
|
||||
#include <vector>
|
||||
|
||||
namespace Nz
|
||||
{
|
||||
class Buffer;
|
||||
|
||||
class NAZARA_UTILITY_API SoftwareBuffer : public AbstractBuffer
|
||||
class NAZARA_UTILITY_API SoftwareBuffer : public Buffer
|
||||
{
|
||||
public:
|
||||
SoftwareBuffer(Buffer* parent, BufferType type);
|
||||
SoftwareBuffer(BufferType type, UInt64 size, BufferUsageFlags usage, const void* initialData);
|
||||
~SoftwareBuffer() = default;
|
||||
|
||||
bool Fill(const void* data, UInt64 offset, UInt64 size) override;
|
||||
|
||||
bool Initialize(UInt64 size, BufferUsageFlags usage) override;
|
||||
|
||||
const UInt8* GetData() const;
|
||||
UInt64 GetSize() const override;
|
||||
DataStorage GetStorage() const override;
|
||||
|
||||
void* Map(BufferAccess access, UInt64 offset = 0, UInt64 size = 0) override;
|
||||
void* Map(UInt64 offset = 0, UInt64 size = 0) override;
|
||||
bool Unmap() override;
|
||||
|
||||
private:
|
||||
std::vector<UInt8> m_buffer;
|
||||
std::unique_ptr<UInt8[]> m_buffer;
|
||||
bool m_mapped;
|
||||
};
|
||||
|
||||
NAZARA_UTILITY_API std::shared_ptr<Buffer> SoftwareBufferFactory(BufferType type, UInt64 size, BufferUsageFlags usage, const void* initialData = nullptr);
|
||||
}
|
||||
|
||||
#endif // NAZARA_UTILITY_SOFTWAREBUFFER_HPP
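SoftwareBuffer now receives its size, usage and optional initial data at construction, and the factory function above is what MeshParams defaults to. A minimal sketch of creating and mapping a RAM-backed buffer; the size and variable names are illustrative:

// 256-byte uniform-style buffer living in system memory
std::shared_ptr<Nz::Buffer> cpuBuffer = Nz::SoftwareBufferFactory(
    Nz::BufferType::Uniform, 256,
    Nz::BufferUsage::DirectMapping | Nz::BufferUsage::Read | Nz::BufferUsage::Write);

void* ptr = cpuBuffer->Map(0, 256); // new signature: offset + size, no BufferAccess
// ... fill the mapped memory ...
cpuBuffer->Unmap();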
|
||||
|
|
|
|||
|
|
@ -15,7 +15,7 @@ namespace Nz
|
|||
class NAZARA_UTILITY_API StaticMesh final : public SubMesh
|
||||
{
|
||||
public:
|
||||
StaticMesh(std::shared_ptr<VertexBuffer> vertexBuffer, std::shared_ptr<const IndexBuffer> indexBuffer);
|
||||
StaticMesh(std::shared_ptr<VertexBuffer> vertexBuffer, std::shared_ptr<IndexBuffer> indexBuffer);
|
||||
~StaticMesh() = default;
|
||||
|
||||
void Center();
|
||||
|
|
@ -24,7 +24,7 @@ namespace Nz
|
|||
|
||||
const Boxf& GetAABB() const override;
|
||||
AnimationType GetAnimationType() const final;
|
||||
const std::shared_ptr<const IndexBuffer>& GetIndexBuffer() const override;
|
||||
const std::shared_ptr<IndexBuffer>& GetIndexBuffer() const override;
|
||||
const std::shared_ptr<VertexBuffer>& GetVertexBuffer() const;
|
||||
std::size_t GetVertexCount() const override;
|
||||
|
||||
|
|
@ -32,11 +32,11 @@ namespace Nz
|
|||
bool IsValid() const;
|
||||
|
||||
void SetAABB(const Boxf& aabb);
|
||||
void SetIndexBuffer(std::shared_ptr<const IndexBuffer> indexBuffer);
|
||||
void SetIndexBuffer(std::shared_ptr<IndexBuffer> indexBuffer);
|
||||
|
||||
private:
|
||||
Boxf m_aabb;
|
||||
std::shared_ptr<const IndexBuffer> m_indexBuffer;
|
||||
std::shared_ptr<IndexBuffer> m_indexBuffer;
|
||||
std::shared_ptr<VertexBuffer> m_vertexBuffer;
|
||||
};
|
||||
}
|
||||
|
|
|
|||
|
|
@ -34,7 +34,7 @@ namespace Nz
|
|||
|
||||
virtual const Boxf& GetAABB() const = 0;
|
||||
virtual AnimationType GetAnimationType() const = 0;
|
||||
virtual const std::shared_ptr<const IndexBuffer>& GetIndexBuffer() const = 0;
|
||||
virtual const std::shared_ptr<IndexBuffer>& GetIndexBuffer() const = 0;
|
||||
std::size_t GetMaterialIndex() const;
|
||||
PrimitiveMode GetPrimitiveMode() const;
|
||||
std::size_t GetTriangleCount() const;
|
||||
|
|
|
|||
|
|
@ -18,8 +18,8 @@ namespace Nz
|
|||
class NAZARA_UTILITY_API TriangleIterator
|
||||
{
|
||||
public:
|
||||
TriangleIterator(PrimitiveMode primitiveMode, const IndexBuffer& indexBuffer);
|
||||
TriangleIterator(const SubMesh& subMesh);
|
||||
TriangleIterator(PrimitiveMode primitiveMode, IndexBuffer& indexBuffer);
|
||||
TriangleIterator(SubMesh& subMesh);
|
||||
~TriangleIterator() = default;
|
||||
|
||||
bool Advance();
|
||||
|
|
|
|||
|
|
@ -15,30 +15,21 @@ namespace Nz
|
|||
class NAZARA_UTILITY_API UniformBuffer
|
||||
{
|
||||
public:
|
||||
UniformBuffer() = default;
|
||||
UniformBuffer(std::shared_ptr<Buffer> buffer);
|
||||
UniformBuffer(std::shared_ptr<Buffer> buffer, UInt32 offset, UInt32 size);
|
||||
UniformBuffer(UInt32 length, DataStorage storage, BufferUsageFlags usage);
|
||||
UniformBuffer(std::shared_ptr<Buffer> buffer, UInt64 offset, UInt64 size);
|
||||
UniformBuffer(UInt64 size, BufferUsageFlags usage, const BufferFactory& bufferFactory, const void* initialData = nullptr);
|
||||
UniformBuffer(const UniformBuffer&) = default;
|
||||
UniformBuffer(UniformBuffer&&) noexcept = default;
|
||||
~UniformBuffer() = default;
|
||||
|
||||
bool Fill(const void* data, UInt32 offset, UInt32 size);
|
||||
bool Fill(const void* data, UInt64 offset, UInt64 size);
|
||||
|
||||
inline const std::shared_ptr<Buffer>& GetBuffer() const;
|
||||
inline UInt32 GetEndOffset() const;
|
||||
inline UInt32 GetStartOffset() const;
|
||||
inline UInt64 GetEndOffset() const;
|
||||
inline UInt64 GetStartOffset() const;
|
||||
|
||||
inline bool IsValid() const;
|
||||
|
||||
void* Map(BufferAccess access, UInt32 offset = 0, UInt32 size = 0);
|
||||
void* Map(BufferAccess access, UInt32 offset = 0, UInt32 size = 0) const;
|
||||
|
||||
void Reset();
|
||||
void Reset(std::shared_ptr<Buffer> buffer);
|
||||
void Reset(std::shared_ptr<Buffer> buffer, UInt32 offset, UInt32 size);
|
||||
void Reset(UInt32 size, DataStorage storage, BufferUsageFlags usage);
|
||||
void Reset(const UniformBuffer& uniformBuffer);
|
||||
void* Map(UInt64 offset = 0, UInt64 size = 0);
|
||||
void* Map(UInt64 offset = 0, UInt64 size = 0) const;
|
||||
|
||||
void Unmap() const;
|
||||
|
||||
|
|
@ -47,8 +38,8 @@ namespace Nz
|
|||
|
||||
private:
|
||||
std::shared_ptr<Buffer> m_buffer;
|
||||
UInt32 m_endOffset;
|
||||
UInt32 m_startOffset;
|
||||
UInt64 m_endOffset;
|
||||
UInt64 m_startOffset;
|
||||
};
|
||||
}
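UniformBuffer follows the same pattern: UInt64 offsets, a BufferFactory and optional initial data, with Map losing its BufferAccess parameter. A hedged sketch, assuming a render device wrapped through GetRenderBufferFactory and a CPU-side settings blob:

// Hypothetical 512-byte UBO allocated through a device-backed factory
Nz::UniformBuffer settingsUbo(512,
                              Nz::BufferUsage::DeviceLocal | Nz::BufferUsage::Dynamic | Nz::BufferUsage::Write,
                              Nz::GetRenderBufferFactory(renderDevice));

settingsUbo.Fill(settingsData.data(), 0, settingsData.size()); // upload the assumed CPU-side blob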
|
||||
|
||||
|
|
|
|||
|
|
@ -13,20 +13,15 @@ namespace Nz
|
|||
return m_buffer;
|
||||
}
|
||||
|
||||
inline UInt32 UniformBuffer::GetEndOffset() const
|
||||
inline UInt64 UniformBuffer::GetEndOffset() const
|
||||
{
|
||||
return m_endOffset;
|
||||
}
|
||||
|
||||
inline UInt32 UniformBuffer::GetStartOffset() const
|
||||
inline UInt64 UniformBuffer::GetStartOffset() const
|
||||
{
|
||||
return m_startOffset;
|
||||
}
|
||||
|
||||
inline bool UniformBuffer::IsValid() const
|
||||
{
|
||||
return m_buffer != nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
#include <Nazara/Utility/DebugOff.hpp>
|
||||
|
|
|
|||
|
|
@ -18,34 +18,28 @@ namespace Nz
|
|||
public:
|
||||
VertexBuffer() = default;
|
||||
VertexBuffer(std::shared_ptr<const VertexDeclaration> vertexDeclaration, std::shared_ptr<Buffer> buffer);
|
||||
VertexBuffer(std::shared_ptr<const VertexDeclaration> vertexDeclaration, std::shared_ptr<Buffer> buffer, std::size_t offset, std::size_t size);
|
||||
VertexBuffer(std::shared_ptr<const VertexDeclaration> vertexDeclaration, std::size_t length, DataStorage storage, BufferUsageFlags usage);
|
||||
VertexBuffer(std::shared_ptr<const VertexDeclaration> vertexDeclaration, std::shared_ptr<Buffer> buffer, UInt64 offset, UInt64 size);
|
||||
VertexBuffer(std::shared_ptr<const VertexDeclaration> vertexDeclaration, UInt64 vertexCount, BufferUsageFlags usage, const BufferFactory& bufferFactory, const void* initialData = nullptr);
|
||||
VertexBuffer(const VertexBuffer&) = default;
|
||||
VertexBuffer(VertexBuffer&&) noexcept = default;
|
||||
~VertexBuffer() = default;
|
||||
|
||||
bool Fill(const void* data, std::size_t startVertex, std::size_t length);
|
||||
bool FillRaw(const void* data, std::size_t offset, std::size_t size);
|
||||
bool Fill(const void* data, UInt64 startVertex, UInt64 length);
|
||||
bool FillRaw(const void* data, UInt64 offset, UInt64 size);
|
||||
|
||||
inline const std::shared_ptr<Buffer>& GetBuffer() const;
|
||||
inline std::size_t GetEndOffset() const;
|
||||
inline std::size_t GetStartOffset() const;
|
||||
inline std::size_t GetStride() const;
|
||||
inline std::size_t GetVertexCount() const;
|
||||
inline UInt64 GetEndOffset() const;
|
||||
inline UInt64 GetStartOffset() const;
|
||||
inline UInt64 GetStride() const;
|
||||
inline UInt64 GetVertexCount() const;
|
||||
inline const std::shared_ptr<const VertexDeclaration>& GetVertexDeclaration() const;
|
||||
|
||||
inline bool IsValid() const;
|
||||
|
||||
void* Map(BufferAccess access, std::size_t startVertex = 0, std::size_t length = 0);
|
||||
void* Map(BufferAccess access, std::size_t startVertex = 0, std::size_t length = 0) const;
|
||||
void* MapRaw(BufferAccess access, std::size_t offset = 0, std::size_t size = 0);
|
||||
void* MapRaw(BufferAccess access, std::size_t offset = 0, std::size_t size = 0) const;
|
||||
|
||||
void Reset();
|
||||
void Reset(std::shared_ptr<const VertexDeclaration> vertexDeclaration, std::shared_ptr<Buffer> buffer);
|
||||
void Reset(std::shared_ptr<const VertexDeclaration> vertexDeclaration, std::shared_ptr<Buffer> buffer, std::size_t offset, std::size_t size);
|
||||
void Reset(std::shared_ptr<const VertexDeclaration> vertexDeclaration, std::size_t length, DataStorage storage, BufferUsageFlags usage);
|
||||
void Reset(const VertexBuffer& vertexBuffer);
|
||||
void* Map(UInt64 startVertex, UInt64 length);
|
||||
void* Map(UInt64 startVertex, UInt64 length) const;
|
||||
void* MapRaw(UInt64 offset, UInt64 size);
|
||||
void* MapRaw(UInt64 offset, UInt64 size) const;
|
||||
|
||||
void SetVertexDeclaration(std::shared_ptr<const VertexDeclaration> vertexDeclaration);
|
||||
|
||||
|
|
@ -57,9 +51,9 @@ namespace Nz
|
|||
private:
|
||||
std::shared_ptr<Buffer> m_buffer;
|
||||
std::shared_ptr<const VertexDeclaration> m_vertexDeclaration;
|
||||
std::size_t m_endOffset;
|
||||
std::size_t m_startOffset;
|
||||
std::size_t m_vertexCount;
|
||||
UInt64 m_endOffset;
|
||||
UInt64 m_startOffset;
|
||||
UInt64 m_vertexCount;
|
||||
};
|
||||
}
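VertexBuffer gets the same treatment, and VertexMapper (further down) no longer takes a BufferAccess either. A sketch that builds a small CPU-side vertex buffer and writes positions through GetComponentPtr; the vertex count and layout are illustrative:

std::shared_ptr<Nz::VertexBuffer> vertexBuffer = std::make_shared<Nz::VertexBuffer>(
    Nz::VertexDeclaration::Get(Nz::VertexLayout::XYZ),
    3,                                                                               // vertex count
    Nz::BufferUsage::DirectMapping | Nz::BufferUsage::Read | Nz::BufferUsage::Write,
    Nz::SoftwareBufferFactory);

Nz::VertexMapper vertexMapper(*vertexBuffer);
if (auto posPtr = vertexMapper.GetComponentPtr<Nz::Vector3f>(Nz::VertexComponent::Position))
{
    posPtr[0] = Nz::Vector3f(0.f, 0.f, 0.f);
    posPtr[1] = Nz::Vector3f(1.f, 0.f, 0.f);
    posPtr[2] = Nz::Vector3f(0.f, 1.f, 0.f);
}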
|
||||
|
||||
|
|
|
|||
|
|
@ -13,22 +13,22 @@ namespace Nz
|
|||
return m_buffer;
|
||||
}
|
||||
|
||||
inline std::size_t VertexBuffer::GetEndOffset() const
|
||||
inline UInt64 VertexBuffer::GetEndOffset() const
|
||||
{
|
||||
return m_endOffset;
|
||||
}
|
||||
|
||||
inline std::size_t VertexBuffer::GetStride() const
|
||||
inline UInt64 VertexBuffer::GetStride() const
|
||||
{
|
||||
return static_cast<std::size_t>(m_vertexDeclaration->GetStride());
|
||||
return static_cast<UInt64>(m_vertexDeclaration->GetStride());
|
||||
}
|
||||
|
||||
inline std::size_t VertexBuffer::GetStartOffset() const
|
||||
inline UInt64 VertexBuffer::GetStartOffset() const
|
||||
{
|
||||
return m_startOffset;
|
||||
}
|
||||
|
||||
inline std::size_t VertexBuffer::GetVertexCount() const
|
||||
inline UInt64 VertexBuffer::GetVertexCount() const
|
||||
{
|
||||
return m_vertexCount;
|
||||
}
|
||||
|
|
|
|||
|
|
@ -20,10 +20,8 @@ namespace Nz
|
|||
class NAZARA_UTILITY_API VertexMapper
|
||||
{
|
||||
public:
|
||||
VertexMapper(SubMesh& subMesh, BufferAccess access = BufferAccess::ReadWrite);
|
||||
VertexMapper(VertexBuffer& vertexBuffer, BufferAccess access = BufferAccess::ReadWrite);
|
||||
VertexMapper(const SubMesh& subMesh, BufferAccess access = BufferAccess::ReadOnly);
|
||||
VertexMapper(const VertexBuffer& vertexBuffer, BufferAccess access = BufferAccess::ReadOnly);
|
||||
VertexMapper(SubMesh& subMesh);
|
||||
VertexMapper(VertexBuffer& vertexBuffer);
|
||||
~VertexMapper();
|
||||
|
||||
template<typename T> SparsePtr<T> GetComponentPtr(VertexComponent component, std::size_t componentIndex = 0);
|
||||
|
|
|
|||
|
|
@ -8,7 +8,7 @@
|
|||
#define NAZARA_VULKANRENDERER_VULKANBUFFER_HPP
|
||||
|
||||
#include <Nazara/Prerequisites.hpp>
|
||||
#include <Nazara/Utility/AbstractBuffer.hpp>
|
||||
#include <Nazara/Renderer/RenderBuffer.hpp>
|
||||
#include <Nazara/VulkanRenderer/Config.hpp>
|
||||
#include <Nazara/VulkanRenderer/Wrapper/Buffer.hpp>
|
||||
#include <Nazara/VulkanRenderer/Wrapper/DeviceMemory.hpp>
|
||||
|
|
@ -18,36 +18,30 @@
|
|||
|
||||
namespace Nz
|
||||
{
|
||||
class NAZARA_VULKANRENDERER_API VulkanBuffer : public AbstractBuffer
|
||||
class NAZARA_VULKANRENDERER_API VulkanBuffer : public RenderBuffer
|
||||
{
|
||||
public:
|
||||
inline VulkanBuffer(Vk::Device& device, BufferType type);
|
||||
inline VulkanBuffer(VulkanDevice& device, BufferType type, UInt64 size, BufferUsageFlags usage, const void* initialData = nullptr);
|
||||
VulkanBuffer(const VulkanBuffer&) = delete;
|
||||
VulkanBuffer(VulkanBuffer&&) = delete; ///TODO
|
||||
virtual ~VulkanBuffer();
|
||||
|
||||
bool Fill(const void* data, UInt64 offset, UInt64 size) override;
|
||||
|
||||
bool Initialize(UInt64 size, BufferUsageFlags usage) override;
|
||||
|
||||
inline VkBuffer GetBuffer() const;
|
||||
UInt64 GetSize() const override;
|
||||
DataStorage GetStorage() const override;
|
||||
|
||||
void* Map(BufferAccess access, UInt64 offset, UInt64 size) override;
|
||||
void* Map(UInt64 offset, UInt64 size) override;
|
||||
bool Unmap() override;
|
||||
|
||||
VulkanBuffer& operator=(const VulkanBuffer&) = delete;
|
||||
VulkanBuffer& operator=(VulkanBuffer&&) = delete; ///TODO
|
||||
|
||||
private:
|
||||
BufferType m_type;
|
||||
BufferUsageFlags m_usage;
|
||||
UInt64 m_size;
|
||||
VkBuffer m_buffer;
|
||||
VkBuffer m_stagingBuffer;
|
||||
VmaAllocation m_allocation;
|
||||
VmaAllocation m_stagingAllocation;
|
||||
UInt64 m_stagingBufferSize;
|
||||
Vk::Device& m_device;
|
||||
};
|
||||
}
|
||||
|
|
|
|||
|
|
@ -7,12 +7,6 @@
|
|||
|
||||
namespace Nz
|
||||
{
|
||||
inline VulkanBuffer::VulkanBuffer(Vk::Device& device, BufferType type) :
|
||||
m_type(type),
|
||||
m_device(device)
|
||||
{
|
||||
}
|
||||
|
||||
inline VkBuffer VulkanBuffer::GetBuffer() const
|
||||
{
|
||||
return m_buffer;
|
||||
|
|
|
|||
|
|
@ -27,11 +27,11 @@ namespace Nz
|
|||
void BeginDebugRegion(const std::string_view& regionName, const Color& color) override;
|
||||
void BeginRenderPass(const Framebuffer& framebuffer, const RenderPass& renderPass, const Recti& renderRect, const ClearValues* clearValues, std::size_t clearValueCount) override;
|
||||
|
||||
void BindIndexBuffer(const AbstractBuffer& indexBuffer, UInt64 offset = 0) override;
|
||||
void BindIndexBuffer(const RenderBuffer& indexBuffer, UInt64 offset = 0) override;
|
||||
void BindPipeline(const RenderPipeline& pipeline) override;
|
||||
void BindShaderBinding(UInt32 set, const ShaderBinding& binding) override;
|
||||
void BindShaderBinding(const RenderPipelineLayout& pipelineLayout, UInt32 set, const ShaderBinding& binding) override;
|
||||
void BindVertexBuffer(UInt32 binding, const AbstractBuffer& vertexBuffer, UInt64 offset = 0) override;
|
||||
void BindVertexBuffer(UInt32 binding, const RenderBuffer& vertexBuffer, UInt64 offset = 0) override;
|
||||
|
||||
void BlitTexture(const Texture& fromTexture, const Boxui& fromBox, TextureLayout fromLayout, const Texture& toTexture, const Boxui& toBox, TextureLayout toLayout, SamplerFilter filter) override;
|
||||
|
||||
|
|
|
|||
|
|
@ -26,7 +26,7 @@ namespace Nz
|
|||
const RenderDeviceInfo& GetDeviceInfo() const override;
|
||||
const RenderDeviceFeatures& GetEnabledFeatures() const override;
|
||||
|
||||
std::shared_ptr<AbstractBuffer> InstantiateBuffer(BufferType type) override;
|
||||
std::shared_ptr<RenderBuffer> InstantiateBuffer(BufferType type, UInt64 size, BufferUsageFlags usageFlags, const void* initialData = nullptr) override;
|
||||
std::shared_ptr<CommandPool> InstantiateCommandPool(QueueType queueType) override;
|
||||
std::shared_ptr<Framebuffer> InstantiateFramebuffer(unsigned int width, unsigned int height, const std::shared_ptr<RenderPass>& renderPass, const std::vector<std::shared_ptr<Texture>>& attachments) override;
|
||||
std::shared_ptr<RenderPass> InstantiateRenderPass(std::vector<RenderPass::Attachment> attachments, std::vector<RenderPass::SubpassDescription> subpassDescriptions, std::vector<RenderPass::SubpassDependency> subpassDependencies) override;
|
||||
|
|
|
|||
|
|
@ -307,9 +307,9 @@ std::shared_ptr<Mesh> LoadMesh(Stream& stream, const MeshParams& parameters)
|
|||
// Index buffer
|
||||
bool largeIndices = (vertexCount > std::numeric_limits<UInt16>::max());
|
||||
|
||||
std::shared_ptr<IndexBuffer> indexBuffer = std::make_shared<IndexBuffer>(largeIndices, indexCount, parameters.storage, parameters.indexBufferFlags);
|
||||
std::shared_ptr<IndexBuffer> indexBuffer = std::make_shared<IndexBuffer>(largeIndices, indexCount, parameters.indexBufferFlags, parameters.bufferFactory);
|
||||
|
||||
IndexMapper indexMapper(*indexBuffer, BufferAccess::DiscardAndWrite);
|
||||
IndexMapper indexMapper(*indexBuffer);
|
||||
IndexIterator index = indexMapper.begin();
|
||||
|
||||
for (unsigned int faceIdx = 0; faceIdx < iMesh->mNumFaces; ++faceIdx)
|
||||
|
|
@ -329,8 +329,8 @@ std::shared_ptr<Mesh> LoadMesh(Stream& stream, const MeshParams& parameters)
|
|||
if (normalTangentMatrix.HasScale())
|
||||
normalTangentMatrix.ApplyScale(1.f / normalTangentMatrix.GetScale());
|
||||
|
||||
std::shared_ptr<VertexBuffer> vertexBuffer = std::make_shared<VertexBuffer>(VertexDeclaration::Get(VertexLayout::XYZ_Normal_UV_Tangent_Skinning), vertexCount, parameters.storage, parameters.vertexBufferFlags);
|
||||
BufferMapper<VertexBuffer> vertexMapper(*vertexBuffer, BufferAccess::ReadWrite);
|
||||
std::shared_ptr<VertexBuffer> vertexBuffer = std::make_shared<VertexBuffer>(VertexDeclaration::Get(VertexLayout::XYZ_Normal_UV_Tangent_Skinning), vertexCount, parameters.vertexBufferFlags, parameters.bufferFactory);
|
||||
BufferMapper<VertexBuffer> vertexMapper(*vertexBuffer, 0, vertexBuffer->GetVertexCount());
|
||||
SkeletalMeshVertex* vertices = static_cast<SkeletalMeshVertex*>(vertexMapper.GetPointer());
|
||||
|
||||
for (std::size_t vertexIdx = 0; vertexIdx < vertexCount; ++vertexIdx)
|
||||
|
|
@ -465,9 +465,9 @@ std::shared_ptr<Mesh> LoadMesh(Stream& stream, const MeshParams& parameters)
|
|||
// Index buffer
|
||||
bool largeIndices = (vertexCount > std::numeric_limits<UInt16>::max());
|
||||
|
||||
std::shared_ptr<IndexBuffer> indexBuffer = std::make_shared<IndexBuffer>(largeIndices, indexCount, parameters.storage, parameters.indexBufferFlags);
|
||||
std::shared_ptr<IndexBuffer> indexBuffer = std::make_shared<IndexBuffer>(largeIndices, indexCount, parameters.indexBufferFlags, parameters.bufferFactory);
|
||||
|
||||
IndexMapper indexMapper(*indexBuffer, BufferAccess::DiscardAndWrite);
|
||||
IndexMapper indexMapper(*indexBuffer);
|
||||
IndexIterator index = indexMapper.begin();
|
||||
|
||||
for (unsigned int faceIdx = 0; faceIdx < iMesh->mNumFaces; ++faceIdx)
|
||||
|
|
@ -489,9 +489,9 @@ std::shared_ptr<Mesh> LoadMesh(Stream& stream, const MeshParams& parameters)
|
|||
if (normalTangentMatrix.HasScale())
|
||||
normalTangentMatrix.ApplyScale(1.f / normalTangentMatrix.GetScale());
|
||||
|
||||
std::shared_ptr<VertexBuffer> vertexBuffer = std::make_shared<VertexBuffer>(parameters.vertexDeclaration, vertexCount, parameters.storage, parameters.vertexBufferFlags);
|
||||
std::shared_ptr<VertexBuffer> vertexBuffer = std::make_shared<VertexBuffer>(parameters.vertexDeclaration, vertexCount, parameters.vertexBufferFlags, parameters.bufferFactory);
|
||||
|
||||
VertexMapper vertexMapper(*vertexBuffer, BufferAccess::DiscardAndWrite);
|
||||
VertexMapper vertexMapper(*vertexBuffer);
|
||||
|
||||
// Vertex positions
|
||||
if (auto posPtr = vertexMapper.GetComponentPtr<Vector3f>(VertexComponent::Position))
|
||||
|
|
|
|||
|
|
@ -28,25 +28,19 @@ namespace Nz
|
|||
const std::shared_ptr<VertexBuffer>& vertexBuffer = staticMesh.GetVertexBuffer();
|
||||
|
||||
assert(indexBuffer->GetBuffer()->GetStorage() == DataStorage::Software);
|
||||
const SoftwareBuffer* indexBufferContent = static_cast<const SoftwareBuffer*>(indexBuffer->GetBuffer()->GetImpl());
|
||||
const SoftwareBuffer* indexBufferContent = static_cast<const SoftwareBuffer*>(indexBuffer->GetBuffer().get());
|
||||
|
||||
assert(vertexBuffer->GetBuffer()->GetStorage() == DataStorage::Software);
|
||||
const SoftwareBuffer* vertexBufferContent = static_cast<const SoftwareBuffer*>(vertexBuffer->GetBuffer()->GetImpl());
|
||||
const SoftwareBuffer* vertexBufferContent = static_cast<const SoftwareBuffer*>(vertexBuffer->GetBuffer().get());
|
||||
|
||||
auto& submeshData = m_subMeshes.emplace_back();
|
||||
submeshData.indexBuffer = renderDevice->InstantiateBuffer(BufferType::Index);
|
||||
if (!submeshData.indexBuffer->Initialize(indexBuffer->GetStride() * indexBuffer->GetIndexCount(), BufferUsage::DeviceLocal))
|
||||
throw std::runtime_error("failed to create index buffer");
|
||||
|
||||
submeshData.indexBuffer = renderDevice->InstantiateBuffer(BufferType::Index, indexBuffer->GetStride() * indexBuffer->GetIndexCount(), BufferUsage::DeviceLocal | BufferUsage::Write);
|
||||
if (!submeshData.indexBuffer->Fill(indexBufferContent->GetData() + indexBuffer->GetStartOffset(), 0, indexBuffer->GetEndOffset() - indexBuffer->GetStartOffset()))
|
||||
throw std::runtime_error("failed to fill index buffer");
|
||||
|
||||
submeshData.indexCount = indexBuffer->GetIndexCount();
|
||||
|
||||
submeshData.vertexBuffer = renderDevice->InstantiateBuffer(BufferType::Vertex);
|
||||
if (!submeshData.vertexBuffer->Initialize(vertexBuffer->GetStride() * vertexBuffer->GetVertexCount(), BufferUsage::DeviceLocal))
|
||||
throw std::runtime_error("failed to create vertex buffer");
|
||||
|
||||
submeshData.vertexBuffer = renderDevice->InstantiateBuffer(BufferType::Vertex, vertexBuffer->GetStride() * vertexBuffer->GetVertexCount(), BufferUsage::DeviceLocal | BufferUsage::Write);
|
||||
if (!submeshData.vertexBuffer->Fill(vertexBufferContent->GetData() + vertexBuffer->GetStartOffset(), 0, vertexBuffer->GetEndOffset() - vertexBuffer->GetStartOffset()))
|
||||
throw std::runtime_error("failed to fill vertex buffer");
|
||||
|
||||
|
|
|
|||
|
|
@ -227,12 +227,7 @@ namespace Nz
|
|||
}
|
||||
};
|
||||
|
||||
m_fullscreenVertexBuffer = m_renderDevice->InstantiateBuffer(BufferType::Vertex);
|
||||
if (!m_fullscreenVertexBuffer->Initialize(m_fullscreenVertexDeclaration->GetStride() * vertexData.size(), BufferUsage::DeviceLocal))
|
||||
throw std::runtime_error("failed to initialize fullscreen vertex buffer");
|
||||
|
||||
if (!m_fullscreenVertexBuffer->Fill(vertexData.data(), 0, m_fullscreenVertexDeclaration->GetStride() * vertexData.size()))
|
||||
throw std::runtime_error("failed to fill fullscreen vertex buffer");
|
||||
m_fullscreenVertexBuffer = m_renderDevice->InstantiateBuffer(BufferType::Vertex, m_fullscreenVertexDeclaration->GetStride() * vertexData.size(), BufferUsage::DeviceLocal | BufferUsage::Write, vertexData.data());
|
||||
}
|
||||
|
||||
void Graphics::RegisterMaterialPasses()
|
||||
|
|
|
|||
|
|
@ -50,9 +50,7 @@ namespace Nz
|
|||
{
|
||||
auto& uniformBuffer = m_uniformBuffers.emplace_back();
|
||||
|
||||
uniformBuffer.buffer = Graphics::Instance()->GetRenderDevice()->InstantiateBuffer(BufferType::Uniform);
|
||||
if (!uniformBuffer.buffer->Initialize(uniformBufferInfo.blockSize, BufferUsage::Dynamic))
|
||||
throw std::runtime_error("failed to initialize UBO memory");
|
||||
uniformBuffer.buffer = Graphics::Instance()->GetRenderDevice()->InstantiateBuffer(BufferType::Uniform, uniformBufferInfo.blockSize, BufferUsage::Dynamic | BufferUsage::Write);
|
||||
|
||||
assert(uniformBufferInfo.defaultValues.size() <= uniformBufferInfo.blockSize);
|
||||
|
||||
|
|
|
|||
|
|
@ -50,7 +50,7 @@ namespace Nz
|
|||
}
|
||||
}
|
||||
|
||||
const std::shared_ptr<AbstractBuffer>& Model::GetIndexBuffer(std::size_t subMeshIndex) const
|
||||
const std::shared_ptr<RenderBuffer>& Model::GetIndexBuffer(std::size_t subMeshIndex) const
|
||||
{
|
||||
return m_graphicalMesh->GetIndexBuffer(subMeshIndex);
|
||||
}
|
||||
|
|
@ -79,7 +79,7 @@ namespace Nz
|
|||
return subMeshData.vertexBufferData;
|
||||
}
|
||||
|
||||
const std::shared_ptr<AbstractBuffer>& Model::GetVertexBuffer(std::size_t subMeshIndex) const
|
||||
const std::shared_ptr<RenderBuffer>& Model::GetVertexBuffer(std::size_t subMeshIndex) const
|
||||
{
|
||||
return m_graphicalMesh->GetVertexBuffer(subMeshIndex);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -24,10 +24,6 @@ namespace Nz
|
|||
std::size_t maxQuadCount = m_maxVertexCount / 4;
|
||||
std::size_t indexCount = 6 * maxQuadCount;
|
||||
|
||||
m_indexBuffer = m_device.InstantiateBuffer(BufferType::Index);
|
||||
if (!m_indexBuffer->Initialize(indexCount * sizeof(UInt16), BufferUsage::DeviceLocal))
|
||||
throw std::runtime_error("failed to initialize index buffer");
|
||||
|
||||
// Generate indices for quad (0, 1, 2, 2, 1, 3, ...)
|
||||
std::vector<UInt16> indices(indexCount);
|
||||
UInt16* indexPtr = indices.data();
|
||||
|
|
@ -45,7 +41,7 @@ namespace Nz
|
|||
*indexPtr++ = index * 4 + 3;
|
||||
}
|
||||
|
||||
m_indexBuffer->Fill(indices.data(), 0, indexCount * sizeof(UInt16));
|
||||
m_indexBuffer = m_device.InstantiateBuffer(BufferType::Index, indexCount * sizeof(UInt16), BufferUsage::DeviceLocal | BufferUsage::Write, indices.data());
|
||||
}
|
||||
|
||||
std::unique_ptr<ElementRendererData> SpriteChainRenderer::InstanciateData()
|
||||
|
|
@ -66,7 +62,7 @@ namespace Nz
|
|||
UploadPool::Allocation* currentAllocation = nullptr;
|
||||
UInt8* currentAllocationMemPtr = nullptr;
|
||||
const VertexDeclaration* currentVertexDeclaration = nullptr;
|
||||
AbstractBuffer* currentVertexBuffer = nullptr;
|
||||
RenderBuffer* currentVertexBuffer = nullptr;
|
||||
const MaterialPass* currentMaterialPass = nullptr;
|
||||
const RenderPipeline* currentPipeline = nullptr;
|
||||
const ShaderBinding* currentShaderBinding = nullptr;
|
||||
|
|
@ -170,7 +166,7 @@ namespace Nz
|
|||
currentAllocation = &currentFrame.GetUploadPool().Allocate(m_maxVertexBufferSize);
|
||||
currentAllocationMemPtr = static_cast<UInt8*>(currentAllocation->mappedPtr);
|
||||
|
||||
std::shared_ptr<AbstractBuffer> vertexBuffer;
|
||||
std::shared_ptr<RenderBuffer> vertexBuffer;
|
||||
|
||||
// Try to reuse vertex buffers from pool if any
|
||||
if (!m_vertexBufferPool->vertexBuffers.empty())
|
||||
|
|
@ -179,10 +175,7 @@ namespace Nz
|
|||
m_vertexBufferPool->vertexBuffers.pop_back();
|
||||
}
|
||||
else
|
||||
{
|
||||
vertexBuffer = m_device.InstantiateBuffer(BufferType::Vertex);
|
||||
vertexBuffer->Initialize(m_maxVertexBufferSize, BufferUsage::DeviceLocal | BufferUsage::Dynamic);
|
||||
}
|
||||
vertexBuffer = m_device.InstantiateBuffer(BufferType::Vertex, m_maxVertexBufferSize, BufferUsage::DeviceLocal | BufferUsage::Dynamic | BufferUsage::Write);
|
||||
|
||||
currentVertexBuffer = vertexBuffer.get();
|
||||
|
||||
|
|
@ -308,7 +301,7 @@ namespace Nz
|
|||
Vector2f targetSize = viewerInstance.GetTargetSize();
|
||||
Recti fullscreenScissorBox(0, 0, SafeCast<int>(std::floor(targetSize.x)), SafeCast<int>(std::floor(targetSize.y)));
|
||||
|
||||
const AbstractBuffer* currentVertexBuffer = nullptr;
|
||||
const RenderBuffer* currentVertexBuffer = nullptr;
|
||||
const RenderPipeline* currentPipeline = nullptr;
|
||||
const ShaderBinding* currentShaderBinding = nullptr;
|
||||
Recti currentScissorBox(-1, -1, -1, -1);
|
||||
|
|
|
|||
|
|
@ -29,8 +29,8 @@ namespace Nz
|
|||
|
||||
Recti invalidScissorBox(-1, -1, -1, -1);
|
||||
|
||||
const AbstractBuffer* currentIndexBuffer = nullptr;
|
||||
const AbstractBuffer* currentVertexBuffer = nullptr;
|
||||
const RenderBuffer* currentIndexBuffer = nullptr;
|
||||
const RenderBuffer* currentVertexBuffer = nullptr;
|
||||
const MaterialPass* currentMaterialPass = nullptr;
|
||||
const RenderPipeline* currentPipeline = nullptr;
|
||||
const ShaderBinding* currentShaderBinding = nullptr;
|
||||
|
|
@ -69,13 +69,13 @@ namespace Nz
|
|||
currentMaterialPass = materialPass;
|
||||
}
|
||||
|
||||
if (const AbstractBuffer* indexBuffer = submesh.GetIndexBuffer(); currentIndexBuffer != indexBuffer)
|
||||
if (const RenderBuffer* indexBuffer = submesh.GetIndexBuffer(); currentIndexBuffer != indexBuffer)
|
||||
{
|
||||
FlushDrawCall();
|
||||
currentIndexBuffer = indexBuffer;
|
||||
}
|
||||
|
||||
if (const AbstractBuffer* vertexBuffer = submesh.GetVertexBuffer(); currentVertexBuffer != vertexBuffer)
|
||||
if (const RenderBuffer* vertexBuffer = submesh.GetVertexBuffer(); currentVertexBuffer != vertexBuffer)
|
||||
{
|
||||
FlushDrawCall();
|
||||
currentVertexBuffer = vertexBuffer;
|
||||
|
|
@ -164,8 +164,8 @@ namespace Nz
|
|||
Vector2f targetSize = viewerInstance.GetTargetSize();
|
||||
Recti fullscreenScissorBox(0, 0, SafeCast<int>(std::floor(targetSize.x)), SafeCast<int>(std::floor(targetSize.y)));
|
||||
|
||||
const AbstractBuffer* currentIndexBuffer = nullptr;
|
||||
const AbstractBuffer* currentVertexBuffer = nullptr;
|
||||
const RenderBuffer* currentIndexBuffer = nullptr;
|
||||
const RenderBuffer* currentVertexBuffer = nullptr;
|
||||
const RenderPipeline* currentPipeline = nullptr;
|
||||
const ShaderBinding* currentShaderBinding = nullptr;
|
||||
Recti currentScissorBox(-1, -1, -1, -1);
|
||||
|
|
|
|||
|
|
@ -25,9 +25,7 @@ namespace Nz
|
|||
{
|
||||
PredefinedViewerData viewerUboOffsets = PredefinedViewerData::GetOffsets();
|
||||
|
||||
m_viewerDataBuffer = Graphics::Instance()->GetRenderDevice()->InstantiateBuffer(BufferType::Uniform);
|
||||
if (!m_viewerDataBuffer->Initialize(viewerUboOffsets.totalSize, BufferUsage::DeviceLocal | BufferUsage::Dynamic))
|
||||
throw std::runtime_error("failed to initialize viewer data UBO");
|
||||
m_viewerDataBuffer = Graphics::Instance()->GetRenderDevice()->InstantiateBuffer(BufferType::Uniform, viewerUboOffsets.totalSize, BufferUsage::DeviceLocal | BufferUsage::Dynamic | BufferUsage::Write);
|
||||
}
|
||||
|
||||
void ViewerInstance::UpdateBuffers(UploadPool& uploadPool, CommandBufferBuilder& builder)
|
||||
|
|
|
|||
|
|
@ -20,9 +20,7 @@ namespace Nz
|
|||
{
|
||||
PredefinedInstanceData instanceUboOffsets = PredefinedInstanceData::GetOffsets();
|
||||
|
||||
m_instanceDataBuffer = Graphics::Instance()->GetRenderDevice()->InstantiateBuffer(BufferType::Uniform);
|
||||
if (!m_instanceDataBuffer->Initialize(instanceUboOffsets.totalSize, BufferUsage::DeviceLocal | BufferUsage::Dynamic))
|
||||
throw std::runtime_error("failed to initialize viewer data UBO");
|
||||
m_instanceDataBuffer = Graphics::Instance()->GetRenderDevice()->InstantiateBuffer(BufferType::Uniform, instanceUboOffsets.totalSize, BufferUsage::DeviceLocal | BufferUsage::Dynamic | BufferUsage::Write);
|
||||
}
|
||||
|
||||
void WorldInstance::UpdateBuffers(UploadPool& uploadPool, CommandBufferBuilder& builder)
|
||||
|
|
|
|||
|
|
@ -9,34 +9,21 @@
|
|||
|
||||
namespace Nz
|
||||
{
|
||||
|
||||
OpenGLBuffer::OpenGLBuffer(OpenGLDevice& device, BufferType type) :
|
||||
m_type(type)
|
||||
OpenGLBuffer::OpenGLBuffer(OpenGLDevice& device, BufferType type, UInt64 size, BufferUsageFlags usage, const void* initialData) :
|
||||
RenderBuffer(device, type, size, usage)
|
||||
{
|
||||
if (!m_buffer.Create(device))
|
||||
throw std::runtime_error("failed to create buffer"); //< TODO: Handle error
|
||||
}
|
||||
|
||||
bool OpenGLBuffer::Fill(const void* data, UInt64 offset, UInt64 size)
|
||||
{
|
||||
m_buffer.SubData(GLintptr(offset), GLsizeiptr(size), data);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool OpenGLBuffer::Initialize(UInt64 size, BufferUsageFlags usage)
|
||||
{
|
||||
m_size = size;
|
||||
m_usage = usage;
|
||||
throw std::runtime_error("failed to create buffer"); //< TODO: Handle OpenGL error
|
||||
|
||||
GL::BufferTarget target;
|
||||
switch (m_type)
|
||||
switch (type)
|
||||
{
|
||||
case BufferType::Index: target = GL::BufferTarget::ElementArray; break;
|
||||
case BufferType::Uniform: target = GL::BufferTarget::Uniform; break;
|
||||
case BufferType::Vertex: target = GL::BufferTarget::Array; break;
|
||||
|
||||
default:
|
||||
throw std::runtime_error("unknown buffer type 0x" + NumberToString(UnderlyingCast(m_type), 16));
|
||||
throw std::runtime_error("unknown buffer type 0x" + NumberToString(UnderlyingCast(type), 16));
|
||||
}
|
||||
|
||||
GLenum hint = GL_STREAM_COPY;
|
||||
|
|
@ -49,49 +36,23 @@ namespace Nz
|
|||
if (usage & BufferUsage::DirectMapping)
|
||||
hint = GL_DYNAMIC_COPY;
|
||||
|
||||
m_buffer.Reset(target, size, nullptr, hint);
|
||||
m_buffer.Reset(target, size, initialData, hint);
|
||||
}
|
||||
|
||||
bool OpenGLBuffer::Fill(const void* data, UInt64 offset, UInt64 size)
|
||||
{
|
||||
m_buffer.SubData(GLintptr(offset), GLsizeiptr(size), data);
|
||||
return true;
|
||||
}
|
||||
|
||||
UInt64 OpenGLBuffer::GetSize() const
|
||||
{
|
||||
return m_size;
|
||||
}
|
||||
|
||||
DataStorage OpenGLBuffer::GetStorage() const
|
||||
{
|
||||
return DataStorage::Hardware;
|
||||
}
|
||||
|
||||
void* OpenGLBuffer::Map(BufferAccess access, UInt64 offset, UInt64 size)
|
||||
void* OpenGLBuffer::Map(UInt64 offset, UInt64 size)
|
||||
{
|
||||
GLbitfield accessBit = 0;
|
||||
switch (access)
|
||||
{
|
||||
case BufferAccess::DiscardAndWrite:
|
||||
accessBit |= GL_MAP_WRITE_BIT;
|
||||
if (offset == 0 && size == m_size)
|
||||
accessBit |= GL_MAP_INVALIDATE_BUFFER_BIT;
|
||||
else
|
||||
accessBit |= GL_MAP_INVALIDATE_RANGE_BIT;
|
||||
|
||||
break;
|
||||
|
||||
case BufferAccess::ReadOnly:
|
||||
if (GetUsageFlags() & BufferUsage::Read)
|
||||
accessBit |= GL_MAP_READ_BIT;
|
||||
break;
|
||||
|
||||
case BufferAccess::ReadWrite:
|
||||
accessBit |= GL_MAP_READ_BIT | GL_MAP_WRITE_BIT;
|
||||
break;
|
||||
|
||||
case BufferAccess::WriteOnly:
|
||||
if (GetUsageFlags() & BufferUsage::Write)
|
||||
accessBit |= GL_MAP_WRITE_BIT;
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
return m_buffer.MapRange(offset, size, accessBit);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -25,7 +25,7 @@ namespace Nz
|
|||
m_commandBuffer.SetFramebuffer(static_cast<const OpenGLFramebuffer&>(framebuffer), static_cast<const OpenGLRenderPass&>(renderPass), clearValues, clearValueCount);
|
||||
}
|
||||
|
||||
void OpenGLCommandBufferBuilder::BindIndexBuffer(const AbstractBuffer& indexBuffer, UInt64 offset)
|
||||
void OpenGLCommandBufferBuilder::BindIndexBuffer(const RenderBuffer& indexBuffer, UInt64 offset)
|
||||
{
|
||||
const OpenGLBuffer& glBuffer = static_cast<const OpenGLBuffer&>(indexBuffer);
|
||||
|
||||
|
|
@ -54,7 +54,7 @@ namespace Nz
|
|||
m_commandBuffer.BindShaderBinding(glPipelineLayout, set, &glBinding);
|
||||
}
|
||||
|
||||
void OpenGLCommandBufferBuilder::BindVertexBuffer(UInt32 binding, const AbstractBuffer& vertexBuffer, UInt64 offset)
|
||||
void OpenGLCommandBufferBuilder::BindVertexBuffer(UInt32 binding, const RenderBuffer& vertexBuffer, UInt64 offset)
|
||||
{
|
||||
const OpenGLBuffer& glBuffer = static_cast<const OpenGLBuffer&>(vertexBuffer);
|
||||
|
||||
|
|
|
|||
|
|
@ -79,7 +79,7 @@ namespace Nz
|
|||
|
||||
OpenGLDevice::~OpenGLDevice()
|
||||
{
|
||||
// Free context first as it will unregister itself from m_contexts
|
||||
// Free reference context first as it will unregister itself from m_contexts
|
||||
m_referenceContext.reset();
|
||||
}
|
||||
|
||||
|
|
@ -114,9 +114,9 @@ namespace Nz
|
|||
return m_deviceInfo.features;
|
||||
}
|
||||
|
||||
std::shared_ptr<AbstractBuffer> OpenGLDevice::InstantiateBuffer(BufferType type)
|
||||
std::shared_ptr<RenderBuffer> OpenGLDevice::InstantiateBuffer(BufferType type, UInt64 size, BufferUsageFlags usageFlags, const void* initialData)
|
||||
{
|
||||
return std::make_shared<OpenGLBuffer>(*this, type);
|
||||
return std::make_shared<OpenGLBuffer>(*this, type, size, usageFlags, initialData);
|
||||
}
|
||||
|
||||
std::shared_ptr<CommandPool> OpenGLDevice::InstantiateCommandPool(QueueType /*queueType*/)
|
||||
|
|
|
|||
|
|
@ -6,6 +6,7 @@
|
|||
#include <Nazara/Core/PrimitiveList.hpp>
|
||||
#include <Nazara/Physics3D/PhysWorld3D.hpp>
|
||||
#include <Nazara/Utility/IndexBuffer.hpp>
|
||||
#include <Nazara/Utility/SoftwareBuffer.hpp>
|
||||
#include <Nazara/Utility/StaticMesh.hpp>
|
||||
#include <Nazara/Utility/VertexBuffer.hpp>
|
||||
#include <newton/Newton.h>
|
||||
|
|
@ -168,11 +169,8 @@ namespace Nz
|
|||
}
|
||||
});
|
||||
|
||||
std::shared_ptr<VertexBuffer> colliderVB = std::make_shared<VertexBuffer>(VertexDeclaration::Get(VertexLayout::XYZ), colliderVertices.size(), DataStorage::Software, 0);
|
||||
colliderVB->Fill(colliderVertices.data(), 0, colliderVertices.size());
|
||||
|
||||
std::shared_ptr<IndexBuffer> colliderIB = std::make_shared<IndexBuffer>(false, colliderIndices.size(), DataStorage::Software, 0);
|
||||
colliderIB->Fill(colliderIndices.data(), 0, colliderIndices.size());
|
||||
std::shared_ptr<VertexBuffer> colliderVB = std::make_shared<VertexBuffer>(VertexDeclaration::Get(VertexLayout::XYZ), colliderVertices.size(), BufferUsage::Write, SoftwareBufferFactory, colliderVertices.data());
|
||||
std::shared_ptr<IndexBuffer> colliderIB = std::make_shared<IndexBuffer>(false, colliderIndices.size(), BufferUsage::Write, SoftwareBufferFactory, colliderIndices.data());
|
||||
|
||||
std::shared_ptr<StaticMesh> colliderSubMesh = std::make_shared<StaticMesh>(std::move(colliderVB), std::move(colliderIB));
|
||||
colliderSubMesh->GenerateAABB();
|
||||
|
|
|
|||
|
|
@ -3,100 +3,18 @@
|
|||
// For conditions of distribution and use, see copyright notice in Config.hpp
|
||||
|
||||
#include <Nazara/Renderer/RenderBuffer.hpp>
|
||||
#include <Nazara/Core/Error.hpp>
|
||||
#include <Nazara/Renderer/RenderDevice.hpp>
|
||||
#include <Nazara/Renderer/Debug.hpp>
|
||||
|
||||
namespace Nz
|
||||
{
|
||||
bool RenderBuffer::Fill(const void* data, UInt64 offset, UInt64 size)
|
||||
RenderBuffer::~RenderBuffer() = default;
|
||||
|
||||
BufferFactory GetRenderBufferFactory(std::shared_ptr<RenderDevice> device)
|
||||
{
|
||||
if (m_softwareBuffer.Fill(data, offset, size))
|
||||
return [device = std::move(device)](BufferType type, UInt64 size, BufferUsageFlags usage, const void* initialData) -> std::shared_ptr<Buffer>
|
||||
{
|
||||
for (auto& bufferPair : m_hardwareBuffers)
|
||||
bufferPair.second.synchronized = false;
|
||||
|
||||
return true;
|
||||
return device->InstantiateBuffer(type, size, usage, initialData);
|
||||
};
|
||||
}
|
||||
else
|
||||
return false;
|
||||
}
|
||||
|
||||
bool RenderBuffer::Initialize(UInt64 size, BufferUsageFlags usage)
|
||||
{
|
||||
m_size = size;
|
||||
m_softwareBuffer.Initialize(size, usage);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
AbstractBuffer* RenderBuffer::GetHardwareBuffer(RenderDevice* device)
|
||||
{
|
||||
if (HardwareBuffer* hwBuffer = GetHardwareBufferData(device))
|
||||
return hwBuffer->buffer.get();
|
||||
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
UInt64 RenderBuffer::GetSize() const
|
||||
{
|
||||
return m_size;
|
||||
}
|
||||
|
||||
DataStorage RenderBuffer::GetStorage() const
|
||||
{
|
||||
return DataStorage::Hardware;
|
||||
}
|
||||
|
||||
void* RenderBuffer::Map(BufferAccess access, UInt64 offset, UInt64 size)
|
||||
{
|
||||
if (void* ptr = m_softwareBuffer.Map(access, offset, size))
|
||||
{
|
||||
if (access != BufferAccess::ReadOnly)
|
||||
{
|
||||
for (auto& bufferPair : m_hardwareBuffers)
|
||||
bufferPair.second.synchronized = false;
|
||||
}
|
||||
|
||||
return ptr;
|
||||
}
|
||||
else
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
bool RenderBuffer::Unmap()
|
||||
{
|
||||
return m_softwareBuffer.Unmap();
|
||||
}
|
||||
|
||||
bool RenderBuffer::Synchronize(RenderDevice* device)
|
||||
{
|
||||
HardwareBuffer* hwBuffer = GetHardwareBufferData(device);
|
||||
if (!hwBuffer)
|
||||
return false;
|
||||
|
||||
if (hwBuffer->synchronized)
|
||||
return true;
|
||||
|
||||
return hwBuffer->buffer->Fill(m_softwareBuffer.GetData(), 0, m_size);
|
||||
}
|
||||
|
||||
auto RenderBuffer::GetHardwareBufferData(RenderDevice* device) -> HardwareBuffer*
|
||||
{
|
||||
auto it = m_hardwareBuffers.find(device);
|
||||
if (it == m_hardwareBuffers.end())
|
||||
{
|
||||
HardwareBuffer hwBuffer;
|
||||
hwBuffer.buffer = device->InstantiateBuffer(m_type);
|
||||
if (!hwBuffer.buffer->Initialize(m_size, m_usage))
|
||||
{
|
||||
NazaraError("Failed to initialize hardware buffer");
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
it = m_hardwareBuffers.emplace(device, std::move(hwBuffer)).first;
|
||||
}
|
||||
|
||||
return &it->second;
|
||||
}
|
||||
|
||||
}
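GetRenderBufferFactory is the bridge between the Utility-level Buffer abstraction and a RenderDevice: it wraps the device in a BufferFactory whose invocations forward to InstantiateBuffer. A sketch of calling the returned factory directly; the renderDevice variable and sizes are assumed:

Nz::BufferFactory gpuFactory = Nz::GetRenderBufferFactory(renderDevice);

// Invoking the factory yields a device-side buffer (a RenderBuffer underneath)
std::shared_ptr<Nz::Buffer> gpuStorage = gpuFactory(
    Nz::BufferType::Vertex, 4096,
    Nz::BufferUsage::DeviceLocal | Nz::BufferUsage::Write,
    nullptr); // no initial data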
|
||||
|
|
|
|||
|
|
@ -9,7 +9,6 @@
|
|||
#include <Nazara/Core/StringExt.hpp>
|
||||
#include <Nazara/Platform/Platform.hpp>
|
||||
#include <Nazara/Renderer/RenderBuffer.hpp>
|
||||
#include <Nazara/Utility/AbstractBuffer.hpp>
|
||||
#include <Nazara/Utility/Buffer.hpp>
|
||||
#include <Nazara/Utility/Image.hpp>
|
||||
#include <Nazara/Utility/Utility.hpp>
|
||||
|
|
@ -35,15 +34,11 @@ namespace Nz
|
|||
ModuleBase("Renderer", this)
|
||||
{
|
||||
LoadBackend(config);
|
||||
|
||||
Buffer::SetBufferFactory(DataStorage::Hardware, [](Buffer* parent, BufferType type) -> std::unique_ptr<AbstractBuffer> { return std::make_unique<RenderBuffer>(parent, type); });
|
||||
}
|
||||
|
||||
Renderer::~Renderer()
|
||||
{
|
||||
// Uninitialize module here
|
||||
Buffer::SetBufferFactory(DataStorage::Hardware, nullptr);
|
||||
|
||||
// reset Renderer impl before unloading library
|
||||
m_rendererImpl.reset();
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1,11 +0,0 @@
|
|||
// Copyright (C) 2022 Jérôme "Lynix" Leclercq (lynix680@gmail.com)
|
||||
// This file is part of the "Nazara Engine - Utility module"
|
||||
// For conditions of distribution and use, see copyright notice in Config.hpp
|
||||
|
||||
#include <Nazara/Utility/AbstractBuffer.hpp>
|
||||
#include <Nazara/Utility/Debug.hpp>
|
||||
|
||||
namespace Nz
|
||||
{
|
||||
AbstractBuffer::~AbstractBuffer() = default;
|
||||
}
|
||||
|
|
@ -76,32 +76,32 @@ namespace Nz
|
|||
triangles.reserve(20 * IntegralPow(4, recursionLevel));
|
||||
|
||||
// Five triangles around the first point
|
||||
triangles.push_back({0, 11, 5});
|
||||
triangles.push_back({0, 5, 1});
|
||||
triangles.push_back({0, 1, 7});
|
||||
triangles.push_back({0, 7, 10});
|
||||
triangles.push_back({0, 10, 11});
|
||||
triangles.emplace_back(0, 11, 5);
|
||||
triangles.emplace_back(0, 5, 1);
|
||||
triangles.emplace_back(0, 1, 7);
|
||||
triangles.emplace_back(0, 7, 10);
|
||||
triangles.emplace_back(0, 10, 11);
|
||||
|
||||
// Five adjacent faces
|
||||
triangles.push_back({ 1, 5, 9});
|
||||
triangles.push_back({ 5, 11, 4});
|
||||
triangles.push_back({11, 10, 2});
|
||||
triangles.push_back({10, 7, 6});
|
||||
triangles.push_back({ 7, 1, 8});
|
||||
triangles.emplace_back(1, 5, 9);
|
||||
triangles.emplace_back(5, 11, 4);
|
||||
triangles.emplace_back(11, 10, 2);
|
||||
triangles.emplace_back(10, 7, 6);
|
||||
triangles.emplace_back(7, 1, 8);
|
||||
|
||||
// Five triangles around the third point
|
||||
triangles.push_back({3, 9, 4});
|
||||
triangles.push_back({3, 4, 2});
|
||||
triangles.push_back({3, 2, 6});
|
||||
triangles.push_back({3, 6, 8});
|
||||
triangles.push_back({3, 8, 9});
|
||||
triangles.emplace_back(3, 9, 4);
|
||||
triangles.emplace_back(3, 4, 2);
|
||||
triangles.emplace_back(3, 2, 6);
|
||||
triangles.emplace_back(3, 6, 8);
|
||||
triangles.emplace_back(3, 8, 9);
|
||||
|
||||
// Five adjacent faces
|
||||
triangles.push_back({4, 9, 5});
|
||||
triangles.push_back({2, 4, 11});
|
||||
triangles.push_back({6, 2, 10});
|
||||
triangles.push_back({8, 6, 7});
|
||||
triangles.push_back({9, 8, 1});
|
||||
triangles.emplace_back(4, 9, 5);
|
||||
triangles.emplace_back(2, 4, 11);
|
||||
triangles.emplace_back(6, 2, 10);
|
||||
triangles.emplace_back(8, 6, 7);
|
||||
triangles.emplace_back(9, 8, 1);
|
||||
|
||||
// And now refine the sphere
|
||||
for (unsigned int i = 0; i < recursionLevel; ++i)
|
||||
|
|
@ -115,11 +115,11 @@ namespace Nz
|
|||
unsigned int b = GetMiddleVertex(triangle.y, triangle.z);
|
||||
unsigned int c = GetMiddleVertex(triangle.z, triangle.x);
|
||||
|
||||
triangles.push_back({triangle.x, a, c});
|
||||
triangles.push_back({triangle.y, b, a});
|
||||
triangles.push_back({triangle.z, c, b});
|
||||
triangles.emplace_back(triangle.x, a, c);
|
||||
triangles.emplace_back(triangle.y, b, a);
|
||||
triangles.emplace_back(triangle.z, c, b);
|
||||
|
||||
triangle.Set(a, b, c); // Réutilisation du triangle
|
||||
triangle.Set(a, b, c); // Reuse triangle
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -175,29 +175,28 @@ namespace Nz
|
|||
///TODO: Move this to a separate file?
|
||||
struct VertexCacheData
|
||||
{
|
||||
int position_in_cache = -1;
|
||||
float current_score = 0.f;
|
||||
int total_valence = 0; // toatl number of triangles using this vertex
|
||||
int remaining_valence = 0; // number of triangles using it but not yet rendered
|
||||
std::vector<int> tri_indices; // indices to the indices that use this vertex
|
||||
int positionInCache = -1;
|
||||
float score = 0.f;
|
||||
int totalValence = 0; // total number of triangles using this vertex
|
||||
int remainingValence = 0; // number of triangles using it but not yet rendered
|
||||
std::vector<int> triIndices; // indices to the indices that use this vertex
|
||||
bool calculated; // was the score calculated during this iteration?
|
||||
|
||||
|
||||
int FindTriangle(int tri)
|
||||
{
|
||||
for (unsigned int i = 0; i < tri_indices.size(); ++i)
|
||||
if (tri_indices[i] == tri) return i;
|
||||
for (unsigned int i = 0; i < triIndices.size(); ++i)
|
||||
if (triIndices[i] == tri) return i;
|
||||
|
||||
return -1;
|
||||
}
|
||||
|
||||
void MoveTriangleToEnd(int tri)
|
||||
{
|
||||
auto it = std::find(tri_indices.begin(), tri_indices.end(), tri);
|
||||
NazaraAssert(it != tri_indices.end(), "Triangle not found");
|
||||
auto it = std::find(triIndices.begin(), triIndices.end(), tri);
|
||||
NazaraAssert(it != triIndices.end(), "Triangle not found");
|
||||
|
||||
tri_indices.erase(it);
|
||||
tri_indices.push_back(tri);
|
||||
triIndices.erase(it);
|
||||
triIndices.push_back(tri);
|
||||
}
|
||||
};
|
||||
|
||||
|
|
@ -217,18 +216,18 @@ namespace Nz
|
|||
Clear();
|
||||
}
|
||||
|
||||
VertexCache(IndexIterator indices, unsigned int indexCount)
|
||||
VertexCache(IndexIterator indices, UInt32 indexCount)
|
||||
{
|
||||
Clear();
|
||||
|
||||
for (unsigned int i = 0; i < indexCount; ++i)
|
||||
for (UInt32 i = 0; i < indexCount; ++i)
|
||||
AddVertex(*indices++);
|
||||
}
|
||||
|
||||
// the vertex will be placed on top
|
||||
// if the vertex didn't exist previewsly in
|
||||
// the cache, then miss count is incermented
|
||||
void AddVertex(unsigned int v)
|
||||
// if the vertex didn't exist previously in
|
||||
// the cache, then miss count is incremented
|
||||
void AddVertex(UInt32 v)
|
||||
{
|
||||
int w = FindVertex(v);
|
||||
if (w >= 0)
|
||||
|
|
@ -248,13 +247,11 @@ namespace Nz
|
|||
|
||||
void Clear()
|
||||
{
|
||||
for (int i=0; i<40; i++)
|
||||
m_cache[i] = -1;
|
||||
|
||||
m_cache.fill(-1);
|
||||
m_misses = 0;
|
||||
}
|
||||
|
||||
int GetMissCount() const
|
||||
UInt64 GetMissCount() const
|
||||
{
|
||||
return m_misses;
|
||||
}
|
||||
|
|
@ -276,14 +273,14 @@ namespace Nz
|
|||
return -1;
|
||||
}
|
||||
|
||||
void RemoveVertex(int stack_index)
|
||||
void RemoveVertex(int stackIndex)
|
||||
{
|
||||
for (int i=stack_index; i<38; i++)
|
||||
for (int i = stackIndex; i < 38; i++)
|
||||
m_cache[i] = m_cache[i+1];
|
||||
}
|
||||
|
||||
int m_cache[40];
|
||||
int m_misses; // cache miss count
|
||||
std::array<int, 40> m_cache;
|
||||
UInt64 m_misses; // cache miss count
|
||||
};
|
||||
|
||||
class VertexCacheOptimizer
|
||||
|
|
@ -337,18 +334,19 @@ namespace Nz
|
|||
private:
|
||||
float CalculateVertexScore(VertexCacheData& vertex) const
|
||||
{
|
||||
if (vertex.remaining_valence <= 0)
|
||||
if (vertex.remainingValence <= 0)
|
||||
// No tri needs this vertex!
|
||||
return -1.0f;
|
||||
|
||||
float ret = 0.0f;
|
||||
if (vertex.position_in_cache < 0)
|
||||
float ret;
|
||||
if (vertex.positionInCache < 0)
|
||||
{
|
||||
// Vertex is not in FIFO cache - no score.
|
||||
ret = 0.0f;
|
||||
}
|
||||
else
|
||||
{
|
||||
if (vertex.position_in_cache < 3)
|
||||
if (vertex.positionInCache < 3)
|
||||
{
|
||||
// This vertex was used in the last triangle,
|
||||
// so it has a fixed score, whichever of the three
|
||||
|
|
@ -361,14 +359,14 @@ namespace Nz
|
|||
{
|
||||
// Points for being high in the cache.
|
||||
const float Scaler = 1.0f / (32 - 3);
|
||||
ret = 1.0f - (vertex.position_in_cache - 3) * Scaler;
|
||||
ret = 1.0f - (vertex.positionInCache - 3) * Scaler;
|
||||
ret = std::pow(ret, m_cacheDecayPower);
|
||||
}
|
||||
}
|
||||
|
||||
// Bonus points for having a low number of tris still to
|
||||
// use the vert, so we get rid of lone verts quickly.
|
||||
float valence_boost = std::pow(static_cast<float>(vertex.remaining_valence), -m_valenceBoostPower);
|
||||
float valence_boost = std::pow(static_cast<float>(vertex.remainingValence), -m_valenceBoostPower);
|
||||
ret += m_valenceBoostScale * valence_boost;
|
||||
|
||||
return ret;
|
||||
|
|
@ -380,11 +378,11 @@ namespace Nz
|
|||
{
|
||||
// calculate score for all vertices
|
||||
for (VertexCacheData& vertex : m_vertices)
|
||||
vertex.current_score = CalculateVertexScore(vertex);
|
||||
vertex.score = CalculateVertexScore(vertex);
|
||||
|
||||
// calculate scores for all active triangles
|
||||
float max_score = std::numeric_limits<float>::lowest();
|
||||
int max_score_tri = -1;
|
||||
float maxScore = std::numeric_limits<float>::lowest();
|
||||
int maxScoreTri = -1;
|
||||
|
||||
for (unsigned int i = 0; i < m_triangles.size(); ++i)
|
||||
{
|
||||
|
|
@ -392,20 +390,20 @@ namespace Nz
|
|||
continue;
|
||||
|
||||
// sum the score of all the triangle's vertices
|
||||
float sc = m_vertices[m_triangles[i].verts[0]].current_score +
|
||||
m_vertices[m_triangles[i].verts[1]].current_score +
|
||||
m_vertices[m_triangles[i].verts[2]].current_score;
|
||||
float sc = m_vertices[m_triangles[i].verts[0]].score +
|
||||
m_vertices[m_triangles[i].verts[1]].score +
|
||||
m_vertices[m_triangles[i].verts[2]].score;
|
||||
|
||||
m_triangles[i].current_score = sc;
|
||||
|
||||
if (sc > max_score)
|
||||
if (sc > maxScore)
|
||||
{
|
||||
max_score = sc;
|
||||
max_score_tri = i;
|
||||
maxScore = sc;
|
||||
maxScoreTri = i;
|
||||
}
|
||||
}
|
||||
|
||||
return max_score_tri;
|
||||
return maxScoreTri;
|
||||
}
|
||||
|
||||
Result InitialPass()
|
||||
|
|
@ -416,10 +414,10 @@ namespace Nz
|
|||
if (index < 0 || index >= static_cast<int>(m_vertices.size()))
|
||||
return Fail_BadIndex;
|
||||
|
||||
m_vertices[index].total_valence++;
|
||||
m_vertices[index].remaining_valence++;
|
||||
m_vertices[index].totalValence++;
|
||||
m_vertices[index].remainingValence++;
|
||||
|
||||
m_vertices[index].tri_indices.push_back(i/3);
|
||||
m_vertices[index].triIndices.push_back(i/3);
|
||||
}
|
||||
|
||||
m_bestTri = FullScoreRecalculation();
|
||||
|
|
@ -427,14 +425,14 @@ namespace Nz
|
|||
return Success;
|
||||
}
|
||||
|
||||
Result Init(IndexIterator indices, unsigned int indexCount, int vertex_count)
|
||||
Result Init(IndexIterator indices, unsigned int indexCount, int vertexCount)
|
||||
{
|
||||
// clear the draw list
|
||||
m_drawList.clear();
|
||||
|
||||
// allocate and initialize vertices and triangles
|
||||
m_vertices.clear(); // Pour reconstruire tous les éléments
|
||||
m_vertices.resize(vertex_count);
|
||||
m_vertices.resize(vertexCount);
|
||||
|
||||
m_triangles.clear();
|
||||
for (unsigned int i = 0; i < indexCount; i += 3)
|
||||
|
|
@ -466,22 +464,22 @@ namespace Nz
|
|||
if (ind < 0)
|
||||
continue;
|
||||
|
||||
m_vertices[ind].position_in_cache = -1;
|
||||
m_vertices[ind].positionInCache = -1;
|
||||
}
|
||||
|
||||
TriangleCacheData* t = &m_triangles[tri];
|
||||
if (t->rendered)
|
||||
return; // triangle is already in the draw list
|
||||
|
||||
for (unsigned int i = 0; i < 3; ++i)
|
||||
for (int vert : t->verts)
|
||||
{
|
||||
// add all triangle vertices to the cache
|
||||
m_vertexCache.AddVertex(t->verts[i]);
|
||||
m_vertexCache.AddVertex(vert);
|
||||
|
||||
VertexCacheData *v = &m_vertices[t->verts[i]];
|
||||
VertexCacheData *v = &m_vertices[vert];
|
||||
|
||||
// decrease remaining velence
|
||||
v->remaining_valence--;
|
||||
// decrease remaining valence
|
||||
v->remainingValence--;
|
||||
|
||||
// move the added triangle to the end of the vertex's
|
||||
// triangle index list, so that the first 'remaining_valence'
|
||||
|
|
@ -500,11 +498,11 @@ namespace Nz
|
|||
if (ind < 0)
|
||||
continue;
|
||||
|
||||
m_vertices[ind].position_in_cache = i;
|
||||
m_vertices[ind].positionInCache = i;
|
||||
}
|
||||
}
|
||||
|
||||
// Optimization: to avoid duplicate calculations durind the same iteration,
|
||||
// Optimization: to avoid duplicate calculations during the same iteration,
|
||||
// both vertices and triangles have a 'calculated' flag. This flag
|
||||
// must be cleared at the beginning of the iteration to all *active* triangles
|
||||
// that have one or more of their vertices currently cached, and all their
|
||||
|
|
@ -522,9 +520,9 @@ namespace Nz
|
|||
|
||||
VertexCacheData *v = &m_vertices[vert];
|
||||
|
||||
for (int j = 0; j < v->remaining_valence; j++)
|
||||
for (int j = 0; j < v->remainingValence; j++)
|
||||
{
|
||||
TriangleCacheData *t = &m_triangles[v->tri_indices[j]];
|
||||
TriangleCacheData *t = &m_triangles[v->triIndices[j]];
|
||||
|
||||
// we actually found a triangle to process
|
||||
ret = true;
|
||||
|
|
@ -533,8 +531,8 @@ namespace Nz
|
|||
t->calculated = false;
|
||||
|
||||
// clear vertex flags
|
||||
for (unsigned int k = 0; k < 3; ++k)
|
||||
m_vertices[t->verts[k]].calculated = false;
|
||||
for (int i : t->verts)
|
||||
m_vertices[i].calculated = false;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -547,14 +545,14 @@ namespace Nz
|
|||
|
||||
// calculate vertex scores
|
||||
float sum = 0.f;
|
||||
for (unsigned int i = 0; i < 3; ++i)
|
||||
for (int vert : t->verts)
|
||||
{
|
||||
VertexCacheData& v = m_vertices[t->verts[i]];
|
||||
float sc = v.current_score;
|
||||
VertexCacheData& v = m_vertices[vert];
|
||||
float sc = v.score;
|
||||
if (!v.calculated)
|
||||
sc = CalculateVertexScore(v);
|
||||
|
||||
v.current_score = sc;
|
||||
v.score = sc;
|
||||
v.calculated = true;
|
||||
sum += sc;
|
||||
}
|
||||
|
|
@ -566,8 +564,8 @@ namespace Nz
|
|||
int PartialScoreRecalculation()
|
||||
{
|
||||
// iterate through all the vertices of the cache
|
||||
float max_score = std::numeric_limits<float>::lowest();
|
||||
int max_score_tri = -1;
|
||||
float maxScore = std::numeric_limits<float>::lowest();
|
||||
int maxScoreTri = -1;
|
||||
|
||||
for (unsigned int i = 0; i < 32; ++i)
|
||||
{
|
||||
|
|
@ -578,9 +576,9 @@ namespace Nz
|
|||
const VertexCacheData* v = &m_vertices[vert];
|
||||
|
||||
// iterate through all *active* triangles of this vertex
|
||||
for (int j = 0; j < v->remaining_valence; j++)
|
||||
for (int j = 0; j < v->remainingValence; j++)
|
||||
{
|
||||
int tri = v->tri_indices[j];
|
||||
int tri = v->triIndices[j];
|
||||
TriangleCacheData* t = &m_triangles[tri];
|
||||
if (!t->calculated)
|
||||
// calculate triangle score
|
||||
|
|
@ -589,15 +587,15 @@ namespace Nz
|
|||
float sc = t->current_score;
|
||||
|
||||
// we actually found a triangle to process
|
||||
if (sc > max_score)
|
||||
if (sc > maxScore)
|
||||
{
|
||||
max_score = sc;
|
||||
max_score_tri = tri;
|
||||
maxScore = sc;
|
||||
maxScoreTri = tri;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return max_score_tri;
|
||||
return maxScoreTri;
|
||||
}
|
||||
|
||||
// returns true while there are more steps to take
|
||||
|
|
@ -667,7 +665,7 @@ namespace Nz
|
|||
*vertexCount = xVertexCount*2 + yVertexCount*2 + zVertexCount*2;
|
||||
}
|
||||
|
||||
unsigned int ComputeCacheMissCount(IndexIterator indices, std::size_t indexCount)
|
||||
UInt64 ComputeCacheMissCount(IndexIterator indices, std::size_t indexCount)
|
||||
{
|
||||
VertexCache cache(indices, indexCount);
|
||||
return cache.GetMissCount();
|
||||
|
|
|
|||
|
|
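The renamed fields above feed a Forsyth-style vertex-cache score. For reference, a minimal standalone sketch of that scoring; the tuning constants used here are assumptions, since their real values are defined outside this excerpt:

#include <cmath>

float ComputeVertexScore(int positionInCache, int remainingValence)
{
	constexpr float CacheDecayPower = 1.5f;   // assumed
	constexpr float LastTriScore = 0.75f;     // assumed
	constexpr float ValenceBoostScale = 2.0f; // assumed
	constexpr float ValenceBoostPower = 0.5f; // assumed

	if (remainingValence <= 0)
		return -1.f; // no triangle still needs this vertex

	float score = 0.f;
	if (positionInCache >= 0)
	{
		if (positionInCache < 3)
			score = LastTriScore; // used by the last emitted triangle: fixed score
		else
		{
			// reward vertices sitting high in the 32-entry FIFO cache
			constexpr float Scaler = 1.f / (32 - 3);
			score = std::pow(1.f - (positionInCache - 3) * Scaler, CacheDecayPower);
		}
	}

	// boost vertices with few remaining triangles so lone vertices are emitted quickly
	score += ValenceBoostScale * std::pow(float(remainingValence), -ValenceBoostPower);
	return score;
}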
@ -3,174 +3,26 @@
// For conditions of distribution and use, see copyright notice in Config.hpp

#include <Nazara/Utility/Buffer.hpp>
#include <Nazara/Core/Algorithm.hpp>
#include <Nazara/Core/CallOnExit.hpp>
#include <Nazara/Core/Error.hpp>
#include <Nazara/Core/ErrorFlags.hpp>
#include <Nazara/Utility/BufferMapper.hpp>
#include <Nazara/Utility/Config.hpp>
#include <Nazara/Utility/SoftwareBuffer.hpp>
#include <memory>
#include <stdexcept>
#include <vector>
#include <Nazara/Utility/Debug.hpp>

namespace Nz
{
Buffer::Buffer(BufferType type) :
m_type(type),
m_usage(0),
m_size(0)
Buffer::~Buffer() = default;

std::shared_ptr<Buffer> Buffer::CopyContent(const BufferFactory& bufferFactory)
{
if (GetUsageFlags() & BufferUsage::DirectMapping)
{
BufferMapper<Buffer> mapper(*this, 0, GetSize());
return bufferFactory(GetType(), GetSize(), GetUsageFlags(), mapper.GetPointer());
}

Buffer::Buffer(BufferType type, UInt32 size, DataStorage storage, BufferUsageFlags usage) :
Buffer(type)
else
{
ErrorFlags flags(ErrorMode::ThrowException, true);

Create(size, storage, usage);
// TODO: Implement GPU to CPU
throw std::runtime_error("buffer is not mappable not implemented");
}

bool Buffer::CopyContent(const Buffer& buffer)
{
NazaraAssert(m_impl, "Invalid buffer");
NazaraAssert(buffer.IsValid(), "Invalid source buffer");

BufferMapper<Buffer> mapper(buffer, BufferAccess::ReadOnly);
return Fill(mapper.GetPointer(), 0, buffer.GetSize());
}

bool Buffer::Create(UInt32 size, DataStorage storage, BufferUsageFlags usage)
{
Destroy();

// Notre buffer est-il supporté ?
if (!IsStorageSupported(storage))
{
NazaraError("Buffer storage not supported");
return false;
}

std::unique_ptr<AbstractBuffer> impl = s_bufferFactories[UnderlyingCast(storage)](this, m_type);
if (!impl->Initialize(size, usage))
{
NazaraError("Failed to create buffer");
return false;
}

m_impl = std::move(impl);
m_size = size;
m_usage = usage;

return true; // Si on arrive ici c'est que tout s'est bien passé.
}

void Buffer::Destroy()
{
m_impl.reset();
}

bool Buffer::Fill(const void* data, UInt32 offset, UInt32 size)
{
NazaraAssert(m_impl, "Invalid buffer");
NazaraAssert(offset + size <= m_size, "Exceeding buffer size");

return m_impl->Fill(data, offset, (size == 0) ? m_size - offset : size);
}

void* Buffer::Map(BufferAccess access, UInt32 offset, UInt32 size)
{
NazaraAssert(m_impl, "Invalid buffer");
NazaraAssert(offset + size <= m_size, "Exceeding buffer size");

return m_impl->Map(access, offset, (size == 0) ? m_size - offset : size);
}

void* Buffer::Map(BufferAccess access, UInt32 offset, UInt32 size) const
{
NazaraAssert(m_impl, "Invalid buffer");
NazaraAssert(access == BufferAccess::ReadOnly, "Buffer access must be read-only when used const");
NazaraAssert(offset + size <= m_size, "Exceeding buffer size");

return m_impl->Map(access, offset, (size == 0) ? m_size - offset : size);
}

bool Buffer::SetStorage(DataStorage storage)
{
NazaraAssert(m_impl, "Invalid buffer");

if (HasStorage(storage))
return true;

if (!IsStorageSupported(storage))
{
NazaraError("Storage not supported");
return false;
}

void* ptr = m_impl->Map(BufferAccess::ReadOnly, 0, m_size);
if (!ptr)
{
NazaraError("Failed to map buffer");
return false;
}

CallOnExit unmapMyImpl([this]()
{
m_impl->Unmap();
});

std::unique_ptr<AbstractBuffer> impl(s_bufferFactories[UnderlyingCast(storage)](this, m_type));
if (!impl->Initialize(m_size, m_usage))
{
NazaraError("Failed to create buffer");
return false;
}

if (!impl->Fill(ptr, 0, m_size))
{
NazaraError("Failed to fill buffer");
return false;
}

unmapMyImpl.CallAndReset();

m_impl = std::move(impl);

return true;
}

void Buffer::Unmap() const
{
NazaraAssert(m_impl, "Invalid buffer");

if (!m_impl->Unmap())
NazaraWarning("Failed to unmap buffer (it's content may be undefined)"); ///TODO: Unexpected ?
}

bool Buffer::IsStorageSupported(DataStorage storage)
{
return s_bufferFactories[UnderlyingCast(storage)] != nullptr;
}

void Buffer::SetBufferFactory(DataStorage storage, BufferFactory func)
{
s_bufferFactories[UnderlyingCast(storage)] = func;
}

bool Buffer::Initialize()
{
SetBufferFactory(DataStorage::Software, [](Buffer* parent, BufferType type) -> std::unique_ptr<AbstractBuffer>
{
return std::make_unique<SoftwareBuffer>(parent, type);
});

return true;
}

void Buffer::Uninitialize()
{
std::fill(s_bufferFactories.begin(), s_bufferFactories.end(), nullptr);
}

std::array<Buffer::BufferFactory, DataStorageCount> Buffer::s_bufferFactories;
}
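Taken together, the rewritten Buffer drops Create/SetStorage and the static factory registry in favour of a BufferFactory callback passed at creation time. A minimal sketch of the new creation and copy path, using only signatures visible in this commit (SoftwareBufferFactory is added with the SoftwareBuffer changes further down); the data itself is illustrative:

#include <Nazara/Utility/Buffer.hpp>
#include <Nazara/Utility/SoftwareBuffer.hpp>
#include <array>

std::array<Nz::UInt8, 256> vertexData = {};

// type, size, usage, initial content
std::shared_ptr<Nz::Buffer> cpuBuffer = Nz::SoftwareBufferFactory(
	Nz::BufferType::Vertex, vertexData.size(),
	Nz::BufferUsage::Read | Nz::BufferUsage::Write, vertexData.data());

// Moving data to another storage is now an explicit copy through a factory
// (here the same software factory, for illustration) instead of SetStorage():
std::shared_ptr<Nz::Buffer> copy = cpuBuffer->CopyContent(Nz::SoftwareBufferFactory);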
@ -108,7 +108,7 @@ namespace Nz
}
}

std::shared_ptr<IndexBuffer> indexBuffer = std::make_shared<IndexBuffer>(false, header.num_tris*3, parameters.storage, parameters.indexBufferFlags);
std::shared_ptr<IndexBuffer> indexBuffer = std::make_shared<IndexBuffer>(false, 3 * header.num_tris, parameters.indexBufferFlags, parameters.bufferFactory);

// Extract triangles data
std::vector<MD2_Triangle> triangles(header.num_tris);

@ -117,7 +117,7 @@ namespace Nz
stream.Read(&triangles[0], header.num_tris*sizeof(MD2_Triangle));

// And convert them into an index buffer
BufferMapper<IndexBuffer> indexMapper(*indexBuffer, BufferAccess::DiscardAndWrite);
BufferMapper<IndexBuffer> indexMapper(*indexBuffer, 0, indexBuffer->GetIndexCount());
UInt16* index = static_cast<UInt16*>(indexMapper.GetPointer());

for (unsigned int i = 0; i < header.num_tris; ++i)

@ -159,7 +159,7 @@ namespace Nz
}
#endif

std::shared_ptr<VertexBuffer> vertexBuffer = std::make_shared<VertexBuffer>(parameters.vertexDeclaration, header.num_vertices, parameters.storage, parameters.vertexBufferFlags);
std::shared_ptr<VertexBuffer> vertexBuffer = std::make_shared<VertexBuffer>(parameters.vertexDeclaration, header.num_vertices, parameters.vertexBufferFlags, parameters.bufferFactory);
std::shared_ptr<StaticMesh> subMesh = std::make_shared<StaticMesh>(vertexBuffer, indexBuffer);

// Extracting vertices

@ -187,7 +187,7 @@ namespace Nz
scale *= ScaleAdjust;
translate *= ScaleAdjust;

VertexMapper vertexMapper(*vertexBuffer, BufferAccess::DiscardAndWrite);
VertexMapper vertexMapper(*vertexBuffer);

// Loading texture coordinates
if (auto uvPtr = vertexMapper.GetComponentPtr<Vector2f>(VertexComponent::TexCoord))

@ -247,7 +247,7 @@ namespace Nz
vertexMapper.Unmap();

subMesh->SetIndexBuffer(indexBuffer);
subMesh->SetIndexBuffer(std::move(indexBuffer));
subMesh->SetMaterialIndex(0);

subMesh->GenerateAABB();
@ -91,16 +91,16 @@ namespace Nz
{
const MD5MeshParser::Mesh& md5Mesh = meshes[i];

std::size_t indexCount = md5Mesh.triangles.size()*3;
std::size_t vertexCount = md5Mesh.vertices.size();
UInt64 indexCount = md5Mesh.triangles.size() * 3;
UInt64 vertexCount = md5Mesh.vertices.size();

bool largeIndices = (vertexCount > std::numeric_limits<UInt16>::max());

std::shared_ptr<IndexBuffer> indexBuffer = std::make_shared<IndexBuffer>(largeIndices, UInt32(indexCount), parameters.storage, parameters.indexBufferFlags);
std::shared_ptr<VertexBuffer> vertexBuffer = std::make_shared<VertexBuffer>(VertexDeclaration::Get(VertexLayout::XYZ_Normal_UV_Tangent_Skinning), UInt32(vertexCount), parameters.storage, parameters.vertexBufferFlags);
std::shared_ptr<IndexBuffer> indexBuffer = std::make_shared<IndexBuffer>(largeIndices, indexCount, parameters.indexBufferFlags, parameters.bufferFactory);
std::shared_ptr<VertexBuffer> vertexBuffer = std::make_shared<VertexBuffer>(VertexDeclaration::Get(VertexLayout::XYZ_Normal_UV_Tangent_Skinning), UInt32(vertexCount), parameters.vertexBufferFlags, parameters.bufferFactory);

// Index buffer
IndexMapper indexMapper(*indexBuffer, BufferAccess::DiscardAndWrite);
IndexMapper indexMapper(*indexBuffer);

// Le format définit un set de triangles nous permettant de retrouver facilement les indices
// Cependant les sommets des triangles ne sont pas spécifiés dans le même ordre que ceux du moteur

@ -128,7 +128,7 @@ namespace Nz
std::vector<Weight> tempWeights;

BufferMapper<VertexBuffer> vertexMapper(*vertexBuffer, BufferAccess::WriteOnly);
BufferMapper<VertexBuffer> vertexMapper(*vertexBuffer, 0, vertexBuffer->GetVertexCount());
SkeletalMeshVertex* vertices = static_cast<SkeletalMeshVertex*>(vertexMapper.GetPointer());

for (const MD5MeshParser::Vertex& vertex : md5Mesh.vertices)

@ -235,15 +235,15 @@ namespace Nz
for (UInt32 i = 0; i < meshCount; ++i)
{
const MD5MeshParser::Mesh& md5Mesh = meshes[i];
std::size_t indexCount = md5Mesh.triangles.size()*3;
std::size_t vertexCount = md5Mesh.vertices.size();
UInt64 indexCount = md5Mesh.triangles.size() * 3;
UInt64 vertexCount = md5Mesh.vertices.size();

// Index buffer
bool largeIndices = (vertexCount > std::numeric_limits<UInt16>::max());

std::shared_ptr<IndexBuffer> indexBuffer = std::make_shared<IndexBuffer>(largeIndices, UInt32(indexCount), parameters.storage, parameters.indexBufferFlags);
std::shared_ptr<IndexBuffer> indexBuffer = std::make_shared<IndexBuffer>(largeIndices, indexCount, parameters.indexBufferFlags, parameters.bufferFactory);

IndexMapper indexMapper(*indexBuffer, BufferAccess::DiscardAndWrite);
IndexMapper indexMapper(*indexBuffer);
IndexIterator index = indexMapper.begin();

for (const MD5MeshParser::Triangle& triangle : md5Mesh.triangles)

@ -259,9 +259,9 @@ namespace Nz
indexBuffer->Optimize();

// Vertex buffer
std::shared_ptr<VertexBuffer> vertexBuffer = std::make_shared<VertexBuffer>(parameters.vertexDeclaration, UInt32(vertexCount), parameters.storage, parameters.vertexBufferFlags);
std::shared_ptr<VertexBuffer> vertexBuffer = std::make_shared<VertexBuffer>(parameters.vertexDeclaration, vertexCount, parameters.vertexBufferFlags, parameters.bufferFactory);

VertexMapper vertexMapper(*vertexBuffer, BufferAccess::DiscardAndWrite);
VertexMapper vertexMapper(*vertexBuffer);

// Vertex positions
if (auto posPtr = vertexMapper.GetComponentPtr<Vector3f>(VertexComponent::Position))
|
|||
texCoords != nullptr && meshes != nullptr && meshCount > 0,
|
||||
"Invalid OBJParser output");
|
||||
|
||||
// Un conteneur temporaire pour contenir les indices de face avant triangulation
|
||||
std::vector<std::size_t> faceIndices(3); // Comme il y aura au moins trois sommets
|
||||
// Triangulation temporary vector
|
||||
std::vector<UInt32> faceIndices;
|
||||
for (std::size_t i = 0; i < meshCount; ++i)
|
||||
{
|
||||
std::size_t faceCount = meshes[i].faces.size();
|
||||
if (faceCount == 0)
|
||||
continue;
|
||||
|
||||
std::vector<std::size_t> indices;
|
||||
std::vector<UInt32> indices;
|
||||
indices.reserve(faceCount*3); // Pire cas si les faces sont des triangles
|
||||
|
||||
// Afin d'utiliser OBJParser::FaceVertex comme clé dans un unordered_map,
|
||||
|
|
@ -227,7 +227,7 @@ namespace Nz
|
|||
std::unordered_map<OBJParser::FaceVertex, unsigned int, FaceVertexHasher, FaceVertexComparator> vertices;
|
||||
vertices.reserve(meshes[i].vertices.size());
|
||||
|
||||
unsigned int vertexCount = 0;
|
||||
UInt32 vertexCount = 0;
|
||||
for (unsigned int j = 0; j < faceCount; ++j)
|
||||
{
|
||||
std::size_t faceVertexCount = meshes[i].faces[j].vertexCount;
|
||||
|
|
@ -254,13 +254,13 @@ namespace Nz
|
|||
}
|
||||
|
||||
// Création des buffers
|
||||
std::shared_ptr<IndexBuffer> indexBuffer = std::make_shared<IndexBuffer>(vertexCount > std::numeric_limits<UInt16>::max(), std::size_t(indices.size()), parameters.storage, parameters.indexBufferFlags);
|
||||
std::shared_ptr<VertexBuffer> vertexBuffer = std::make_shared<VertexBuffer>(parameters.vertexDeclaration, std::size_t(vertexCount), parameters.storage, parameters.vertexBufferFlags);
|
||||
std::shared_ptr<IndexBuffer> indexBuffer = std::make_shared<IndexBuffer>(vertexCount > std::numeric_limits<UInt16>::max(), indices.size(), parameters.indexBufferFlags, parameters.bufferFactory);
|
||||
std::shared_ptr<VertexBuffer> vertexBuffer = std::make_shared<VertexBuffer>(parameters.vertexDeclaration, vertexCount, parameters.vertexBufferFlags, parameters.bufferFactory);
|
||||
|
||||
// Remplissage des indices
|
||||
IndexMapper indexMapper(*indexBuffer, BufferAccess::WriteOnly);
|
||||
IndexMapper indexMapper(*indexBuffer);
|
||||
for (std::size_t j = 0; j < indices.size(); ++j)
|
||||
indexMapper.Set(j, UInt32(indices[j]));
|
||||
indexMapper.Set(j, indices[j]);
|
||||
|
||||
indexMapper.Unmap(); // Pour laisser les autres tâches affecter l'index buffer
|
||||
|
||||
|
|
@ -277,7 +277,7 @@ namespace Nz
|
|||
bool hasNormals = true;
|
||||
bool hasTexCoords = true;
|
||||
|
||||
VertexMapper vertexMapper(*vertexBuffer, BufferAccess::DiscardAndWrite);
|
||||
VertexMapper vertexMapper(*vertexBuffer);
|
||||
|
||||
auto normalPtr = vertexMapper.GetComponentPtr<Vector3f>(VertexComponent::Normal);
|
||||
auto posPtr = vertexMapper.GetComponentPtr<Vector3f>(VertexComponent::Position);
|
||||
|
|
@ -325,8 +325,8 @@ namespace Nz
|
|||
// Official .obj files have no vertex color, fill it with white
|
||||
if (auto colorPtr = vertexMapper.GetComponentPtr<Color>(VertexComponent::Color))
|
||||
{
|
||||
for (unsigned int i = 0; i < vertexCount; ++i)
|
||||
colorPtr[i] = Color::White;
|
||||
for (UInt32 j = 0; j < vertexCount; ++j)
|
||||
colorPtr[j] = Color::White;
|
||||
}
|
||||
|
||||
vertexMapper.Unmap();
|
||||
|
|
|
|||
|
|
@ -143,7 +143,7 @@ namespace Nz
|
|||
OBJParser::Mesh* meshes = objFormat.SetMeshCount(meshCount);
|
||||
for (std::size_t i = 0; i < meshCount; ++i)
|
||||
{
|
||||
const StaticMesh& staticMesh = static_cast<const StaticMesh&>(*mesh.GetSubMesh(i));
|
||||
StaticMesh& staticMesh = static_cast<StaticMesh&>(*mesh.GetSubMesh(i));
|
||||
|
||||
std::size_t triangleCount = staticMesh.GetTriangleCount();
|
||||
|
||||
|
|
|
|||
|
|
@ -13,60 +13,79 @@
|
|||
|
||||
namespace Nz
|
||||
{
|
||||
IndexBuffer::IndexBuffer(bool largeIndices, std::shared_ptr<Buffer> buffer)
|
||||
IndexBuffer::IndexBuffer(bool largeIndices, std::shared_ptr<Buffer> buffer) :
|
||||
m_buffer(std::move(buffer)),
|
||||
m_endOffset(m_buffer->GetSize()),
|
||||
m_startOffset(0),
|
||||
m_largeIndices(largeIndices)
|
||||
{
|
||||
ErrorFlags(ErrorMode::ThrowException, true);
|
||||
Reset(largeIndices, std::move(buffer));
|
||||
NazaraAssert(m_buffer, "invalid buffer");
|
||||
NazaraAssert(m_buffer->GetType() == BufferType::Index, "buffer must be an index buffer");
|
||||
|
||||
m_endOffset = m_buffer->GetSize();
|
||||
m_indexCount = m_endOffset / GetStride();
|
||||
}
|
||||
|
||||
IndexBuffer::IndexBuffer(bool largeIndices, std::shared_ptr<Buffer> buffer, std::size_t offset, std::size_t size)
|
||||
IndexBuffer::IndexBuffer(bool largeIndices, std::shared_ptr<Buffer> buffer, UInt64 offset, UInt64 size) :
|
||||
m_buffer(std::move(buffer)),
|
||||
m_endOffset(offset + size),
|
||||
m_startOffset(offset),
|
||||
m_largeIndices(largeIndices)
|
||||
{
|
||||
ErrorFlags(ErrorMode::ThrowException, true);
|
||||
Reset(largeIndices, std::move(buffer), offset, size);
|
||||
NazaraAssert(m_buffer, "invalid buffer");
|
||||
NazaraAssert(m_buffer->GetType() == BufferType::Index, "buffer must be an index buffer");
|
||||
NazaraAssert(size > 0, "invalid size");
|
||||
|
||||
m_indexCount = size / GetStride();
|
||||
}
|
||||
|
||||
IndexBuffer::IndexBuffer(bool largeIndices, std::size_t length, DataStorage storage, BufferUsageFlags usage)
|
||||
IndexBuffer::IndexBuffer(bool largeIndices, UInt64 indexCount, BufferUsageFlags usage, const BufferFactory& bufferFactory, const void* initialData) :
|
||||
m_indexCount(indexCount),
|
||||
m_startOffset(0),
|
||||
m_largeIndices(largeIndices)
|
||||
{
|
||||
ErrorFlags(ErrorMode::ThrowException, true);
|
||||
Reset(largeIndices, length, storage, usage);
|
||||
NazaraAssert(indexCount > 0, "invalid index count");
|
||||
|
||||
m_endOffset = indexCount * GetStride();
|
||||
m_buffer = bufferFactory(BufferType::Index, m_endOffset, usage, initialData);
|
||||
}
|
||||
|
||||
unsigned int IndexBuffer::ComputeCacheMissCount() const
|
||||
unsigned int IndexBuffer::ComputeCacheMissCount()
|
||||
{
|
||||
IndexMapper mapper(*this);
|
||||
|
||||
return Nz::ComputeCacheMissCount(mapper.begin(), m_indexCount);
|
||||
}
|
||||
|
||||
bool IndexBuffer::Fill(const void* data, std::size_t startIndex, std::size_t length)
|
||||
bool IndexBuffer::Fill(const void* data, UInt64 startIndex, UInt64 length)
|
||||
{
|
||||
std::size_t stride = GetStride();
|
||||
UInt64 stride = GetStride();
|
||||
|
||||
return FillRaw(data, startIndex*stride, length*stride);
|
||||
}
|
||||
|
||||
bool IndexBuffer::FillRaw(const void* data, std::size_t offset, std::size_t size)
|
||||
bool IndexBuffer::FillRaw(const void* data, UInt64 offset, UInt64 size)
|
||||
{
|
||||
NazaraAssert(m_buffer && m_buffer->IsValid(), "Invalid buffer");
|
||||
NazaraAssert(m_buffer, "Invalid buffer");
|
||||
NazaraAssert(m_startOffset + offset + size <= m_endOffset, "Exceeding virtual buffer size");
|
||||
|
||||
return m_buffer->Fill(data, m_startOffset+offset, size);
|
||||
}
|
||||
|
||||
void* IndexBuffer::MapRaw(BufferAccess access, std::size_t offset, std::size_t size)
|
||||
void* IndexBuffer::MapRaw(UInt64 offset, UInt64 size)
|
||||
{
|
||||
NazaraAssert(m_buffer && m_buffer->IsValid(), "Invalid buffer");
|
||||
NazaraAssert(m_buffer, "Invalid buffer");
|
||||
NazaraAssert(m_startOffset + offset + size <= m_endOffset, "Exceeding virtual buffer size");
|
||||
|
||||
return m_buffer->Map(access, offset, size);
|
||||
return m_buffer->Map(offset, size);
|
||||
}
|
||||
|
||||
void* IndexBuffer::MapRaw(BufferAccess access, std::size_t offset, std::size_t size) const
|
||||
void* IndexBuffer::MapRaw(UInt64 offset, UInt64 size) const
|
||||
{
|
||||
NazaraAssert(m_buffer && m_buffer->IsValid(), "Invalid buffer");
|
||||
NazaraAssert(m_buffer, "Invalid buffer");
|
||||
NazaraAssert(m_startOffset + offset + size <= m_endOffset, "Exceeding virtual buffer size");
|
||||
|
||||
return m_buffer->Map(access, offset, size);
|
||||
return m_buffer->Map(offset, size);
|
||||
}
|
||||
|
||||
void IndexBuffer::Optimize()
|
||||
|
|
@ -76,55 +95,6 @@ namespace Nz
|
|||
OptimizeIndices(mapper.begin(), m_indexCount);
|
||||
}
|
||||
|
||||
void IndexBuffer::Reset()
|
||||
{
|
||||
m_buffer.reset();
|
||||
}
|
||||
|
||||
void IndexBuffer::Reset(bool largeIndices, std::shared_ptr<Buffer> buffer)
|
||||
{
|
||||
NazaraAssert(buffer && buffer->IsValid(), "Invalid buffer");
|
||||
|
||||
Reset(largeIndices, buffer, 0, buffer->GetSize());
|
||||
}
|
||||
|
||||
void IndexBuffer::Reset(bool largeIndices, std::shared_ptr<Buffer> buffer, std::size_t offset, std::size_t size)
|
||||
{
|
||||
NazaraAssert(buffer && buffer->IsValid(), "Invalid buffer");
|
||||
NazaraAssert(buffer->GetType() == BufferType::Index, "Buffer must be an index buffer");
|
||||
NazaraAssert(size > 0, "Invalid size");
|
||||
NazaraAssert(offset + size > buffer->GetSize(), "Virtual buffer exceed buffer bounds");
|
||||
|
||||
std::size_t stride = static_cast<std::size_t>((largeIndices) ? sizeof(UInt32) : sizeof(UInt16));
|
||||
|
||||
m_buffer = buffer;
|
||||
m_endOffset = offset + size;
|
||||
m_indexCount = size / stride;
|
||||
m_largeIndices = largeIndices;
|
||||
m_startOffset = offset;
|
||||
}
|
||||
|
||||
void IndexBuffer::Reset(bool largeIndices, std::size_t length, DataStorage storage, BufferUsageFlags usage)
|
||||
{
|
||||
std::size_t stride = static_cast<std::size_t>((largeIndices) ? sizeof(UInt32) : sizeof(UInt16));
|
||||
|
||||
m_endOffset = length * stride;
|
||||
m_indexCount = length;
|
||||
m_largeIndices = largeIndices;
|
||||
m_startOffset = 0;
|
||||
|
||||
m_buffer = std::make_shared<Buffer>(BufferType::Index, m_endOffset, storage, usage);
|
||||
}
|
||||
|
||||
void IndexBuffer::Reset(const IndexBuffer& indexBuffer)
|
||||
{
|
||||
m_buffer = indexBuffer.m_buffer;
|
||||
m_endOffset = indexBuffer.m_endOffset;
|
||||
m_indexCount = indexBuffer.m_indexCount;
|
||||
m_largeIndices = indexBuffer.m_largeIndices;
|
||||
m_startOffset = indexBuffer.m_startOffset;
|
||||
}
|
||||
|
||||
void IndexBuffer::Unmap() const
|
||||
{
|
||||
m_buffer->Unmap();
|
||||
|
|
|
|||
|
|
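With the Reset() overloads removed, an index buffer is now fully described at construction time. A minimal usage sketch under the constructor signature introduced above; the software factory and the index data are illustrative:

#include <Nazara/Utility/IndexBuffer.hpp>
#include <Nazara/Utility/SoftwareBuffer.hpp>
#include <array>

std::array<Nz::UInt16, 6> quadIndices = { 0, 1, 2, 2, 1, 3 };

// largeIndices, indexCount, usage, factory, initial content
std::shared_ptr<Nz::IndexBuffer> indexBuffer = std::make_shared<Nz::IndexBuffer>(
	false, quadIndices.size(), Nz::BufferUsage::Read | Nz::BufferUsage::Write,
	Nz::SoftwareBufferFactory, quadIndices.data());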
@ -31,6 +31,12 @@ namespace Nz
return ptr[i];
}

UInt32 GetterError(const void*, std::size_t)
{
NazaraError("index buffer has no read flag");
return 0;
}

void Setter16(void* buffer, std::size_t i, UInt32 value)
{
UInt16* ptr = static_cast<UInt16*>(buffer);

@ -45,54 +51,29 @@ namespace Nz
void SetterError(void*, std::size_t, UInt32)
{
NazaraError("Index buffer opened with read-only access");
NazaraError("index buffer has no write flag");
}
}

IndexMapper::IndexMapper(IndexBuffer& indexBuffer, BufferAccess access, std::size_t indexCount) :
IndexMapper::IndexMapper(IndexBuffer& indexBuffer, std::size_t indexCount) :
m_indexCount((indexCount != 0) ? indexCount : indexBuffer.GetIndexCount())
{
if (!m_mapper.Map(indexBuffer, access))
if (!m_mapper.Map(indexBuffer, 0, m_indexCount))
NazaraError("Failed to map buffer"); ///TODO: Unexcepted

if (indexBuffer.HasLargeIndices())
{
m_getter = Getter32;
if (access != BufferAccess::ReadOnly)
m_setter = Setter32;
if (indexBuffer.GetBuffer()->GetUsageFlags().Test(BufferUsage::Read))
m_getter = (indexBuffer.HasLargeIndices()) ? Getter32 : Getter16;
else
m_getter = GetterError;

if (indexBuffer.GetBuffer()->GetUsageFlags().Test(BufferUsage::Write))
m_setter = (indexBuffer.HasLargeIndices()) ? Setter32 : Setter16;
else
m_setter = SetterError;
}
else
{
m_getter = Getter16;
if (access != BufferAccess::ReadOnly)
m_setter = Setter16;
else
m_setter = SetterError;
}
}

IndexMapper::IndexMapper(SubMesh& subMesh, BufferAccess access) :
IndexMapper(*subMesh.GetIndexBuffer(), access, (subMesh.GetIndexBuffer()) ? 0 : subMesh.GetVertexCount())
{
}

IndexMapper::IndexMapper(const IndexBuffer& indexBuffer, BufferAccess access, std::size_t indexCount) :
m_setter(SetterError),
m_indexCount((indexCount != 0) ? indexCount : indexBuffer.GetIndexCount())
{
if (!m_mapper.Map(indexBuffer, access))
NazaraError("Failed to map buffer"); ///TODO: Unexcepted

if (indexBuffer.HasLargeIndices())
m_getter = Getter32;
else
m_getter = Getter16;
}

IndexMapper::IndexMapper(const SubMesh& subMesh, BufferAccess access) :
IndexMapper(*subMesh.GetIndexBuffer(), access, (subMesh.GetIndexBuffer()) ? 0 : subMesh.GetVertexCount())
IndexMapper::IndexMapper(SubMesh& subMesh) :
IndexMapper(*subMesh.GetIndexBuffer(), (subMesh.GetIndexBuffer()) ? 0 : subMesh.GetVertexCount())
{
}

@ -127,11 +108,11 @@ namespace Nz
IndexIterator IndexMapper::begin()
{
return IndexIterator(this, 0);
return {this, 0};
}

IndexIterator IndexMapper::end()
{
return IndexIterator(this, m_indexCount); // Post-end
return {this, m_indexCount}; // Post-end
}
}
|
|||
|
||||
namespace Nz
|
||||
{
|
||||
MeshParams::MeshParams()
|
||||
{
|
||||
if (!Buffer::IsStorageSupported(storage))
|
||||
storage = DataStorage::Software;
|
||||
}
|
||||
|
||||
bool MeshParams::IsValid() const
|
||||
{
|
||||
if (!Buffer::IsStorageSupported(storage))
|
||||
{
|
||||
NazaraError("Storage not supported");
|
||||
return false;
|
||||
}
|
||||
|
||||
if (matrix == Matrix4f::Zero())
|
||||
{
|
||||
NazaraError("Invalid matrix");
|
||||
|
|
@ -112,10 +100,10 @@ namespace Nz
|
|||
std::size_t vertexCount;
|
||||
ComputeBoxIndexVertexCount(primitive.box.subdivision, &indexCount, &vertexCount);
|
||||
|
||||
indexBuffer = std::make_shared<IndexBuffer>(vertexCount > std::numeric_limits<UInt16>::max(), indexCount, params.storage, params.indexBufferFlags);
|
||||
vertexBuffer = std::make_shared<VertexBuffer>(declaration, vertexCount, params.storage, params.vertexBufferFlags);
|
||||
indexBuffer = std::make_shared<IndexBuffer>(vertexCount > std::numeric_limits<UInt16>::max(), indexCount, params.indexBufferFlags, params.bufferFactory);
|
||||
vertexBuffer = std::make_shared<VertexBuffer>(declaration, vertexCount, params.vertexBufferFlags, params.bufferFactory);
|
||||
|
||||
VertexMapper vertexMapper(*vertexBuffer, BufferAccess::WriteOnly);
|
||||
VertexMapper vertexMapper(*vertexBuffer);
|
||||
|
||||
VertexPointers pointers;
|
||||
pointers.normalPtr = vertexMapper.GetComponentPtr<Vector3f>(VertexComponent::Normal);
|
||||
|
|
@ -123,7 +111,7 @@ namespace Nz
|
|||
pointers.tangentPtr = vertexMapper.GetComponentPtr<Vector3f>(VertexComponent::Tangent);
|
||||
pointers.uvPtr = vertexMapper.GetComponentPtr<Vector2f>(VertexComponent::TexCoord);
|
||||
|
||||
IndexMapper indexMapper(*indexBuffer, BufferAccess::WriteOnly);
|
||||
IndexMapper indexMapper(*indexBuffer);
|
||||
GenerateBox(primitive.box.lengths, primitive.box.subdivision, matrix, primitive.textureCoords, pointers, indexMapper.begin(), &aabb);
|
||||
break;
|
||||
}
|
||||
|
|
@ -134,10 +122,10 @@ namespace Nz
|
|||
std::size_t vertexCount;
|
||||
ComputeConeIndexVertexCount(primitive.cone.subdivision, &indexCount, &vertexCount);
|
||||
|
||||
indexBuffer = std::make_shared<IndexBuffer>(vertexCount > std::numeric_limits<UInt16>::max(), indexCount, params.storage, params.indexBufferFlags);
|
||||
vertexBuffer = std::make_shared<VertexBuffer>(declaration, vertexCount, params.storage, params.vertexBufferFlags);
|
||||
indexBuffer = std::make_shared<IndexBuffer>(vertexCount > std::numeric_limits<UInt16>::max(), indexCount, params.indexBufferFlags, params.bufferFactory);
|
||||
vertexBuffer = std::make_shared<VertexBuffer>(declaration, vertexCount, params.vertexBufferFlags, params.bufferFactory);
|
||||
|
||||
VertexMapper vertexMapper(*vertexBuffer, BufferAccess::WriteOnly);
|
||||
VertexMapper vertexMapper(*vertexBuffer);
|
||||
|
||||
VertexPointers pointers;
|
||||
pointers.normalPtr = vertexMapper.GetComponentPtr<Vector3f>(VertexComponent::Normal);
|
||||
|
|
@ -145,7 +133,7 @@ namespace Nz
|
|||
pointers.tangentPtr = vertexMapper.GetComponentPtr<Vector3f>(VertexComponent::Tangent);
|
||||
pointers.uvPtr = vertexMapper.GetComponentPtr<Vector2f>(VertexComponent::TexCoord);
|
||||
|
||||
IndexMapper indexMapper(*indexBuffer, BufferAccess::WriteOnly);
|
||||
IndexMapper indexMapper(*indexBuffer);
|
||||
GenerateCone(primitive.cone.length, primitive.cone.radius, primitive.cone.subdivision, matrix, primitive.textureCoords, pointers, indexMapper.begin(), &aabb);
|
||||
break;
|
||||
}
|
||||
|
|
@ -156,10 +144,10 @@ namespace Nz
|
|||
std::size_t vertexCount;
|
||||
ComputePlaneIndexVertexCount(primitive.plane.subdivision, &indexCount, &vertexCount);
|
||||
|
||||
indexBuffer = std::make_shared<IndexBuffer>(vertexCount > std::numeric_limits<UInt16>::max(), indexCount, params.storage, params.indexBufferFlags);
|
||||
vertexBuffer = std::make_shared<VertexBuffer>(declaration, vertexCount, params.storage, params.vertexBufferFlags);
|
||||
indexBuffer = std::make_shared<IndexBuffer>(vertexCount > std::numeric_limits<UInt16>::max(), indexCount, params.indexBufferFlags, params.bufferFactory);
|
||||
vertexBuffer = std::make_shared<VertexBuffer>(declaration, vertexCount, params.vertexBufferFlags, params.bufferFactory);
|
||||
|
||||
VertexMapper vertexMapper(*vertexBuffer, BufferAccess::WriteOnly);
|
||||
VertexMapper vertexMapper(*vertexBuffer);
|
||||
|
||||
VertexPointers pointers;
|
||||
pointers.normalPtr = vertexMapper.GetComponentPtr<Vector3f>(VertexComponent::Normal);
|
||||
|
|
@ -167,7 +155,7 @@ namespace Nz
|
|||
pointers.tangentPtr = vertexMapper.GetComponentPtr<Vector3f>(VertexComponent::Tangent);
|
||||
pointers.uvPtr = vertexMapper.GetComponentPtr<Vector2f>(VertexComponent::TexCoord);
|
||||
|
||||
IndexMapper indexMapper(*indexBuffer, BufferAccess::WriteOnly);
|
||||
IndexMapper indexMapper(*indexBuffer);
|
||||
GeneratePlane(primitive.plane.subdivision, primitive.plane.size, matrix, primitive.textureCoords, pointers, indexMapper.begin(), &aabb);
|
||||
break;
|
||||
}
|
||||
|
|
@ -182,10 +170,10 @@ namespace Nz
|
|||
std::size_t vertexCount;
|
||||
ComputeCubicSphereIndexVertexCount(primitive.sphere.cubic.subdivision, &indexCount, &vertexCount);
|
||||
|
||||
indexBuffer = std::make_shared<IndexBuffer>(vertexCount > std::numeric_limits<UInt16>::max(), indexCount, params.storage, params.indexBufferFlags);
|
||||
vertexBuffer = std::make_shared<VertexBuffer>(declaration, vertexCount, params.storage, params.vertexBufferFlags);
|
||||
indexBuffer = std::make_shared<IndexBuffer>(vertexCount > std::numeric_limits<UInt16>::max(), indexCount, params.indexBufferFlags, params.bufferFactory);
|
||||
vertexBuffer = std::make_shared<VertexBuffer>(declaration, vertexCount, params.vertexBufferFlags, params.bufferFactory);
|
||||
|
||||
VertexMapper vertexMapper(*vertexBuffer, BufferAccess::ReadWrite);
|
||||
VertexMapper vertexMapper(*vertexBuffer);
|
||||
|
||||
VertexPointers pointers;
|
||||
pointers.normalPtr = vertexMapper.GetComponentPtr<Vector3f>(VertexComponent::Normal);
|
||||
|
|
@ -193,7 +181,7 @@ namespace Nz
|
|||
pointers.tangentPtr = vertexMapper.GetComponentPtr<Vector3f>(VertexComponent::Tangent);
|
||||
pointers.uvPtr = vertexMapper.GetComponentPtr<Vector2f>(VertexComponent::TexCoord);
|
||||
|
||||
IndexMapper indexMapper(*indexBuffer, BufferAccess::WriteOnly);
|
||||
IndexMapper indexMapper(*indexBuffer);
|
||||
GenerateCubicSphere(primitive.sphere.size, primitive.sphere.cubic.subdivision, matrix, primitive.textureCoords, pointers, indexMapper.begin(), &aabb);
|
||||
break;
|
||||
}
|
||||
|
|
@ -204,10 +192,10 @@ namespace Nz
|
|||
std::size_t vertexCount;
|
||||
ComputeIcoSphereIndexVertexCount(primitive.sphere.ico.recursionLevel, &indexCount, &vertexCount);
|
||||
|
||||
indexBuffer = std::make_shared<IndexBuffer>(vertexCount > std::numeric_limits<UInt16>::max(), indexCount, params.storage, params.indexBufferFlags);
|
||||
vertexBuffer = std::make_shared<VertexBuffer>(declaration, vertexCount, params.storage, params.vertexBufferFlags);
|
||||
indexBuffer = std::make_shared<IndexBuffer>(vertexCount > std::numeric_limits<UInt16>::max(), indexCount, params.indexBufferFlags, params.bufferFactory);
|
||||
vertexBuffer = std::make_shared<VertexBuffer>(declaration, vertexCount, params.vertexBufferFlags, params.bufferFactory);
|
||||
|
||||
VertexMapper vertexMapper(*vertexBuffer, BufferAccess::WriteOnly);
|
||||
VertexMapper vertexMapper(*vertexBuffer);
|
||||
|
||||
VertexPointers pointers;
|
||||
pointers.normalPtr = vertexMapper.GetComponentPtr<Vector3f>(VertexComponent::Normal);
|
||||
|
|
@ -215,7 +203,7 @@ namespace Nz
|
|||
pointers.tangentPtr = vertexMapper.GetComponentPtr<Vector3f>(VertexComponent::Tangent);
|
||||
pointers.uvPtr = vertexMapper.GetComponentPtr<Vector2f>(VertexComponent::TexCoord);
|
||||
|
||||
IndexMapper indexMapper(*indexBuffer, BufferAccess::WriteOnly);
|
||||
IndexMapper indexMapper(*indexBuffer);
|
||||
GenerateIcoSphere(primitive.sphere.size, primitive.sphere.ico.recursionLevel, matrix, primitive.textureCoords, pointers, indexMapper.begin(), &aabb);
|
||||
break;
|
||||
}
|
||||
|
|
@ -226,10 +214,10 @@ namespace Nz
|
|||
std::size_t vertexCount;
|
||||
ComputeUvSphereIndexVertexCount(primitive.sphere.uv.sliceCount, primitive.sphere.uv.stackCount, &indexCount, &vertexCount);
|
||||
|
||||
indexBuffer = std::make_shared<IndexBuffer>(vertexCount > std::numeric_limits<UInt16>::max(), indexCount, params.storage, params.indexBufferFlags);
|
||||
vertexBuffer = std::make_shared<VertexBuffer>(declaration, vertexCount, params.storage, params.vertexBufferFlags);
|
||||
indexBuffer = std::make_shared<IndexBuffer>(vertexCount > std::numeric_limits<UInt16>::max(), indexCount, params.indexBufferFlags, params.bufferFactory);
|
||||
vertexBuffer = std::make_shared<VertexBuffer>(declaration, vertexCount, params.vertexBufferFlags, params.bufferFactory);
|
||||
|
||||
VertexMapper vertexMapper(*vertexBuffer, BufferAccess::WriteOnly);
|
||||
VertexMapper vertexMapper(*vertexBuffer);
|
||||
|
||||
VertexPointers pointers;
|
||||
pointers.normalPtr = vertexMapper.GetComponentPtr<Vector3f>(VertexComponent::Normal);
|
||||
|
|
@ -237,7 +225,7 @@ namespace Nz
|
|||
pointers.tangentPtr = vertexMapper.GetComponentPtr<Vector3f>(VertexComponent::Tangent);
|
||||
pointers.uvPtr = vertexMapper.GetComponentPtr<Vector2f>(VertexComponent::TexCoord);
|
||||
|
||||
IndexMapper indexMapper(*indexBuffer, BufferAccess::WriteOnly);
|
||||
IndexMapper indexMapper(*indexBuffer);
|
||||
GenerateUvSphere(primitive.sphere.size, primitive.sphere.uv.sliceCount, primitive.sphere.uv.stackCount, matrix, primitive.textureCoords, pointers, indexMapper.begin(), &aabb);
|
||||
break;
|
||||
}
|
||||
|
|
@ -613,7 +601,7 @@ namespace Nz
|
|||
{
|
||||
StaticMesh& staticMesh = static_cast<StaticMesh&>(*data.subMesh);
|
||||
|
||||
BufferMapper<VertexBuffer> mapper(*staticMesh.GetVertexBuffer(), BufferAccess::ReadWrite);
|
||||
BufferMapper<VertexBuffer> mapper(*staticMesh.GetVertexBuffer(), 0, staticMesh.GetVertexCount());
|
||||
MeshVertex* vertices = static_cast<MeshVertex*>(mapper.GetPointer());
|
||||
|
||||
Boxf aabb(vertices->position.x, vertices->position.y, vertices->position.z, 0.f, 0.f, 0.f);
|
||||
|
|
|
|||
|
|
@ -8,7 +8,7 @@
|
|||
|
||||
namespace Nz
|
||||
{
|
||||
SkeletalMesh::SkeletalMesh(std::shared_ptr<VertexBuffer> vertexBuffer, std::shared_ptr<const IndexBuffer> indexBuffer) :
|
||||
SkeletalMesh::SkeletalMesh(std::shared_ptr<VertexBuffer> vertexBuffer, std::shared_ptr<IndexBuffer> indexBuffer) :
|
||||
m_aabb(Nz::Boxf::Zero()),
|
||||
m_indexBuffer(std::move(indexBuffer)),
|
||||
m_vertexBuffer(std::move(vertexBuffer))
|
||||
|
|
@ -26,7 +26,7 @@ namespace Nz
|
|||
return AnimationType::Skeletal;
|
||||
}
|
||||
|
||||
const std::shared_ptr<const IndexBuffer>& SkeletalMesh::GetIndexBuffer() const
|
||||
const std::shared_ptr<IndexBuffer>& SkeletalMesh::GetIndexBuffer() const
|
||||
{
|
||||
return m_indexBuffer;
|
||||
}
|
||||
|
|
@ -58,7 +58,7 @@ namespace Nz
|
|||
OnSubMeshInvalidateAABB(this);
|
||||
}
|
||||
|
||||
void SkeletalMesh::SetIndexBuffer(std::shared_ptr<const IndexBuffer> indexBuffer)
|
||||
void SkeletalMesh::SetIndexBuffer(std::shared_ptr<IndexBuffer> indexBuffer)
|
||||
{
|
||||
m_indexBuffer = std::move(indexBuffer);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -10,8 +10,13 @@
|
|||
|
||||
namespace Nz
|
||||
{
|
||||
SoftwareBuffer::SoftwareBuffer(Buffer* /*parent*/, BufferType /*type*/)
|
||||
SoftwareBuffer::SoftwareBuffer(BufferType type, UInt64 size, BufferUsageFlags usage, const void* initialData) :
|
||||
Buffer(DataStorage::Software, type, size, usage | BufferUsage::DirectMapping | BufferUsage::Dynamic | BufferUsage::PersistentMapping | BufferUsage::Read | BufferUsage::Write),
|
||||
m_mapped(false)
|
||||
{
|
||||
m_buffer = std::make_unique<UInt8[]>(size);
|
||||
if (initialData)
|
||||
std::memcpy(&m_buffer[0], initialData, size);
|
||||
}
|
||||
|
||||
bool SoftwareBuffer::Fill(const void* data, UInt64 offset, UInt64 size)
|
||||
|
|
@ -22,40 +27,12 @@ namespace Nz
|
|||
return true;
|
||||
}
|
||||
|
||||
bool SoftwareBuffer::Initialize(UInt64 size, BufferUsageFlags /*usage*/)
|
||||
{
|
||||
// Protect the allocation to prevent a memory exception to escape the function
|
||||
try
|
||||
{
|
||||
m_buffer.resize(size);
|
||||
}
|
||||
catch (const std::exception& e)
|
||||
{
|
||||
NazaraError("Failed to allocate software buffer (" + std::string(e.what()) + ')');
|
||||
return false;
|
||||
}
|
||||
|
||||
m_mapped = false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
const UInt8* SoftwareBuffer::GetData() const
|
||||
{
|
||||
return m_buffer.data();
|
||||
return &m_buffer[0];
|
||||
}
|
||||
|
||||
UInt64 SoftwareBuffer::GetSize() const
|
||||
{
|
||||
return UInt64(m_buffer.size());
|
||||
}
|
||||
|
||||
DataStorage SoftwareBuffer::GetStorage() const
|
||||
{
|
||||
return DataStorage::Software;
|
||||
}
|
||||
|
||||
void* SoftwareBuffer::Map(BufferAccess /*access*/, UInt64 offset, UInt64 /*size*/)
|
||||
void* SoftwareBuffer::Map(UInt64 offset, UInt64 /*size*/)
|
||||
{
|
||||
NazaraAssert(!m_mapped, "Buffer is already mapped");
|
||||
|
||||
|
|
@ -72,4 +49,9 @@ namespace Nz
|
|||
|
||||
return true;
|
||||
}
|
||||
|
||||
std::shared_ptr<Buffer> SoftwareBufferFactory(BufferType type, UInt64 size, BufferUsageFlags usage, const void* initialData)
|
||||
{
|
||||
return std::make_shared<SoftwareBuffer>(type, size, usage, initialData);
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -10,7 +10,7 @@
|
|||
|
||||
namespace Nz
|
||||
{
|
||||
StaticMesh::StaticMesh(std::shared_ptr<VertexBuffer> vertexBuffer, std::shared_ptr<const IndexBuffer> indexBuffer) :
|
||||
StaticMesh::StaticMesh(std::shared_ptr<VertexBuffer> vertexBuffer, std::shared_ptr<IndexBuffer> indexBuffer) :
|
||||
m_aabb(Nz::Boxf::Zero()),
|
||||
m_indexBuffer(std::move(indexBuffer)),
|
||||
m_vertexBuffer(std::move(vertexBuffer))
|
||||
|
|
@ -37,7 +37,7 @@ namespace Nz
|
|||
bool StaticMesh::GenerateAABB()
|
||||
{
|
||||
// On lock le buffer pour itérer sur toutes les positions et composer notre AABB
|
||||
VertexMapper mapper(*m_vertexBuffer, BufferAccess::ReadOnly);
|
||||
VertexMapper mapper(*m_vertexBuffer);
|
||||
SetAABB(ComputeAABB(mapper.GetComponentPtr<const Vector3f>(VertexComponent::Position), m_vertexBuffer->GetVertexCount()));
|
||||
|
||||
return true;
|
||||
|
|
@ -53,7 +53,7 @@ namespace Nz
|
|||
return AnimationType::Static;
|
||||
}
|
||||
|
||||
const std::shared_ptr<const IndexBuffer>& StaticMesh::GetIndexBuffer() const
|
||||
const std::shared_ptr<IndexBuffer>& StaticMesh::GetIndexBuffer() const
|
||||
{
|
||||
return m_indexBuffer;
|
||||
}
|
||||
|
|
@ -85,7 +85,7 @@ namespace Nz
|
|||
OnSubMeshInvalidateAABB(this);
|
||||
}
|
||||
|
||||
void StaticMesh::SetIndexBuffer(std::shared_ptr<const IndexBuffer> indexBuffer)
|
||||
void StaticMesh::SetIndexBuffer(std::shared_ptr<IndexBuffer> indexBuffer)
|
||||
{
|
||||
m_indexBuffer = std::move(indexBuffer);
|
||||
}
|
||||
|
|
|
|||
|
|
@ -8,9 +8,9 @@
|
|||
|
||||
namespace Nz
|
||||
{
|
||||
TriangleIterator::TriangleIterator(PrimitiveMode primitiveMode, const IndexBuffer& indexBuffer) :
|
||||
TriangleIterator::TriangleIterator(PrimitiveMode primitiveMode, IndexBuffer& indexBuffer) :
|
||||
m_primitiveMode(primitiveMode),
|
||||
m_indexMapper(indexBuffer, BufferAccess::ReadOnly)
|
||||
m_indexMapper(indexBuffer)
|
||||
{
|
||||
m_currentIndex = 3;
|
||||
m_triangleIndices[0] = m_indexMapper.Get(0);
|
||||
|
|
@ -20,9 +20,9 @@ namespace Nz
|
|||
m_indexCount = m_indexMapper.GetIndexCount();
|
||||
}
|
||||
|
||||
TriangleIterator::TriangleIterator(const SubMesh& subMesh) :
|
||||
TriangleIterator::TriangleIterator(SubMesh& subMesh) :
|
||||
m_primitiveMode(subMesh.GetPrimitiveMode()),
|
||||
m_indexMapper(subMesh, BufferAccess::ReadOnly)
|
||||
m_indexMapper(subMesh)
|
||||
{
|
||||
m_currentIndex = 3;
|
||||
m_triangleIndices[0] = m_indexMapper.Get(0);
|
||||
|
|
|
|||
|
|
@ -10,85 +10,54 @@
|
|||
|
||||
namespace Nz
|
||||
{
|
||||
UniformBuffer::UniformBuffer(std::shared_ptr<Buffer> buffer)
|
||||
UniformBuffer::UniformBuffer(std::shared_ptr<Buffer> buffer) :
|
||||
m_buffer(std::move(buffer)),
|
||||
m_startOffset(0)
|
||||
{
|
||||
ErrorFlags(ErrorMode::ThrowException, true);
|
||||
Reset(std::move(buffer));
|
||||
NazaraAssert(m_buffer, "invalid buffer");
|
||||
NazaraAssert(m_buffer->GetType() == BufferType::Uniform, "buffer must be an uniform buffer");
|
||||
|
||||
m_endOffset = m_buffer->GetSize();
|
||||
}
|
||||
|
||||
UniformBuffer::UniformBuffer(std::shared_ptr<Buffer> buffer, UInt32 offset, UInt32 size)
|
||||
UniformBuffer::UniformBuffer(std::shared_ptr<Buffer> buffer, UInt64 offset, UInt64 size) :
|
||||
m_buffer(std::move(buffer)),
|
||||
m_endOffset(size),
|
||||
m_startOffset(offset)
|
||||
{
|
||||
ErrorFlags(ErrorMode::ThrowException, true);
|
||||
Reset(std::move(buffer), offset, size);
|
||||
}
|
||||
|
||||
UniformBuffer::UniformBuffer(UInt32 length, DataStorage storage, BufferUsageFlags usage)
|
||||
UniformBuffer::UniformBuffer(UInt64 size, BufferUsageFlags usage, const BufferFactory& bufferFactory, const void* initialData) :
|
||||
m_endOffset(size),
|
||||
m_startOffset(0)
|
||||
{
|
||||
ErrorFlags(ErrorMode::ThrowException, true);
|
||||
Reset(length, storage, usage);
|
||||
NazaraAssert(size > 0, "invalid size");
|
||||
|
||||
m_buffer = bufferFactory(BufferType::Uniform, size, usage, initialData);
|
||||
}
|
||||
|
||||
bool UniformBuffer::Fill(const void* data, UInt32 offset, UInt32 size)
|
||||
bool UniformBuffer::Fill(const void* data, UInt64 offset, UInt64 size)
|
||||
{
|
||||
NazaraAssert(m_buffer && m_buffer->IsValid(), "Invalid buffer");
|
||||
NazaraAssert(m_buffer, "Invalid buffer");
|
||||
NazaraAssert(m_startOffset + offset + size <= m_endOffset, "Exceeding virtual buffer size");
|
||||
|
||||
return m_buffer->Fill(data, m_startOffset + offset, size);
|
||||
}
|
||||
|
||||
void* UniformBuffer::Map(BufferAccess access, UInt32 offset, UInt32 size)
|
||||
void* UniformBuffer::Map(UInt64 offset, UInt64 size)
|
||||
{
|
||||
NazaraAssert(m_buffer && m_buffer->IsValid(), "Invalid buffer");
|
||||
NazaraAssert(m_buffer, "Invalid buffer");
|
||||
NazaraAssert(m_startOffset + offset + size <= m_endOffset, "Exceeding virtual buffer size");
|
||||
|
||||
return m_buffer->Map(access, offset, size);
|
||||
return m_buffer->Map(m_startOffset + offset, size);
|
||||
}
|
||||
|
||||
void* UniformBuffer::Map(BufferAccess access, UInt32 offset, UInt32 size) const
|
||||
void* UniformBuffer::Map(UInt64 offset, UInt64 size) const
|
||||
{
|
||||
NazaraAssert(m_buffer && m_buffer->IsValid(), "Invalid buffer");
|
||||
NazaraAssert(m_buffer, "Invalid buffer");
|
||||
NazaraAssert(m_startOffset + offset + size <= m_endOffset, "Exceeding virtual buffer size");
|
||||
|
||||
return m_buffer->Map(access, offset, size);
|
||||
}
|
||||
|
||||
void UniformBuffer::Reset()
|
||||
{
|
||||
m_buffer.reset();
|
||||
}
|
||||
|
||||
void UniformBuffer::Reset(std::shared_ptr<Buffer> buffer)
|
||||
{
|
||||
NazaraAssert(buffer && buffer->IsValid(), "Invalid buffer");
|
||||
|
||||
Reset(buffer, 0, buffer->GetSize());
|
||||
}
|
||||
|
||||
void UniformBuffer::Reset(std::shared_ptr<Buffer> buffer, UInt32 offset, UInt32 size)
|
||||
{
|
||||
NazaraAssert(buffer && buffer->IsValid(), "Invalid buffer");
|
||||
NazaraAssert(buffer->GetType() == BufferType::Uniform, "Buffer must be an uniform buffer");
|
||||
NazaraAssert(size > 0, "Invalid size");
|
||||
NazaraAssert(offset + size > buffer->GetSize(), "Virtual buffer exceed buffer bounds");
|
||||
|
||||
m_buffer = buffer;
|
||||
m_endOffset = offset + size;
|
||||
m_startOffset = offset;
|
||||
}
|
||||
|
||||
void UniformBuffer::Reset(UInt32 size, DataStorage storage, BufferUsageFlags usage)
|
||||
{
|
||||
m_endOffset = size;
|
||||
m_startOffset = 0;
|
||||
|
||||
m_buffer = std::make_shared<Buffer>(BufferType::Uniform, m_endOffset, storage, usage);
|
||||
}
|
||||
|
||||
void UniformBuffer::Reset(const UniformBuffer& uniformBuffer)
|
||||
{
|
||||
m_buffer = uniformBuffer.m_buffer;
|
||||
m_endOffset = uniformBuffer.m_endOffset;
|
||||
m_startOffset = uniformBuffer.m_startOffset;
|
||||
return m_buffer->Map(m_startOffset + offset, size);
|
||||
}
|
||||
|
||||
void UniformBuffer::Unmap() const
|
||||
|
|
|
|||
|
|
@ -42,9 +42,6 @@ namespace Nz
|
|||
{
|
||||
ECS::RegisterComponents();
|
||||
|
||||
if (!Buffer::Initialize())
|
||||
throw std::runtime_error("failed to initialize buffers");
|
||||
|
||||
if (!Font::Initialize())
|
||||
throw std::runtime_error("failed to initialize fonts");
|
||||
|
||||
|
|
@ -91,7 +88,6 @@ namespace Nz
|
|||
VertexDeclaration::Uninitialize();
|
||||
PixelFormatInfo::Uninitialize();
|
||||
Font::Uninitialize();
|
||||
Buffer::Uninitialize();
|
||||
}
|
||||
|
||||
AnimationLoader& Utility::GetAnimationLoader()
|
||||
|
|
|
|||
|
|
@ -9,123 +9,94 @@
|
|||
|
||||
namespace Nz
|
||||
{
|
||||
VertexBuffer::VertexBuffer(std::shared_ptr<const VertexDeclaration> vertexDeclaration, std::shared_ptr<Buffer> buffer)
|
||||
VertexBuffer::VertexBuffer(std::shared_ptr<const VertexDeclaration> vertexDeclaration, std::shared_ptr<Buffer> buffer) :
|
||||
m_buffer(std::move(buffer)),
|
||||
m_vertexDeclaration(std::move(vertexDeclaration)),
|
||||
m_startOffset(0)
|
||||
{
|
||||
ErrorFlags(ErrorMode::ThrowException, true);
|
||||
Reset(std::move(vertexDeclaration), std::move(buffer));
|
||||
NazaraAssert(m_buffer, "invalid buffer");
|
||||
NazaraAssert(m_buffer->GetType() == BufferType::Vertex, "buffer must be an vertex buffer");
|
||||
|
||||
m_endOffset = m_buffer->GetSize();
|
||||
m_vertexCount = (m_vertexDeclaration) ? m_endOffset / m_vertexDeclaration->GetStride() : 0;
|
||||
}
|
||||
|
||||
VertexBuffer::VertexBuffer(std::shared_ptr<const VertexDeclaration> vertexDeclaration, std::shared_ptr<Buffer> buffer, std::size_t offset, std::size_t size)
|
||||
VertexBuffer::VertexBuffer(std::shared_ptr<const VertexDeclaration> vertexDeclaration, std::shared_ptr<Buffer> buffer, UInt64 offset, UInt64 size) :
|
||||
m_buffer(std::move(buffer)),
|
||||
m_vertexDeclaration(std::move(vertexDeclaration)),
|
||||
m_endOffset(size),
|
||||
m_startOffset(offset)
|
||||
{
|
||||
ErrorFlags(ErrorMode::ThrowException, true);
|
||||
Reset(std::move(vertexDeclaration), std::move(buffer), offset, size);
|
||||
NazaraAssert(m_buffer, "invalid buffer");
|
||||
NazaraAssert(m_buffer->GetType() == BufferType::Vertex, "buffer must be an vertex buffer");
|
||||
|
||||
m_vertexCount = (m_vertexDeclaration) ? m_endOffset / m_vertexDeclaration->GetStride() : 0;
|
||||
}
|
||||
|
||||
VertexBuffer::VertexBuffer(std::shared_ptr<const VertexDeclaration> vertexDeclaration, std::size_t length, DataStorage storage, BufferUsageFlags usage)
|
||||
VertexBuffer::VertexBuffer(std::shared_ptr<const VertexDeclaration> vertexDeclaration, UInt64 vertexCount, BufferUsageFlags usage, const BufferFactory& bufferFactory, const void* initialData) :
|
||||
m_vertexDeclaration(std::move(vertexDeclaration)),
|
||||
m_startOffset(0),
|
||||
m_vertexCount(vertexCount)
|
||||
{
|
||||
ErrorFlags(ErrorMode::ThrowException, true);
|
||||
Reset(std::move(vertexDeclaration), length, storage, usage);
|
||||
NazaraAssert(m_vertexDeclaration, "invalid vertex declaration");
|
||||
NazaraAssert(vertexCount > 0, "invalid vertex count");
|
||||
|
||||
m_endOffset = vertexCount * m_vertexDeclaration->GetStride();
|
||||
m_buffer = bufferFactory(BufferType::Vertex, m_endOffset, usage, initialData);
|
||||
}
|
||||
|
||||
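The third constructor no longer allocates storage itself: it derives the byte size from the declaration stride and hands creation to the caller-supplied bufferFactory. A condensed view of what it does with its arguments (simplified; member bookkeeping omitted):

// Sketch of the new construction path, assuming a 32-byte stride layout as an example.
UInt64 stride = vertexDeclaration->GetStride();   // e.g. 32 bytes for position + normal + UV
UInt64 byteSize = vertexCount * stride;           // becomes m_endOffset
std::shared_ptr<Buffer> storage = bufferFactory(BufferType::Vertex, byteSize, usage, initialData);
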
bool VertexBuffer::Fill(const void* data, std::size_t startVertex, std::size_t length)
bool VertexBuffer::Fill(const void* data, UInt64 startVertex, UInt64 length)
{
std::size_t stride = static_cast<std::size_t>(m_vertexDeclaration->GetStride());
UInt64 stride = m_vertexDeclaration->GetStride();
return FillRaw(data, startVertex*stride, length*stride);
}

bool VertexBuffer::FillRaw(const void* data, std::size_t offset, std::size_t size)
bool VertexBuffer::FillRaw(const void* data, UInt64 offset, UInt64 size)
{
NazaraAssert(m_buffer && m_buffer->IsValid(), "Invalid buffer");
NazaraAssert(m_buffer, "Invalid buffer");
NazaraAssert(m_startOffset + offset + size <= m_endOffset, "Exceeding virtual buffer size");

return m_buffer->Fill(data, m_startOffset + offset, size);
}

void* VertexBuffer::Map(BufferAccess access, std::size_t startVertex, std::size_t length)
void* VertexBuffer::Map(UInt64 startVertex, UInt64 length)
{
std::size_t stride = static_cast<std::size_t>(m_vertexDeclaration->GetStride());
UInt64 stride = m_vertexDeclaration->GetStride();

return MapRaw(access, startVertex*stride, length*stride);
return MapRaw(startVertex * stride, length * stride);
}

void* VertexBuffer::Map(BufferAccess access, std::size_t startVertex, std::size_t length) const
void* VertexBuffer::Map(UInt64 startVertex, UInt64 length) const
{
NazaraAssert(m_buffer && m_buffer->IsValid(), "Invalid buffer");
NazaraAssert(m_buffer, "Invalid buffer");
NazaraAssert(m_vertexDeclaration, "Invalid vertex declaration");

std::size_t stride = static_cast<std::size_t>(m_vertexDeclaration->GetStride());
UInt64 stride = m_vertexDeclaration->GetStride();

return MapRaw(access, startVertex*stride, length*stride);
return MapRaw(startVertex * stride, length * stride);
}

void* VertexBuffer::MapRaw(BufferAccess access, std::size_t offset, std::size_t size)
void* VertexBuffer::MapRaw(UInt64 offset, UInt64 size)
{
NazaraAssert(m_buffer && m_buffer->IsValid(), "Invalid buffer");
NazaraAssert(m_buffer, "Invalid buffer");
NazaraAssert(m_startOffset + offset + size <= m_endOffset, "Exceeding virtual buffer size");

return m_buffer->Map(access, offset, size);
return m_buffer->Map(offset, size);
}

void* VertexBuffer::MapRaw(BufferAccess access, std::size_t offset, std::size_t size) const
void* VertexBuffer::MapRaw(UInt64 offset, UInt64 size) const
{
NazaraAssert(m_buffer && m_buffer->IsValid(), "Invalid buffer");
NazaraAssert(m_buffer, "Invalid buffer");
NazaraAssert(m_startOffset + offset + size <= m_endOffset, "Exceeding virtual buffer size");

return m_buffer->Map(access, offset, size);
return m_buffer->Map(offset, size);
}

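Fill and Map keep working in vertex units; only the *Raw variants take byte offsets, and the translation is a single multiplication by the declaration's stride. A small worked example (hypothetical call site, assuming a 32-byte stride):

// Writing 10 vertices starting at vertex 5 with a 32-byte stride
// resolves to FillRaw(data, 5 * 32, 10 * 32), i.e. FillRaw(data, 160, 320).
vertexBuffer.Fill(vertices.data(), 5, 10);
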
void VertexBuffer::Reset()
{
m_buffer.reset();
m_vertexDeclaration.reset();
}

void VertexBuffer::Reset(std::shared_ptr<const VertexDeclaration> vertexDeclaration, std::shared_ptr<Buffer> buffer)
{
NazaraAssert(buffer && buffer->IsValid(), "Invalid buffer");
NazaraAssert(buffer->GetType() == BufferType::Vertex, "Buffer must be a vertex buffer");

std::size_t size = buffer->GetSize();
Reset(std::move(vertexDeclaration), std::move(buffer), 0, size);
}

void VertexBuffer::Reset(std::shared_ptr<const VertexDeclaration> vertexDeclaration, std::shared_ptr<Buffer> buffer, std::size_t offset, std::size_t size)
{
NazaraAssert(buffer && buffer->IsValid(), "Invalid buffer");
NazaraAssert(size > 0, "Invalid size");
NazaraAssert(offset + size <= buffer->GetSize(), "Virtual buffer exceeds buffer bounds");

m_buffer = buffer;
m_endOffset = offset + size;
m_startOffset = offset;
m_vertexCount = (vertexDeclaration) ? (size / static_cast<std::size_t>(vertexDeclaration->GetStride())) : 0;
m_vertexDeclaration = vertexDeclaration;
}

void VertexBuffer::Reset(std::shared_ptr<const VertexDeclaration> vertexDeclaration, std::size_t length, DataStorage storage, BufferUsageFlags usage)
{
m_endOffset = length * ((vertexDeclaration) ? static_cast<std::size_t>(vertexDeclaration->GetStride()) : 1);
m_startOffset = 0;
m_vertexCount = length;
m_vertexDeclaration = std::move(vertexDeclaration);

m_buffer = std::make_shared<Buffer>(BufferType::Vertex, m_endOffset, storage, usage);
}

void VertexBuffer::Reset(const VertexBuffer& vertexBuffer)
{
m_buffer = vertexBuffer.m_buffer;
m_endOffset = vertexBuffer.m_endOffset;
m_startOffset = vertexBuffer.m_startOffset;
m_vertexCount = vertexBuffer.m_vertexCount;
m_vertexDeclaration = vertexBuffer.m_vertexDeclaration;
}

void VertexBuffer::SetVertexDeclaration(std::shared_ptr<const VertexDeclaration> vertexDeclaration)
{
NazaraAssert(vertexDeclaration, "Invalid vertex declaration");

m_vertexCount = (m_endOffset - m_startOffset) / static_cast<std::size_t>(vertexDeclaration->GetStride());
m_vertexCount = (m_endOffset - m_startOffset) / vertexDeclaration->GetStride();
m_vertexDeclaration = std::move(vertexDeclaration);
}

@@ -12,7 +12,7 @@

namespace Nz
{
VertexMapper::VertexMapper(SubMesh& subMesh, BufferAccess access)
VertexMapper::VertexMapper(SubMesh& subMesh)
{
ErrorFlags flags(ErrorMode::ThrowException, true);

@@ -39,49 +39,13 @@ namespace Nz
NazaraInternalError("Animation type not handled (0x" + NumberToString(UnderlyingCast(subMesh.GetAnimationType()), 16) + ')');
}

m_mapper.Map(*buffer, access);
m_mapper.Map(*buffer, 0, buffer->GetVertexCount());
}

VertexMapper::VertexMapper(VertexBuffer& vertexBuffer, BufferAccess access)
VertexMapper::VertexMapper(VertexBuffer& vertexBuffer)
{
ErrorFlags flags(ErrorMode::ThrowException, true);
m_mapper.Map(vertexBuffer, access);
}

VertexMapper::VertexMapper(const SubMesh& subMesh, BufferAccess access)
{
ErrorFlags flags(ErrorMode::ThrowException, true);

std::shared_ptr<VertexBuffer> buffer = nullptr;
switch (subMesh.GetAnimationType())
{
case AnimationType::Skeletal:
{
const SkeletalMesh& skeletalMesh = static_cast<const SkeletalMesh&>(subMesh);
buffer = skeletalMesh.GetVertexBuffer();
break;
}

case AnimationType::Static:
{
const StaticMesh& staticMesh = static_cast<const StaticMesh&>(subMesh);
buffer = staticMesh.GetVertexBuffer();
break;
}
}

if (!buffer)
{
NazaraInternalError("Animation type not handled (0x" + NumberToString(UnderlyingCast(subMesh.GetAnimationType()), 16) + ')');
}

m_mapper.Map(*buffer, access);
}

VertexMapper::VertexMapper(const VertexBuffer& vertexBuffer, BufferAccess access)
{
ErrorFlags flags(ErrorMode::ThrowException, true);
m_mapper.Map(vertexBuffer, access);
m_mapper.Map(vertexBuffer, 0, vertexBuffer.GetVertexCount());
}

VertexMapper::~VertexMapper() = default;

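For callers, the mapper constructors lose their BufferAccess parameter and the const overloads go away; the whole vertex range is mapped implicitly. Roughly, at a call site (illustrative; GetComponentPtr shown as the usual accessor, assumed unchanged by this commit):

// Before: Nz::VertexMapper mapper(vertexBuffer, Nz::BufferAccess::ReadWrite);
// After:  the access flag is gone and the full range [0, GetVertexCount()) is mapped.
Nz::VertexMapper mapper(vertexBuffer);
auto positions = mapper.GetComponentPtr<Nz::Vector3f>(Nz::VertexComponent::Position); // assumed accessor
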
@@ -4,6 +4,7 @@

#include <Nazara/VulkanRenderer/VulkanBuffer.hpp>
#include <Nazara/Core/CallOnExit.hpp>
#include <Nazara/VulkanRenderer/VulkanDevice.hpp>
#include <Nazara/VulkanRenderer/Wrapper/CommandBuffer.hpp>
#include <Nazara/VulkanRenderer/Wrapper/QueueHandle.hpp>
#include <vma/vk_mem_alloc.h>

@@ -11,30 +12,11 @@

namespace Nz
{
VulkanBuffer::~VulkanBuffer()
VulkanBuffer::VulkanBuffer(VulkanDevice& device, BufferType type, UInt64 size, BufferUsageFlags usage, const void* initialData) :
RenderBuffer(device, type, size, usage),
m_device(device)
{
vmaDestroyBuffer(m_device.GetMemoryAllocator(), m_buffer, m_allocation);
}

bool VulkanBuffer::Fill(const void* data, UInt64 offset, UInt64 size)
{
void* ptr = Map(BufferAccess::WriteOnly, offset, size);
if (!ptr)
return false;

Nz::CallOnExit unmapOnExit([this]() { Unmap(); });

std::memcpy(ptr, data, size);

return true;
}

bool VulkanBuffer::Initialize(UInt64 size, BufferUsageFlags usage)
{
m_size = size;
m_usage = usage;

VkBufferUsageFlags bufferUsage = ToVulkan(m_type);
VkBufferUsageFlags bufferUsage = ToVulkan(type);

if ((usage & BufferUsage::DirectMapping) == 0)
bufferUsage |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;

@@ -60,27 +42,35 @@ namespace Nz

VkResult result = vmaCreateBuffer(m_device.GetMemoryAllocator(), &createInfo, &allocInfo, &m_buffer, &m_allocation, nullptr);
if (result != VK_SUCCESS)
throw std::runtime_error("failed to allocate buffer: " + TranslateVulkanError(result));

if (initialData)
{
NazaraError("Failed to allocate buffer: " + TranslateVulkanError(result));
return false;
if (!Fill(initialData, 0, size))
throw std::runtime_error("failed to fill buffer");
}
}

VulkanBuffer::~VulkanBuffer()
{
vmaDestroyBuffer(m_device.GetMemoryAllocator(), m_buffer, m_allocation);
}

bool VulkanBuffer::Fill(const void* data, UInt64 offset, UInt64 size)
{
void* ptr = Map(offset, size);
if (!ptr)
return false;

CallOnExit unmapOnExit([this]() { Unmap(); });

std::memcpy(ptr, data, size);
return true;
}

UInt64 VulkanBuffer::GetSize() const
void* VulkanBuffer::Map(UInt64 offset, UInt64 size)
{
return m_size;
}

DataStorage VulkanBuffer::GetStorage() const
{
return DataStorage::Hardware;
}

void* VulkanBuffer::Map(BufferAccess /*access*/, UInt64 offset, UInt64 size)
{
if (m_usage & BufferUsage::DirectMapping)
if (GetUsageFlags() & BufferUsage::DirectMapping)
{
void* mappedPtr;
VkResult result = vmaMapMemory(m_device.GetMemoryAllocator(), m_allocation, &mappedPtr);

@@ -112,13 +102,15 @@ namespace Nz
return nullptr;
}

m_stagingBufferSize = size;

return allocationInfo.pMappedData;
}
}

bool VulkanBuffer::Unmap()
{
if (m_usage & BufferUsage::DirectMapping)
if (GetUsageFlags() & BufferUsage::DirectMapping)
{
vmaUnmapMemory(m_device.GetMemoryAllocator(), m_allocation);
return true;

@@ -129,7 +121,7 @@ namespace Nz
if (!copyCommandBuffer->Begin(VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT))
return false;

copyCommandBuffer->CopyBuffer(m_stagingBuffer, m_buffer, m_size);
copyCommandBuffer->CopyBuffer(m_stagingBuffer, m_buffer, m_stagingBufferSize);
if (!copyCommandBuffer->End())
return false;

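Taken together, the VulkanBuffer hunks describe a two-path upload: DirectMapping buffers are mapped through VMA, everything else goes through a staging buffer whose size is remembered in m_stagingBufferSize and copied over on Unmap(). A hedged outline of the caller-visible flow (not literal engine code):

// Upload outline, as seen from Fill(): Map() hands back either the VMA mapping
// or a staging allocation; Unmap() is where the staging copy gets submitted.
void* ptr = buffer.Map(offset, size);
if (ptr)
{
	std::memcpy(ptr, data, size);   // write into mapped or staging memory
	buffer.Unmap();                 // staging path: CopyBuffer(staging, gpuBuffer, size) + submit
}
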
@@ -72,7 +72,7 @@ namespace Nz
m_currentSubpassIndex = 0;
}

void VulkanCommandBufferBuilder::BindIndexBuffer(const AbstractBuffer& indexBuffer, UInt64 offset)
void VulkanCommandBufferBuilder::BindIndexBuffer(const RenderBuffer& indexBuffer, UInt64 offset)
{
const VulkanBuffer& vkBuffer = static_cast<const VulkanBuffer&>(indexBuffer);

@@ -105,7 +105,7 @@ namespace Nz
m_commandBuffer.BindDescriptorSet(VK_PIPELINE_BIND_POINT_GRAPHICS, vkPipelineLayout.GetPipelineLayout(), set, vkBinding.GetDescriptorSet());
}

void VulkanCommandBufferBuilder::BindVertexBuffer(UInt32 binding, const AbstractBuffer& vertexBuffer, UInt64 offset)
void VulkanCommandBufferBuilder::BindVertexBuffer(UInt32 binding, const RenderBuffer& vertexBuffer, UInt64 offset)
{
const VulkanBuffer& vkBuffer = static_cast<const VulkanBuffer&>(vertexBuffer);

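Both bind methods now accept the backend-agnostic RenderBuffer and downcast internally, which only holds if the buffer was created by the same Vulkan device. A sketch of the implied contract at a call site (names and sizes illustrative):

// The builder expects a RenderBuffer that really is a VulkanBuffer underneath.
std::shared_ptr<Nz::RenderBuffer> vb = device->InstantiateBuffer(Nz::BufferType::Vertex, byteSize, usage, vertexData);
builder.BindVertexBuffer(0, *vb, 0); // safe: *vb was produced by this VulkanDevice
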
@@ -27,9 +27,9 @@ namespace Nz
return m_enabledFeatures;
}

std::shared_ptr<AbstractBuffer> VulkanDevice::InstantiateBuffer(BufferType type)
std::shared_ptr<RenderBuffer> VulkanDevice::InstantiateBuffer(BufferType type, UInt64 size, BufferUsageFlags usageFlags, const void* initialData)
{
return std::make_shared<VulkanBuffer>(*this, type);
return std::make_shared<VulkanBuffer>(*this, type, size, usageFlags, initialData);
}

std::shared_ptr<CommandPool> VulkanDevice::InstantiateCommandPool(QueueType queueType)

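InstantiateBuffer now takes the size, usage flags and optional initial data up front, so the old Initialize()/Fill() two-step disappears from call sites. For example (size and flags illustrative):

// One call creates, sizes and optionally pre-fills the buffer.
std::shared_ptr<Nz::RenderBuffer> ubo = device->InstantiateBuffer(
	Nz::BufferType::Uniform,
	256,                                                      // byte size, illustrative
	Nz::BufferUsage::DeviceLocal | Nz::BufferUsage::Dynamic,  // flags used elsewhere in this commit
	nullptr);                                                 // or a pointer to initial contents
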
@@ -56,7 +56,7 @@ namespace Nz
{
auto pipelineLayout = std::make_shared<VulkanRenderPipelineLayout>();
if (!pipelineLayout->Create(*this, std::move(pipelineLayoutInfo)))
throw std::runtime_error("failed to instanciate vulkan render pipeline layout");
throw std::runtime_error("failed to instantiate vulkan render pipeline layout");

return pipelineLayout;
}

@@ -65,7 +65,7 @@
{
auto stage = std::make_shared<VulkanShaderModule>();
if (!stage->Create(*this, stages, shaderAst, states))
throw std::runtime_error("failed to instanciate vulkan shader module");
throw std::runtime_error("failed to instantiate vulkan shader module");

return stage;
}

@@ -74,7 +74,7 @@
{
auto stage = std::make_shared<VulkanShaderModule>();
if (!stage->Create(*this, stages, lang, source, sourceSize, states))
throw std::runtime_error("failed to instanciate vulkan shader module");
throw std::runtime_error("failed to instantiate vulkan shader module");

return stage;
}