More code cleanup

2020-10-18 21:56:30 +02:00
parent d50a14f9c9
commit 0790e40294
9 changed files with 328 additions and 241 deletions

View File

@@ -5,6 +5,9 @@
*/
#include "Renderer.hpp"
#include "Scene/VulkanGeometry.hpp"
#include "Scene/VulkanNode.hpp"
#include "Scene/VulkanShader.hpp"
#include <stdexcept>
namespace openVulkanoCpp::Vulkan

View File

@@ -0,0 +1,22 @@
/*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/.
*/
#include "ManagedResource.hpp"
#include "../../Base/Logger.hpp"
namespace openVulkanoCpp::Vulkan
{
void MemoryAllocation::HandleChildMappingValidation() const
{
if (IsChildMapped())
{
Logger::RENDER->error("A single memory allocation should only be mapped once! Mapping a single allocation multiple times might not work or might not work reliable with all driver implementations.");
#ifdef CRASH_ON_MULTIPLE_MAPPINGS_TO_SAME_ALLOCATION
throw std::runtime_error("A single memory allocation should only be mapped once!");
#endif
}
}
}
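
For orientation, the check above is meant to run before the allocation is actually mapped. Below is a minimal sketch of how a Map() call might combine it with vk::Device::mapMemory; the `mapped` member and the body are assumptions for illustration only, since the real Map() in ManagedResource.hpp is truncated in this view.

// Hypothetical sketch only -- not the project's actual Map() implementation.
// Assumes MemoryAllocation keeps the vk::Device, its vk::DeviceMemory and a cached
// 'mapped' pointer; HandleChildMappingValidation() logs (or throws, when
// CRASH_ON_MULTIPLE_MAPPINGS_TO_SAME_ALLOCATION is defined) on a repeated mapping.
void* MemoryAllocation::Map()
{
    HandleChildMappingValidation();
    if (!mapped) mapped = device.mapMemory(memory, 0, size); // map the whole allocation once
    return mapped;
}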

View File

@@ -1,3 +1,9 @@
/*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/.
*/
#pragma once
#define CRASH_ON_MULTIPLE_MAPPINGS_TO_SAME_ALLOCATION
@@ -27,21 +33,12 @@ namespace openVulkanoCpp
{
}
size_t FreeSpace() const
[[nodiscard]] size_t FreeSpace() const
{
return size - used;
}
void HandleChildMappingValidation() const
{
if (IsChildMapped())
{
Logger::RENDER->error("A single memory allocation should only be mapped once! Mapping a single allocation multiple times might not work or might not work reliable with all driver implementations.");
#ifdef CRASH_ON_MULTIPLE_MAPPINGS_TO_SAME_ALLOCATION
throw std::runtime_error("A single memory allocation should only be mapped once!");
#endif
}
}
void HandleChildMappingValidation() const;
void* Map()
{

View File

@@ -0,0 +1,223 @@
/*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at https://mozilla.org/MPL/2.0/.
*/
#include "ResourceManager.hpp"
#include "../Context.hpp"
#include "../Scene/VulkanShader.hpp"
#include "../Scene/VulkanGeometry.hpp"
#include "../Scene/VulkanNode.hpp"
namespace openVulkanoCpp::Vulkan
{
ResourceManager::ResourceManager() = default;
ResourceManager::~ResourceManager() noexcept
{
if (device) ResourceManager::Close();
}
void ResourceManager::Init(Context* context, int buffers)
{
this->context = context;
this->device = context->device->device;
this->buffers = buffers;
uniformBufferAlignment = context->device->properties.limits.minUniformBufferOffsetAlignment;
cmdPools = new vk::CommandPool[buffers];
cmdBuffers = new vk::CommandBuffer[buffers];
semaphores = new vk::Semaphore[buffers];
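// One transfer command pool, one primary command buffer and one semaphore per buffered frame;
// EndFrame() signals the current frame's semaphore so later submits can wait for finished uploads.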
for (int i = 0; i < buffers; i++)
{
cmdPools[i] = this->device.createCommandPool({ {}, context->device->queueIndices.transfer });
cmdBuffers[i] = this->device.allocateCommandBuffers({ cmdPools[i], vk::CommandBufferLevel::ePrimary, 1 })[0];
semaphores[i] = this->device.createSemaphore({});
}
toFree.resize(buffers);
transferQueue = this->device.getQueue(context->device->queueIndices.transfer, 0);
}
void ResourceManager::Close()
{
transferQueue.waitIdle();
for (int i = 0; i < buffers; i++)
{
device.freeCommandBuffers(cmdPools[i], 1, &cmdBuffers[i]);
device.destroyCommandPool(cmdPools[i]);
}
for (auto shader : shaders)
{
shader->Close();
}
cmdBuffers = nullptr;
cmdPools = nullptr;
device = nullptr;
}
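// Per-frame flow: StartFrame() releases buffers parked for this slot, resets its command pool and starts
// recording; the Prepare*/Create* calls below record their uploads into that command buffer; EndFrame()
// submits it on the transfer queue and returns the semaphore it signals.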
void ResourceManager::StartFrame(uint64_t frameId)
{
currentBuffer = frameId;
FreeBuffers();
device.resetCommandPool(cmdPools[currentBuffer], {});
cmdBuffers[currentBuffer].begin({ vk::CommandBufferUsageFlagBits::eOneTimeSubmit });
}
vk::Semaphore ResourceManager::EndFrame()
{
cmdBuffers[currentBuffer].end();
vk::SubmitInfo si = { 0, nullptr, nullptr, 1, &cmdBuffers[currentBuffer], 1, &semaphores[currentBuffer] };
transferQueue.submit(1, &si, vk::Fence());
return semaphores[currentBuffer];
}
void ResourceManager::Resize()
{
for (auto shader : shaders)
{
shader->Resize();
}
}
void ResourceManager::PrepareGeometry(Scene::Geometry* geometry)
{
const std::unique_lock lock(mutex);
if(!geometry->renderGeo)
{
VulkanGeometry* vkGeometry = new VulkanGeometry();
ManagedBuffer* vertexBuffer = CreateDeviceOnlyBufferWithData(sizeof(Vertex) * geometry->GetVertexCount(), vk::BufferUsageFlagBits::eVertexBuffer, geometry->GetVertices());
ManagedBuffer* indexBuffer = CreateDeviceOnlyBufferWithData(Utils::EnumAsInt(geometry->indexType) * geometry->GetIndexCount(), vk::BufferUsageFlagBits::eIndexBuffer, geometry->GetIndices());
vkGeometry->Init(geometry, vertexBuffer->buffer, indexBuffer->buffer);
geometry->renderGeo = vkGeometry;
}
}
void ResourceManager::PrepareMaterial(Scene::Material* material)
{
const std::unique_lock lock(mutex);
if(!material->shader->renderShader)
{
material->shader->renderShader = CreateShader(material->shader);
}
}
void ResourceManager::PrepareNode(Scene::Node* node)
{
const std::unique_lock lock(mutex);
if (!node->renderNode)
{
UniformBuffer* uBuffer = new UniformBuffer();
ManagedBuffer* buffer;
VulkanNode* vkNode;
const vk::DeviceSize allocSize = Utils::Align(sizeof(glm::mat4x4), uniformBufferAlignment);
if (node->GetUpdateFrequency() != Scene::UpdateFrequency::Never)
{
vkNode = new VulkanNodeDynamic();
uint32_t imgs = context->swapChain.GetImageCount();
buffer = CreateBuffer(imgs * allocSize, vk::BufferUsageFlagBits::eUniformBuffer, vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostVisible);
buffer->Map();
}
else
{
vkNode = new VulkanNode();
buffer = CreateDeviceOnlyBufferWithData(sizeof(glm::mat4), vk::BufferUsageFlagBits::eUniformBuffer, &node->worldMat);
}
uBuffer->Init(buffer, allocSize, &context->pipeline.descriptorSetLayout, context->pipeline.pipelineLayout);
vkNode->Init(node, uBuffer);
node->renderNode = vkNode;
}
}
void ResourceManager::RemoveShader(VulkanShader* shader)
{
Utils::Remove(shaders, shader);
}
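// Deferred destruction: FreeBuffer() only parks a buffer in the current frame's list. DoFreeBuffer() runs
// via FreeBuffers() the next time this slot becomes current, either reclaiming the allocation's space
// (IsLast) or pushing the buffer onto recycleBuffers.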
void ResourceManager::FreeBuffer(ManagedBuffer* buffer)
{
toFree[currentBuffer].push_back(buffer);
}
void ResourceManager::DoFreeBuffer(ManagedBuffer* buffer)
{
if (buffer->IsLast())
{
device.destroyBuffer(buffer->buffer);
buffer->allocation->used -= buffer->size;
}
else
{
recycleBuffers.push_back(buffer);
}
}
void ResourceManager::FreeBuffers()
{
for (auto& i : toFree[currentBuffer])
{
DoFreeBuffer(i);
}
toFree[currentBuffer].clear();
}
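// Staging upload: fill a host-visible staging buffer, record a buffer-to-buffer copy into the
// device-local target in this frame's transfer command buffer, then park the staging buffer so it is
// freed on a later frame.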
ManagedBuffer* ResourceManager::CreateDeviceOnlyBufferWithData(vk::DeviceSize size, vk::BufferUsageFlagBits usage, void* data)
{
ManagedBuffer* target = CreateBuffer(size, usage | vk::BufferUsageFlagBits::eTransferDst, vk::MemoryPropertyFlagBits::eDeviceLocal);
ManagedBuffer* uploadBuffer = CreateBuffer(size, vk::BufferUsageFlagBits::eTransferSrc, vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostVisible);
uploadBuffer->Copy(data, size, 0);
RecordCopy(uploadBuffer->buffer, target->buffer, size);
FreeBuffer(uploadBuffer);
return target;
}
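// Linear sub-allocation: the new buffer is bound at the owning MemoryAllocation's current 'used' offset,
// which then grows by the driver-reported memory size. Note that memoryRequirements.alignment is not
// applied to that offset here.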
ManagedBuffer* ResourceManager::CreateBuffer(vk::DeviceSize size, const vk::BufferUsageFlags& usage, const vk::MemoryPropertyFlags& properties)
{
size = Utils::Align(size, 16);
const vk::BufferCreateInfo bufferCreateInfo = { {}, size, usage, vk::SharingMode::eExclusive };
vk::Buffer buffer = device.createBuffer(bufferCreateInfo);
const vk::MemoryRequirements memoryRequirements = device.getBufferMemoryRequirements(buffer);
uint32_t memtype = context->device->GetMemoryType(memoryRequirements.memoryTypeBits, properties);
if (memoryRequirements.size != size) Logger::DATA->warn("Memory Requirement Size ({0}) != Size ({1})", memoryRequirements.size, size);
MemoryAllocation* allocation = GetFreeMemoryAllocation(memoryRequirements.size, memtype);
uint32_t offset = allocation->used;
device.bindBufferMemory(buffer, allocation->memory, offset);
allocation->used += memoryRequirements.size;
return new ManagedBuffer{ allocation, offset, size, buffer, usage, properties, nullptr };
}
MemoryAllocation* ResourceManager::CreateMemoryAllocation(size_t size, uint32_t type, bool addToCache)
{
MemoryAllocation* alloc = new MemoryAllocation(size, type, device);
const vk::MemoryAllocateInfo allocInfo = { size, type };
alloc->memory = device.allocateMemory(allocInfo);
if (addToCache) allocations.push_back(alloc);
return alloc;
}
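// First-fit search over the cached allocations of the requested memory type; if none has enough free
// space left, a new 256 MiB block is allocated and added to the cache.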
MemoryAllocation* ResourceManager::GetFreeMemoryAllocation(size_t size, uint32_t type, bool createIfAllFull)
{
MemoryAllocation* alloc = nullptr;
for (MemoryAllocation* allocation : allocations)
{
if (allocation->type == type && allocation->FreeSpace() >= size)
{
alloc = allocation;
break;
}
}
if(!alloc && createIfAllFull) alloc = CreateMemoryAllocation(256 * 1024 * 1024, type, true);
if(alloc) lastAllocation = alloc;
return alloc;
}
VulkanShader* ResourceManager::CreateShader(Scene::Shader* shader)
{
VulkanShader* vkShader = new VulkanShader();
vkShader->Init(context, shader, this);
shaders.push_back(vkShader);
return vkShader;
}
}
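
Taken together, the manager is driven once per frame. The sketch below shows the call pattern a renderer might use, assuming a fixed number of buffered frames; everything outside the ResourceManager API shown above is a placeholder.

// Illustrative only -- not part of this commit. Drives the ResourceManager for one frame and hands
// back the semaphore the graphics submission should wait on (e.g. at the eVertexInput stage).
namespace openVulkanoCpp
{
    vk::Semaphore RecordUploads(Vulkan::ResourceManager& rm, uint64_t frameId, int bufferCount,
                                Scene::Geometry* geo, Scene::Material* mat, Scene::Node* node)
    {
        rm.StartFrame(frameId % bufferCount); // reuse the slot; frees buffers parked when it was last current
        rm.PrepareGeometry(geo);              // first use: create and upload vertex/index buffers
        rm.PrepareMaterial(mat);              // first use: create the shader
        rm.PrepareNode(node);                 // first use: create the per-node uniform buffer
        return rm.EndFrame();                 // submit the recorded transfers
    }
}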

View File

@@ -7,16 +7,20 @@
#pragma once
#include "vulkan/vulkan.hpp"
#include "../Device.hpp"
#include "../../Base/ICloseable.hpp"
#include "../Scene/VulkanShader.hpp"
#include "IShaderOwner.hpp"
#include "../Scene/VulkanGeometry.hpp"
#include "ManagedResource.hpp"
#include "../Scene/VulkanNode.hpp"
#include <mutex>
namespace openVulkanoCpp
{
namespace Scene
{
class Node;
class Geometry;
class Material;
}
namespace Vulkan
{
class ResourceManager : virtual public ICloseable, virtual public IShaderOwner
@@ -38,226 +42,50 @@ namespace openVulkanoCpp
int buffers = -1, currentBuffer = -1;
public:
ResourceManager() = default;
virtual ~ResourceManager() { if (device) ResourceManager::Close(); }
ResourceManager();
virtual ~ResourceManager() noexcept;
void Init(Context* context, int buffers = 2)
{
this->context = context;
this->device = context->device->device;
this->buffers = buffers;
void Init(Context* context, int buffers = 2);
uniformBufferAlignment = context->device->properties.limits.minUniformBufferOffsetAlignment;
void Close() override;
cmdPools = new vk::CommandPool[buffers];
cmdBuffers = new vk::CommandBuffer[buffers];
semaphores = new vk::Semaphore[buffers];
for (int i = 0; i < buffers; i++)
{
cmdPools[i] = this->device.createCommandPool({ {}, context->device->queueIndices.transfer });
cmdBuffers[i] = this->device.allocateCommandBuffers({ cmdPools[i], vk::CommandBufferLevel::ePrimary, 1 })[0];
semaphores[i] = this->device.createSemaphore({});
}
toFree.resize(buffers);
void StartFrame(uint64_t frameId);
transferQueue = this->device.getQueue(context->device->queueIndices.transfer, 0);
}
vk::Semaphore EndFrame();
void Close() override
{
transferQueue.waitIdle();
for (int i = 0; i < buffers; i++)
{
device.freeCommandBuffers(cmdPools[i], 1, &cmdBuffers[i]);
device.destroyCommandPool(cmdPools[i]);
}
for (auto shader : shaders)
{
shader->Close();
}
cmdBuffers = nullptr;
cmdPools = nullptr;
device = nullptr;
}
void Resize();
void StartFrame(uint64_t frameId)
{
currentBuffer = frameId;
FreeBuffers();
device.resetCommandPool(cmdPools[currentBuffer], {});
cmdBuffers[currentBuffer].begin({ vk::CommandBufferUsageFlagBits::eOneTimeSubmit });
}
void PrepareGeometry(Scene::Geometry* geometry);
vk::Semaphore EndFrame()
{
cmdBuffers[currentBuffer].end();
vk::SubmitInfo si = { 0, nullptr, nullptr, 1, &cmdBuffers[currentBuffer], 1, &semaphores[currentBuffer] };
transferQueue.submit(1, &si, vk::Fence());
return semaphores[currentBuffer];
}
void PrepareMaterial(Scene::Material* material);
void Resize()
{
for (auto shader : shaders)
{
shader->Resize();
}
}
void PrepareNode(Scene::Node* node);
void PrepareGeometry(Scene::Geometry* geometry)
{
mutex.lock();
if(!geometry->renderGeo)
{
VulkanGeometry* vkGeometry = new VulkanGeometry();
ManagedBuffer* vertexBuffer = CreateDeviceOnlyBufferWithData(sizeof(Vertex) * geometry->GetVertexCount(), vk::BufferUsageFlagBits::eVertexBuffer, geometry->GetVertices());
ManagedBuffer* indexBuffer = CreateDeviceOnlyBufferWithData(Utils::EnumAsInt(geometry->indexType) * geometry->GetIndexCount(), vk::BufferUsageFlagBits::eIndexBuffer, geometry->GetIndices());
vkGeometry->Init(geometry, vertexBuffer->buffer, indexBuffer->buffer);
geometry->renderGeo = vkGeometry;
}
mutex.unlock();
}
void PrepareMaterial(Scene::Material* material)
{
mutex.lock();
if(!material->shader->renderShader)
{
material->shader->renderShader = CreateShader(material->shader);
}
mutex.unlock();
}
void PrepareNode(Scene::Node* node)
{
mutex.lock();
if (!node->renderNode)
{
UniformBuffer* uBuffer = new UniformBuffer();
ManagedBuffer* buffer;
VulkanNode* vkNode;
const vk::DeviceSize allocSize = aligned(sizeof(glm::mat4x4), uniformBufferAlignment);
if (node->GetUpdateFrequency() != Scene::UpdateFrequency::Never)
{
vkNode = new VulkanNodeDynamic();
uint32_t imgs = context->swapChain.GetImageCount();
buffer = CreateBuffer(imgs * allocSize, vk::BufferUsageFlagBits::eUniformBuffer, vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostVisible);
buffer->Map();
}
else
{
vkNode = new VulkanNode();
buffer = CreateDeviceOnlyBufferWithData(sizeof(glm::mat4), vk::BufferUsageFlagBits::eUniformBuffer, &node->worldMat);
}
uBuffer->Init(buffer, allocSize, &context->pipeline.descriptorSetLayout, context->pipeline.pipelineLayout);
vkNode->Init(node, uBuffer);
node->renderNode = vkNode;
}
mutex.unlock();
}
void RemoveShader(VulkanShader* shader) override
{
Utils::Remove(shaders, shader);
}
void RemoveShader(VulkanShader* shader) override;
protected: // Allocation management
static vk::DeviceSize aligned(vk::DeviceSize size, vk::DeviceSize byteAlignment)
{
return (size + byteAlignment - 1) & ~(byteAlignment - 1);
}
void FreeBuffer(ManagedBuffer* buffer);
void FreeBuffer(ManagedBuffer* buffer)
{
toFree[currentBuffer].push_back(buffer);
}
void DoFreeBuffer(ManagedBuffer* buffer);
void DoFreeBuffer(ManagedBuffer* buffer)
{
if (buffer->IsLast())
{
device.destroyBuffer(buffer->buffer);
buffer->allocation->used -= buffer->size;
}
else
{
recycleBuffers.push_back(buffer);
}
}
void FreeBuffers();
void FreeBuffers()
{
for (auto& i : toFree[currentBuffer])
{
DoFreeBuffer(i);
}
toFree[currentBuffer].clear();
}
ManagedBuffer* CreateDeviceOnlyBufferWithData(vk::DeviceSize size, vk::BufferUsageFlagBits usage, void* data);
ManagedBuffer* CreateDeviceOnlyBufferWithData(vk::DeviceSize size, vk::BufferUsageFlagBits usage, void* data)
{
ManagedBuffer* target = CreateBuffer(size, usage | vk::BufferUsageFlagBits::eTransferDst, vk::MemoryPropertyFlagBits::eDeviceLocal);
ManagedBuffer* uploadBuffer = CreateBuffer(size, vk::BufferUsageFlagBits::eTransferSrc, vk::MemoryPropertyFlagBits::eHostCoherent | vk::MemoryPropertyFlagBits::eHostVisible);
uploadBuffer->Copy(data, size, 0);
RecordCopy(uploadBuffer->buffer, target->buffer, size);
FreeBuffer(uploadBuffer);
return target;
}
void RecordCopy(vk::Buffer src, vk::Buffer dest, vk::DeviceSize size) const
inline void RecordCopy(vk::Buffer src, vk::Buffer dest, vk::DeviceSize size) const
{
vk::BufferCopy copyRegion = { 0, 0, size };
cmdBuffers[currentBuffer].copyBuffer(src, dest, 1, &copyRegion);
}
ManagedBuffer* CreateBuffer(vk::DeviceSize size, const vk::BufferUsageFlags& usage, const vk::MemoryPropertyFlags& properties)
{
size = aligned(size, uniformBufferAlignment);
const vk::BufferCreateInfo bufferCreateInfo = { {}, size, usage, vk::SharingMode::eExclusive };
vk::Buffer buffer = device.createBuffer(bufferCreateInfo);
const vk::MemoryRequirements memoryRequirements = device.getBufferMemoryRequirements(buffer);
uint32_t memtype = context->device->GetMemoryType(memoryRequirements.memoryTypeBits, properties);
if (memoryRequirements.size != size) Logger::DATA->warn("Memory Requirement Size ({0}) != Size ({1})", memoryRequirements.size, size);
MemoryAllocation* allocation = GetFreeMemoryAllocation(memoryRequirements.size, memtype);
uint32_t offset = allocation->used;
device.bindBufferMemory(buffer, allocation->memory, offset);
allocation->used += memoryRequirements.size;
return new ManagedBuffer{ allocation, offset, size, buffer, usage, properties, nullptr };
}
ManagedBuffer* CreateBuffer(vk::DeviceSize size, const vk::BufferUsageFlags& usage, const vk::MemoryPropertyFlags& properties);
MemoryAllocation* CreateMemoryAllocation(size_t size, uint32_t type, bool addToCache = true)
{
MemoryAllocation* alloc = new MemoryAllocation(size, type, device);
const vk::MemoryAllocateInfo allocInfo = { size, type };
alloc->memory = device.allocateMemory(allocInfo);
if (addToCache) allocations.push_back(alloc);
return alloc;
}
MemoryAllocation* CreateMemoryAllocation(size_t size, uint32_t type, bool addToCache = true);
MemoryAllocation* GetFreeMemoryAllocation(size_t size, uint32_t type, bool createIfAllFull = true)
{
MemoryAllocation* alloc = nullptr;
for (MemoryAllocation* allocation : allocations)
{
if (allocation->type == type && allocation->FreeSpace() >= size)
{
alloc = allocation;
break;
}
}
if(!alloc && createIfAllFull) alloc = CreateMemoryAllocation(256 * 1024 * 1024, type, true);
if(alloc) lastAllocation = alloc;
return alloc;
}
MemoryAllocation* GetFreeMemoryAllocation(size_t size, uint32_t type, bool createIfAllFull = true);
public:
VulkanShader* CreateShader(Scene::Shader* shader)
{
VulkanShader* vkShader = new VulkanShader();
vkShader->Init(context, shader, this);
shaders.push_back(vkShader);
return vkShader;
}
VulkanShader* CreateShader(Scene::Shader* shader);
};
}
}
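
The inline aligned() helper shown above is gone; both the header's old CreateBuffer and the new ResourceManager.cpp round sizes via Utils::Align instead. Below is a sketch of that helper under the assumption that it keeps the same power-of-two rounding, with a worked example.

// Assumed shape of Utils::Align (not shown in this commit): round size up to the next multiple of
// byteAlignment. The bit trick requires byteAlignment to be a power of two, which holds for
// minUniformBufferOffsetAlignment. Example: Align(100, 64) == 128, Align(256, 64) == 256.
constexpr vk::DeviceSize Align(vk::DeviceSize size, vk::DeviceSize byteAlignment)
{
    return (size + byteAlignment - 1) & ~(byteAlignment - 1);
}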

View File

@@ -6,8 +6,9 @@
#include "VulkanShader.hpp"
#include "../Context.hpp"
#include "../Device.hpp"
#include "../../Scene/Vertex.hpp"
#include "../../Scene/Shader.hpp"
#include "../Resources/IShaderOwner.hpp"
namespace openVulkanoCpp::Vulkan
{

View File

@@ -7,37 +7,45 @@
#pragma once
#include <vulkan/vulkan.hpp>
#include "../../Scene/Shader.hpp"
#include "../../Base/ICloseable.hpp"
#include "../Resources/IShaderOwner.hpp"
#include "IRecordable.hpp"
namespace openVulkanoCpp::Vulkan
namespace openVulkanoCpp
{
class Context;
struct VulkanShader final : virtual public ICloseable, virtual public IRecordable
namespace Scene
{
Scene::Shader* shader = nullptr;
vk::Device device;
vk::ShaderModule shaderModuleVertex, shaderModuleFragment;
vk::Pipeline pipeline;
IShaderOwner* owner;
Context* context;
class Shader;
}
VulkanShader() = default;
~VulkanShader() override { if (shader) VulkanShader::Close(); }
namespace Vulkan
{
class Context;
class IShaderOwner;
void Init(Context* context, Scene::Shader* shader, IShaderOwner* owner);
struct VulkanShader final : virtual public ICloseable, virtual public IRecordable
{
Scene::Shader* shader = nullptr;
vk::Device device;
vk::ShaderModule shaderModuleVertex, shaderModuleFragment;
vk::Pipeline pipeline;
IShaderOwner* owner;
Context* context;
void Resize();
VulkanShader() = default;
void Record(vk::CommandBuffer& cmdBuffer, uint32_t bufferId) override;
~VulkanShader() override
{ if (shader) VulkanShader::Close(); }
void Close() override;
void Init(Context* context, Scene::Shader* shader, IShaderOwner* owner);
private:
void BuildPipeline();
};
void Resize();
}
void Record(vk::CommandBuffer& cmdBuffer, uint32_t bufferId) override;
void Close() override;
private:
void BuildPipeline();
};
}
}