-
Tobias Frisch authored
Signed-off-by:
Tobias Frisch <tfrisch@uni-koblenz.de>
Tobias Frisch authored
Signed-off-by:
Tobias Frisch <tfrisch@uni-koblenz.de>
Context.cpp 11.04 KiB
#include "vkcv/Context.hpp"

#include "vkcv/Core.hpp"
#include "vkcv/Window.hpp"

#include <algorithm>
#include <cstring>
namespace vkcv
{
Context::Context(Context &&other) noexcept:
m_Instance(other.m_Instance),
m_PhysicalDevice(other.m_PhysicalDevice),
m_Device(other.m_Device),
m_FeatureManager(std::move(other.m_FeatureManager)),
m_QueueManager(std::move(other.m_QueueManager)),
m_Allocator(other.m_Allocator)
{
other.m_Instance = nullptr;
other.m_PhysicalDevice = nullptr;
other.m_Device = nullptr;
other.m_Allocator = nullptr;
}
Context & Context::operator=(Context &&other) noexcept
{
m_Instance = other.m_Instance;
m_PhysicalDevice = other.m_PhysicalDevice;
m_Device = other.m_Device;
m_FeatureManager = std::move(other.m_FeatureManager);
m_QueueManager = std::move(other.m_QueueManager);
m_Allocator = other.m_Allocator;
other.m_Instance = nullptr;
other.m_PhysicalDevice = nullptr;
other.m_Device = nullptr;
other.m_Allocator = nullptr;
return *this;
}
Context::Context(vk::Instance instance,
vk::PhysicalDevice physicalDevice,
vk::Device device,
FeatureManager&& featureManager,
QueueManager&& queueManager,
vma::Allocator&& allocator) noexcept :
m_Instance(instance),
m_PhysicalDevice(physicalDevice),
m_Device(device),
m_FeatureManager(std::move(featureManager)),
m_QueueManager(std::move(queueManager)),
m_Allocator(allocator)
{}
Context::~Context() noexcept
{
m_Allocator.destroy();
m_Device.destroy();
m_Instance.destroy();
}
const vk::Instance &Context::getInstance() const
{
return m_Instance;
}
const vk::PhysicalDevice &Context::getPhysicalDevice() const
{
return m_PhysicalDevice;
}
const vk::Device &Context::getDevice() const
{
return m_Device;
}
const FeatureManager& Context::getFeatureManager() const {
return m_FeatureManager;
}
const QueueManager& Context::getQueueManager() const {
return m_QueueManager;
}
const vma::Allocator& Context::getAllocator() const {
return m_Allocator;
}
/**
* @brief The physical device is evaluated by three categories:
* discrete GPU vs. integrated GPU, amount of queues and its abilities, and VRAM.physicalDevice.
* @param physicalDevice The physical device
* @return Device score as integer
*/
int deviceScore(const vk::PhysicalDevice& physicalDevice)
{
int score = 0;
vk::PhysicalDeviceProperties properties = physicalDevice.getProperties();
std::vector<vk::QueueFamilyProperties> qFamilyProperties = physicalDevice.getQueueFamilyProperties();
// for every queue family compute queue flag bits and the amount of queues
for (const auto& qFamily : qFamilyProperties) {
uint32_t qCount = qFamily.queueCount;
uint32_t bitCount = (static_cast<uint32_t>(qFamily.queueFlags & vk::QueueFlagBits::eCompute) != 0)
+ (static_cast<uint32_t>(qFamily.queueFlags & vk::QueueFlagBits::eGraphics) != 0)
+ (static_cast<uint32_t>(qFamily.queueFlags & vk::QueueFlagBits::eTransfer) != 0)
+ (static_cast<uint32_t>(qFamily.queueFlags & vk::QueueFlagBits::eSparseBinding) != 0);
score += static_cast<int>(qCount * bitCount);
}
// compute the VRAM of the physical device
vk::PhysicalDeviceMemoryProperties memoryProperties = physicalDevice.getMemoryProperties();
auto vram = static_cast<int>(memoryProperties.memoryHeaps[0].size / static_cast<uint32_t>(1E9));
score *= vram;
if (properties.deviceType == vk::PhysicalDeviceType::eDiscreteGpu) {
score *= 2;
}
else if (properties.deviceType != vk::PhysicalDeviceType::eIntegratedGpu) {
score = -1;
}
return score;
}
/**
* @brief All existing physical devices will be evaluated by deviceScore.
* @param instance The instance
* @param physicalDevice The optimal physical device
* @return Returns if a suitable GPU is found as physical device
* @see Context.deviceScore
*/
bool pickPhysicalDevice(const vk::Instance& instance, vk::PhysicalDevice& physicalDevice)
{
const std::vector<vk::PhysicalDevice>& devices = instance.enumeratePhysicalDevices();
if (devices.empty()) {
vkcv_log(LogLevel::ERROR, "Failed to find GPUs with Vulkan support");
return false;
}
int max_score = -1;
for (const auto& device : devices) {
int score = deviceScore(device);
if (score > max_score) {
max_score = score;
physicalDevice = device;
}
}
if (max_score == -1) {
vkcv_log(LogLevel::ERROR, "Failed to find a suitable GPU");
return false;
} else {
return true;
}
}
/**
* @brief Check whether all string occurrences in "check" are contained in "supported"
* @param supported The const vector const char* reference used to compare entries in "check"
* @param check The const vector const char* reference elements to be checked by "supported"
* @return True, if all elements in "check" are supported (contained in supported)
*/
bool checkSupport(const std::vector<const char*>& supported, const std::vector<const char*>& check)
{
for (auto checkElem : check) {
bool found = false;
for (auto supportedElem : supported) {
if (strcmp(supportedElem, checkElem) == 0) {
found = true;
break;
}
}
if (!found)
return false;
}
return true;
}
std::vector<std::string> getRequiredExtensions() {
std::vector<std::string> extensions = Window::getExtensions();
#ifndef NDEBUG
extensions.emplace_back(VK_EXT_DEBUG_UTILS_EXTENSION_NAME);
#endif
return extensions;
}
Context Context::create(const char *applicationName,
uint32_t applicationVersion,
const std::vector<vk::QueueFlagBits>& queueFlags,
const Features& features,
const std::vector<const char*>& instanceExtensions) {
// check for layer support
const std::vector<vk::LayerProperties>& layerProperties = vk::enumerateInstanceLayerProperties();
std::vector<const char*> supportedLayers;
supportedLayers.reserve(layerProperties.size());
for (auto& elem : layerProperties) {
supportedLayers.push_back(elem.layerName);
}
// if in debug mode, check if validation layers are supported. Enable them if supported
#ifndef NDEBUG
std::vector<const char*> validationLayers = {
"VK_LAYER_KHRONOS_validation"
};
if (!checkSupport(supportedLayers, validationLayers)) {
throw std::runtime_error("Validation layers requested but not available!");
}
#endif
// check for instance extension support
std::vector<vk::ExtensionProperties> instanceExtensionProperties = vk::enumerateInstanceExtensionProperties();
std::vector<const char*> supportedExtensions;
supportedExtensions.reserve(instanceExtensionProperties.size());
for (auto& elem : instanceExtensionProperties) {
supportedExtensions.push_back(elem.extensionName);
}
// for GLFW: get all required extensions
auto requiredStrings = getRequiredExtensions();
std::vector<const char*> requiredExtensions;
for (const auto& extension : requiredStrings) {
requiredExtensions.push_back(extension.c_str());
}
requiredExtensions.insert(requiredExtensions.end(), instanceExtensions.begin(), instanceExtensions.end());
if (!checkSupport(supportedExtensions, requiredExtensions)) {
throw std::runtime_error("The requested instance extensions are not supported!");
}
const vk::ApplicationInfo applicationInfo(
applicationName,
applicationVersion,
VKCV_FRAMEWORK_NAME,
VKCV_FRAMEWORK_VERSION,
VK_HEADER_VERSION_COMPLETE
);
vk::InstanceCreateInfo instanceCreateInfo(
vk::InstanceCreateFlags(),
&applicationInfo,
0,
nullptr,
static_cast<uint32_t>(requiredExtensions.size()),
requiredExtensions.data()
);
#ifndef NDEBUG
instanceCreateInfo.enabledLayerCount = static_cast<uint32_t>(validationLayers.size());
instanceCreateInfo.ppEnabledLayerNames = validationLayers.data();
#endif
vk::Instance instance = vk::createInstance(instanceCreateInfo);
std::vector<vk::PhysicalDevice> physicalDevices = instance.enumeratePhysicalDevices();
vk::PhysicalDevice physicalDevice;
if (!pickPhysicalDevice(instance, physicalDevice)) {
throw std::runtime_error("Picking suitable GPU as physical device failed!");
}
FeatureManager featureManager (physicalDevice);
#ifdef __APPLE__
featureManager.useExtension("VK_KHR_portability_subset", true);
#endif
if (featureManager.useExtension(VK_KHR_SHADER_FLOAT16_INT8_EXTENSION_NAME, false)) {
featureManager.useFeatures<vk::PhysicalDeviceShaderFloat16Int8Features>(
[](vk::PhysicalDeviceShaderFloat16Int8Features& features) {
features.setShaderFloat16(true);
}, false);
}
if (featureManager.useExtension(VK_KHR_16BIT_STORAGE_EXTENSION_NAME, false)) {
featureManager.useFeatures<vk::PhysicalDevice16BitStorageFeatures>(
[](vk::PhysicalDevice16BitStorageFeatures& features) {
features.setStorageBuffer16BitAccess(true);
}, false);
}
featureManager.useFeatures([](vk::PhysicalDeviceFeatures& features) {
features.setFragmentStoresAndAtomics(true);
features.setGeometryShader(true);
features.setDepthClamp(true);
features.setShaderInt16(true);
});
for (const auto& feature : features.getList()) {
feature(featureManager);
}
const auto& extensions = featureManager.getActiveExtensions();
std::vector<vk::DeviceQueueCreateInfo> qCreateInfos;
std::vector<float> qPriorities;
qPriorities.resize(queueFlags.size(), 1.f);
std::vector<std::pair<int, int>> queuePairsGraphics, queuePairsCompute, queuePairsTransfer;
QueueManager::queueCreateInfosQueueHandles(physicalDevice, qPriorities, queueFlags, qCreateInfos, queuePairsGraphics, queuePairsCompute, queuePairsTransfer);
vk::DeviceCreateInfo deviceCreateInfo(
vk::DeviceCreateFlags(),
qCreateInfos.size(),
qCreateInfos.data(),
0,
nullptr,
extensions.size(),
extensions.data(),
nullptr
);
#ifndef NDEBUG
deviceCreateInfo.enabledLayerCount = static_cast<uint32_t>(validationLayers.size());
deviceCreateInfo.ppEnabledLayerNames = validationLayers.data();
#endif
deviceCreateInfo.setPNext(&(featureManager.getFeatures()));
vk::Device device = physicalDevice.createDevice(deviceCreateInfo);
if (featureManager.isExtensionActive(VK_NV_MESH_SHADER_EXTENSION_NAME)) {
InitMeshShaderDrawFunctions(device);
}
QueueManager queueManager = QueueManager::create(
device,
queuePairsGraphics,
queuePairsCompute,
queuePairsTransfer
);
vma::AllocatorCreateFlags vmaFlags;
const vma::AllocatorCreateInfo allocatorCreateInfo (
vmaFlags,
physicalDevice,
device,
0,
nullptr,
nullptr,
nullptr,
nullptr,
instance,
VK_HEADER_VERSION_COMPLETE
);
vma::Allocator allocator = vma::createAllocator(allocatorCreateInfo);
return Context(
instance,
physicalDevice,
device,
std::move(featureManager),
std::move(queueManager),
std::move(allocator)
);
}
}