Diffstat (limited to 'examples')
 examples/.clang-tidy              |   11
 examples/pugl_vulkan_cxx_demo.cpp | 1839
 examples/pugl_vulkan_demo.c       | 1134
 examples/sybok.hpp                | 2358
 4 files changed, 5342 insertions(+), 0 deletions(-)
diff --git a/examples/.clang-tidy b/examples/.clang-tidy
index 0c4a3cd..2a0160d 100644
--- a/examples/.clang-tidy
+++ b/examples/.clang-tidy
@@ -8,10 +8,21 @@ Checks: >
-cert-flp30-c,
-clang-analyzer-alpha.*,
-clang-analyzer-security.FloatLoopCounter,
+ -cppcoreguidelines-pro-bounds-array-to-pointer-decay,
+ -cppcoreguidelines-pro-bounds-constant-array-index,
+ -cppcoreguidelines-pro-bounds-pointer-arithmetic,
+ -cppcoreguidelines-pro-type-reinterpret-cast,
+ -cppcoreguidelines-pro-type-vararg,
+ -fuchsia-default-arguments,
+ -fuchsia-default-arguments-calls,
-google-runtime-references,
-hicpp-multiway-paths-covered,
+ -hicpp-no-array-decay,
-hicpp-signed-bitwise,
+ -hicpp-vararg,
-llvm-header-guard,
+ -misc-misplaced-const,
+ -misc-non-private-member-variables-in-classes,
-modernize-use-trailing-return-type,
-readability-else-after-return,
-readability-implicit-bool-conversion,
diff --git a/examples/pugl_vulkan_cxx_demo.cpp b/examples/pugl_vulkan_cxx_demo.cpp
new file mode 100644
index 0000000..fa40734
--- /dev/null
+++ b/examples/pugl_vulkan_cxx_demo.cpp
@@ -0,0 +1,1839 @@
+/*
+ Copyright 2019-2020 David Robillard <http://drobilla.net>
+
+ Permission to use, copy, modify, and/or distribute this software for any
+ purpose with or without fee is hereby granted, provided that the above
+ copyright notice and this permission notice appear in all copies.
+
+ THIS SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/**
+ @file pugl_vulkan_cxx_demo.cpp
+ @brief An example of drawing with Vulkan.
+
+ This is an example of using Vulkan for pixel-perfect 2D drawing. It uses
+   the same data and shaders as pugl_shader_demo.c and attempts to draw the
+   same thing, but using Vulkan.
+
+ Since Vulkan is a complicated and very verbose API, this example is
+ unfortunately much larger than the others. You should not use this as a
+ resource to learn Vulkan, but it provides a decent demo of using Vulkan with
+ Pugl that works nicely on all supported platforms.
+*/
+
+#include "demo_utils.h"
+#include "rects.h"
+#include "test/test_utils.h"
+
+#include "sybok.hpp"
+
+#include "pugl/pugl.h"
+#include "pugl/pugl.hpp"
+#include "pugl/pugl.ipp" // IWYU pragma: keep
+#include "pugl/pugl_vulkan.hpp"
+
+#include <vulkan/vk_platform.h>
+#include <vulkan/vulkan_core.h>
+
+#include <algorithm>
+#include <array>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
+#include <initializer_list>
+#include <iomanip>
+#include <iostream>
+#include <memory>
+#include <string>
+#include <vector>
+
+namespace {
+
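+/// ID of the timer used to repaint the view while it is being resized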
+constexpr uintptr_t resizeTimerId = 1u;
+
+struct PhysicalDeviceSelection {
+ sk::PhysicalDevice physicalDevice;
+ uint32_t graphicsFamilyIndex;
+};
+
+/// Basic Vulkan context associated with the window
+struct VulkanContext {
+ VkResult init(pugl::VulkanLoader& loader, const PuglTestOptions& opts);
+
+ sk::VulkanApi vk;
+ sk::Instance instance;
+ sk::DebugReportCallbackEXT debugCallback;
+};
+
+/// Basic setup of graphics device
+struct GraphicsDevice {
+ VkResult init(const pugl::VulkanLoader& loader,
+ const VulkanContext& context,
+ pugl::View& view,
+ const PuglTestOptions& opts);
+
+ sk::SurfaceKHR surface;
+ sk::PhysicalDevice physicalDevice{};
+ uint32_t graphicsIndex{};
+ VkSurfaceFormatKHR surfaceFormat{};
+ VkPresentModeKHR presentMode{};
+ VkPresentModeKHR resizePresentMode{};
+ sk::Device device{};
+ sk::Queue graphicsQueue{};
+ sk::CommandPool commandPool{};
+};
+
+/// Buffer allocated on the GPU
+struct Buffer {
+ VkResult init(const sk::VulkanApi& vk,
+ const GraphicsDevice& gpu,
+ VkDeviceSize size,
+ VkBufferUsageFlags usage,
+ VkMemoryPropertyFlags properties);
+
+ sk::Buffer buffer;
+ sk::DeviceMemory deviceMemory;
+};
+
+/// A set of frames that can be rendered concurrently
+struct Swapchain {
+ VkResult init(const sk::VulkanApi& vk,
+ const GraphicsDevice& gpu,
+ VkSurfaceCapabilitiesKHR capabilities,
+ VkExtent2D extent,
+ VkSwapchainKHR oldSwapchain,
+ bool resizing);
+
+ VkSurfaceCapabilitiesKHR capabilities{};
+ VkExtent2D extent{};
+ sk::SwapchainKHR swapchain{};
+ std::vector<sk::ImageView> imageViews{};
+};
+
+/// A pass that renders to a target
+struct RenderPass {
+ VkResult init(const sk::VulkanApi& vk,
+ const GraphicsDevice& gpu,
+ const Swapchain& swapchain);
+
+ sk::RenderPass renderPass;
+ std::vector<sk::Framebuffer> framebuffers;
+ sk::CommandBuffers<std::vector<VkCommandBuffer>> commandBuffers;
+};
+
+/// Uniform buffer for constant data used in shaders
+struct UniformBufferObject {
+ mat4 projection;
+};
+
+/// Rectangle data that does not depend on renderer configuration
+struct RectData {
+ VkResult
+ init(const sk::VulkanApi& vk, const GraphicsDevice& gpu, size_t nRects);
+
+ sk::DescriptorSetLayout descriptorSetLayout{};
+ Buffer uniformBuffer{};
+ sk::MappedMemory uniformData{};
+ Buffer modelBuffer{};
+ Buffer instanceBuffer{};
+ sk::MappedMemory vertexData{};
+ size_t numRects{};
+};
+
+/// Shader modules for drawing rectangles
+struct RectShaders {
+ VkResult init(const sk::VulkanApi& vk, const GraphicsDevice& gpu);
+
+ sk::ShaderModule vert{};
+ sk::ShaderModule frag{};
+};
+
+/// A pipeline to render rectangles with our shaders
+struct RectPipeline {
+ VkResult init(const sk::VulkanApi& vk,
+ const GraphicsDevice& gpu,
+ const RectData& rectData,
+ const RectShaders& shaders,
+ const Swapchain& swapchain,
+ const RenderPass& renderPass);
+
+ sk::DescriptorPool descriptorPool{};
+ sk::DescriptorSets<std::vector<VkDescriptorSet>> descriptorSets{};
+ sk::PipelineLayout pipelineLayout{};
+ std::array<sk::Pipeline, 1> pipelines{};
+ uint32_t numImages{};
+};
+
+/// Synchronization primitives used to coordinate drawing frames
+struct RenderSync {
+ VkResult
+ init(const sk::VulkanApi& vk, const sk::Device& device, uint32_t numImages);
+
+ std::vector<sk::Semaphore> imageAvailable{};
+ std::vector<sk::Semaphore> renderFinished{};
+ std::vector<sk::Fence> inFlight{};
+ size_t currentFrame{};
+};
+
+/// Renderer that owns the above and everything required to draw
+struct Renderer {
+ VkResult init(const sk::VulkanApi& vk,
+ const GraphicsDevice& gpu,
+ const RectData& rectData,
+ const RectShaders& rectShaders,
+ VkExtent2D extent,
+ bool resizing);
+
+ VkResult recreate(const sk::VulkanApi& vk,
+ const sk::SurfaceKHR& surface,
+ const GraphicsDevice& gpu,
+ const RectData& rectData,
+ const RectShaders& rectShaders,
+ VkExtent2D extent,
+ bool resizing);
+
+ Swapchain swapchain;
+ RenderPass renderPass;
+ RectPipeline rectPipeline;
+ RenderSync sync;
+};
+
+VkResult
+selectSurfaceFormat(const sk::VulkanApi& vk,
+ const sk::PhysicalDevice& physicalDevice,
+ const sk::SurfaceKHR& surface,
+ VkSurfaceFormatKHR& surfaceFormat)
+{
+ std::vector<VkSurfaceFormatKHR> formats;
+ if (VkResult r = vk.getPhysicalDeviceSurfaceFormatsKHR(physicalDevice,
+ surface,
+ formats)) {
+ return r;
+ }
+
+ for (const auto& format : formats) {
+ if (format.format == VK_FORMAT_B8G8R8A8_UNORM &&
+ format.colorSpace == VK_COLOR_SPACE_SRGB_NONLINEAR_KHR) {
+ surfaceFormat = format;
+ return VK_SUCCESS;
+ }
+ }
+
+ return VK_ERROR_FORMAT_NOT_SUPPORTED;
+}
+
+VkResult
+selectPresentMode(const sk::VulkanApi& vk,
+ const sk::PhysicalDevice& physicalDevice,
+ const sk::SurfaceKHR& surface,
+ const bool multiBuffer,
+ const bool sync,
+ VkPresentModeKHR& presentMode)
+{
+ // Map command line options to mode priorities
+ static constexpr VkPresentModeKHR priorities[][2][4] = {
+ {
+ // No double buffer, no sync
+ {VK_PRESENT_MODE_IMMEDIATE_KHR,
+ VK_PRESENT_MODE_MAILBOX_KHR,
+ VK_PRESENT_MODE_FIFO_RELAXED_KHR,
+ VK_PRESENT_MODE_FIFO_KHR},
+
+ // No double buffer, sync (nonsense, map to FIFO relaxed)
+ {VK_PRESENT_MODE_FIFO_RELAXED_KHR,
+ VK_PRESENT_MODE_FIFO_KHR,
+ VK_PRESENT_MODE_MAILBOX_KHR,
+ VK_PRESENT_MODE_IMMEDIATE_KHR},
+ },
+ {
+ // Double buffer, no sync
+ {
+ VK_PRESENT_MODE_MAILBOX_KHR,
+ VK_PRESENT_MODE_IMMEDIATE_KHR,
+ VK_PRESENT_MODE_FIFO_RELAXED_KHR,
+ VK_PRESENT_MODE_FIFO_KHR,
+ },
+
+ // Double buffer, sync
+ {VK_PRESENT_MODE_FIFO_KHR,
+ VK_PRESENT_MODE_FIFO_RELAXED_KHR,
+ VK_PRESENT_MODE_MAILBOX_KHR,
+ VK_PRESENT_MODE_IMMEDIATE_KHR},
+ },
+ };
+
+ std::vector<VkPresentModeKHR> modes;
+ if (VkResult r = vk.getPhysicalDeviceSurfacePresentModesKHR(physicalDevice,
+ surface,
+ modes)) {
+ return r;
+ }
+
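+    // Try modes in priority order for this buffering/sync configuration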
+ const auto& tryModes = priorities[bool(multiBuffer)][bool(sync)];
+ for (const auto m : tryModes) {
+ if (std::find(modes.begin(), modes.end(), m) != modes.end()) {
+ presentMode = m;
+ return VK_SUCCESS;
+ }
+ }
+
+ return VK_ERROR_INCOMPATIBLE_DRIVER;
+}
+
+VkResult
+openDevice(const sk::VulkanApi& vk,
+ const sk::PhysicalDevice& physicalDevice,
+ const uint32_t graphicsFamilyIndex,
+ sk::Device& device)
+{
+ const float graphicsQueuePriority = 1.0f;
+ const char* const swapchainName = "VK_KHR_swapchain";
+
+ const VkDeviceQueueCreateInfo queueCreateInfo{
+ VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
+ nullptr,
+ 0u,
+ graphicsFamilyIndex,
+ SK_COUNTED(1u, &graphicsQueuePriority),
+ };
+
+ const VkDeviceCreateInfo createInfo{VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
+ nullptr,
+ 0u,
+ SK_COUNTED(1u, &queueCreateInfo),
+ SK_COUNTED(0u, nullptr), // Deprecated
+ SK_COUNTED(1u, &swapchainName),
+ nullptr};
+
+ return vk.createDevice(physicalDevice, createInfo, device);
+}
+
+/// Return whether the physical device supports the extensions we require
+VkResult
+deviceSupportsRequiredExtensions(const sk::VulkanApi& vk,
+ const sk::PhysicalDevice& device,
+ bool& supported)
+{
+ VkResult r = VK_SUCCESS;
+
+ std::vector<VkExtensionProperties> props;
+ if ((r = vk.enumerateDeviceExtensionProperties(device, props))) {
+ return r;
+ }
+
+ supported = std::any_of(props.begin(),
+ props.end(),
+ [&](const VkExtensionProperties& e) {
+ return !strcmp(e.extensionName,
+ "VK_KHR_swapchain");
+ });
+
+ return VK_SUCCESS;
+}
+
+/// Return the index of the graphics queue, if there is one
+VkResult
+findGraphicsQueue(const sk::VulkanApi& vk,
+ const sk::SurfaceKHR& surface,
+ const sk::PhysicalDevice& device,
+ uint32_t& queueIndex)
+{
+ VkResult r = VK_SUCCESS;
+
+ std::vector<VkQueueFamilyProperties> queueProps;
+ if ((r = vk.getPhysicalDeviceQueueFamilyProperties(device, queueProps))) {
+ return r;
+ }
+
+ for (uint32_t q = 0u; q < queueProps.size(); ++q) {
+ if (queueProps[q].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
+ bool supported = false;
+ if ((r = vk.getPhysicalDeviceSurfaceSupportKHR(
+ device, q, surface, supported))) {
+ return r;
+ }
+
+ if (supported) {
+ queueIndex = q;
+ return VK_SUCCESS;
+ }
+ }
+ }
+
+ return VK_ERROR_FEATURE_NOT_PRESENT;
+}
+
+/// Select a physical graphics device to use (simply the first found)
+VkResult
+selectPhysicalDevice(const sk::VulkanApi& vk,
+ const sk::Instance& instance,
+ const sk::SurfaceKHR& surface,
+ PhysicalDeviceSelection& selection)
+{
+ VkResult r = VK_SUCCESS;
+
+ std::vector<sk::PhysicalDevice> devices;
+ if ((r = vk.enumeratePhysicalDevices(instance, devices))) {
+ return r;
+ }
+
+ for (const auto& device : devices) {
+ auto supported = false;
+ if ((r = deviceSupportsRequiredExtensions(vk, device, supported))) {
+ return r;
+ }
+
+ if (supported) {
+ auto queueIndex = 0u;
+ if ((r = findGraphicsQueue(vk, surface, device, queueIndex))) {
+ return r;
+ }
+
+ selection = PhysicalDeviceSelection{device, queueIndex};
+ return VK_SUCCESS;
+ }
+ }
+
+ return VK_ERROR_INCOMPATIBLE_DISPLAY_KHR;
+}
+
+VkResult
+GraphicsDevice::init(const pugl::VulkanLoader& loader,
+ const VulkanContext& context,
+ pugl::View& view,
+ const PuglTestOptions& opts)
+{
+ const auto& vk = context.vk;
+ VkResult r = VK_SUCCESS;
+
+ // Create a Vulkan surface for the window using the Pugl API
+ VkSurfaceKHR surfaceHandle = {};
+ if ((r = pugl::createSurface(
+ loader, view, context.instance, nullptr, &surfaceHandle))) {
+ return r;
+ }
+
+ // Wrap surface in a safe RAII handle
+ surface = sk::SurfaceKHR{surfaceHandle,
+ {context.instance, vk.vkDestroySurfaceKHR}};
+
+ PhysicalDeviceSelection physicalDeviceSelection = {};
+ // Select a physical device to use
+ if ((r = selectPhysicalDevice(
+ vk, context.instance, surface, physicalDeviceSelection))) {
+ return r;
+ }
+
+ physicalDevice = physicalDeviceSelection.physicalDevice;
+ graphicsIndex = physicalDeviceSelection.graphicsFamilyIndex;
+
+ if ((r = selectSurfaceFormat(vk, physicalDevice, surface, surfaceFormat)) ||
+ (r = selectPresentMode(vk,
+ physicalDevice,
+ surface,
+ opts.doubleBuffer,
+ opts.sync,
+ presentMode)) ||
+ (r = selectPresentMode(
+ vk, physicalDevice, surface, true, false, resizePresentMode)) ||
+ (r = openDevice(vk, physicalDevice, graphicsIndex, device))) {
+ return r;
+ }
+
+ const VkCommandPoolCreateInfo commandPoolInfo{
+ VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO, nullptr, {}, graphicsIndex};
+
+ if ((r = vk.createCommandPool(device, commandPoolInfo, commandPool))) {
+ return r;
+ }
+
+ graphicsQueue = vk.getDeviceQueue(device, graphicsIndex, 0);
+ return VK_SUCCESS;
+}
+
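+/// Find a memory type matching the filter and properties, or UINT32_MAX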
+uint32_t
+findMemoryType(const sk::VulkanApi& vk,
+ const sk::PhysicalDevice& physicalDevice,
+ const uint32_t typeFilter,
+ const VkMemoryPropertyFlags& properties)
+{
+ VkPhysicalDeviceMemoryProperties memProperties =
+ vk.getPhysicalDeviceMemoryProperties(physicalDevice);
+
+ for (uint32_t i = 0; i < memProperties.memoryTypeCount; ++i) {
+ if ((typeFilter & (1 << i)) &&
+ (memProperties.memoryTypes[i].propertyFlags & properties) ==
+ properties) {
+ return i;
+ }
+ }
+
+ return UINT32_MAX;
+}
+
+VkResult
+Buffer::init(const sk::VulkanApi& vk,
+ const GraphicsDevice& gpu,
+ const VkDeviceSize size,
+ const VkBufferUsageFlags usage,
+ const VkMemoryPropertyFlags properties)
+{
+ const VkBufferCreateInfo bufferInfo{VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO,
+ nullptr,
+ {},
+ size,
+ usage,
+ VK_SHARING_MODE_EXCLUSIVE,
+ SK_COUNTED(0, nullptr)};
+
+ const auto& device = gpu.device;
+
+ VkResult r = VK_SUCCESS;
+ if ((r = vk.createBuffer(device, bufferInfo, buffer))) {
+ return r;
+ }
+
+ const auto requirements = vk.getBufferMemoryRequirements(device, buffer);
+ const auto memoryTypeIndex = findMemoryType(vk,
+ gpu.physicalDevice,
+ requirements.memoryTypeBits,
+ properties);
+
+ if (memoryTypeIndex == UINT32_MAX) {
+ return VK_ERROR_FEATURE_NOT_PRESENT;
+ }
+
+ const VkMemoryAllocateInfo allocInfo{VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO,
+ nullptr,
+ requirements.size,
+ memoryTypeIndex};
+
+ if ((r = vk.allocateMemory(device, allocInfo, deviceMemory)) ||
+ (r = vk.bindBufferMemory(device, buffer, deviceMemory, 0))) {
+ return r;
+ }
+
+ return VK_SUCCESS;
+}
+
+VkResult
+Swapchain::init(const sk::VulkanApi& vk,
+ const GraphicsDevice& gpu,
+ const VkSurfaceCapabilitiesKHR surfaceCapabilities,
+ const VkExtent2D surfaceExtent,
+ VkSwapchainKHR oldSwapchain,
+ bool resizing)
+{
+ capabilities = surfaceCapabilities;
+ extent = surfaceExtent;
+
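+    // Ask for triple buffering unless the surface caps the image count lower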
+ const auto minNumImages = (!capabilities.maxImageCount ||
+ capabilities.maxImageCount >= 3u)
+ ? 3u
+ : capabilities.maxImageCount;
+
+ const VkSwapchainCreateInfoKHR swapchainCreateInfo{
+ VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
+ nullptr,
+ {},
+ gpu.surface,
+ minNumImages,
+ gpu.surfaceFormat.format,
+ gpu.surfaceFormat.colorSpace,
+ surfaceExtent,
+ 1,
+ (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT),
+ VK_SHARING_MODE_EXCLUSIVE,
+ SK_COUNTED(0, nullptr),
+ capabilities.currentTransform,
+ VK_COMPOSITE_ALPHA_INHERIT_BIT_KHR,
+ resizing ? gpu.resizePresentMode : gpu.presentMode,
+ VK_TRUE,
+ oldSwapchain};
+
+ VkResult r = VK_SUCCESS;
+ std::vector<VkImage> images;
+ if ((r = vk.createSwapchainKHR(gpu.device,
+ swapchainCreateInfo,
+ swapchain)) ||
+ (r = vk.getSwapchainImagesKHR(gpu.device, swapchain, images))) {
+ return r;
+ }
+
+ imageViews = std::vector<sk::ImageView>(images.size());
+ for (size_t i = 0; i < images.size(); ++i) {
+ const VkImageViewCreateInfo imageViewCreateInfo{
+ VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO,
+ nullptr,
+ {},
+ images[i],
+ VK_IMAGE_VIEW_TYPE_2D,
+ gpu.surfaceFormat.format,
+ {},
+ {VK_IMAGE_ASPECT_COLOR_BIT, 0, 1, 0, 1}};
+
+ if ((r = vk.createImageView(gpu.device,
+ imageViewCreateInfo,
+ imageViews[i]))) {
+ return r;
+ }
+ }
+
+ return VK_SUCCESS;
+}
+
+VkResult
+RenderPass::init(const sk::VulkanApi& vk,
+ const GraphicsDevice& gpu,
+ const Swapchain& swapchain)
+{
+ const auto numImages = static_cast<uint32_t>(swapchain.imageViews.size());
+
+ assert(numImages > 0);
+
+ // Create command buffers
+ const VkCommandBufferAllocateInfo commandBufferAllocateInfo{
+ VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
+ nullptr,
+ gpu.commandPool,
+ VK_COMMAND_BUFFER_LEVEL_PRIMARY,
+ numImages};
+
+ VkResult r = VK_SUCCESS;
+ if ((r = vk.allocateCommandBuffers(gpu.device,
+ commandBufferAllocateInfo,
+ commandBuffers))) {
+ return r;
+ }
+
+ static constexpr VkAttachmentReference colorAttachmentRef{
+ 0, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL};
+
+ static constexpr VkSubpassDescription subpass{
+ {},
+ VK_PIPELINE_BIND_POINT_GRAPHICS,
+ SK_COUNTED(0, nullptr),
+ SK_COUNTED(1, &colorAttachmentRef, nullptr, nullptr),
+ SK_COUNTED(0u, nullptr)};
+
+ static constexpr VkSubpassDependency dependency{
+ VK_SUBPASS_EXTERNAL,
+ 0,
+ VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
+ VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT,
+ (VK_ACCESS_COLOR_ATTACHMENT_READ_BIT |
+ VK_ACCESS_COLOR_ATTACHMENT_WRITE_BIT),
+ {},
+ {}};
+
+ const VkAttachmentDescription colorAttachment{
+ {},
+ gpu.surfaceFormat.format,
+ VK_SAMPLE_COUNT_1_BIT,
+ VK_ATTACHMENT_LOAD_OP_CLEAR,
+ VK_ATTACHMENT_STORE_OP_STORE,
+ VK_ATTACHMENT_LOAD_OP_DONT_CARE,
+ VK_ATTACHMENT_STORE_OP_DONT_CARE,
+ VK_IMAGE_LAYOUT_UNDEFINED,
+ VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
+ };
+
+ const VkRenderPassCreateInfo renderPassCreateInfo{
+ VK_STRUCTURE_TYPE_RENDER_PASS_CREATE_INFO,
+ nullptr,
+ {},
+ SK_COUNTED(1, &colorAttachment),
+ SK_COUNTED(1, &subpass),
+ SK_COUNTED(1, &dependency)};
+
+ if ((r = vk.createRenderPass(gpu.device,
+ renderPassCreateInfo,
+ renderPass))) {
+ return r;
+ }
+
+ // Create framebuffers
+ framebuffers = std::vector<sk::Framebuffer>(numImages);
+ for (uint32_t i = 0; i < numImages; ++i) {
+ const VkFramebufferCreateInfo framebufferCreateInfo{
+ VK_STRUCTURE_TYPE_FRAMEBUFFER_CREATE_INFO,
+ nullptr,
+ {},
+ renderPass,
+ SK_COUNTED(1, &swapchain.imageViews[i].get()),
+ swapchain.extent.width,
+ swapchain.extent.height,
+ 1};
+
+ if ((r = vk.createFramebuffer(gpu.device,
+ framebufferCreateInfo,
+ framebuffers[i]))) {
+ return r;
+ }
+ }
+
+ return VK_SUCCESS;
+}
+
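+/// Read a binary file into 32-bit words (empty if it can not be read)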
+std::vector<uint32_t>
+readFile(const std::string& filename)
+{
+ std::unique_ptr<FILE, decltype(&fclose)> file{fopen(filename.c_str(), "rb"),
+ &fclose};
+
+ if (!file) {
+ return {};
+ }
+
+ fseek(file.get(), 0, SEEK_END);
+ const auto fileSize = static_cast<size_t>(ftell(file.get()));
+ fseek(file.get(), 0, SEEK_SET);
+
+ const auto numWords = fileSize / sizeof(uint32_t);
+ std::vector<uint32_t> buffer(numWords);
+
+    if (fread(buffer.data(), sizeof(uint32_t), numWords, file.get()) !=
+        numWords) {
+        return {};
+    }
+
+ return buffer;
+}
+
+VkResult
+createShaderModule(const sk::VulkanApi& vk,
+ const GraphicsDevice& gpu,
+ const std::vector<uint32_t>& code,
+ sk::ShaderModule& shaderModule)
+{
+ const VkShaderModuleCreateInfo createInfo{
+ VK_STRUCTURE_TYPE_SHADER_MODULE_CREATE_INFO,
+ nullptr,
+ {},
+ code.size() * sizeof(uint32_t),
+ code.data()};
+
+ return vk.createShaderModule(gpu.device, createInfo, shaderModule);
+}
+
+VkResult
+RectShaders::init(const sk::VulkanApi& vk, const GraphicsDevice& gpu)
+{
+ auto vertShaderCode = readFile("build/shaders/rect.vert.spv");
+ auto fragShaderCode = readFile("build/shaders/rect.frag.spv");
+
+ if (vertShaderCode.empty() || fragShaderCode.empty()) {
+ return VK_ERROR_INITIALIZATION_FAILED;
+ }
+
+ VkResult r = VK_SUCCESS;
+ if ((r = createShaderModule(vk, gpu, vertShaderCode, vert)) ||
+ (r = createShaderModule(vk, gpu, fragShaderCode, frag))) {
+ return r;
+ }
+
+ return VK_SUCCESS;
+}
+
+VkResult
+RectPipeline::init(const sk::VulkanApi& vk,
+ const GraphicsDevice& gpu,
+ const RectData& rectData,
+ const RectShaders& shaders,
+ const Swapchain& swapchain,
+ const RenderPass& renderPass)
+{
+ const auto oldNumImages = numImages;
+ VkResult r = VK_SUCCESS;
+
+ numImages = static_cast<uint32_t>(swapchain.imageViews.size());
+ pipelines = {};
+ pipelineLayout = {};
+ descriptorSets = {};
+
+ if (numImages != oldNumImages) {
+ // Create layout descriptor pool
+
+ const VkDescriptorPoolSize poolSize{VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
+ numImages};
+
+ const VkDescriptorPoolCreateInfo descriptorPoolCreateInfo{
+ VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO,
+ nullptr,
+ VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT,
+ numImages,
+ 1u,
+ &poolSize};
+ if ((r = vk.createDescriptorPool(gpu.device,
+ descriptorPoolCreateInfo,
+ descriptorPool))) {
+ return r;
+ }
+ }
+
+ const std::vector<VkDescriptorSetLayout> layouts(
+ numImages, rectData.descriptorSetLayout.get());
+
+ const VkDescriptorSetAllocateInfo descriptorSetAllocateInfo{
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO,
+ nullptr,
+ descriptorPool,
+ numImages,
+ layouts.data()};
+ if ((r = vk.allocateDescriptorSets(gpu.device,
+ descriptorSetAllocateInfo,
+ descriptorSets))) {
+ return r;
+ }
+
+ const VkDescriptorBufferInfo bufferInfo{rectData.uniformBuffer.buffer,
+ 0,
+ sizeof(UniformBufferObject)};
+
+ const std::array<VkWriteDescriptorSet, 1> descriptorWrites{
+ {{VK_STRUCTURE_TYPE_WRITE_DESCRIPTOR_SET,
+ nullptr,
+ descriptorSets[0],
+ 0,
+ 0,
+ 1,
+ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
+ nullptr,
+ &bufferInfo,
+ nullptr}}};
+
+ const std::array<VkCopyDescriptorSet, 0> descriptorCopies{};
+
+ vk.updateDescriptorSets(gpu.device, descriptorWrites, descriptorCopies);
+
+ static constexpr std::array<VkVertexInputAttributeDescription, 4>
+ vertexAttributeDescriptions{
+ {// Model
+ {0u, 0u, VK_FORMAT_R32G32_SFLOAT, 0},
+
+ // Rect instance attributes
+ {1u, 1u, VK_FORMAT_R32G32_SFLOAT, offsetof(Rect, pos)},
+ {2u, 1u, VK_FORMAT_R32G32_SFLOAT, offsetof(Rect, size)},
+ {3u,
+ 1u,
+ VK_FORMAT_R32G32B32A32_SFLOAT,
+ offsetof(Rect, fillColor)}}};
+
+ static constexpr std::array<VkVertexInputBindingDescription, 2>
+ vertexBindingDescriptions{
+ VkVertexInputBindingDescription{0,
+ sizeof(vec2),
+ VK_VERTEX_INPUT_RATE_VERTEX},
+ VkVertexInputBindingDescription{1u,
+ sizeof(Rect),
+ VK_VERTEX_INPUT_RATE_INSTANCE}};
+
+ static constexpr VkPipelineInputAssemblyStateCreateInfo inputAssembly{
+ VK_STRUCTURE_TYPE_PIPELINE_INPUT_ASSEMBLY_STATE_CREATE_INFO,
+ nullptr,
+ {},
+ VK_PRIMITIVE_TOPOLOGY_TRIANGLE_STRIP,
+ false};
+
+ static constexpr VkPipelineRasterizationStateCreateInfo rasterizer{
+ VK_STRUCTURE_TYPE_PIPELINE_RASTERIZATION_STATE_CREATE_INFO,
+ nullptr,
+ {},
+ 0,
+ 0,
+ VK_POLYGON_MODE_FILL,
+ VK_CULL_MODE_BACK_BIT,
+ VK_FRONT_FACE_CLOCKWISE,
+ 0,
+ 0,
+ 0,
+ 0,
+ 1.0f};
+
+ static constexpr VkPipelineMultisampleStateCreateInfo multisampling{
+ VK_STRUCTURE_TYPE_PIPELINE_MULTISAMPLE_STATE_CREATE_INFO,
+ nullptr,
+ {},
+ VK_SAMPLE_COUNT_1_BIT,
+ false,
+ 0.0f,
+ nullptr,
+ false,
+ false};
+
+ static constexpr VkPipelineColorBlendAttachmentState colorBlendAttachment{
+ true,
+ VK_BLEND_FACTOR_SRC_ALPHA,
+ VK_BLEND_FACTOR_ONE_MINUS_SRC_ALPHA,
+ VK_BLEND_OP_ADD,
+ VK_BLEND_FACTOR_ONE,
+ VK_BLEND_FACTOR_ZERO,
+ VK_BLEND_OP_ADD,
+ (VK_COLOR_COMPONENT_R_BIT | VK_COLOR_COMPONENT_G_BIT |
+ VK_COLOR_COMPONENT_B_BIT | VK_COLOR_COMPONENT_A_BIT)};
+
+ const VkPipelineShaderStageCreateInfo shaderStages[] = {
+ {VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+ nullptr,
+ {},
+ VK_SHADER_STAGE_VERTEX_BIT,
+ shaders.vert.get(),
+ "main",
+ nullptr},
+ {VK_STRUCTURE_TYPE_PIPELINE_SHADER_STAGE_CREATE_INFO,
+ nullptr,
+ {},
+ VK_SHADER_STAGE_FRAGMENT_BIT,
+ shaders.frag.get(),
+ "main",
+ nullptr}};
+
+ const VkPipelineVertexInputStateCreateInfo vertexInputInfo{
+ VK_STRUCTURE_TYPE_PIPELINE_VERTEX_INPUT_STATE_CREATE_INFO,
+ nullptr,
+ {},
+ SK_COUNTED(static_cast<uint32_t>(vertexBindingDescriptions.size()),
+ vertexBindingDescriptions.data()),
+ SK_COUNTED(static_cast<uint32_t>(vertexAttributeDescriptions.size()),
+ vertexAttributeDescriptions.data())};
+
+ const VkViewport viewport{0.0f,
+ 0.0f,
+ float(swapchain.extent.width),
+ float(swapchain.extent.height),
+ 0.0f,
+ 1.0f};
+
+ const VkRect2D scissor{{0, 0}, swapchain.extent};
+
+ const VkPipelineViewportStateCreateInfo viewportState{
+ VK_STRUCTURE_TYPE_PIPELINE_VIEWPORT_STATE_CREATE_INFO,
+ nullptr,
+ {},
+ SK_COUNTED(1, &viewport),
+ SK_COUNTED(1, &scissor)};
+
+ const VkPipelineColorBlendStateCreateInfo colorBlending{
+ VK_STRUCTURE_TYPE_PIPELINE_COLOR_BLEND_STATE_CREATE_INFO,
+ nullptr,
+ {},
+ false,
+ VK_LOGIC_OP_COPY,
+ SK_COUNTED(1, &colorBlendAttachment),
+ {1.0f, 0.0f, 0.0f, 0.0f}};
+
+ const VkPipelineLayoutCreateInfo layoutInfo{
+ VK_STRUCTURE_TYPE_PIPELINE_LAYOUT_CREATE_INFO,
+ nullptr,
+ {},
+ SK_COUNTED(1, &rectData.descriptorSetLayout.get()),
+ SK_COUNTED(0, nullptr)};
+
+ if ((r = vk.createPipelineLayout(gpu.device, layoutInfo, pipelineLayout))) {
+ return r;
+ }
+
+ const std::array<VkGraphicsPipelineCreateInfo, 1> pipelineInfos{
+ {{VK_STRUCTURE_TYPE_GRAPHICS_PIPELINE_CREATE_INFO,
+ nullptr,
+ {},
+ SK_COUNTED(2, shaderStages),
+ &vertexInputInfo,
+ &inputAssembly,
+ nullptr,
+ &viewportState,
+ &rasterizer,
+ &multisampling,
+ nullptr,
+ &colorBlending,
+ nullptr,
+ pipelineLayout,
+ renderPass.renderPass,
+ 0u,
+ {},
+ 0}}};
+
+ if ((r = vk.createGraphicsPipelines(
+ gpu.device, {}, pipelineInfos, pipelines))) {
+ return r;
+ }
+
+ return VK_SUCCESS;
+}
+
+VkResult
+RectData::init(const sk::VulkanApi& vk,
+ const GraphicsDevice& gpu,
+ const size_t nRects)
+{
+ numRects = nRects;
+
+ static constexpr VkDescriptorSetLayoutBinding uboLayoutBinding{
+ 0,
+ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER,
+ 1,
+ VK_SHADER_STAGE_VERTEX_BIT,
+ nullptr};
+
+ const VkDescriptorSetLayoutCreateInfo descriptorSetLayoutInfo{
+ VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO,
+ nullptr,
+ {},
+ 1,
+ &uboLayoutBinding};
+
+ VkResult r = VK_SUCCESS;
+ if ((r = vk.createDescriptorSetLayout(gpu.device,
+ descriptorSetLayoutInfo,
+ descriptorSetLayout)) ||
+ (r = uniformBuffer.init(vk,
+ gpu,
+ sizeof(UniformBufferObject),
+ VK_BUFFER_USAGE_UNIFORM_BUFFER_BIT,
+ VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+ VK_MEMORY_PROPERTY_HOST_COHERENT_BIT)) ||
+ (r = vk.mapMemory(gpu.device,
+ uniformBuffer.deviceMemory,
+ 0,
+ sizeof(UniformBufferObject),
+ {},
+ uniformData))) {
+ return r;
+ }
+
+ const VkBufferUsageFlags usageFlags = (VK_BUFFER_USAGE_VERTEX_BUFFER_BIT |
+ VK_BUFFER_USAGE_TRANSFER_SRC_BIT |
+ VK_BUFFER_USAGE_TRANSFER_DST_BIT);
+
+ const VkMemoryPropertyFlags propertyFlags =
+ (VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT |
+ VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
+
+ if ((r = modelBuffer.init(
+ vk, gpu, sizeof(rectVertices), usageFlags, propertyFlags))) {
+ return r;
+ }
+
+ {
+ // Copy model vertices (directly, we do this only once)
+ sk::MappedMemory modelData;
+ if ((r = vk.mapMemory(gpu.device,
+ modelBuffer.deviceMemory,
+ 0,
+ static_cast<VkDeviceSize>(sizeof(rectVertices)),
+ {},
+ modelData))) {
+ return r;
+ }
+
+ memcpy(modelData.get(), rectVertices, sizeof(rectVertices));
+ }
+
+ if ((r = instanceBuffer.init(
+ vk, gpu, sizeof(Rect) * numRects, usageFlags, propertyFlags))) {
+ return r;
+ }
+
+ // Map attribute vertices (we will update them every frame)
+ const auto rectsSize = static_cast<VkDeviceSize>(sizeof(Rect) * numRects);
+ if ((r = vk.mapMemory(gpu.device,
+ instanceBuffer.deviceMemory,
+ 0,
+ rectsSize,
+ {},
+ vertexData))) {
+ return r;
+ }
+
+ return VK_SUCCESS;
+}
+
+VkResult
+RenderSync::init(const sk::VulkanApi& vk,
+ const sk::Device& device,
+ const uint32_t numImages)
+{
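+    // Allow at most one frame fewer in flight than there are swapchain images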
+ const auto maxInFlight = std::max(1u, numImages - 1u);
+ VkResult r = VK_SUCCESS;
+
+ imageAvailable = std::vector<sk::Semaphore>(numImages);
+ renderFinished = std::vector<sk::Semaphore>(numImages);
+ for (uint32_t i = 0; i < numImages; ++i) {
+ static constexpr VkSemaphoreCreateInfo semaphoreInfo{
+ VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO, nullptr, {}};
+
+ if ((r = vk.createSemaphore(device,
+ semaphoreInfo,
+ imageAvailable[i])) ||
+ (r = vk.createSemaphore(device,
+ semaphoreInfo,
+ renderFinished[i]))) {
+ return r;
+ }
+ }
+
+ inFlight = std::vector<sk::Fence>(maxInFlight);
+ for (uint32_t i = 0; i < maxInFlight; ++i) {
+ static constexpr VkFenceCreateInfo fenceInfo{
+ VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
+ nullptr,
+ VK_FENCE_CREATE_SIGNALED_BIT};
+
+ if ((r = vk.createFence(device, fenceInfo, inFlight[i]))) {
+ return r;
+ }
+ }
+
+ return VK_SUCCESS;
+}
+
+VkResult
+Renderer::init(const sk::VulkanApi& vk,
+ const GraphicsDevice& gpu,
+ const RectData& rectData,
+ const RectShaders& rectShaders,
+ const VkExtent2D extent,
+ bool resizing)
+{
+ VkResult r = VK_SUCCESS;
+ VkSurfaceCapabilitiesKHR capabilities = {};
+
+ if ((r = vk.getPhysicalDeviceSurfaceCapabilitiesKHR(gpu.physicalDevice,
+ gpu.surface,
+ capabilities)) ||
+ (r = swapchain.init(vk, gpu, capabilities, extent, {}, resizing)) ||
+ (r = renderPass.init(vk, gpu, swapchain)) ||
+ (r = rectPipeline.init(
+ vk, gpu, rectData, rectShaders, swapchain, renderPass))) {
+ return r;
+ }
+
+ const auto numFrames = static_cast<uint32_t>(swapchain.imageViews.size());
+ return sync.init(vk, gpu.device, numFrames);
+}
+
+VkResult
+Renderer::recreate(const sk::VulkanApi& vk,
+ const sk::SurfaceKHR& surface,
+ const GraphicsDevice& gpu,
+ const RectData& rectData,
+ const RectShaders& rectShaders,
+ const VkExtent2D extent,
+ bool resizing)
+{
+ VkResult r = VK_SUCCESS;
+ const auto oldNumImages = swapchain.imageViews.size();
+
+ VkSurfaceCapabilitiesKHR capabilities = {};
+ if ((r = vk.getPhysicalDeviceSurfaceCapabilitiesKHR(gpu.physicalDevice,
+ surface,
+ capabilities)) ||
+ (r = swapchain.init(
+ vk, gpu, capabilities, extent, swapchain.swapchain, resizing)) ||
+ (r = renderPass.init(vk, gpu, swapchain)) ||
+ (r = rectPipeline.init(
+ vk, gpu, rectData, rectShaders, swapchain, renderPass))) {
+ return r;
+ }
+
+ const auto numFrames = static_cast<uint32_t>(swapchain.imageViews.size());
+ if (swapchain.imageViews.size() != oldNumImages) {
+ return sync.init(vk, gpu.device, numFrames);
+ }
+
+ return VK_SUCCESS;
+}
+
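+/// Print a diagnostic message from the Vulkan validation layers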
+VKAPI_ATTR
+VkBool32 VKAPI_CALL
+debugCallback(VkDebugReportFlagsEXT flags,
+ VkDebugReportObjectTypeEXT,
+ uint64_t,
+ size_t,
+ int32_t,
+ const char* layerPrefix,
+ const char* msg,
+ void*)
+{
+ std::cerr << sk::string(static_cast<VkDebugReportFlagBitsEXT>(flags))
+ << ": " << layerPrefix << ": " << msg << std::endl;
+
+ return VK_FALSE;
+}
+
+bool
+hasExtension(const char* name,
+ const std::vector<VkExtensionProperties>& properties)
+{
+ for (const auto& p : properties) {
+ if (!strcmp(p.extensionName, name)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+bool
+hasLayer(const char* name, const std::vector<VkLayerProperties>& properties)
+{
+ for (const auto& p : properties) {
+ if (!strcmp(p.layerName, name)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+template<class Value>
+void
+logInfo(const char* heading, const Value& value)
+{
+ std::cout << std::setw(26) << std::left << (std::string(heading) + ":")
+ << value << std::endl;
+}
+
+VkResult
+createInstance(sk::VulkanInitApi& initApi,
+ const PuglTestOptions& opts,
+ sk::Instance& instance)
+{
+ VkResult r = VK_SUCCESS;
+
+ std::vector<VkLayerProperties> layerProps;
+ std::vector<VkExtensionProperties> extProps;
+ if ((r = initApi.enumerateInstanceLayerProperties(layerProps)) ||
+ (r = initApi.enumerateInstanceExtensionProperties(extProps))) {
+ return r;
+ }
+
+ const auto puglExtensions = pugl::getInstanceExtensions();
+ auto extensions = std::vector<const char*>(puglExtensions.begin(),
+ puglExtensions.end());
+
+ // Add extra extensions we want to use if they are supported
+ if (hasExtension("VK_EXT_debug_report", extProps)) {
+ extensions.push_back("VK_EXT_debug_report");
+ }
+
+ // Add validation layers if error checking is enabled
+ std::vector<const char*> layers;
+ if (opts.errorChecking) {
+ for (const char* l : {"VK_LAYER_KHRONOS_validation",
+ "VK_LAYER_LUNARG_standard_validation"}) {
+ if (hasLayer(l, layerProps)) {
+ layers.push_back(l);
+ }
+ }
+ }
+
+ for (const auto& e : extensions) {
+ logInfo("Using instance extension", e);
+ }
+
+ for (const auto& l : layers) {
+ logInfo("Using instance layer", l);
+ }
+
+ static constexpr VkApplicationInfo appInfo{
+ VK_STRUCTURE_TYPE_APPLICATION_INFO,
+ nullptr,
+ "Pugl Vulkan Demo",
+ 0,
+ nullptr,
+ 0,
+ VK_MAKE_VERSION(1, 0, 0),
+ };
+
+ const VkInstanceCreateInfo createInfo{
+ VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
+ nullptr,
+ VkInstanceCreateFlags{},
+ &appInfo,
+ SK_COUNTED(uint32_t(layers.size()), layers.data()),
+ SK_COUNTED(uint32_t(extensions.size()), extensions.data())};
+
+ return initApi.createInstance(createInfo, instance);
+}
+
+VkResult
+getDebugReportCallback(sk::VulkanApi& api,
+ sk::Instance& instance,
+ const bool verbose,
+ sk::DebugReportCallbackEXT& callback)
+{
+ if (api.vkCreateDebugReportCallbackEXT) {
+ VkDebugReportFlagsEXT flags =
+ (VK_DEBUG_REPORT_WARNING_BIT_EXT |
+ VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT |
+ VK_DEBUG_REPORT_ERROR_BIT_EXT);
+
+ if (verbose) {
+ flags |= VK_DEBUG_REPORT_INFORMATION_BIT_EXT;
+ flags |= VK_DEBUG_REPORT_DEBUG_BIT_EXT;
+ }
+
+ const VkDebugReportCallbackCreateInfoEXT createInfo{
+        VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT,
+ nullptr,
+ flags,
+ debugCallback,
+ nullptr};
+
+ return api.createDebugReportCallbackEXT(instance, createInfo, callback);
+ }
+
+ return VK_ERROR_FEATURE_NOT_PRESENT;
+}
+
+void
+recordCommandBuffer(sk::CommandScope& cmd,
+ const Swapchain& swapchain,
+ const RenderPass& renderPass,
+ const RectPipeline& rectPipeline,
+ const RectData& rectData,
+ const size_t imageIndex)
+{
+ const VkClearColorValue clearColorValue{{0.0f, 0.0f, 0.0f, 1.0f}};
+ const VkClearValue clearValue{clearColorValue};
+
+ const VkRenderPassBeginInfo renderPassBegin{
+ VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
+ nullptr,
+ renderPass.renderPass,
+ renderPass.framebuffers[imageIndex],
+ VkRect2D{{0, 0}, swapchain.extent},
+ SK_COUNTED(1, &clearValue)};
+
+ auto pass = cmd.beginRenderPass(renderPassBegin,
+ VK_SUBPASS_CONTENTS_INLINE);
+
+ pass.bindPipeline(VK_PIPELINE_BIND_POINT_GRAPHICS,
+ rectPipeline.pipelines[0]);
+
+ const std::array<VkDeviceSize, 1> offsets{0};
+ pass.bindVertexBuffers(
+ 0u, SK_COUNTED(1u, &rectData.modelBuffer.buffer.get(), offsets.data()));
+
+ pass.bindVertexBuffers(1u,
+ SK_COUNTED(1u,
+ &rectData.instanceBuffer.buffer.get(),
+ offsets.data()));
+
+ pass.bindDescriptorSets(VK_PIPELINE_BIND_POINT_GRAPHICS,
+ rectPipeline.pipelineLayout,
+ 0u,
+ SK_COUNTED(1u, rectPipeline.descriptorSets.get()),
+ 0u,
+ nullptr);
+
+ pass.draw(4u, static_cast<uint32_t>(rectData.numRects), 0u, 0u);
+}
+
+VkResult
+recordCommandBuffers(const sk::VulkanApi& vk,
+ const Swapchain& swapchain,
+ const RenderPass& renderPass,
+ const RectPipeline& rectPipeline,
+ const RectData& rectData)
+{
+ VkResult r = VK_SUCCESS;
+
+ for (size_t i = 0; i < swapchain.imageViews.size(); ++i) {
+ const VkCommandBufferBeginInfo beginInfo{
+ VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
+ nullptr,
+ VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT,
+ nullptr};
+
+ auto* const commandBuffer = renderPass.commandBuffers[i];
+ auto cmd = vk.beginCommandBuffer(commandBuffer, beginInfo);
+ if (!cmd) {
+ return cmd.error();
+ }
+
+ recordCommandBuffer(
+ cmd, swapchain, renderPass, rectPipeline, rectData, i);
+
+ if ((r = cmd.end())) {
+ return r;
+ }
+ }
+
+ return VK_SUCCESS;
+}
+
+class PuglVulkanDemo;
+
+class View : public pugl::View
+{
+public:
+ View(pugl::World& world, PuglVulkanDemo& app)
+ : pugl::View{world}
+ , _app{app}
+ {}
+
+ pugl::Status onConfigure(const pugl::ConfigureEvent& event) override;
+ pugl::Status onUpdate(const pugl::UpdateEvent& event) override;
+ pugl::Status onExpose(const pugl::ExposeEvent& event) override;
+ pugl::Status onLoopEnter(const pugl::LoopEnterEvent& event) override;
+ pugl::Status onTimer(const pugl::TimerEvent& event) override;
+ pugl::Status onLoopLeave(const pugl::LoopLeaveEvent& event) override;
+ pugl::Status onKeyPress(const pugl::KeyPressEvent& event) override;
+ pugl::Status onClose(const pugl::CloseEvent& event) override;
+
+private:
+ PuglVulkanDemo& _app;
+};
+
+class PuglVulkanDemo
+{
+public:
+ PuglVulkanDemo(const PuglTestOptions& o, size_t numRects);
+
+ PuglTestOptions opts;
+ pugl::World world;
+ pugl::VulkanLoader loader;
+ View view;
+ VulkanContext vulkan;
+ GraphicsDevice gpu;
+ Renderer renderer;
+ RectData rectData;
+ RectShaders rectShaders;
+ uint32_t framesDrawn{0};
+ VkExtent2D extent{512u, 512u};
+ std::vector<Rect> rects;
+ bool resizing{false};
+ bool quit{false};
+};
+
+std::vector<Rect>
+makeRects(const size_t numRects, const uint32_t windowWidth)
+{
+ std::vector<Rect> rects(numRects);
+ for (size_t i = 0; i < numRects; ++i) {
+ rects[i] = makeRect(i, static_cast<float>(windowWidth));
+ }
+
+ return rects;
+}
+
+PuglVulkanDemo::PuglVulkanDemo(const PuglTestOptions& o, const size_t numRects)
+ : opts{o}
+ , world{pugl::WorldType::program,
+ static_cast<pugl::WorldFlags>(pugl::WorldFlag::threads)} // FIXME?
+ , loader{world}
+ , view{world, *this}
+ , rects{makeRects(numRects, extent.width)}
+{}
+
+VkResult
+recreateRenderer(PuglVulkanDemo& app,
+ const sk::VulkanApi& vk,
+ const GraphicsDevice& gpu,
+ const VkExtent2D extent,
+ const RectData& rectData,
+ const RectShaders& rectShaders)
+{
+ VkResult r = VK_SUCCESS;
+ VkSurfaceCapabilitiesKHR capabilities = {};
+ if ((r = vk.getPhysicalDeviceSurfaceCapabilitiesKHR(gpu.physicalDevice,
+ gpu.surface,
+ capabilities))) {
+ return r;
+ }
+
+ // There is a known race issue here, so we clamp and hope for the best
+ const VkExtent2D clampedExtent{
+ std::min(capabilities.maxImageExtent.width,
+ std::max(capabilities.minImageExtent.width, extent.width)),
+ std::min(capabilities.maxImageExtent.height,
+ std::max(capabilities.minImageExtent.height, extent.height))};
+
+ if ((r = vk.deviceWaitIdle(gpu.device)) ||
+ (r = app.renderer.recreate(vk,
+ gpu.surface,
+ gpu,
+ rectData,
+ rectShaders,
+ clampedExtent,
+ app.resizing))) {
+ return r;
+ }
+
+ // Reset current (initially signaled) fence because we already waited
+ vk.resetFence(gpu.device,
+ app.renderer.sync.inFlight[app.renderer.sync.currentFrame]);
+
+ // Record new command buffers
+ return recordCommandBuffers(vk,
+ app.renderer.swapchain,
+ app.renderer.renderPass,
+ app.renderer.rectPipeline,
+ rectData);
+}
+
+pugl::Status
+View::onConfigure(const pugl::ConfigureEvent& event)
+{
+ // We just record the size here and lazily resize the surface when exposed
+ _app.extent = {static_cast<uint32_t>(event.width),
+ static_cast<uint32_t>(event.height)};
+
+ return pugl::Status::success;
+}
+
+pugl::Status
+View::onUpdate(const pugl::UpdateEvent&)
+{
+ return postRedisplay();
+}
+
+VkResult
+beginFrame(PuglVulkanDemo& app, const sk::Device& device, uint32_t& imageIndex)
+{
+ const auto& vk = app.vulkan.vk;
+
+ VkResult r = VK_SUCCESS;
+
+ // Wait until we can start rendering the next frame
+ if ((r = vk.waitForFence(
+ device,
+ app.renderer.sync.inFlight[app.renderer.sync.currentFrame])) ||
+ (r = vk.resetFence(
+ device,
+ app.renderer.sync.inFlight[app.renderer.sync.currentFrame]))) {
+ return r;
+ }
+
+ // Rebuild the renderer first if the window size has changed
+ if (app.extent.width != app.renderer.swapchain.extent.width ||
+ app.extent.height != app.renderer.swapchain.extent.height) {
+ if ((r = recreateRenderer(app,
+ vk,
+ app.gpu,
+ app.extent,
+ app.rectData,
+ app.rectShaders))) {
+ return r;
+ }
+ }
+
+ // Acquire the next image to render, rebuilding if necessary
+ while (
+ (r = vk.acquireNextImageKHR(
+ device,
+ app.renderer.swapchain.swapchain,
+ UINT64_MAX,
+ app.renderer.sync.imageAvailable[app.renderer.sync.currentFrame],
+ {},
+ &imageIndex))) {
+ switch (r) {
+ case VK_SUBOPTIMAL_KHR:
+ case VK_ERROR_OUT_OF_DATE_KHR:
+ if ((r = recreateRenderer(app,
+ vk,
+ app.gpu,
+ app.renderer.swapchain.extent,
+ app.rectData,
+ app.rectShaders))) {
+ return r;
+ }
+ continue;
+ default:
+ return r;
+ }
+ }
+
+ return VK_SUCCESS;
+}
+
+void
+update(PuglVulkanDemo& app, const double time)
+{
+ // Animate rectangles
+ for (size_t i = 0; i < app.rects.size(); ++i) {
+ moveRect(&app.rects[i],
+ i,
+ app.rects.size(),
+ static_cast<float>(app.extent.width),
+ static_cast<float>(app.extent.height),
+ time);
+ }
+
+ // Update vertex buffer
+ memcpy(app.rectData.vertexData.get(),
+ app.rects.data(),
+ sizeof(Rect) * app.rects.size());
+
+ // Update uniform buffer
+ UniformBufferObject ubo = {{}};
+ mat4Ortho(ubo.projection,
+ 0.0f,
+ float(app.renderer.swapchain.extent.width),
+ 0.0f,
+ float(app.renderer.swapchain.extent.height),
+ -1.0f,
+ 1.0f);
+
+ memcpy(app.rectData.uniformData.get(), &ubo, sizeof(ubo));
+}
+
+VkResult
+endFrame(const sk::VulkanApi& vk,
+ const GraphicsDevice& gpu,
+ const Renderer& renderer,
+ const uint32_t imageIndex)
+{
+ const auto currentFrame = renderer.sync.currentFrame;
+ VkResult r = VK_SUCCESS;
+
+ static constexpr VkPipelineStageFlags waitStage =
+ VK_PIPELINE_STAGE_TRANSFER_BIT;
+
+ const VkSubmitInfo submitInfo{
+ VK_STRUCTURE_TYPE_SUBMIT_INFO,
+ nullptr,
+ SK_COUNTED(1, &renderer.sync.imageAvailable[currentFrame].get()),
+ &waitStage,
+ SK_COUNTED(1, &renderer.renderPass.commandBuffers[imageIndex]),
+ SK_COUNTED(1, &renderer.sync.renderFinished[imageIndex].get())};
+
+ if ((r = vk.queueSubmit(gpu.graphicsQueue,
+ submitInfo,
+ renderer.sync.inFlight[currentFrame]))) {
+ return r;
+ }
+
+ const VkPresentInfoKHR presentInfo{
+ VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
+ nullptr,
+ SK_COUNTED(1, &renderer.sync.renderFinished[imageIndex].get()),
+ SK_COUNTED(1, &renderer.swapchain.swapchain.get(), &imageIndex),
+ nullptr};
+
+ switch ((r = vk.queuePresentKHR(gpu.graphicsQueue, presentInfo))) {
+    case VK_SUCCESS:               // Successfully presented
+ case VK_SUBOPTIMAL_KHR: // Probably a resize race, ignore
+ case VK_ERROR_OUT_OF_DATE_KHR: // Probably a resize race, ignore
+ break;
+ default:
+ return r;
+ }
+
+ return VK_SUCCESS;
+}
+
+pugl::Status
+View::onExpose(const pugl::ExposeEvent&)
+{
+ const auto& vk = _app.vulkan.vk;
+ const auto& gpu = _app.gpu;
+
+ // Acquire the next image, waiting and/or rebuilding if necessary
+ auto nextImageIndex = 0u;
+ if (beginFrame(_app, gpu.device, nextImageIndex)) {
+ return pugl::Status::unknownError;
+ }
+
+ // Ready to go, update the data to the current time
+ update(_app, world().time());
+
+ // Submit the frame to the queue and present it
+ endFrame(vk, gpu, _app.renderer, nextImageIndex);
+
+ ++_app.framesDrawn;
+ ++_app.renderer.sync.currentFrame;
+ _app.renderer.sync.currentFrame %= _app.renderer.sync.inFlight.size();
+
+ return pugl::Status::success;
+}
+
+pugl::Status
+View::onLoopEnter(const pugl::LoopEnterEvent&)
+{
+ _app.resizing = true;
+ startTimer(resizeTimerId,
+ 1.0 / static_cast<double>(getHint(pugl::ViewHint::refreshRate)));
+
+ return pugl::Status::success;
+}
+
+pugl::Status
+View::onTimer(const pugl::TimerEvent&)
+{
+ return postRedisplay();
+}
+
+pugl::Status
+View::onLoopLeave(const pugl::LoopLeaveEvent&)
+{
+ stopTimer(resizeTimerId);
+
+ // Trigger a swapchain recreation with the normal present mode
+ _app.renderer.swapchain.extent = {};
+ _app.resizing = false;
+
+ return pugl::Status::success;
+}
+
+pugl::Status
+View::onKeyPress(const pugl::KeyPressEvent& event)
+{
+ if (event.key == PUGL_KEY_ESCAPE || event.key == 'q') {
+ _app.quit = true;
+ }
+
+ return pugl::Status::success;
+}
+
+pugl::Status
+View::onClose(const pugl::CloseEvent&)
+{
+ _app.quit = true;
+
+ return pugl::Status::success;
+}
+
+VkResult
+VulkanContext::init(pugl::VulkanLoader& loader, const PuglTestOptions& opts)
+{
+ VkResult r = VK_SUCCESS;
+
+ sk::VulkanInitApi initApi{};
+
+ // Load Vulkan API and set up the fundamentals
+ if ((r = initApi.init(loader.getInstanceProcAddrFunc())) ||
+ (r = createInstance(initApi, opts, instance)) ||
+ (r = vk.init(initApi, instance)) ||
+ (r = getDebugReportCallback(
+ vk, instance, opts.verbose, debugCallback))) {
+ return r;
+ }
+
+ return VK_SUCCESS;
+}
+
+int
+run(const PuglTestOptions opts, const size_t numRects)
+{
+ PuglVulkanDemo app{opts, numRects};
+
+ VkResult r = VK_SUCCESS;
+ const auto width = static_cast<int>(app.extent.width);
+ const auto height = static_cast<int>(app.extent.height);
+
+ // Realize window so we can set up Vulkan
+ app.world.setClassName("PuglVulkanDemo");
+ app.view.setWindowTitle("Pugl Vulkan Demo");
+ app.view.setAspectRatio(1, 1, 16, 9);
+ app.view.setDefaultSize(width, height);
+ app.view.setMinSize(width / 4, height / 4);
+ app.view.setBackend(pugl::vulkanBackend());
+ app.view.setHint(pugl::ViewHint::resizable, opts.resizable);
+ const pugl::Status st = app.view.realize();
+ if (st != pugl::Status::success) {
+ return logError("Failed to create window (%s)\n", pugl::strerror(st));
+ }
+
+ if (!app.loader) {
+ return logError("Failed to load Vulkan library\n");
+ }
+
+ // Load Vulkan for the view
+ if ((r = app.vulkan.init(app.loader, opts))) {
+ return logError("Failed to set up Vulkan API (%s)\n", sk::string(r));
+ }
+
+ const auto& vk = app.vulkan.vk;
+
+ // Set up the graphics device
+ if ((r = app.gpu.init(app.loader, app.vulkan, app.view, opts))) {
+ return logError("Failed to set up device (%s)\n", sk::string(r));
+ }
+
+ logInfo("Present mode", sk::string(app.gpu.presentMode));
+ logInfo("Resize present mode", sk::string(app.gpu.resizePresentMode));
+
+ // Set up the rectangle data we will render every frame
+ if ((r = app.rectData.init(vk, app.gpu, app.rects.size()))) {
+ return logError("Failed to allocate render data (%s)\n", sk::string(r));
+ }
+
+ // Load shader modules
+ if ((r = app.rectShaders.init(vk, app.gpu))) {
+ return logError("Failed to load shaders (%s)\n", sk::string(r));
+ }
+
+ if ((r = app.renderer.init(app.vulkan.vk,
+ app.gpu,
+ app.rectData,
+ app.rectShaders,
+ app.extent,
+ false))) {
+ return logError("Failed to create renderer (%s)\n", sk::string(r));
+ }
+
+ logInfo("Swapchain frames",
+ std::to_string(app.renderer.swapchain.imageViews.size()));
+ logInfo("Frames in flight",
+ std::to_string(app.renderer.sync.inFlight.size()));
+
+ recordCommandBuffers(app.vulkan.vk,
+ app.renderer.swapchain,
+ app.renderer.renderPass,
+ app.renderer.rectPipeline,
+ app.rectData);
+
+ const int refreshRate = app.view.getHint(pugl::ViewHint::refreshRate);
+ const double frameDuration = 1.0 / static_cast<double>(refreshRate);
+ const double timeout = app.opts.sync ? frameDuration : 0.0;
+
+ PuglFpsPrinter fpsPrinter = {app.world.time()};
+ app.view.showWindow();
+ while (!app.quit) {
+ app.world.update(timeout);
+ puglPrintFps(app.world.cobj(), &fpsPrinter, &app.framesDrawn);
+ }
+
+ if ((r = app.vulkan.vk.deviceWaitIdle(app.gpu.device))) {
+ return logError("Failed to wait for device idle (%s)\n", sk::string(r));
+ }
+
+ return 0;
+}
+
+} // namespace
+
+int
+main(int argc, char** argv)
+{
+ // Parse command line options
+ const PuglTestOptions opts = puglParseTestOptions(&argc, &argv);
+ if (opts.help) {
+ puglPrintTestUsage(argv[0], "");
+ return 0;
+ }
+
+ // Parse number of rectangles argument, if given
+ int64_t numRects = 1000;
+ if (argc >= 1) {
+ char* endptr = nullptr;
+ numRects = strtol(argv[0], &endptr, 10);
+ if (endptr != argv[0] + strlen(argv[0]) || numRects < 1) {
+ logError("Invalid number of rectangles: %s\n", argv[0]);
+ return 1;
+ }
+ }
+
+ // Run application
+ return run(opts, static_cast<size_t>(numRects));
+}
diff --git a/examples/pugl_vulkan_demo.c b/examples/pugl_vulkan_demo.c
new file mode 100644
index 0000000..ef4550a
--- /dev/null
+++ b/examples/pugl_vulkan_demo.c
@@ -0,0 +1,1134 @@
+/*
+ Copyright 2019-2020 David Robillard <http://drobilla.net>
+ Copyright 2019 Jordan Halase <jordan@halase.me>
+
+ Permission to use, copy, modify, and/or distribute this software for any
+ purpose with or without fee is hereby granted, provided that the above
+ copyright notice and this permission notice appear in all copies.
+
+ THIS SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/**
+ @file pugl_vulkan_demo.c
+ @brief A simple example of drawing with Vulkan.
+*/
+
+#include "demo_utils.h"
+#include "test/test_utils.h"
+
+#include "pugl/pugl.h"
+#include "pugl/pugl_vulkan.h"
+
+#include <vulkan/vk_platform.h>
+#include <vulkan/vulkan_core.h>
+
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
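+// Clamp x to the inclusive range [l, h]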
+#define CLAMP(x, l, h) ((x) <= (l) ? (l) : (x) >= (h) ? (h) : (x))
+
+// Vulkan allocation callbacks which can be used for debugging
+#define ALLOC_VK NULL
+
+// Helper macro for allocating arrays by type, with C++ compatible cast
+#define AALLOC(size, Type) ((Type*)calloc(size, sizeof(Type)))
+
+// Helper macro for counted array arguments to make clang-format behave
+#define COUNTED(count, ...) count, __VA_ARGS__
+
+/// Dynamically loaded Vulkan API functions
+typedef struct {
+ PFN_vkCreateDebugReportCallbackEXT vkCreateDebugReportCallbackEXT;
+ PFN_vkDestroyDebugReportCallbackEXT vkDestroyDebugReportCallbackEXT;
+} InstanceAPI;
+
+/// Vulkan swapchain and everything that depends on it
+typedef struct {
+ VkSwapchainKHR rawSwapchain;
+ uint32_t nImages;
+ VkExtent2D extent;
+ VkImage* images;
+ VkImageView* imageViews;
+ VkFence* fences;
+ VkCommandBuffer* commandBuffers;
+} Swapchain;
+
+/// Synchronization semaphores
+typedef struct {
+ VkSemaphore presentComplete;
+ VkSemaphore renderFinished;
+} Sync;
+
+/// Vulkan state; functions that are purely Vulkan can depend on only this
+typedef struct {
+ InstanceAPI api;
+ VkInstance instance;
+ VkDebugReportCallbackEXT debugCallback;
+ VkSurfaceKHR surface;
+ VkSurfaceFormatKHR surfaceFormat;
+ VkPresentModeKHR presentMode;
+ VkPhysicalDeviceProperties deviceProperties;
+ VkPhysicalDevice physicalDevice;
+ uint32_t graphicsIndex;
+ VkDevice device;
+ VkQueue graphicsQueue;
+ VkCommandPool commandPool;
+ Swapchain* swapchain;
+ Sync sync;
+} VulkanState;
+
+/// Complete application
+typedef struct {
+ PuglTestOptions opts;
+ PuglWorld* world;
+ PuglView* view;
+ VulkanState vk;
+ uint32_t framesDrawn;
+ uint32_t width;
+ uint32_t height;
+ bool quit;
+} VulkanApp;
+
+static VKAPI_ATTR VkBool32 VKAPI_CALL
+debugCallback(VkDebugReportFlagsEXT flags,
+ VkDebugReportObjectTypeEXT objType,
+ uint64_t obj,
+ size_t location,
+ int32_t code,
+ const char* layerPrefix,
+ const char* msg,
+ void* userData)
+{
+ (void)userData;
+ (void)objType;
+ (void)obj;
+ (void)location;
+ (void)code;
+
+ if (flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT) {
+ fprintf(stderr, "note: ");
+ } else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT) {
+ fprintf(stderr, "warning: ");
+ } else if (flags & VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT) {
+ fprintf(stderr, "performance warning: ");
+ } else if (flags & VK_DEBUG_REPORT_ERROR_BIT_EXT) {
+ fprintf(stderr, "error: ");
+ } else if (flags & VK_DEBUG_REPORT_DEBUG_BIT_EXT) {
+ fprintf(stderr, "debug: ");
+ }
+
+ fprintf(stderr, "%s: ", layerPrefix);
+ fprintf(stderr, "%s\n", msg);
+ return VK_FALSE;
+}
+
+static bool
+hasExtension(const char* const name,
+ const VkExtensionProperties* const properties,
+ const uint32_t count)
+{
+ for (uint32_t i = 0; i < count; ++i) {
+ if (!strcmp(properties[i].extensionName, name)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static bool
+hasLayer(const char* const name,
+ const VkLayerProperties* const properties,
+ const uint32_t count)
+{
+ for (uint32_t i = 0; i < count; ++i) {
+ if (!strcmp(properties[i].layerName, name)) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
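+/// Append a string to a heap-allocated string array, expanding it by one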
+static void
+pushString(const char*** const array,
+ uint32_t* const count,
+ const char* const string)
+{
+ *array = (const char**)realloc(*array, (*count + 1) * sizeof(const char*));
+ (*array)[*count] = string;
+ ++*count;
+}
+
+static VkResult
+createInstance(VulkanApp* const app)
+{
+ const VkApplicationInfo appInfo = {
+ VK_STRUCTURE_TYPE_APPLICATION_INFO,
+ NULL,
+ "Pugl Vulkan Test",
+ VK_MAKE_VERSION(0, 1, 0),
+ "Pugl Vulkan Test Engine",
+ VK_MAKE_VERSION(0, 1, 0),
+ VK_MAKE_VERSION(1, 0, 0),
+ };
+
+ // Get the number of supported extensions and layers
+ VkResult vr = VK_SUCCESS;
+ uint32_t nExtProps = 0;
+ uint32_t nLayerProps = 0;
+ if ((vr = vkEnumerateInstanceLayerProperties(&nLayerProps, NULL)) ||
+ (vr = vkEnumerateInstanceExtensionProperties(NULL, &nExtProps, NULL))) {
+ return vr;
+ }
+
+ // Get properties of supported extensions
+ VkExtensionProperties* extProps = AALLOC(nExtProps, VkExtensionProperties);
+ vkEnumerateInstanceExtensionProperties(NULL, &nExtProps, extProps);
+
+ uint32_t nExtensions = 0;
+ const char** extensions = NULL;
+
+ // Add extensions required by pugl
+ uint32_t nPuglExts = 0;
+ const char* const* puglExts = puglGetInstanceExtensions(&nPuglExts);
+ for (uint32_t i = 0; i < nPuglExts; ++i) {
+ pushString(&extensions, &nExtensions, puglExts[i]);
+ }
+
+ // Add extra extensions we want to use if they are supported
+ if (hasExtension("VK_EXT_debug_report", extProps, nExtProps)) {
+ pushString(&extensions, &nExtensions, "VK_EXT_debug_report");
+ }
+
+ // Get properties of supported layers
+ VkLayerProperties* layerProps = AALLOC(nLayerProps, VkLayerProperties);
+ vkEnumerateInstanceLayerProperties(&nLayerProps, layerProps);
+
+ // Add validation layers if error checking is enabled
+ uint32_t nLayers = 0;
+ const char** layers = NULL;
+ if (app->opts.errorChecking) {
+ const char* debugLayers[] = {"VK_LAYER_KHRONOS_validation",
+ "VK_LAYER_LUNARG_standard_validation",
+ NULL};
+
+ for (const char** l = debugLayers; *l; ++l) {
+ if (hasLayer(*l, layerProps, nLayerProps)) {
+ pushString(&layers, &nLayers, *l);
+ }
+ }
+ }
+
+ for (uint32_t i = 0; i < nExtensions; ++i) {
+ printf("Using instance extension: %s\n", extensions[i]);
+ }
+
+ for (uint32_t i = 0; i < nLayers; ++i) {
+ printf("Using instance layer: %s\n", layers[i]);
+ }
+
+ const VkInstanceCreateInfo createInfo = {
+ VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,
+ NULL,
+ 0,
+ &appInfo,
+ COUNTED(nLayers, layers),
+ COUNTED(nExtensions, extensions),
+ };
+
+ if ((vr = vkCreateInstance(&createInfo, ALLOC_VK, &app->vk.instance))) {
+ logError("Could not create Vulkan Instance: %d\n", vr);
+ }
+
+ free(layers);
+ free(extensions);
+ free(layerProps);
+ free(extProps);
+
+ return vr;
+}
+
+static VkResult
+enableDebugging(VulkanState* const vk)
+{
+ vk->api.vkCreateDebugReportCallbackEXT =
+ (PFN_vkCreateDebugReportCallbackEXT)vkGetInstanceProcAddr(
+ vk->instance, "vkCreateDebugReportCallbackEXT");
+
+ vk->api.vkDestroyDebugReportCallbackEXT =
+ (PFN_vkDestroyDebugReportCallbackEXT)vkGetInstanceProcAddr(
+ vk->instance, "vkDestroyDebugReportCallbackEXT");
+
+ if (vk->api.vkCreateDebugReportCallbackEXT) {
+ const VkDebugReportCallbackCreateInfoEXT info = {
+ VK_STRUCTURE_TYPE_DEBUG_REPORT_CALLBACK_CREATE_INFO_EXT,
+ NULL,
+ VK_DEBUG_REPORT_ERROR_BIT_EXT | VK_DEBUG_REPORT_WARNING_BIT_EXT,
+ debugCallback,
+ NULL,
+ };
+
+ VkResult vr = VK_SUCCESS;
+ if ((vr = vk->api.vkCreateDebugReportCallbackEXT(
+ vk->instance, &info, ALLOC_VK, &vk->debugCallback))) {
+ logError("Could not create debug reporter: %d\n", vr);
+ return vr;
+ }
+ }
+
+ return VK_SUCCESS;
+}
+
+static VkResult
+getGraphicsQueueIndex(VkSurfaceKHR surface,
+ VkPhysicalDevice device,
+ uint32_t* graphicsIndex)
+{
+ VkResult r = VK_SUCCESS;
+
+ uint32_t nProps = 0;
+ vkGetPhysicalDeviceQueueFamilyProperties(device, &nProps, NULL);
+
+ VkQueueFamilyProperties* props = AALLOC(nProps, VkQueueFamilyProperties);
+ vkGetPhysicalDeviceQueueFamilyProperties(device, &nProps, props);
+
+ for (uint32_t q = 0; q < nProps; ++q) {
+ if (props[q].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
+ VkBool32 supported = false;
+ if ((r = vkGetPhysicalDeviceSurfaceSupportKHR(
+ device, q, surface, &supported))) {
+ free(props);
+ return r;
+ } else if (supported) {
+ *graphicsIndex = q;
+ free(props);
+ return VK_SUCCESS;
+ }
+ }
+ }
+
+ free(props);
+ return VK_ERROR_FEATURE_NOT_PRESENT;
+}
+
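+/// Return true if the device supports VK_KHR_swapchain, which we require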
+static bool
+supportsRequiredExtensions(const VkPhysicalDevice device)
+{
+ uint32_t nExtProps = 0;
+ vkEnumerateDeviceExtensionProperties(device, NULL, &nExtProps, NULL);
+
+ VkExtensionProperties* extProps = AALLOC(nExtProps, VkExtensionProperties);
+ vkEnumerateDeviceExtensionProperties(device, NULL, &nExtProps, extProps);
+
+ for (uint32_t i = 0; i < nExtProps; ++i) {
+ if (!strcmp(extProps[i].extensionName,
+ VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
+ free(extProps);
+ return true;
+ }
+ }
+
+ free(extProps);
+ return false;
+}
+
+static bool
+isDeviceSuitable(const VulkanState* const vk,
+ const VkPhysicalDevice device,
+ uint32_t* const graphicsIndex)
+{
+ if (!supportsRequiredExtensions(device) ||
+ getGraphicsQueueIndex(vk->surface, device, graphicsIndex)) {
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ Selects a physical graphics device.
+
+ This doesn't try to be clever, and just selects the first suitable device.
+*/
+static VkResult
+selectPhysicalDevice(VulkanState* const vk)
+{
+ VkResult vr = VK_SUCCESS;
+ if (!vk->surface) {
+ logError("Cannot select a physical device without a surface\n");
+ return VK_ERROR_SURFACE_LOST_KHR;
+ }
+
+ uint32_t nDevices = 0;
+ if ((vr = vkEnumeratePhysicalDevices(vk->instance, &nDevices, NULL))) {
+ logError("Failed to get count of physical devices: %d\n", vr);
+ return vr;
+ }
+
+ if (!nDevices) {
+ logError("No physical devices found\n");
+ return VK_ERROR_DEVICE_LOST;
+ }
+
+ VkPhysicalDevice* devices = AALLOC(nDevices, VkPhysicalDevice);
+ if ((vr = vkEnumeratePhysicalDevices(vk->instance, &nDevices, devices))) {
+ logError("Failed to enumerate physical devices: %d\n", vr);
+ free(devices);
+ return vr;
+ }
+
+ uint32_t i = 0;
+ for (i = 0; i < nDevices; ++i) {
+ VkPhysicalDeviceProperties deviceProps = {0};
+ vkGetPhysicalDeviceProperties(devices[i], &deviceProps);
+
+ uint32_t graphicsIndex = 0;
+ if (isDeviceSuitable(vk, devices[i], &graphicsIndex)) {
+ printf("Using device %u/%u: \"%s\"\n",
+ i + 1,
+ nDevices,
+ deviceProps.deviceName);
+ vk->deviceProperties = deviceProps;
+ vk->physicalDevice = devices[i];
+ vk->graphicsIndex = graphicsIndex;
+ printf("Using graphics queue family: %u\n", vk->graphicsIndex);
+ break;
+ }
+
+ printf("Device \"%s\" not suitable\n", deviceProps.deviceName);
+ }
+
+ if (i >= nDevices) {
+ logError("No suitable devices found\n");
+ vr = VK_ERROR_DEVICE_LOST;
+ }
+
+ free(devices);
+ return vr;
+}
+
+/// Opens the logical device and sets up the queue and command pool
+static VkResult
+openDevice(VulkanState* const vk)
+{
+ if (vk->device) {
+ logError("Renderer already has an opened device\n");
+ return VK_NOT_READY;
+ }
+
+ const float graphicsQueuePriority = 1.0f;
+ const char* const swapchainName = VK_KHR_SWAPCHAIN_EXTENSION_NAME;
+
+ const VkDeviceQueueCreateInfo queueCreateInfo = {
+ VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,
+ NULL,
+ 0,
+ vk->graphicsIndex,
+ COUNTED(1, &graphicsQueuePriority),
+ };
+
+ const VkDeviceCreateInfo createInfo = {
+ VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,
+ NULL,
+ 0,
+ COUNTED(1, &queueCreateInfo),
+ COUNTED(0, NULL),
+ COUNTED(1, &swapchainName),
+ NULL,
+ };
+
+ VkDevice device = NULL;
+ VkResult vr = VK_SUCCESS;
+ if ((vr = vkCreateDevice(
+ vk->physicalDevice, &createInfo, ALLOC_VK, &device))) {
+ logError("Could not open device \"%s\": %d\n",
+ vk->deviceProperties.deviceName,
+ vr);
+ return vr;
+ }
+
+ vk->device = device;
+ vkGetDeviceQueue(vk->device, vk->graphicsIndex, 0, &vk->graphicsQueue);
+
+ const VkCommandPoolCreateInfo commandInfo = {
+ VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO,
+ NULL,
+ 0,
+ vk->graphicsIndex,
+ };
+
+ if ((vr = vkCreateCommandPool(
+ vk->device, &commandInfo, ALLOC_VK, &vk->commandPool))) {
+ logError("Could not create command pool: %d\n", vr);
+ return vr;
+ }
+
+ return VK_SUCCESS;
+}
+
+static const char*
+presentModeString(const VkPresentModeKHR presentMode)
+{
+ switch (presentMode) {
+ case VK_PRESENT_MODE_IMMEDIATE_KHR:
+ return "Immediate";
+ case VK_PRESENT_MODE_MAILBOX_KHR:
+ return "Mailbox";
+ case VK_PRESENT_MODE_FIFO_KHR:
+ return "FIFO";
+ case VK_PRESENT_MODE_FIFO_RELAXED_KHR:
+ return "FIFO relaxed";
+ default:
+ return "Other";
+ }
+}
+
+static bool
+hasPresentMode(const VkPresentModeKHR mode,
+ const VkPresentModeKHR* const presentModes,
+ const uint32_t nPresentModes)
+{
+ for (uint32_t i = 0; i < nPresentModes; ++i) {
+ if (presentModes[i] == mode) {
+ return true;
+ }
+ }
+
+ return false;
+}
+
+/// Configure the surface for the currently opened device
+static VkResult
+configureSurface(VulkanState* const vk)
+{
+ uint32_t nFormats = 0;
+ vkGetPhysicalDeviceSurfaceFormatsKHR(vk->physicalDevice,
+ vk->surface,
+ &nFormats,
+ NULL);
+ if (!nFormats) {
+ logError("No surface formats available\n");
+ return VK_ERROR_FORMAT_NOT_SUPPORTED;
+ }
+
+ VkSurfaceFormatKHR* surfaceFormats = AALLOC(nFormats, VkSurfaceFormatKHR);
+ vkGetPhysicalDeviceSurfaceFormatsKHR(vk->physicalDevice,
+ vk->surface,
+ &nFormats,
+ surfaceFormats);
+
+ const VkSurfaceFormatKHR want = {VK_FORMAT_B8G8R8A8_UNORM,
+ VK_COLOR_SPACE_SRGB_NONLINEAR_KHR};
+
+ uint32_t formatIndex = 0;
+ for (formatIndex = 0; formatIndex < nFormats; ++formatIndex) {
+ if (surfaceFormats[formatIndex].format == want.format &&
+ surfaceFormats[formatIndex].colorSpace == want.colorSpace) {
+ vk->surfaceFormat = want;
+ break;
+ }
+ }
+ free(surfaceFormats);
+ if (formatIndex >= nFormats) {
+ logError("Could not find a suitable surface format\n");
+ return VK_ERROR_FORMAT_NOT_SUPPORTED;
+ }
+
+ uint32_t nPresentModes = 0;
+ vkGetPhysicalDeviceSurfacePresentModesKHR(vk->physicalDevice,
+ vk->surface,
+ &nPresentModes,
+ NULL);
+ if (!nPresentModes) {
+ logError("No present modes available\n");
+ return VK_ERROR_FORMAT_NOT_SUPPORTED;
+ }
+
+ VkPresentModeKHR* presentModes = AALLOC(nPresentModes, VkPresentModeKHR);
+ vkGetPhysicalDeviceSurfacePresentModesKHR(vk->physicalDevice,
+ vk->surface,
+ &nPresentModes,
+ presentModes);
+
+ const VkPresentModeKHR tryModes[] = {
+ VK_PRESENT_MODE_MAILBOX_KHR,
+ VK_PRESENT_MODE_FIFO_RELAXED_KHR,
+ VK_PRESENT_MODE_FIFO_KHR,
+ VK_PRESENT_MODE_IMMEDIATE_KHR,
+ };
+
+ VkPresentModeKHR presentMode = VK_PRESENT_MODE_FIFO_KHR;
+ for (uint32_t i = 0; i < sizeof(tryModes) / sizeof(VkPresentModeKHR); ++i) {
+ if (hasPresentMode(tryModes[i], presentModes, nPresentModes)) {
+ presentMode = tryModes[i];
+ break;
+ }
+ }
+
+ free(presentModes);
+ vk->presentMode = presentMode;
+ printf("Using present mode: \"%s\" (%u)\n",
+ presentModeString(presentMode),
+ presentMode);
+
+ return VK_SUCCESS;
+}
+
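+/// Create the swapchain itself, clamping our size to what the surface allows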
+static VkResult
+createRawSwapchain(VulkanState* const vk,
+ const uint32_t width,
+ const uint32_t height)
+{
+ VkSurfaceCapabilitiesKHR surfaceCapabilities;
+ VkResult vr = VK_SUCCESS;
+ if ((vr = vkGetPhysicalDeviceSurfaceCapabilitiesKHR(
+ vk->physicalDevice, vk->surface, &surfaceCapabilities))) {
+ logError("Could not get surface capabilities: %d\n", vr);
+ return vr;
+ }
+
+ /* There is a known race condition with window/surface sizes, so we clamp
+ to what Vulkan reports and hope for the best. */
+
+ vk->swapchain->extent.width =
+ CLAMP(width,
+ surfaceCapabilities.minImageExtent.width,
+ surfaceCapabilities.maxImageExtent.width);
+
+ vk->swapchain->extent.height =
+ CLAMP(height,
+ surfaceCapabilities.minImageExtent.height,
+ surfaceCapabilities.maxImageExtent.height);
+
+ vk->swapchain->nImages = surfaceCapabilities.minImageCount;
+
+ const VkSwapchainCreateInfoKHR createInfo = {
+ VK_STRUCTURE_TYPE_SWAPCHAIN_CREATE_INFO_KHR,
+ NULL,
+ 0,
+ vk->surface,
+ vk->swapchain->nImages,
+ vk->surfaceFormat.format,
+ vk->surfaceFormat.colorSpace,
+ vk->swapchain->extent,
+ 1,
+ (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_TRANSFER_DST_BIT),
+ VK_SHARING_MODE_EXCLUSIVE,
+ COUNTED(0, NULL),
+ surfaceCapabilities.currentTransform,
+ VK_COMPOSITE_ALPHA_OPAQUE_BIT_KHR,
+ vk->presentMode,
+ VK_TRUE,
+ 0,
+ };
+
+ if ((vr = vkCreateSwapchainKHR(vk->device,
+ &createInfo,
+ ALLOC_VK,
+ &vk->swapchain->rawSwapchain))) {
+ logError("Could not create swapchain: %d\n", vr);
+ return vr;
+ }
+
+ return VK_SUCCESS;
+}
+
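+/// Record a command buffer for each swapchain image that simply clears it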
+static VkResult
+recordCommandBuffers(VulkanState* const vk)
+{
+ const VkClearColorValue clearValue = {{
+ 0xA4 / (float)0x100, // R
+ 0x1E / (float)0x100, // G
+ 0x22 / (float)0x100, // B
+ 0xFF / (float)0x100, // A
+ }};
+
+ const VkImageSubresourceRange range = {
+ VK_IMAGE_ASPECT_COLOR_BIT,
+ 0,
+ 1,
+ 0,
+ 1,
+ };
+
+ const VkCommandBufferBeginInfo beginInfo = {
+ VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO,
+ NULL,
+ VK_COMMAND_BUFFER_USAGE_SIMULTANEOUS_USE_BIT,
+ NULL,
+ };
+
+ for (uint32_t i = 0; i < vk->swapchain->nImages; ++i) {
+ const VkImageMemoryBarrier toClearBarrier = {
+ VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+ NULL,
+ VK_ACCESS_MEMORY_READ_BIT,
+ VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_IMAGE_LAYOUT_UNDEFINED,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ vk->graphicsIndex,
+ vk->graphicsIndex,
+ vk->swapchain->images[i],
+ range,
+ };
+
+ const VkImageMemoryBarrier toPresentBarrier = {
+ VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER,
+ NULL,
+ VK_ACCESS_TRANSFER_WRITE_BIT,
+ VK_ACCESS_MEMORY_READ_BIT,
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ VK_IMAGE_LAYOUT_PRESENT_SRC_KHR,
+ vk->graphicsIndex,
+ vk->graphicsIndex,
+ vk->swapchain->images[i],
+ range,
+ };
+
+ vkBeginCommandBuffer(vk->swapchain->commandBuffers[i], &beginInfo);
+
+ vkCmdPipelineBarrier(vk->swapchain->commandBuffers[i],
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ 0,
+ COUNTED(0, NULL),
+ COUNTED(0, NULL),
+ COUNTED(1, &toClearBarrier));
+
+ vkCmdClearColorImage(vk->swapchain->commandBuffers[i],
+ vk->swapchain->images[i],
+ VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+ &clearValue,
+ COUNTED(1, &range));
+
+ vkCmdPipelineBarrier(vk->swapchain->commandBuffers[i],
+ VK_PIPELINE_STAGE_TRANSFER_BIT,
+ VK_PIPELINE_STAGE_BOTTOM_OF_PIPE_BIT,
+ 0,
+ COUNTED(0, NULL),
+ COUNTED(0, NULL),
+ COUNTED(1, &toPresentBarrier));
+
+ vkEndCommandBuffer(vk->swapchain->commandBuffers[i]);
+ }
+
+ return VK_SUCCESS;
+}
+
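+/// Create the swapchain along with its images, command buffers, and fences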
+static VkResult
+createSwapchain(VulkanState* const vk,
+ const uint32_t width,
+ const uint32_t height)
+{
+ VkResult vr = VK_SUCCESS;
+
+ vk->swapchain = AALLOC(1, Swapchain);
+ if ((vr = createRawSwapchain(vk, width, height))) {
+ return vr;
+ }
+
+ if ((vr = vkGetSwapchainImagesKHR(vk->device,
+ vk->swapchain->rawSwapchain,
+ &vk->swapchain->nImages,
+ NULL))) {
+ logError("Failed to query swapchain images: %d\n", vr);
+ return vr;
+ }
+
+ vk->swapchain->images = AALLOC(vk->swapchain->nImages, VkImage);
+ if ((vr = vkGetSwapchainImagesKHR(vk->device,
+ vk->swapchain->rawSwapchain,
+ &vk->swapchain->nImages,
+ vk->swapchain->images))) {
+ logError("Failed to get swapchain images: %d\n", vr);
+ return vr;
+ }
+
+ const VkCommandBufferAllocateInfo allocInfo = {
+ VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO,
+ NULL,
+ vk->commandPool,
+ VK_COMMAND_BUFFER_LEVEL_PRIMARY,
+ vk->swapchain->nImages,
+ };
+
+ vk->swapchain->commandBuffers = AALLOC(vk->swapchain->nImages,
+ VkCommandBuffer);
+
+ if ((vr = vkAllocateCommandBuffers(vk->device,
+ &allocInfo,
+ vk->swapchain->commandBuffers))) {
+ logError("Could not allocate command buffers: %d\n", vr);
+ return vr;
+ }
+
+ const VkFenceCreateInfo fenceInfo = {
+ VK_STRUCTURE_TYPE_FENCE_CREATE_INFO,
+ NULL,
+ VK_FENCE_CREATE_SIGNALED_BIT,
+ };
+ vk->swapchain->fences = AALLOC(vk->swapchain->nImages, VkFence);
+
+ for (uint32_t i = 0; i < vk->swapchain->nImages; ++i) {
+ if ((vr = vkCreateFence(vk->device,
+ &fenceInfo,
+ ALLOC_VK,
+ &vk->swapchain->fences[i]))) {
+ logError("Could not create render finished fence: %d\n", vr);
+ return vr;
+ }
+ }
+
+ if ((vr = recordCommandBuffers(vk))) {
+ logError("Failed to record command buffers\n");
+ return vr;
+ }
+
+ return VK_SUCCESS;
+}
+
+static void
+destroySwapchain(VulkanState* const vk, Swapchain* const swapchain)
+{
+ if (!swapchain) {
+ return;
+ }
+
+ for (uint32_t i = 0; i < swapchain->nImages; ++i) {
+ if (swapchain->fences[i]) {
+ vkDestroyFence(vk->device, swapchain->fences[i], ALLOC_VK);
+ }
+
+ if (swapchain->imageViews && swapchain->imageViews[i]) {
+ vkDestroyImageView(vk->device, swapchain->imageViews[i], ALLOC_VK);
+ }
+ }
+
+ free(swapchain->fences);
+ swapchain->fences = NULL;
+ free(swapchain->imageViews);
+ swapchain->imageViews = NULL;
+
+ if (swapchain->images) {
+ free(swapchain->images);
+ swapchain->images = NULL;
+ }
+
+ if (swapchain->commandBuffers) {
+ vkFreeCommandBuffers(vk->device,
+ vk->commandPool,
+ swapchain->nImages,
+ swapchain->commandBuffers);
+ free(swapchain->commandBuffers);
+ }
+
+ if (swapchain->rawSwapchain) {
+ vkDestroySwapchainKHR(vk->device, swapchain->rawSwapchain, ALLOC_VK);
+ }
+
+ free(swapchain);
+}
+
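+/// Create the semaphores used to synchronize image acquisition and presentation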
+static VkResult
+createSyncObjects(VulkanState* const vk)
+{
+ const VkSemaphoreCreateInfo info = {
+ VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO,
+ NULL,
+ 0,
+ };
+
+ vkCreateSemaphore(vk->device, &info, ALLOC_VK, &vk->sync.presentComplete);
+ vkCreateSemaphore(vk->device, &info, ALLOC_VK, &vk->sync.renderFinished);
+ return VK_SUCCESS;
+}
+
+static void
+destroySyncObjects(VulkanState* const vk)
+{
+ if (vk->sync.renderFinished) {
+ vkDestroySemaphore(vk->device, vk->sync.renderFinished, ALLOC_VK);
+ vk->sync.renderFinished = VK_NULL_HANDLE;
+ }
+ if (vk->sync.presentComplete) {
+ vkDestroySemaphore(vk->device, vk->sync.presentComplete, ALLOC_VK);
+ vk->sync.presentComplete = VK_NULL_HANDLE;
+ }
+}
+
+static void
+closeDevice(VulkanState* const vk)
+{
+ if (vk->device) {
+ vkDeviceWaitIdle(vk->device);
+ destroySyncObjects(vk);
+ destroySwapchain(vk, vk->swapchain);
+ if (vk->commandPool) {
+ vkDestroyCommandPool(vk->device, vk->commandPool, ALLOC_VK);
+ vk->commandPool = VK_NULL_HANDLE;
+ }
+ vk->graphicsQueue = VK_NULL_HANDLE;
+ vkDestroyDevice(vk->device, ALLOC_VK);
+ vk->device = VK_NULL_HANDLE;
+ }
+}
+
+static void
+destroyWorld(VulkanApp* const app)
+{
+ VulkanState* vk = &app->vk;
+
+ if (vk) {
+ closeDevice(vk);
+
+ if (app->view) {
+ puglHideWindow(app->view);
+ puglFreeView(app->view);
+ app->view = NULL;
+ }
+ if (vk->debugCallback && vk->api.vkDestroyDebugReportCallbackEXT) {
+ vk->api.vkDestroyDebugReportCallbackEXT(vk->instance,
+ vk->debugCallback,
+ ALLOC_VK);
+ vk->debugCallback = VK_NULL_HANDLE;
+ }
+ if (vk->surface) {
+ vkDestroySurfaceKHR(vk->instance, vk->surface, ALLOC_VK);
+ vk->surface = VK_NULL_HANDLE;
+ }
+ if (vk->instance) {
+ fflush(stderr);
+ vkDestroyInstance(vk->instance, ALLOC_VK);
+ vk->instance = VK_NULL_HANDLE;
+ }
+ if (app->world) {
+ puglFreeWorld(app->world);
+ app->world = NULL;
+ }
+ }
+}
+
+static PuglStatus
+onConfigure(PuglView* const view, const double width, const double height)
+{
+ VulkanApp* const app = (VulkanApp*)puglGetHandle(view);
+
+ // We just record the size here and lazily resize the surface when exposed
+ app->width = (uint32_t)width;
+ app->height = (uint32_t)height;
+
+ return PUGL_SUCCESS;
+}
+
+static PuglStatus
+recreateSwapchain(VulkanState* const vk,
+ const uint32_t width,
+ const uint32_t height)
+{
+ vkDeviceWaitIdle(vk->device);
+ destroySwapchain(vk, vk->swapchain);
+
+ if (createSwapchain(vk, width, height)) {
+ logError("Failed to recreate swapchain\n");
+ return PUGL_UNKNOWN_ERROR;
+ }
+
+ return PUGL_SUCCESS;
+}
+
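+/// Draw one frame: acquire an image, submit its prerecorded commands, and present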
+static PuglStatus
+onExpose(PuglView* const view)
+{
+ VulkanApp* app = (VulkanApp*)puglGetHandle(view);
+ VulkanState* vk = &app->vk;
+ uint32_t imageIndex = 0;
+ VkResult result = VK_SUCCESS;
+
+ // Recreate swapchain if the window size has changed
+ const Swapchain* swapchain = vk->swapchain;
+ if (swapchain->extent.width != app->width ||
+ swapchain->extent.height != app->height) {
+ recreateSwapchain(vk, app->width, app->height);
+ }
+
+ // Acquire the next image to render, rebuilding if necessary
+ while ((result = vkAcquireNextImageKHR(vk->device,
+ vk->swapchain->rawSwapchain,
+ UINT64_MAX,
+ vk->sync.presentComplete,
+ VK_NULL_HANDLE,
+ &imageIndex))) {
+ switch (result) {
+ case VK_SUCCESS:
+ break;
+ case VK_SUBOPTIMAL_KHR:
+ case VK_ERROR_OUT_OF_DATE_KHR:
+ recreateSwapchain(vk, app->width, app->height);
+ continue;
+ default:
+ logError("Could not acquire swapchain image: %d\n", result);
+ return PUGL_UNKNOWN_ERROR;
+ }
+ }
+
+ // Wait until we can start rendering this frame
+ vkWaitForFences(vk->device,
+ COUNTED(1, &vk->swapchain->fences[imageIndex]),
+ VK_TRUE,
+ UINT64_MAX);
+ vkResetFences(vk->device, 1, &vk->swapchain->fences[imageIndex]);
+
+ const VkPipelineStageFlags waitStage = VK_PIPELINE_STAGE_TRANSFER_BIT;
+
+ // Submit command buffer to render this frame
+ const VkSubmitInfo submitInfo = {
+ VK_STRUCTURE_TYPE_SUBMIT_INFO,
+ NULL,
+ COUNTED(1, &vk->sync.presentComplete),
+ &waitStage,
+ COUNTED(1, &vk->swapchain->commandBuffers[imageIndex]),
+ COUNTED(1, &vk->sync.renderFinished)};
+ if ((result = vkQueueSubmit(vk->graphicsQueue,
+ 1,
+ &submitInfo,
+ vk->swapchain->fences[imageIndex]))) {
+ logError("Could not submit to queue: %d\n", result);
+ return PUGL_FAILURE;
+ }
+
+ // Present this frame
+ const VkPresentInfoKHR presentInfo = {
+ VK_STRUCTURE_TYPE_PRESENT_INFO_KHR,
+ NULL,
+ COUNTED(1, &vk->sync.renderFinished),
+ COUNTED(1, &vk->swapchain->rawSwapchain, &imageIndex, NULL),
+ };
+ if ((result = vkQueuePresentKHR(vk->graphicsQueue, &presentInfo))) {
+ logError("Could not present image: %d\n", result);
+ }
+
+ if (app->opts.continuous) {
+ ++app->framesDrawn;
+ }
+
+ return PUGL_SUCCESS;
+}
+
+static PuglStatus
+onEvent(PuglView* const view, const PuglEvent* const e)
+{
+ VulkanApp* const app = (VulkanApp*)puglGetHandle(view);
+
+ printEvent(e, "Event: ", app->opts.verbose);
+
+ switch (e->type) {
+ case PUGL_EXPOSE:
+ return onExpose(view);
+ case PUGL_CONFIGURE:
+ return onConfigure(view, e->configure.width, e->configure.height);
+ case PUGL_CLOSE:
+ app->quit = 1;
+ break;
+ case PUGL_KEY_PRESS:
+ switch (e->key.key) {
+ case PUGL_KEY_ESCAPE:
+ case 'q':
+ app->quit = 1;
+ break;
+ }
+ break;
+ default:
+ break;
+ }
+ return PUGL_SUCCESS;
+}
+
+int
+main(int argc, char** argv)
+{
+ VulkanApp app = {0};
+ VulkanState* vk = &app.vk;
+ const uint32_t defaultWidth = 640;
+ const uint32_t defaultHeight = 360;
+ const PuglRect frame = {0, 0, defaultWidth, defaultHeight};
+
+ // Parse command line options
+ app.opts = puglParseTestOptions(&argc, &argv);
+ if (app.opts.help) {
+ puglPrintTestUsage(argv[0], "");
+ return 0;
+ }
+
+ // Create world and view
+ if (!(app.world = puglNewWorld(PUGL_PROGRAM, PUGL_WORLD_THREADS))) {
+ return logError("Failed to create world\n");
+ } else if (!(app.view = puglNewView(app.world))) {
+ puglFreeWorld(app.world);
+ return logError("Failed to create Pugl World and View\n");
+ }
+
+ // Create Vulkan instance
+ if (createInstance(&app)) {
+ puglFreeWorld(app.world);
+ return logError("Failed to create instance\n");
+ }
+
+ // Create window
+ puglSetWindowTitle(app.view, "Pugl Vulkan");
+ puglSetFrame(app.view, frame);
+ puglSetHandle(app.view, &app);
+ puglSetBackend(app.view, puglVulkanBackend());
+ puglSetViewHint(app.view, PUGL_RESIZABLE, app.opts.resizable);
+ puglSetEventFunc(app.view, onEvent);
+ const PuglStatus st = puglRealize(app.view);
+ if (st) {
+ puglFreeView(app.view);
+ puglFreeWorld(app.world);
+ return logError("Failed to create window (%s)\n", puglStrerror(st));
+ }
+
+ // Create Vulkan surface for Window
+ PuglVulkanLoader* loader = puglNewVulkanLoader(app.world);
+ const VkResult sr = puglCreateSurface(
+ loader, app.view, vk->instance, ALLOC_VK, &vk->surface);
+ puglFreeVulkanLoader(loader); // The loader is only needed for this call
+ if (sr) {
+ destroyWorld(&app);
+ return logError("Failed to create surface\n");
+ }
+
+ // Set up Vulkan
+ VkResult vr = VK_SUCCESS;
+ if ((vr = enableDebugging(vk)) || //
+ (vr = selectPhysicalDevice(vk)) || //
+ (vr = openDevice(vk)) || //
+ (vr = configureSurface(vk)) || //
+ (vr = createSwapchain(vk, defaultWidth, defaultHeight)) || //
+ (vr = createSyncObjects(vk))) {
+ destroyWorld(&app);
+ return logError("Failed to set up graphics (%d)\n", vr);
+ }
+
+ printf("Swapchain images: %u\n", app.vk.swapchain->nImages);
+
+ PuglFpsPrinter fpsPrinter = {puglGetTime(app.world)};
+ puglShowWindow(app.view);
+ while (!app.quit) {
+ puglUpdate(app.world, -1.0);
+
+ if (app.opts.continuous) {
+ puglPrintFps(app.world, &fpsPrinter, &app.framesDrawn);
+ }
+ }
+
+ destroyWorld(&app);
+ return 0;
+}
diff --git a/examples/sybok.hpp b/examples/sybok.hpp
new file mode 100644
index 0000000..8985547
--- /dev/null
+++ b/examples/sybok.hpp
@@ -0,0 +1,2358 @@
+/*
+ Copyright 2020 David Robillard <http://drobilla.net>
+
+ Permission to use, copy, modify, and/or distribute this software for any
+ purpose with or without fee is hereby granted, provided that the above
+ copyright notice and this permission notice appear in all copies.
+
+ THIS SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+*/
+
+/**
+ @file sybok.hpp
+ @brief A minimal C++ wrapper for the Vulkan API.
+
+ This is a manually-written minimal wrapper for Vulkan. It makes working
+ with Vulkan a little easier in C++, but takes a different approach than
+ vulkan.hpp. In particular:
+
+ - Works nicely with dynamic loading. Since the API itself is an object, it
+ is simple to ensure the dynamically loaded API (or a consistent API in
+ general) is used everywhere. Passing a dispatch parameter to every
+ function as in vulkan.hpp makes dynamic loading extremely painful (not to
+ mention ugly), and mistakes tend to become link time errors. This is, in
+ my opinion, a glaring design flaw, and the real reason why this wrapper
+ reluctantly exists.
+
+ - Explicit separation of the initial API, which can be loaded without an
+ instance, from the rest of the API, which requires one.
+
+ - Opinionated use of scoped handles everywhere.
+
+ - Remains close to the C API so that code can be easily ported. This means
+ that the pattern of return codes with output parameters is preserved,
+ except with smart handles that make leaks impossible. While less pretty,
+ this does not require exceptions.
+
+ - No exceptions or RTTI required.
+
+ - A safe scoped API for commands that encodes the semantics of the Vulkan
+ API. For example, it is statically impossible to call render scope
+ commands while not in a render scope.
+
+ - A reasonable amount of relatively readable code.
+
+ On the other hand, there are far fewer niceties, and the C API is used
+ directly as much as possible, particularly for structs (although they are
+ taken by const reference so they can be written inline). Only a minimal
+ portable subset of Vulkan 1.1 is supported, along with a few portable
+ KHR extensions.
+
+ In short, if the above sounds appealing, or you want a minimal wrapper that
+ can be extended if necessary to suit your application, you might find this
+ useful. If you want a fully-featured wrapper for Vulkan and don't care
+ about linker dependencies, you probably won't.
+*/
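+
+/*
+   As a minimal sketch (assuming `getInstanceProcAddr` has been obtained from
+   a dynamic loader, and `createInfo` is a filled-in VkInstanceCreateInfo),
+   typical setup with the wrapper looks like:
+
+       sk::VulkanInitApi initApi{};
+       sk::Instance instance{};
+       sk::VulkanApi api{};
+
+       initApi.init(getInstanceProcAddr); // Load instance-independent API
+       initApi.createInstance(createInfo, instance); // Get scoped instance
+       api.init(initApi, instance); // Load the rest of the API
+
+   Creation functions return a plain VkResult that should be checked as in C,
+   and every created object is returned in a scoped handle that destroys it
+   automatically.
+*/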
+
+#ifndef SYBOK_HPP
+#define SYBOK_HPP
+
+#define VK_NO_PROTOTYPES
+
+// On 64-bit platforms, all handles are "dispatchable" pointers
+#if defined(__LP64__) || defined(_WIN64) || \
+ (defined(__x86_64__) && !defined(__ILP32__)) || defined(_M_X64) || \
+ defined(__ia64) || defined(_M_IA64) || defined(__aarch64__) || \
+ defined(__powerpc64__)
+
+# define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) \
+ typedef struct object##_T* object;
+
+// On 32-bit platforms, some "non-dispatchable" handles are 64-bit integers
+#else
+
+/// Trivial type-safe wrapper for a 64-bit integer handle
+template<class Tag>
+struct NonDispatchableHandle {
+ explicit operator uint64_t() const noexcept { return handle; }
+ explicit operator bool() const noexcept { return handle; }
+
+ uint64_t handle;
+};
+
+# define VK_DEFINE_NON_DISPATCHABLE_HANDLE(object) \
+ using object = NonDispatchableHandle<struct Sk##object##Tag>;
+
+#endif
+
+#include <vulkan/vulkan_core.h>
+
+#include <array>
+#include <cassert>
+#include <cstddef>
+#include <cstdint>
+#include <type_traits>
+#include <utility>
+
+#if __cplusplus >= 201703L
+# define SYBOK_NODISCARD [[nodiscard]]
+#elif defined(__GNUC__)
+# define SYBOK_NODISCARD [[gnu::warn_unused_result]]
+#else
+# define SYBOK_NODISCARD
+#endif
+
+/// Helper macro to make array arguments format nicely
+#define SK_COUNTED(count, ...) count, __VA_ARGS__
+
+namespace sk {
+
+class CommandScope;
+class RenderCommandScope;
+
+static inline const char*
+string(const VkResult result)
+{
+ switch (result) {
+ case VK_SUCCESS:
+ return "Success";
+ case VK_NOT_READY:
+ return "Not Ready";
+ case VK_TIMEOUT:
+ return "Timeout";
+ case VK_EVENT_SET:
+ return "Event set";
+ case VK_EVENT_RESET:
+ return "Event reset";
+ case VK_INCOMPLETE:
+ return "Incomplete";
+ case VK_ERROR_OUT_OF_HOST_MEMORY:
+ return "Out of host memory";
+ case VK_ERROR_OUT_OF_DEVICE_MEMORY:
+ return "Out of device memory";
+ case VK_ERROR_INITIALIZATION_FAILED:
+ return "Initialization failed";
+ case VK_ERROR_DEVICE_LOST:
+ return "Device lost";
+ case VK_ERROR_MEMORY_MAP_FAILED:
+ return "Memory map failed";
+ case VK_ERROR_LAYER_NOT_PRESENT:
+ return "Layer not present";
+ case VK_ERROR_EXTENSION_NOT_PRESENT:
+ return "Extension not present";
+ case VK_ERROR_FEATURE_NOT_PRESENT:
+ return "Feature not present";
+ case VK_ERROR_INCOMPATIBLE_DRIVER:
+ return "Incompatible driver";
+ case VK_ERROR_TOO_MANY_OBJECTS:
+ return "Too many objects";
+ case VK_ERROR_FORMAT_NOT_SUPPORTED:
+ return "Format not supported";
+ case VK_ERROR_FRAGMENTED_POOL:
+ return "Fragmented pool";
+ case VK_ERROR_OUT_OF_POOL_MEMORY: // Vulkan 1.1
+ return "Out of pool memory";
+ case VK_ERROR_INVALID_EXTERNAL_HANDLE: // Vulkan 1.1
+ return "Invalid external handle";
+ case VK_ERROR_SURFACE_LOST_KHR: // VK_KHR_surface
+ return "Surface lost";
+ case VK_ERROR_NATIVE_WINDOW_IN_USE_KHR: // VK_KHR_surface
+ return "Native window in use";
+ case VK_SUBOPTIMAL_KHR: // VK_KHR_swapchain
+ return "Suboptimal";
+ case VK_ERROR_OUT_OF_DATE_KHR: // VK_KHR_swapchain
+ return "Out of date";
+ case VK_ERROR_VALIDATION_FAILED_EXT: // VK_EXT_debug_report
+ return "Validation failed";
+ default:
+ break;
+ }
+
+ return "Unknown error";
+}
+
+static inline const char*
+string(const VkPresentModeKHR presentMode)
+{
+ switch (presentMode) {
+ case VK_PRESENT_MODE_IMMEDIATE_KHR:
+ return "Immediate";
+ case VK_PRESENT_MODE_MAILBOX_KHR:
+ return "Mailbox";
+ case VK_PRESENT_MODE_FIFO_KHR:
+ return "FIFO";
+ case VK_PRESENT_MODE_FIFO_RELAXED_KHR:
+ return "Relaxed FIFO";
+ default:
+ break;
+ }
+
+ return "Unknown present mode";
+}
+
+static inline const char*
+string(const VkDebugReportFlagBitsEXT flag)
+{
+ switch (flag) {
+ case VK_DEBUG_REPORT_INFORMATION_BIT_EXT:
+ return "Information";
+ case VK_DEBUG_REPORT_WARNING_BIT_EXT:
+ return "Warning";
+ case VK_DEBUG_REPORT_PERFORMANCE_WARNING_BIT_EXT:
+ return "Performance Warning";
+ case VK_DEBUG_REPORT_ERROR_BIT_EXT:
+ return "Error";
+ case VK_DEBUG_REPORT_DEBUG_BIT_EXT:
+ return "Debug";
+ default:
+ break;
+ }
+
+ return "Unknown report";
+}
+
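+/// Deleter for top-level objects like instances and devices with no parent handle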
+template<class T>
+class GlobalDeleter
+{
+public:
+ using DestroyFunc = void (*)(T, const VkAllocationCallbacks*);
+
+ GlobalDeleter() = default;
+ ~GlobalDeleter() = default;
+
+ GlobalDeleter(DestroyFunc destroyFunc) noexcept
+ : _destroyFunc{destroyFunc}
+ {}
+
+ GlobalDeleter(const GlobalDeleter&) = delete;
+ GlobalDeleter& operator=(const GlobalDeleter&) = delete;
+
+ GlobalDeleter(GlobalDeleter&& other) noexcept
+ {
+ std::swap(_destroyFunc, other._destroyFunc);
+ }
+
+ GlobalDeleter& operator=(GlobalDeleter&& other) noexcept
+ {
+ std::swap(_destroyFunc, other._destroyFunc);
+ return *this;
+ }
+
+ void operator()(T handle) noexcept
+ {
+ if (_destroyFunc && handle) {
+ _destroyFunc(handle, nullptr);
+ }
+ }
+
+private:
+ DestroyFunc _destroyFunc{};
+};
+
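+/// Deleter for child objects that are destroyed using their parent (instance or device)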
+template<class T, class Parent>
+class DependantDeleter
+{
+public:
+ using DestroyFunc = void (*)(Parent, T, const VkAllocationCallbacks*);
+
+ DependantDeleter() = default;
+ ~DependantDeleter() = default;
+
+ DependantDeleter(Parent parent, DestroyFunc destroyFunc) noexcept
+ : _parent{parent}
+ , _destroyFunc{destroyFunc}
+ {}
+
+ DependantDeleter(const DependantDeleter&) = delete;
+ DependantDeleter& operator=(const DependantDeleter&) = delete;
+
+ DependantDeleter(DependantDeleter&& other) noexcept { swap(other); }
+
+ DependantDeleter& operator=(DependantDeleter&& other) noexcept
+ {
+ swap(other);
+ return *this;
+ }
+
+ void operator()(T handle) noexcept
+ {
+ if (_parent && _destroyFunc && handle) {
+ _destroyFunc(_parent, handle, nullptr);
+ }
+ }
+
+private:
+ void swap(DependantDeleter& other) noexcept
+ {
+ std::swap(_parent, other._parent);
+ std::swap(_destroyFunc, other._destroyFunc);
+ }
+
+ Parent _parent{};
+ DestroyFunc _destroyFunc{};
+};
+
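+/// Deleter for arrays of objects that are freed back to a pool all at once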
+template<class T, class Pool, class FreeFuncResult>
+class PoolDeleter
+{
+public:
+ using FreeFunc = FreeFuncResult (*)(VkDevice, Pool, uint32_t, const T*);
+
+ PoolDeleter() noexcept = default;
+ ~PoolDeleter() noexcept = default;
+
+ PoolDeleter(VkDevice device,
+ Pool pool,
+ uint32_t count,
+ FreeFunc freeFunc) noexcept
+ : _device{device}
+ , _pool{pool}
+ , _count{count}
+ , _freeFunc{freeFunc}
+ {}
+
+ PoolDeleter(const PoolDeleter&) = delete;
+ PoolDeleter& operator=(const PoolDeleter&) = delete;
+
+ PoolDeleter(PoolDeleter&& other) noexcept { swap(other); }
+
+ PoolDeleter& operator=(PoolDeleter&& other) noexcept
+ {
+ swap(other);
+ return *this;
+ }
+
+ void operator()(T* handle) noexcept
+ {
+ if (_device && _pool && handle) {
+ _freeFunc(_device, _pool, _count, handle);
+ }
+ }
+
+private:
+ void swap(PoolDeleter& other) noexcept
+ {
+ std::swap(_device, other._device);
+ std::swap(_pool, other._pool);
+ std::swap(_count, other._count);
+ std::swap(_freeFunc, other._freeFunc);
+ }
+
+ VkDevice _device{};
+ Pool _pool{};
+ uint32_t _count{};
+ FreeFunc _freeFunc{};
+};
+
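+/// Scoped owning wrapper for a dispatchable (pointer-like) Vulkan handle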
+template<class T, class TDeleter>
+class UniqueDispatchableHandle
+{
+public:
+ using Deleter = TDeleter;
+ using Handle = T;
+
+ static_assert(std::is_pointer<T>::value, "");
+
+ UniqueDispatchableHandle() = default;
+
+ UniqueDispatchableHandle(Handle handle, Deleter deleter) noexcept
+ : _handle{handle}
+ , _deleter{std::move(deleter)}
+ {}
+
+ ~UniqueDispatchableHandle() noexcept
+ {
+ if (_handle) {
+ _deleter(_handle);
+ }
+ }
+
+ UniqueDispatchableHandle(const UniqueDispatchableHandle&) noexcept = delete;
+ UniqueDispatchableHandle&
+ operator=(const UniqueDispatchableHandle&) noexcept = delete;
+
+ UniqueDispatchableHandle(UniqueDispatchableHandle&& other) noexcept
+ {
+ swap(other);
+ }
+
+ UniqueDispatchableHandle&
+ operator=(UniqueDispatchableHandle&& other) noexcept
+ {
+ swap(other);
+ return *this;
+ }
+
+ const Handle& get() const noexcept { return _handle; }
+
+ operator Handle() const noexcept { return _handle; }
+
+private:
+ void swap(UniqueDispatchableHandle& other) noexcept
+ {
+ std::swap(_handle, other._handle);
+ std::swap(_deleter, other._deleter);
+ }
+
+ Handle _handle{};
+ Deleter _deleter{};
+};
+
+#if defined(__LP64__) || defined(_WIN64) || \
+ (defined(__x86_64__) && !defined(__ILP32__)) || defined(_M_X64) || \
+ defined(__ia64) || defined(_M_IA64) || defined(__aarch64__) || \
+ defined(__powerpc64__)
+
+template<class T, class TDeleter>
+using UniqueNonDispatchableHandle = UniqueDispatchableHandle<T, TDeleter>;
+
+#else
+
+template<class T, class TDeleter>
+class UniqueNonDispatchableHandle
+{
+public:
+ using Deleter = TDeleter;
+ using Handle = T;
+
+ UniqueNonDispatchableHandle() = default;
+
+ UniqueNonDispatchableHandle(T handle, Deleter deleter) noexcept
+ : _handle{handle}
+ , _deleter{std::move(deleter)}
+ {
+ assert(handle);
+ }
+
+ ~UniqueNonDispatchableHandle() noexcept
+ {
+ if (_handle) {
+ _deleter(_handle);
+ }
+ }
+
+ UniqueNonDispatchableHandle(const UniqueNonDispatchableHandle&) noexcept =
+ delete;
+ UniqueNonDispatchableHandle&
+ operator=(const UniqueNonDispatchableHandle&) noexcept = delete;
+
+ UniqueNonDispatchableHandle(UniqueNonDispatchableHandle&& other) noexcept
+ {
+ swap(other);
+ }
+
+ UniqueNonDispatchableHandle&
+ operator=(UniqueNonDispatchableHandle&& other) noexcept
+ {
+ swap(other);
+ return *this;
+ }
+
+ const Handle& get() const noexcept { return _handle; }
+
+ operator Handle() const noexcept { return _handle; }
+
+private:
+ void swap(UniqueNonDispatchableHandle& other) noexcept
+ {
+ std::swap(_handle, other._handle);
+ std::swap(_deleter, other._deleter);
+ }
+
+ T _handle{};
+ Deleter _deleter{};
+};
+
+#endif
+
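+/// Scoped owning wrapper for an array of handles that must be freed together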
+template<class Vector, class Deleter>
+class UniqueArrayHandle
+{
+public:
+ using T = typename Vector::value_type;
+
+ UniqueArrayHandle() = default;
+
+ UniqueArrayHandle(uint32_t size, Vector&& array, Deleter deleter) noexcept
+ : _array{std::move(array)}
+ , _deleter{std::move(deleter)}
+ , _size{size}
+ {
+ assert(!_array.empty());
+ }
+
+ ~UniqueArrayHandle() noexcept
+ {
+ if (!_array.empty()) {
+ _deleter(_array.data());
+ }
+ }
+
+ UniqueArrayHandle(const UniqueArrayHandle&) noexcept = delete;
+ UniqueArrayHandle& operator=(const UniqueArrayHandle&) noexcept = delete;
+
+ UniqueArrayHandle(UniqueArrayHandle&& other) noexcept { swap(other); }
+
+ UniqueArrayHandle& operator=(UniqueArrayHandle&& other) noexcept
+ {
+ swap(other);
+ return *this;
+ }
+
+ const T& operator[](const size_t index) const noexcept
+ {
+ return _array[index];
+ }
+
+ T& operator[](const size_t index) noexcept { return _array[index]; }
+
+ const T* get() const noexcept { return _array.data(); }
+ T* get() noexcept { return _array.data(); }
+
+private:
+ void swap(UniqueArrayHandle& other) noexcept
+ {
+ std::swap(_array, other._array);
+ std::swap(_deleter, other._deleter);
+ std::swap(_size, other._size);
+ }
+
+ Vector _array{};
+ Deleter _deleter{};
+ uint32_t _size{};
+};
+
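+/// Adapter that passes either a scoped handle or a null handle as a raw parameter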
+template<typename T>
+class OptionalParameter
+{
+public:
+ using Handle = typename T::Handle;
+
+ OptionalParameter(const T& value) noexcept
+ : _handle{value.get()}
+ {}
+
+ OptionalParameter() noexcept
+ : _handle{}
+ {}
+
+ OptionalParameter(const OptionalParameter&) = delete;
+ OptionalParameter& operator=(const OptionalParameter&) = delete;
+
+ OptionalParameter(OptionalParameter&&) = delete;
+ OptionalParameter& operator=(OptionalParameter&&) = delete;
+
+ Handle get() const noexcept { return _handle; }
+
+private:
+ Handle _handle{};
+};
+
+template<typename T>
+using GlobalObject = UniqueDispatchableHandle<T, GlobalDeleter<T>>;
+
+template<typename T>
+using InstanceChild =
+ UniqueNonDispatchableHandle<T, DependantDeleter<T, VkInstance>>;
+
+template<typename T>
+using DispatchableDeviceChild =
+ UniqueDispatchableHandle<T, DependantDeleter<T, VkDevice>>;
+
+template<typename T>
+using NonDispatchableDeviceChild =
+ UniqueNonDispatchableHandle<T, DependantDeleter<T, VkDevice>>;
+
+template<typename Vector, typename Pool, typename FreeFuncResult>
+using PoolChild = UniqueArrayHandle<
+ Vector,
+ PoolDeleter<typename Vector::value_type, Pool, FreeFuncResult>>;
+
+using Device = GlobalObject<VkDevice>;
+using Instance = GlobalObject<VkInstance>;
+
+using PhysicalDevice = VkPhysicalDevice; // Weak handle, no destroy function
+using Queue = VkQueue; // Weak handle, no destroy function
+
+using Buffer = NonDispatchableDeviceChild<VkBuffer>;
+using BufferView = NonDispatchableDeviceChild<VkBufferView>;
+using CommandBuffer = DispatchableDeviceChild<VkCommandBuffer>;
+using CommandPool = NonDispatchableDeviceChild<VkCommandPool>;
+using DescriptorPool = NonDispatchableDeviceChild<VkDescriptorPool>;
+using DescriptorSetLayout = NonDispatchableDeviceChild<VkDescriptorSetLayout>;
+using DeviceMemory = NonDispatchableDeviceChild<VkDeviceMemory>;
+using Event = NonDispatchableDeviceChild<VkEvent>;
+using Fence = NonDispatchableDeviceChild<VkFence>;
+using Framebuffer = NonDispatchableDeviceChild<VkFramebuffer>;
+using Image = NonDispatchableDeviceChild<VkImage>;
+using ImageView = NonDispatchableDeviceChild<VkImageView>;
+using Pipeline = NonDispatchableDeviceChild<VkPipeline>;
+using PipelineCache = NonDispatchableDeviceChild<VkPipelineCache>;
+using PipelineLayout = NonDispatchableDeviceChild<VkPipelineLayout>;
+using QueryPool = NonDispatchableDeviceChild<VkQueryPool>;
+using RenderPass = NonDispatchableDeviceChild<VkRenderPass>;
+using Sampler = NonDispatchableDeviceChild<VkSampler>;
+using Semaphore = NonDispatchableDeviceChild<VkSemaphore>;
+using ShaderModule = NonDispatchableDeviceChild<VkShaderModule>;
+
+template<class VkCommandBufferVector>
+using CommandBuffers = PoolChild<VkCommandBufferVector, VkCommandPool, void>;
+
+template<class VkDescriptorSetVector>
+using DescriptorSets =
+ PoolChild<VkDescriptorSetVector, VkDescriptorPool, VkResult>;
+
+// VK_KHR_swapchain
+using SwapchainKHR = NonDispatchableDeviceChild<VkSwapchainKHR>;
+
+// VK_KHR_surface
+using SurfaceKHR = InstanceChild<VkSurfaceKHR>;
+
+// VK_EXT_debug_report
+using DebugReportCallbackEXT = InstanceChild<VkDebugReportCallbackEXT>;
+
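+// C++11-compatible substitute for std::index_sequence and std::make_index_sequence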
+template<size_t...>
+struct IndexSequence {};
+
+template<size_t N, size_t... Next>
+struct IndexSequenceHelper
+ : public IndexSequenceHelper<N - 1U, N - 1U, Next...> {};
+
+template<size_t... Next>
+struct IndexSequenceHelper<0U, Next...> {
+ using type = IndexSequence<Next...>;
+};
+
+template<size_t N>
+using makeIndexSequence = typename IndexSequenceHelper<N>::type;
+
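+/// Wrap an array of raw handles in scoped handles that share one destroy function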
+template<class T, class Parent, class DestroyFunc, size_t count, size_t... Is>
+std::array<T, count>
+make_handle_array_h(Parent parent,
+ DestroyFunc destroyFunc,
+ std::array<typename T::Handle, count> handles,
+ IndexSequence<Is...>) noexcept
+{
+ return {T{handles[Is], {parent, destroyFunc}}...};
+}
+
+template<class T, class Parent, class DestroyFunc, size_t count>
+std::array<T, count>
+make_handle_array(Parent parent,
+ DestroyFunc destroyFunc,
+ std::array<typename T::Handle, count> handles) noexcept
+{
+ return make_handle_array_h<T, Parent, DestroyFunc, count>(
+ parent, destroyFunc, handles, makeIndexSequence<count>());
+}
+
+namespace detail {
+
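+/// Fill `vector` using the standard Vulkan two-call idiom: query the element
+/// count first, then fetch the elements themselves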
+template<class Value, class Vector, class Func, class... Args>
+inline VkResult
+wrapVectorAccessor(Vector& vector, Func func, Args... args) noexcept
+{
+ uint32_t count = 0u;
+ VkResult r = func(args..., &count, nullptr);
+ if (r < VK_SUCCESS) { // Negative result codes are errors
+ vector.clear();
+ return r;
+ }
+
+ vector = Vector(count);
+ if ((r = func(args..., &count, vector.data()))) {
+ vector.clear();
+ return r;
+ }
+
+ return VK_SUCCESS;
+}
+
+} // namespace detail
+
+class VulkanApi;
+
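+/// Scoped mapping of device memory that is unmapped again on destruction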
+struct MappedMemory {
+ MappedMemory() noexcept = default;
+
+ MappedMemory(const VulkanApi& api,
+ VkDevice device,
+ VkDeviceMemory memory,
+ void* data) noexcept
+ : _api{&api}
+ , _device{device}
+ , _memory{memory}
+ , _data{data}
+ {}
+
+ MappedMemory(const MappedMemory&) = delete;
+ MappedMemory& operator=(const MappedMemory&) = delete;
+
+ MappedMemory(MappedMemory&& mappedMemory) noexcept
+ : _api{mappedMemory._api}
+ , _device{mappedMemory._device}
+ , _memory{mappedMemory._memory}
+ , _data{mappedMemory._data}
+ {
+ mappedMemory._device = {};
+ mappedMemory._memory = {};
+ mappedMemory._data = {};
+ }
+
+ MappedMemory& operator=(MappedMemory&& mappedMemory) noexcept
+ {
+ std::swap(_api, mappedMemory._api);
+ std::swap(_device, mappedMemory._device);
+ std::swap(_memory, mappedMemory._memory);
+ std::swap(_data, mappedMemory._data);
+ return *this;
+ }
+
+ ~MappedMemory() noexcept;
+
+ const void* get() const noexcept { return _data; }
+ void* get() noexcept { return _data; }
+
+private:
+ const VulkanApi* _api{};
+ VkDevice _device{};
+ VkDeviceMemory _memory{};
+ void* _data{};
+};
+
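+/// The initial subset of the Vulkan API that can be loaded without an instance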
+class VulkanInitApi
+{
+public:
+ template<typename NotFoundFunc>
+ VkResult init(PFN_vkGetInstanceProcAddr pGetInstanceProcAddr,
+ NotFoundFunc notFound) noexcept
+ {
+#define SK_INIT(name) \
+ do { \
+ if (!(name = PFN_##name(getInstanceProcAddr(nullptr, #name)))) { \
+ notFound(#name); \
+ } \
+ } while (0)
+
+ vkGetInstanceProcAddr = pGetInstanceProcAddr;
+ SK_INIT(vkCreateInstance);
+ vkDestroyInstance = {}; // Loaded after we create an instance
+ SK_INIT(vkEnumerateInstanceExtensionProperties);
+ SK_INIT(vkEnumerateInstanceLayerProperties);
+
+ if (!vkCreateInstance || !vkEnumerateInstanceExtensionProperties ||
+ !vkEnumerateInstanceLayerProperties) {
+ return VK_ERROR_INITIALIZATION_FAILED;
+ }
+
+ return VK_SUCCESS;
+#undef SK_INIT
+ }
+
+ VkResult init(PFN_vkGetInstanceProcAddr pGetInstanceProcAddr) noexcept
+ {
+ return init(pGetInstanceProcAddr, [](const char*) {});
+ }
+
+ PFN_vkVoidFunction
+ getInstanceProcAddr(VkInstance instance,
+ const char* const name) const noexcept
+ {
+ return vkGetInstanceProcAddr(instance, name);
+ }
+
+ VkResult createInstance(const VkInstanceCreateInfo& createInfo,
+ Instance& instance) noexcept
+ {
+ VkInstance h = {};
+ if (const VkResult r = vkCreateInstance(&createInfo, nullptr, &h)) {
+ return r;
+ } else if (!h) {
+ // Shouldn't actually happen, but this lets the compiler know h is valid
+ return VK_ERROR_INITIALIZATION_FAILED;
+ }
+
+ if (!vkDestroyInstance) {
+ vkDestroyInstance = PFN_vkDestroyInstance(
+ getInstanceProcAddr(instance, "vkDestroyInstance"));
+ }
+
+ instance = {h, {vkDestroyInstance}};
+ return VK_SUCCESS;
+ }
+
+ template<class Vector>
+ VkResult
+ enumerateInstanceExtensionProperties(Vector& properties) const noexcept
+ {
+ return detail::wrapVectorAccessor<VkExtensionProperties>(
+ properties, vkEnumerateInstanceExtensionProperties, nullptr);
+ }
+
+ template<class Vector>
+ VkResult
+ enumerateInstanceExtensionProperties(const char* const layerName,
+ Vector& properties) const noexcept
+ {
+ return detail::wrapVectorAccessor<VkExtensionProperties>(
+ properties, vkEnumerateInstanceExtensionProperties, layerName);
+ }
+
+ template<class Vector>
+ VkResult enumerateInstanceLayerProperties(Vector& properties) const noexcept
+ {
+ return detail::wrapVectorAccessor<VkLayerProperties>(
+ properties, vkEnumerateInstanceLayerProperties);
+ }
+
+private:
+ PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr{};
+
+#define SK_FUNC(name) \
+ PFN_##name name {}
+
+ SK_FUNC(vkCreateInstance);
+ SK_FUNC(vkDestroyInstance);
+ SK_FUNC(vkEnumerateInstanceExtensionProperties);
+ SK_FUNC(vkEnumerateInstanceLayerProperties);
+
+#undef SK_FUNC
+};
+
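+/// The rest of the Vulkan API, loaded from a created instance via VulkanInitApi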
+class VulkanApi
+{
+public:
+ template<typename NotFoundFunc>
+ VkResult init(const VulkanInitApi& initApi,
+ const Instance& instance,
+ NotFoundFunc notFound) noexcept
+ {
+ VkResult r = VK_SUCCESS;
+
+ const auto notFoundWrapper = [&r, notFound](const char* name) {
+ r = VK_INCOMPLETE;
+ notFound(name);
+ };
+
+#define SK_INIT(name) \
+ do { \
+ if (!(name = PFN_##name( \
+ initApi.getInstanceProcAddr(instance, #name)))) { \
+ notFoundWrapper(#name); \
+ } \
+ } while (0)
+
+ SK_INIT(vkAllocateCommandBuffers);
+ SK_INIT(vkAllocateDescriptorSets);
+ SK_INIT(vkAllocateMemory);
+ SK_INIT(vkBeginCommandBuffer);
+ SK_INIT(vkBindBufferMemory);
+ SK_INIT(vkBindImageMemory);
+ SK_INIT(vkCmdBeginQuery);
+ SK_INIT(vkCmdBeginRenderPass);
+ SK_INIT(vkCmdBindDescriptorSets);
+ SK_INIT(vkCmdBindIndexBuffer);
+ SK_INIT(vkCmdBindPipeline);
+ SK_INIT(vkCmdBindVertexBuffers);
+ SK_INIT(vkCmdBlitImage);
+ SK_INIT(vkCmdClearAttachments);
+ SK_INIT(vkCmdClearColorImage);
+ SK_INIT(vkCmdClearDepthStencilImage);
+ SK_INIT(vkCmdCopyBuffer);
+ SK_INIT(vkCmdCopyBufferToImage);
+ SK_INIT(vkCmdCopyImage);
+ SK_INIT(vkCmdCopyImageToBuffer);
+ SK_INIT(vkCmdCopyQueryPoolResults);
+ SK_INIT(vkCmdDispatch);
+ SK_INIT(vkCmdDispatchIndirect);
+ SK_INIT(vkCmdDraw);
+ SK_INIT(vkCmdDrawIndexed);
+ SK_INIT(vkCmdDrawIndexedIndirect);
+ SK_INIT(vkCmdDrawIndirect);
+ SK_INIT(vkCmdEndQuery);
+ SK_INIT(vkCmdEndRenderPass);
+ SK_INIT(vkCmdExecuteCommands);
+ SK_INIT(vkCmdFillBuffer);
+ SK_INIT(vkCmdNextSubpass);
+ SK_INIT(vkCmdPipelineBarrier);
+ SK_INIT(vkCmdPushConstants);
+ SK_INIT(vkCmdResetEvent);
+ SK_INIT(vkCmdResetQueryPool);
+ SK_INIT(vkCmdResolveImage);
+ SK_INIT(vkCmdSetBlendConstants);
+ SK_INIT(vkCmdSetDepthBias);
+ SK_INIT(vkCmdSetDepthBounds);
+ SK_INIT(vkCmdSetEvent);
+ SK_INIT(vkCmdSetLineWidth);
+ SK_INIT(vkCmdSetScissor);
+ SK_INIT(vkCmdSetStencilCompareMask);
+ SK_INIT(vkCmdSetStencilReference);
+ SK_INIT(vkCmdSetStencilWriteMask);
+ SK_INIT(vkCmdSetViewport);
+ SK_INIT(vkCmdUpdateBuffer);
+ SK_INIT(vkCmdWaitEvents);
+ SK_INIT(vkCmdWriteTimestamp);
+ SK_INIT(vkCreateBuffer);
+ SK_INIT(vkCreateBufferView);
+ SK_INIT(vkCreateCommandPool);
+ SK_INIT(vkCreateComputePipelines);
+ SK_INIT(vkCreateDescriptorPool);
+ SK_INIT(vkCreateDescriptorSetLayout);
+ SK_INIT(vkCreateDevice);
+ SK_INIT(vkCreateEvent);
+ SK_INIT(vkCreateFence);
+ SK_INIT(vkCreateFramebuffer);
+ SK_INIT(vkCreateGraphicsPipelines);
+ SK_INIT(vkCreateImage);
+ SK_INIT(vkCreateImageView);
+ SK_INIT(vkCreateInstance);
+ SK_INIT(vkCreatePipelineCache);
+ SK_INIT(vkCreatePipelineLayout);
+ SK_INIT(vkCreateQueryPool);
+ SK_INIT(vkCreateRenderPass);
+ SK_INIT(vkCreateSampler);
+ SK_INIT(vkCreateSemaphore);
+ SK_INIT(vkCreateShaderModule);
+ SK_INIT(vkDestroyBuffer);
+ SK_INIT(vkDestroyBufferView);
+ SK_INIT(vkDestroyCommandPool);
+ SK_INIT(vkDestroyDescriptorPool);
+ SK_INIT(vkDestroyDescriptorSetLayout);
+ SK_INIT(vkDestroyDevice);
+ SK_INIT(vkDestroyEvent);
+ SK_INIT(vkDestroyFence);
+ SK_INIT(vkDestroyFramebuffer);
+ SK_INIT(vkDestroyImage);
+ SK_INIT(vkDestroyImageView);
+ SK_INIT(vkDestroyPipeline);
+ SK_INIT(vkDestroyPipelineCache);
+ SK_INIT(vkDestroyPipelineLayout);
+ SK_INIT(vkDestroyQueryPool);
+ SK_INIT(vkDestroyRenderPass);
+ SK_INIT(vkDestroySampler);
+ SK_INIT(vkDestroySemaphore);
+ SK_INIT(vkDestroyShaderModule);
+ SK_INIT(vkDeviceWaitIdle);
+ SK_INIT(vkEndCommandBuffer);
+ SK_INIT(vkEnumerateDeviceExtensionProperties);
+ SK_INIT(vkEnumerateDeviceLayerProperties);
+ SK_INIT(vkEnumeratePhysicalDevices);
+ SK_INIT(vkFlushMappedMemoryRanges);
+ SK_INIT(vkFreeCommandBuffers);
+ SK_INIT(vkFreeDescriptorSets);
+ SK_INIT(vkFreeMemory);
+ SK_INIT(vkGetBufferMemoryRequirements);
+ SK_INIT(vkGetDeviceMemoryCommitment);
+ SK_INIT(vkGetDeviceProcAddr);
+ SK_INIT(vkGetDeviceQueue);
+ SK_INIT(vkGetEventStatus);
+ SK_INIT(vkGetFenceStatus);
+ SK_INIT(vkGetImageMemoryRequirements);
+ SK_INIT(vkGetImageSparseMemoryRequirements);
+ SK_INIT(vkGetImageSubresourceLayout);
+ SK_INIT(vkGetInstanceProcAddr);
+ SK_INIT(vkGetPhysicalDeviceFeatures);
+ SK_INIT(vkGetPhysicalDeviceFormatProperties);
+ SK_INIT(vkGetPhysicalDeviceImageFormatProperties);
+ SK_INIT(vkGetPhysicalDeviceMemoryProperties);
+ SK_INIT(vkGetPhysicalDeviceProperties);
+ SK_INIT(vkGetPhysicalDeviceQueueFamilyProperties);
+ SK_INIT(vkGetPhysicalDeviceSparseImageFormatProperties);
+ SK_INIT(vkGetPipelineCacheData);
+ SK_INIT(vkGetQueryPoolResults);
+ SK_INIT(vkGetRenderAreaGranularity);
+ SK_INIT(vkInvalidateMappedMemoryRanges);
+ SK_INIT(vkMapMemory);
+ SK_INIT(vkMergePipelineCaches);
+ SK_INIT(vkQueueBindSparse);
+ SK_INIT(vkQueueSubmit);
+ SK_INIT(vkQueueWaitIdle);
+ SK_INIT(vkResetCommandBuffer);
+ SK_INIT(vkResetCommandPool);
+ SK_INIT(vkResetDescriptorPool);
+ SK_INIT(vkResetEvent);
+ SK_INIT(vkResetFences);
+ SK_INIT(vkSetEvent);
+ SK_INIT(vkUnmapMemory);
+ SK_INIT(vkUpdateDescriptorSets);
+ SK_INIT(vkWaitForFences);
+
+ // VK_EXT_debug_report
+ SK_INIT(vkCreateDebugReportCallbackEXT);
+ SK_INIT(vkDebugReportMessageEXT);
+ SK_INIT(vkDestroyDebugReportCallbackEXT);
+
+ // VK_KHR_surface
+ SK_INIT(vkDestroySurfaceKHR);
+ SK_INIT(vkGetPhysicalDeviceSurfaceCapabilitiesKHR);
+ SK_INIT(vkGetPhysicalDeviceSurfaceFormatsKHR);
+ SK_INIT(vkGetPhysicalDeviceSurfacePresentModesKHR);
+ SK_INIT(vkGetPhysicalDeviceSurfaceSupportKHR);
+
+ // VK_KHR_swapchain
+ SK_INIT(vkAcquireNextImageKHR);
+ SK_INIT(vkCreateSwapchainKHR);
+ SK_INIT(vkDestroySwapchainKHR);
+ SK_INIT(vkGetDeviceGroupPresentCapabilitiesKHR);
+ SK_INIT(vkGetDeviceGroupSurfacePresentModesKHR);
+ SK_INIT(vkGetPhysicalDevicePresentRectanglesKHR);
+ SK_INIT(vkGetSwapchainImagesKHR);
+ SK_INIT(vkQueuePresentKHR);
+
+#undef SK_INIT
+
+ return r;
+ }
+
+ VkResult
+ init(const VulkanInitApi& initApi, const Instance& instance) noexcept
+ {
+ return init(initApi, instance, [](const char*) {});
+ }
+
+ template<class VkCommandBufferVector>
+ VkResult allocateCommandBuffers(
+ const Device& device,
+ const VkCommandBufferAllocateInfo& allocateInfo,
+ CommandBuffers<VkCommandBufferVector>& commandBuffers) const noexcept
+ {
+ VkCommandBufferVector rawCommandBuffers = VkCommandBufferVector(
+ allocateInfo.commandBufferCount);
+
+ if (const VkResult r = vkAllocateCommandBuffers(
+ device, &allocateInfo, rawCommandBuffers.data())) {
+ return r;
+ }
+
+ commandBuffers = CommandBuffers<VkCommandBufferVector>{
+ allocateInfo.commandBufferCount,
+ std::move(rawCommandBuffers),
+ PoolDeleter<VkCommandBuffer, VkCommandPool, void>{
+ device,
+ allocateInfo.commandPool,
+ allocateInfo.commandBufferCount,
+ vkFreeCommandBuffers}};
+ return VK_SUCCESS;
+ }
+
+ template<class VkDescriptorSetVector>
+ VkResult allocateDescriptorSets(
+ const Device& device,
+ const VkDescriptorSetAllocateInfo& allocateInfo,
+ DescriptorSets<VkDescriptorSetVector>& descriptorSets) const noexcept
+ {
+ auto descriptorSetVector = VkDescriptorSetVector(
+ allocateInfo.descriptorSetCount);
+
+ if (const VkResult r = vkAllocateDescriptorSets(
+ device, &allocateInfo, descriptorSetVector.data())) {
+ return r;
+ }
+
+ descriptorSets = DescriptorSets<VkDescriptorSetVector>{
+ allocateInfo.descriptorSetCount,
+ std::move(descriptorSetVector),
+ PoolDeleter<VkDescriptorSet, VkDescriptorPool, VkResult>{
+ device,
+ allocateInfo.descriptorPool,
+ allocateInfo.descriptorSetCount,
+ vkFreeDescriptorSets}};
+ return VK_SUCCESS;
+ }
+
+ VkResult bindBufferMemory(const Device& device,
+ const Buffer& buffer,
+ const DeviceMemory& memory,
+ VkDeviceSize memoryOffset) const noexcept
+ {
+ return vkBindBufferMemory
+ ? vkBindBufferMemory(device, buffer, memory, memoryOffset)
+ : VK_ERROR_FEATURE_NOT_PRESENT;
+ }
+
+ VkResult createBuffer(const Device& device,
+ const VkBufferCreateInfo& createInfo,
+ Buffer& buffer) const noexcept
+ {
+ VkBuffer h = {};
+ const VkResult r = vkCreateBuffer(device, &createInfo, nullptr, &h);
+ return wrapResult(r, h, {device, vkDestroyBuffer}, buffer);
+ }
+
+ VkResult createBufferView(const Device& device,
+ const VkBufferViewCreateInfo& createInfo,
+ BufferView& bufferView) const noexcept
+ {
+ VkBufferView h = {};
+ const VkResult r = vkCreateBufferView(device, &createInfo, nullptr, &h);
+ return wrapResult(r, h, {device, vkDestroyBufferView}, bufferView);
+ }
+
+ VkResult createCommandPool(const Device& device,
+ const VkCommandPoolCreateInfo& createInfo,
+ CommandPool& commandPool) const noexcept
+ {
+ VkCommandPool h = {};
+ const VkResult r =
+ vkCreateCommandPool(device, &createInfo, nullptr, &h);
+ return wrapResult(r, h, {device, vkDestroyCommandPool}, commandPool);
+ }
+
+ VkResult createDescriptorPool(const Device& device,
+ const VkDescriptorPoolCreateInfo& createInfo,
+ DescriptorPool& descriptorPool) const noexcept
+ {
+ VkDescriptorPool h = {};
+ const VkResult r =
+ vkCreateDescriptorPool(device, &createInfo, nullptr, &h);
+
+ return wrapResult(r,
+ h,
+ {device, vkDestroyDescriptorPool},
+ descriptorPool);
+ }
+
+ VkResult createDescriptorSetLayout(
+ const Device& device,
+ const VkDescriptorSetLayoutCreateInfo& createInfo,
+ DescriptorSetLayout& descriptorSetLayout) const noexcept
+ {
+ VkDescriptorSetLayout h = {};
+ const VkResult r =
+ vkCreateDescriptorSetLayout(device, &createInfo, nullptr, &h);
+
+ return wrapResult(r,
+ h,
+ {device, vkDestroyDescriptorSetLayout},
+ descriptorSetLayout);
+ }
+
+ VkResult createDevice(const PhysicalDevice& physicalDevice,
+ const VkDeviceCreateInfo& createInfo,
+ Device& result) const noexcept
+ {
+ VkDevice h = {};
+ const VkResult r =
+ vkCreateDevice(physicalDevice, &createInfo, nullptr, &h);
+
+ return wrapResult(r, h, {vkDestroyDevice}, result);
+ }
+
+ VkResult createEvent(const Device& device,
+ const VkEventCreateInfo& createInfo,
+ Event& event) const noexcept
+ {
+ VkEvent h = {};
+ const VkResult r = vkCreateEvent(device, &createInfo, nullptr, &h);
+
+ return wrapResult(r, h, {device, vkDestroyEvent}, event);
+ }
+
+ VkResult createFence(const Device& device,
+ const VkFenceCreateInfo& createInfo,
+ Fence& fence) const noexcept
+ {
+ VkFence h = {};
+ const VkResult r = vkCreateFence(device, &createInfo, nullptr, &h);
+
+ return wrapResult(r, h, {device, vkDestroyFence}, fence);
+ }
+
+ VkResult createFramebuffer(const Device& device,
+ const VkFramebufferCreateInfo& createInfo,
+ Framebuffer& framebuffer) const noexcept
+ {
+ VkFramebuffer h = {};
+ const VkResult r =
+ vkCreateFramebuffer(device, &createInfo, nullptr, &h);
+
+ return wrapResult(r, h, {device, vkDestroyFramebuffer}, framebuffer);
+ }
+
+ VkResult createImage(const Device& device,
+ const VkImageCreateInfo& createInfo,
+ Image& image) const noexcept
+ {
+ VkImage h = {};
+ const VkResult r = vkCreateImage(device, &createInfo, nullptr, &h);
+
+ return wrapResult(r, h, {device, vkDestroyImage}, image);
+ }
+
+ VkResult createImageView(const Device& device,
+ const VkImageViewCreateInfo& createInfo,
+ ImageView& imageView) const noexcept
+ {
+ VkImageView h = {};
+ const VkResult r = vkCreateImageView(device, &createInfo, nullptr, &h);
+
+ return wrapResult(r, h, {device, vkDestroyImageView}, imageView);
+ }
+
+ template<size_t count>
+ VkResult createGraphicsPipelines(
+ const Device& device,
+ const OptionalParameter<PipelineCache>& pipelineCache,
+ const std::array<VkGraphicsPipelineCreateInfo, count>& createInfos,
+ std::array<Pipeline, count>& pipelines) const noexcept
+ {
+ std::array<VkPipeline, count> pipelineHandles{};
+
+ if (const VkResult r = vkCreateGraphicsPipelines(
+ device,
+ pipelineCache.get(),
+ static_cast<uint32_t>(createInfos.size()),
+ createInfos.data(),
+ nullptr,
+ pipelineHandles.data())) {
+ return r;
+ }
+
+ pipelines = make_handle_array<Pipeline>(device.get(),
+ vkDestroyPipeline,
+ pipelineHandles);
+ return VK_SUCCESS;
+ }
+
+ VkResult createPipelineCache(const Device& device,
+ const VkPipelineCacheCreateInfo& createInfo,
+ PipelineCache& pipelineCache) const noexcept
+ {
+ VkPipelineCache h = {};
+ const VkResult r =
+ vkCreatePipelineCache(device, &createInfo, nullptr, &h);
+
+ return wrapResult(r,
+ h,
+ {device, vkDestroyPipelineCache},
+ pipelineCache);
+ }
+
+ VkResult createPipelineLayout(const Device& device,
+ const VkPipelineLayoutCreateInfo& createInfo,
+ PipelineLayout& pipelineLayout) const noexcept
+ {
+ VkPipelineLayout h = {};
+ const VkResult r =
+ vkCreatePipelineLayout(device, &createInfo, nullptr, &h);
+
+ return wrapResult(r,
+ h,
+ {device, vkDestroyPipelineLayout},
+ pipelineLayout);
+ }
+
+ VkResult createQueryPool(const Device& device,
+ const VkQueryPoolCreateInfo& createInfo,
+ QueryPool& queryPool) const noexcept
+ {
+ VkQueryPool h = {};
+ const VkResult r = vkCreateQueryPool(device, &createInfo, nullptr, &h);
+
+ return wrapResult(r, h, {device, vkDestroyQueryPool}, queryPool);
+ }
+
+ VkResult createRenderPass(const Device& device,
+ const VkRenderPassCreateInfo& createInfo,
+ RenderPass& renderPass) const noexcept
+ {
+ VkRenderPass h = {};
+ const VkResult r = vkCreateRenderPass(device, &createInfo, nullptr, &h);
+
+ return wrapResult(r, h, {device, vkDestroyRenderPass}, renderPass);
+ }
+
+ VkResult createSampler(const Device& device,
+ const VkSamplerCreateInfo& createInfo,
+ Sampler& sampler) const noexcept
+ {
+ VkSampler h = {};
+ const VkResult r = vkCreateSampler(device, &createInfo, nullptr, &h);
+
+ return wrapResult(r, h, {device, vkDestroySampler}, sampler);
+ }
+
+ VkResult createSemaphore(const Device& device,
+ const VkSemaphoreCreateInfo& createInfo,
+ Semaphore& semaphore) const noexcept
+ {
+ VkSemaphore h = {};
+ const VkResult r = vkCreateSemaphore(device, &createInfo, nullptr, &h);
+
+ return wrapResult(r, h, {device, vkDestroySemaphore}, semaphore);
+ }
+
+ VkResult createShaderModule(const Device& device,
+ const VkShaderModuleCreateInfo& createInfo,
+ ShaderModule& shaderModule) const noexcept
+ {
+ VkShaderModule h = {};
+ const VkResult r =
+ vkCreateShaderModule(device, &createInfo, nullptr, &h);
+
+ return wrapResult(r, h, {device, vkDestroyShaderModule}, shaderModule);
+ }
+
+ VkResult deviceWaitIdle(const Device& device) const noexcept
+ {
+ return vkDeviceWaitIdle(device);
+ }
+
+ template<class Vector>
+ VkResult
+ enumerateDeviceExtensionProperties(const PhysicalDevice& physicalDevice,
+ const char* const layerName,
+ Vector& properties) const noexcept
+ {
+ return detail::wrapVectorAccessor<VkExtensionProperties>(
+ properties,
+ vkEnumerateDeviceExtensionProperties,
+ physicalDevice,
+ layerName);
+ }
+
+ template<class Vector>
+ VkResult
+ enumerateDeviceExtensionProperties(const PhysicalDevice& physicalDevice,
+ Vector& properties) const noexcept
+ {
+ return detail::wrapVectorAccessor<VkExtensionProperties>(
+ properties,
+ vkEnumerateDeviceExtensionProperties,
+ physicalDevice,
+ nullptr);
+ }
+
+ template<class Vector>
+ VkResult enumeratePhysicalDevices(const Instance& instance,
+ Vector& physicalDevices) const noexcept
+ {
+ uint32_t count = 0u;
+ VkResult r = vkEnumeratePhysicalDevices(instance, &count, nullptr);
+    if (r && r != VK_INCOMPLETE) { // Error codes are negative
+ return r;
+ }
+
+ physicalDevices = Vector(count);
+ if ((r = vkEnumeratePhysicalDevices(instance,
+ &count,
+ physicalDevices.data()))) {
+ return r;
+ }
+
+ return VK_SUCCESS;
+ }
+
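+  /*
+     The enumerate wrappers follow Vulkan's count-then-fill protocol: the
+     first call queries the element count, the second fills the caller's
+     vector.  A minimal sketch, with `api` and `instance` assumed:
+
+         std::vector<VkPhysicalDevice> devices;
+         if (const VkResult r = api.enumeratePhysicalDevices(instance,
+                                                             devices)) {
+             return r; // `devices` contents are unspecified on failure
+         }
+  */
+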
+ sk::Queue getDeviceQueue(const Device& device,
+ const uint32_t queueFamilyIndex,
+ const uint32_t queueIndex) const noexcept
+ {
+ VkQueue queue{};
+ vkGetDeviceQueue(device, queueFamilyIndex, queueIndex, &queue);
+ return sk::Queue{queue};
+ }
+
+ VkPhysicalDeviceMemoryProperties getPhysicalDeviceMemoryProperties(
+      const PhysicalDevice& physicalDevice) const noexcept
+ {
+ VkPhysicalDeviceMemoryProperties properties{};
+ vkGetPhysicalDeviceMemoryProperties(physicalDevice, &properties);
+ return properties;
+ }
+
+ VkPhysicalDeviceProperties getPhysicalDeviceProperties(
+ const PhysicalDevice& physicalDevice) const noexcept
+ {
+ VkPhysicalDeviceProperties properties{};
+ vkGetPhysicalDeviceProperties(physicalDevice, &properties);
+ return properties;
+ }
+
+ template<class Vector>
+ VkResult getPhysicalDeviceQueueFamilyProperties(
+ const PhysicalDevice& physicalDevice,
+ Vector& queueFamilyProperties) const noexcept
+ {
+ uint32_t count = 0u;
+ vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice,
+ &count,
+ nullptr);
+
+ queueFamilyProperties = Vector(count);
+ vkGetPhysicalDeviceQueueFamilyProperties(physicalDevice,
+ &count,
+ queueFamilyProperties.data());
+
+ return VK_SUCCESS;
+ }
+
+ VkMemoryRequirements
+ getBufferMemoryRequirements(const Device& device,
+ const Buffer& buffer) const noexcept
+ {
+    VkMemoryRequirements requirements{};
+ vkGetBufferMemoryRequirements(device, buffer, &requirements);
+ return requirements;
+ }
+
+ VkResult allocateMemory(const Device& device,
+ const VkMemoryAllocateInfo& info,
+ DeviceMemory& memory) const noexcept
+ {
+ VkDeviceMemory h = {};
+ if (const VkResult r = vkAllocateMemory(device, &info, nullptr, &h)) {
+ return r;
+ } else if (!h) {
+ return VK_ERROR_OUT_OF_DEVICE_MEMORY;
+ }
+
+ memory = DeviceMemory{h, {device, vkFreeMemory}};
+ return VK_SUCCESS;
+ }
+
+ VkResult mapMemory(const Device& device,
+ const DeviceMemory& memory,
+ VkDeviceSize offset,
+ VkDeviceSize size,
+ VkMemoryMapFlags flags,
+ MappedMemory& mappedMemory) const noexcept
+ {
+ void* data = nullptr;
+ if (const VkResult r =
+ vkMapMemory(device, memory, offset, size, flags, &data)) {
+ return r;
+ }
+
+ mappedMemory = MappedMemory{*this, device, memory, data};
+ return VK_SUCCESS;
+ }
+
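+  /*
+     MappedMemory unmaps the range when it is destroyed, so the mapping
+     lives exactly as long as the object.  A sketch, assuming `api`,
+     `device`, `memory`, `vertices`, and `size` exist, and that
+     MappedMemory exposes the mapped pointer via get():
+
+         sk::MappedMemory map;
+         if (const VkResult r = api.mapMemory(
+                 device, memory, 0u, VK_WHOLE_SIZE, 0u, map)) {
+             return r;
+         }
+         memcpy(map.get(), vertices.data(), size); // Unmapped at scope exit
+  */
+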
+ VkResult queueSubmit(const Queue& queue,
+ uint32_t submitCount,
+ const VkSubmitInfo& submits,
+ const Fence& fence) const noexcept
+ {
+ return vkQueueSubmit(queue, submitCount, &submits, fence);
+ }
+
+ VkResult queueSubmit(const Queue& queue,
+ const VkSubmitInfo& submit,
+ const Fence& fence) const noexcept
+ {
+ return vkQueueSubmit(queue, 1u, &submit, fence);
+ }
+
+  template<size_t descriptorWriteCount, size_t descriptorCopyCount>
+  void updateDescriptorSets(
+      const Device& device,
+      const std::array<VkWriteDescriptorSet, descriptorWriteCount>&
+          descriptorWrites,
+      const std::array<VkCopyDescriptorSet, descriptorCopyCount>&
+          descriptorCopies) const noexcept
+ {
+ vkUpdateDescriptorSets(device,
+ static_cast<uint32_t>(descriptorWrites.size()),
+ descriptorWrites.data(),
+ static_cast<uint32_t>(descriptorCopies.size()),
+ descriptorCopies.data());
+ }
+
+ VkResult resetFence(const Device& device, const Fence& fence) const noexcept
+ {
+ VkFence h = fence;
+ return vkResetFences(device, 1u, &h);
+ }
+
+ VkResult waitForFence(const Device& device,
+ const Fence& fence,
+ uint64_t timeout) const noexcept
+ {
+ VkFence h = fence;
+ return vkWaitForFences(device, 1u, &h, VK_TRUE, timeout);
+ }
+
+ VkResult
+ waitForFence(const Device& device, const Fence& fence) const noexcept
+ {
+ VkFence h = fence;
+ return vkWaitForFences(device, 1u, &h, VK_TRUE, UINT64_MAX);
+ }
+
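+  /*
+     Typical synchronous submission with the fence helpers above, as a
+     sketch (`api`, `queue`, `submitInfo`, `device`, and `fence` are
+     assumed names):
+
+         if (const VkResult r = api.queueSubmit(queue, submitInfo, fence)) {
+             return r;
+         }
+         api.waitForFence(device, fence); // Blocks until the work completes
+         api.resetFence(device, fence);   // Fence can now be reused
+  */
+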
+ // Scoped command buffer interface
+ SYBOK_NODISCARD
+ CommandScope
+ beginCommandBuffer(VkCommandBuffer commandBuffer,
+ VkCommandBufferBeginInfo beginInfo) const noexcept;
+
+ // VK_EXT_debug_report
+
+ VkResult createDebugReportCallbackEXT(
+ const Instance& instance,
+ const VkDebugReportCallbackCreateInfoEXT& createInfo,
+ DebugReportCallbackEXT& callback) const noexcept
+ {
+ VkDebugReportCallbackEXT h = {};
+
+ if (const VkResult r = vkCreateDebugReportCallbackEXT(
+ instance, &createInfo, nullptr, &h)) {
+ return r;
+ } else if (!h) {
+ return VK_ERROR_FEATURE_NOT_PRESENT;
+ }
+
+ callback = {h, {instance, vkDestroyDebugReportCallbackEXT}};
+ return VK_SUCCESS;
+ }
+
+ // VK_KHR_surface
+
+ VkResult getPhysicalDeviceSurfaceCapabilitiesKHR(
+ const PhysicalDevice& physicalDevice,
+ const SurfaceKHR& surface,
+ VkSurfaceCapabilitiesKHR& capabilities) const noexcept
+ {
+ return vkGetPhysicalDeviceSurfaceCapabilitiesKHR(physicalDevice,
+ surface,
+ &capabilities);
+ }
+
+ template<typename Vector>
+ VkResult
+ getPhysicalDeviceSurfaceFormatsKHR(const PhysicalDevice& physicalDevice,
+ const SurfaceKHR& surface,
+ Vector& surfaceFormats) const noexcept
+ {
+ return detail::wrapVectorAccessor<VkSurfaceFormatKHR>(
+ surfaceFormats,
+ vkGetPhysicalDeviceSurfaceFormatsKHR,
+ physicalDevice,
+ surface.get());
+ }
+
+ template<typename Vector>
+ VkResult getPhysicalDeviceSurfacePresentModesKHR(
+ const PhysicalDevice& physicalDevice,
+ const SurfaceKHR& surface,
+ Vector& presentModes) const noexcept
+ {
+ return detail::wrapVectorAccessor<VkPresentModeKHR>(
+ presentModes,
+ vkGetPhysicalDeviceSurfacePresentModesKHR,
+ physicalDevice,
+ surface.get());
+ }
+
+ VkResult
+ getPhysicalDeviceSurfaceSupportKHR(const PhysicalDevice& physicalDevice,
+ uint32_t queueFamilyIndex,
+ const SurfaceKHR& surface,
+ bool& supported) const noexcept
+ {
+ VkBool32 s = {};
+
+ if (VkResult r = vkGetPhysicalDeviceSurfaceSupportKHR(
+ physicalDevice, queueFamilyIndex, surface, &s)) {
+ return r;
+ }
+
+ supported = s;
+ return VK_SUCCESS;
+ }
+
+ // VK_KHR_swapchain
+
+ VkResult acquireNextImageKHR(const Device& device,
+ const SwapchainKHR& swapchain,
+ uint64_t timeout,
+ const Semaphore& semaphore,
+ const OptionalParameter<Fence>& fence,
+ uint32_t* pImageIndex) const noexcept
+ {
+ return vkAcquireNextImageKHR(
+ device, swapchain, timeout, semaphore, fence.get(), pImageIndex);
+ }
+
+ template<class Vector>
+ VkResult getSwapchainImagesKHR(const Device& device,
+ const SwapchainKHR& swapchain,
+ Vector& images) const noexcept
+ {
+ return detail::wrapVectorAccessor<VkImage>(images,
+ vkGetSwapchainImagesKHR,
+ device.get(),
+ swapchain.get());
+ }
+
+ VkResult createSwapchainKHR(const Device& device,
+ const VkSwapchainCreateInfoKHR& createInfo,
+ SwapchainKHR& swapchain) const noexcept
+ {
+ VkSwapchainKHR h = {};
+ const VkResult r =
+ vkCreateSwapchainKHR(device, &createInfo, nullptr, &h);
+
+ if (r) {
+ return r;
+ } else if (!h) {
+ return VK_ERROR_INCOMPATIBLE_DRIVER;
+ }
+
+ swapchain = {h, {device, vkDestroySwapchainKHR}};
+ return VK_SUCCESS;
+ }
+
+ VkResult queuePresentKHR(const Queue& queue,
+ const VkPresentInfoKHR& presentInfo) const noexcept
+ {
+ return vkQueuePresentKHR(queue, &presentInfo);
+ }
+
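+  /*
+     A per-frame sketch of the swapchain calls above; every name here is
+     an assumption for illustration, and `{}` assumes a default-constructed
+     OptionalParameter means "no fence".  VK_ERROR_OUT_OF_DATE_KHR from
+     either call signals that the swapchain must be recreated:
+
+         uint32_t imageIndex = 0u;
+         VkResult r          = api.acquireNextImageKHR(device,
+                                                       swapchain,
+                                                       UINT64_MAX,
+                                                       imageAvailable,
+                                                       {},
+                                                       &imageIndex);
+         // ... submit work that signals the semaphore presentInfo waits on ...
+         r = api.queuePresentKHR(queue, presentInfo);
+  */
+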
+#define SK_FUNC(name) \
+ PFN_##name name {} // NOLINT
+
+ // Vulkan 1.0 Core
+ SK_FUNC(vkAllocateCommandBuffers);
+ SK_FUNC(vkAllocateDescriptorSets);
+ SK_FUNC(vkAllocateMemory);
+ SK_FUNC(vkBeginCommandBuffer);
+ SK_FUNC(vkBindBufferMemory);
+ SK_FUNC(vkBindImageMemory);
+ SK_FUNC(vkCmdBeginQuery);
+ SK_FUNC(vkCmdBeginRenderPass);
+ SK_FUNC(vkCmdBindDescriptorSets);
+ SK_FUNC(vkCmdBindIndexBuffer);
+ SK_FUNC(vkCmdBindPipeline);
+ SK_FUNC(vkCmdBindVertexBuffers);
+ SK_FUNC(vkCmdBlitImage);
+ SK_FUNC(vkCmdClearAttachments);
+ SK_FUNC(vkCmdClearColorImage);
+ SK_FUNC(vkCmdClearDepthStencilImage);
+ SK_FUNC(vkCmdCopyBuffer);
+ SK_FUNC(vkCmdCopyBufferToImage);
+ SK_FUNC(vkCmdCopyImage);
+ SK_FUNC(vkCmdCopyImageToBuffer);
+ SK_FUNC(vkCmdCopyQueryPoolResults);
+ SK_FUNC(vkCmdDispatch);
+ SK_FUNC(vkCmdDispatchIndirect);
+ SK_FUNC(vkCmdDraw);
+ SK_FUNC(vkCmdDrawIndexed);
+ SK_FUNC(vkCmdDrawIndexedIndirect);
+ SK_FUNC(vkCmdDrawIndirect);
+ SK_FUNC(vkCmdEndQuery);
+ SK_FUNC(vkCmdEndRenderPass);
+ SK_FUNC(vkCmdExecuteCommands);
+ SK_FUNC(vkCmdFillBuffer);
+ SK_FUNC(vkCmdNextSubpass);
+ SK_FUNC(vkCmdPipelineBarrier);
+ SK_FUNC(vkCmdPushConstants);
+ SK_FUNC(vkCmdResetEvent);
+ SK_FUNC(vkCmdResetQueryPool);
+ SK_FUNC(vkCmdResolveImage);
+ SK_FUNC(vkCmdSetBlendConstants);
+ SK_FUNC(vkCmdSetDepthBias);
+ SK_FUNC(vkCmdSetDepthBounds);
+ SK_FUNC(vkCmdSetEvent);
+ SK_FUNC(vkCmdSetLineWidth);
+ SK_FUNC(vkCmdSetScissor);
+ SK_FUNC(vkCmdSetStencilCompareMask);
+ SK_FUNC(vkCmdSetStencilReference);
+ SK_FUNC(vkCmdSetStencilWriteMask);
+ SK_FUNC(vkCmdSetViewport);
+ SK_FUNC(vkCmdUpdateBuffer);
+ SK_FUNC(vkCmdWaitEvents);
+ SK_FUNC(vkCmdWriteTimestamp);
+ SK_FUNC(vkCreateBuffer);
+ SK_FUNC(vkCreateBufferView);
+ SK_FUNC(vkCreateCommandPool);
+ SK_FUNC(vkCreateComputePipelines);
+ SK_FUNC(vkCreateDescriptorPool);
+ SK_FUNC(vkCreateDescriptorSetLayout);
+ SK_FUNC(vkCreateDevice);
+ SK_FUNC(vkCreateEvent);
+ SK_FUNC(vkCreateFence);
+ SK_FUNC(vkCreateFramebuffer);
+ SK_FUNC(vkCreateGraphicsPipelines);
+ SK_FUNC(vkCreateImage);
+ SK_FUNC(vkCreateImageView);
+ SK_FUNC(vkCreateInstance);
+ SK_FUNC(vkCreatePipelineCache);
+ SK_FUNC(vkCreatePipelineLayout);
+ SK_FUNC(vkCreateQueryPool);
+ SK_FUNC(vkCreateRenderPass);
+ SK_FUNC(vkCreateSampler);
+ SK_FUNC(vkCreateSemaphore);
+ SK_FUNC(vkCreateShaderModule);
+ SK_FUNC(vkDestroyBuffer);
+ SK_FUNC(vkDestroyBufferView);
+ SK_FUNC(vkDestroyCommandPool);
+ SK_FUNC(vkDestroyDescriptorPool);
+ SK_FUNC(vkDestroyDescriptorSetLayout);
+ SK_FUNC(vkDestroyDevice);
+ SK_FUNC(vkDestroyEvent);
+ SK_FUNC(vkDestroyFence);
+ SK_FUNC(vkDestroyFramebuffer);
+ SK_FUNC(vkDestroyImage);
+ SK_FUNC(vkDestroyImageView);
+ SK_FUNC(vkDestroyPipeline);
+ SK_FUNC(vkDestroyPipelineCache);
+ SK_FUNC(vkDestroyPipelineLayout);
+ SK_FUNC(vkDestroyQueryPool);
+ SK_FUNC(vkDestroyRenderPass);
+ SK_FUNC(vkDestroySampler);
+ SK_FUNC(vkDestroySemaphore);
+ SK_FUNC(vkDestroyShaderModule);
+ SK_FUNC(vkDeviceWaitIdle);
+ SK_FUNC(vkEndCommandBuffer);
+ SK_FUNC(vkEnumerateDeviceExtensionProperties);
+ SK_FUNC(vkEnumerateDeviceLayerProperties);
+ SK_FUNC(vkEnumeratePhysicalDevices);
+ SK_FUNC(vkFlushMappedMemoryRanges);
+ SK_FUNC(vkFreeCommandBuffers);
+ SK_FUNC(vkFreeDescriptorSets);
+ SK_FUNC(vkFreeMemory);
+ SK_FUNC(vkGetBufferMemoryRequirements);
+ SK_FUNC(vkGetDeviceMemoryCommitment);
+ SK_FUNC(vkGetDeviceProcAddr);
+ SK_FUNC(vkGetDeviceQueue);
+ SK_FUNC(vkGetEventStatus);
+ SK_FUNC(vkGetFenceStatus);
+ SK_FUNC(vkGetImageMemoryRequirements);
+ SK_FUNC(vkGetImageSparseMemoryRequirements);
+ SK_FUNC(vkGetImageSubresourceLayout);
+ SK_FUNC(vkGetInstanceProcAddr);
+ SK_FUNC(vkGetPhysicalDeviceFeatures);
+ SK_FUNC(vkGetPhysicalDeviceFormatProperties);
+ SK_FUNC(vkGetPhysicalDeviceImageFormatProperties);
+ SK_FUNC(vkGetPhysicalDeviceMemoryProperties);
+ SK_FUNC(vkGetPhysicalDeviceProperties);
+ SK_FUNC(vkGetPhysicalDeviceQueueFamilyProperties);
+ SK_FUNC(vkGetPhysicalDeviceSparseImageFormatProperties);
+ SK_FUNC(vkGetPipelineCacheData);
+ SK_FUNC(vkGetQueryPoolResults);
+ SK_FUNC(vkGetRenderAreaGranularity);
+ SK_FUNC(vkInvalidateMappedMemoryRanges);
+ SK_FUNC(vkMapMemory);
+ SK_FUNC(vkMergePipelineCaches);
+ SK_FUNC(vkQueueBindSparse);
+ SK_FUNC(vkQueueSubmit);
+ SK_FUNC(vkQueueWaitIdle);
+ SK_FUNC(vkResetCommandBuffer);
+ SK_FUNC(vkResetCommandPool);
+ SK_FUNC(vkResetDescriptorPool);
+ SK_FUNC(vkResetEvent);
+ SK_FUNC(vkResetFences);
+ SK_FUNC(vkSetEvent);
+ SK_FUNC(vkUnmapMemory);
+ SK_FUNC(vkUpdateDescriptorSets);
+ SK_FUNC(vkWaitForFences);
+
+ // VK_EXT_debug_report
+ SK_FUNC(vkCreateDebugReportCallbackEXT);
+ SK_FUNC(vkDebugReportMessageEXT);
+ SK_FUNC(vkDestroyDebugReportCallbackEXT);
+
+ // VK_KHR_surface
+ SK_FUNC(vkDestroySurfaceKHR);
+ SK_FUNC(vkGetPhysicalDeviceSurfaceCapabilitiesKHR);
+ SK_FUNC(vkGetPhysicalDeviceSurfaceFormatsKHR);
+ SK_FUNC(vkGetPhysicalDeviceSurfacePresentModesKHR);
+ SK_FUNC(vkGetPhysicalDeviceSurfaceSupportKHR);
+
+ // VK_KHR_swapchain
+ SK_FUNC(vkAcquireNextImageKHR);
+ SK_FUNC(vkCreateSwapchainKHR);
+ SK_FUNC(vkDestroySwapchainKHR);
+ SK_FUNC(vkGetDeviceGroupPresentCapabilitiesKHR);
+ SK_FUNC(vkGetDeviceGroupSurfacePresentModesKHR);
+ SK_FUNC(vkGetPhysicalDevicePresentRectanglesKHR);
+ SK_FUNC(vkGetSwapchainImagesKHR);
+ SK_FUNC(vkQueuePresentKHR);
+
+#undef SK_FUNC
+
+private:
+ template<class T>
+ static inline VkResult wrapResult(const VkResult r,
+ const typename T::Handle handle,
+ typename T::Deleter&& deleter,
+ T& result) noexcept
+ {
+ if (r) {
+ return r;
+ } else if (!handle) {
+ return VK_ERROR_INITIALIZATION_FAILED;
+ }
+
+ result = T{handle, std::move(deleter)};
+ return VK_SUCCESS;
+ }
+};
+
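+/*
+   Every create*() member above follows one pattern: it returns a plain
+   VkResult and, only on success, moves an owning handle into its output
+   parameter.  The handle destroys the underlying Vulkan object when it
+   goes out of scope, so there is no manual vkDestroy*() call.  A hedged
+   sketch with assumed `api`, `device`, and `createInfo`:
+
+       sk::RenderPass renderPass;
+       if (const VkResult r =
+               api.createRenderPass(device, createInfo, renderPass)) {
+           return r; // `renderPass` is untouched on failure
+       }
+       // `renderPass` now owns the VkRenderPass
+*/
+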
+/// Scope for commands that work both inside and outside a render pass
+class CommonCommandScope
+{
+public:
+ CommonCommandScope(const VulkanApi& api,
+ VkCommandBuffer commandBuffer,
+ VkResult result) noexcept
+ : _api{api}
+ , _commandBuffer{commandBuffer}
+ , _result{result}
+ {}
+
+ CommonCommandScope(const CommonCommandScope&) noexcept = delete;
+ CommonCommandScope& operator=(const CommonCommandScope&) noexcept = delete;
+
+  CommonCommandScope(CommonCommandScope&& scope) noexcept
+      : _api{scope._api}
+      , _commandBuffer{scope._commandBuffer}
+      , _result{scope._result}
+  {
+      scope._commandBuffer = {};
+  }
+
+ CommonCommandScope& operator=(CommonCommandScope&&) = delete;
+
+ ~CommonCommandScope() noexcept = default;
+
+ explicit operator bool() const noexcept { return _result == VK_SUCCESS; }
+
+ VkResult error() const noexcept { return _result; }
+
+ void bindPipeline(VkPipelineBindPoint pipelineBindPoint,
+ VkPipeline pipeline) const noexcept
+ {
+ _api.vkCmdBindPipeline(_commandBuffer, pipelineBindPoint, pipeline);
+ }
+
+ void setViewport(uint32_t firstViewport,
+ uint32_t viewportCount,
+ const VkViewport* pViewports) const noexcept
+ {
+ _api.vkCmdSetViewport(_commandBuffer,
+ firstViewport,
+ viewportCount,
+ pViewports);
+ }
+
+ void setScissor(uint32_t firstScissor,
+ uint32_t scissorCount,
+ const VkRect2D* pScissors) const noexcept
+ {
+ _api.vkCmdSetScissor(_commandBuffer,
+ firstScissor,
+ scissorCount,
+ pScissors);
+ }
+
+ void setLineWidth(float lineWidth) const noexcept
+ {
+ _api.vkCmdSetLineWidth(_commandBuffer, lineWidth);
+ }
+
+ void setDepthBias(float depthBiasConstantFactor,
+ float depthBiasClamp,
+ float depthBiasSlopeFactor) const noexcept
+ {
+ _api.vkCmdSetDepthBias(_commandBuffer,
+ depthBiasConstantFactor,
+ depthBiasClamp,
+ depthBiasSlopeFactor);
+ }
+
+ void setBlendConstants(const float blendConstants[4]) const noexcept
+ {
+ _api.vkCmdSetBlendConstants(_commandBuffer, blendConstants);
+ }
+
+ void
+ setDepthBounds(float minDepthBounds, float maxDepthBounds) const noexcept
+ {
+ _api.vkCmdSetDepthBounds(_commandBuffer,
+ minDepthBounds,
+ maxDepthBounds);
+ }
+
+ void setStencilCompareMask(VkStencilFaceFlags faceMask,
+ uint32_t compareMask) const noexcept
+ {
+ _api.vkCmdSetStencilCompareMask(_commandBuffer, faceMask, compareMask);
+ }
+
+ void setStencilWriteMask(VkStencilFaceFlags faceMask,
+ uint32_t writeMask) const noexcept
+ {
+ _api.vkCmdSetStencilWriteMask(_commandBuffer, faceMask, writeMask);
+ }
+
+ void setStencilReference(VkStencilFaceFlags faceMask,
+ uint32_t reference) const noexcept
+ {
+ _api.vkCmdSetStencilReference(_commandBuffer, faceMask, reference);
+ }
+
+ void bindDescriptorSets(VkPipelineBindPoint pipelineBindPoint,
+ VkPipelineLayout layout,
+ uint32_t firstSet,
+ uint32_t descriptorSetCount,
+ const VkDescriptorSet* pDescriptorSets,
+ uint32_t dynamicOffsetCount,
+ const uint32_t* pDynamicOffsets) const noexcept
+ {
+ _api.vkCmdBindDescriptorSets(_commandBuffer,
+ pipelineBindPoint,
+ layout,
+ firstSet,
+ descriptorSetCount,
+ pDescriptorSets,
+ dynamicOffsetCount,
+ pDynamicOffsets);
+ }
+
+ void bindIndexBuffer(VkBuffer buffer,
+ VkDeviceSize offset,
+ VkIndexType indexType) const noexcept
+ {
+ _api.vkCmdBindIndexBuffer(_commandBuffer, buffer, offset, indexType);
+ }
+
+ void bindVertexBuffers(uint32_t firstBinding,
+ uint32_t bindingCount,
+ const VkBuffer* pBuffers,
+ const VkDeviceSize* pOffsets) const noexcept
+ {
+ _api.vkCmdBindVertexBuffers(
+ _commandBuffer, firstBinding, bindingCount, pBuffers, pOffsets);
+ }
+
+ void
+ waitEvents(uint32_t eventCount,
+ const VkEvent* pEvents,
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ uint32_t memoryBarrierCount,
+ const VkMemoryBarrier* pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount,
+ const VkBufferMemoryBarrier* pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount,
+ const VkImageMemoryBarrier* pImageMemoryBarriers) const noexcept
+ {
+ _api.vkCmdWaitEvents(_commandBuffer,
+ eventCount,
+ pEvents,
+ srcStageMask,
+ dstStageMask,
+ memoryBarrierCount,
+ pMemoryBarriers,
+ bufferMemoryBarrierCount,
+ pBufferMemoryBarriers,
+ imageMemoryBarrierCount,
+ pImageMemoryBarriers);
+ }
+
+ void pipelineBarrier(
+ VkPipelineStageFlags srcStageMask,
+ VkPipelineStageFlags dstStageMask,
+ VkDependencyFlags dependencyFlags,
+ uint32_t memoryBarrierCount,
+ const VkMemoryBarrier* pMemoryBarriers,
+ uint32_t bufferMemoryBarrierCount,
+ const VkBufferMemoryBarrier* pBufferMemoryBarriers,
+ uint32_t imageMemoryBarrierCount,
+ const VkImageMemoryBarrier* pImageMemoryBarriers) const noexcept
+ {
+ _api.vkCmdPipelineBarrier(_commandBuffer,
+ srcStageMask,
+ dstStageMask,
+ dependencyFlags,
+ memoryBarrierCount,
+ pMemoryBarriers,
+ bufferMemoryBarrierCount,
+ pBufferMemoryBarriers,
+ imageMemoryBarrierCount,
+ pImageMemoryBarriers);
+ }
+
+ void beginQuery(VkQueryPool queryPool,
+ uint32_t query,
+ VkQueryControlFlags flags) const noexcept
+ {
+ _api.vkCmdBeginQuery(_commandBuffer, queryPool, query, flags);
+ }
+
+ void endQuery(VkQueryPool queryPool, uint32_t query) const noexcept
+ {
+ _api.vkCmdEndQuery(_commandBuffer, queryPool, query);
+ }
+
+ void writeTimestamp(VkPipelineStageFlagBits pipelineStage,
+ VkQueryPool queryPool,
+ uint32_t query) const noexcept
+ {
+ _api.vkCmdWriteTimestamp(_commandBuffer,
+ pipelineStage,
+ queryPool,
+ query);
+ }
+
+ void pushConstants(VkPipelineLayout layout,
+ VkShaderStageFlags stageFlags,
+ uint32_t offset,
+ uint32_t size,
+ const void* pValues) const noexcept
+ {
+ _api.vkCmdPushConstants(
+ _commandBuffer, layout, stageFlags, offset, size, pValues);
+ }
+
+ void executeCommands(uint32_t commandBufferCount,
+ const VkCommandBuffer* pCommandBuffers) const noexcept
+ {
+ _api.vkCmdExecuteCommands(_commandBuffer,
+ commandBufferCount,
+ pCommandBuffers);
+ }
+
+protected:
+ const VulkanApi& _api;
+ VkCommandBuffer _commandBuffer;
+ VkResult _result;
+};
+
+/// Top-level command scope for recording commands outside a render pass
+class CommandScope : public CommonCommandScope
+{
+public:
+ CommandScope(const VulkanApi& api,
+ VkCommandBuffer commandBuffer,
+ VkResult result) noexcept
+ : CommonCommandScope{api, commandBuffer, result}
+ {}
+
+ CommandScope(const CommandScope&) = delete;
+ CommandScope& operator=(const CommandScope&) = delete;
+
+ CommandScope(CommandScope&& scope) noexcept
+      : CommonCommandScope{std::move(scope)}
+ {}
+
+ CommandScope& operator=(CommandScope&&) = delete;
+
+ ~CommandScope() noexcept
+ {
+ assert(!_commandBuffer); // Buffer must be finished with end()
+ }
+
+ VkResult end() noexcept
+ {
+ if (_commandBuffer) {
+ VkResult r = _api.vkEndCommandBuffer(_commandBuffer);
+ _commandBuffer = {};
+ return r;
+ }
+
+ return VK_NOT_READY;
+ }
+
+ void dispatch(uint32_t groupCountX,
+ uint32_t groupCountY,
+ uint32_t groupCountZ) const noexcept
+ {
+ _api.vkCmdDispatch(_commandBuffer,
+ groupCountX,
+ groupCountY,
+ groupCountZ);
+ }
+
+ void dispatchIndirect(VkBuffer buffer, VkDeviceSize offset) const noexcept
+ {
+ _api.vkCmdDispatchIndirect(_commandBuffer, buffer, offset);
+ }
+
+ void copyBuffer(VkBuffer srcBuffer,
+ VkBuffer dstBuffer,
+ uint32_t regionCount,
+ const VkBufferCopy* pRegions) const noexcept
+ {
+ _api.vkCmdCopyBuffer(
+ _commandBuffer, srcBuffer, dstBuffer, regionCount, pRegions);
+ }
+
+ void copyImage(VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkImage dstImage,
+ VkImageLayout dstImageLayout,
+ uint32_t regionCount,
+ const VkImageCopy* pRegions) const noexcept
+ {
+ _api.vkCmdCopyImage(_commandBuffer,
+ srcImage,
+ srcImageLayout,
+ dstImage,
+ dstImageLayout,
+ regionCount,
+ pRegions);
+ }
+
+ void blitImage(VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkImage dstImage,
+ VkImageLayout dstImageLayout,
+ uint32_t regionCount,
+ const VkImageBlit* pRegions,
+ VkFilter filter) const noexcept
+ {
+ _api.vkCmdBlitImage(_commandBuffer,
+ srcImage,
+ srcImageLayout,
+ dstImage,
+ dstImageLayout,
+ regionCount,
+ pRegions,
+ filter);
+ }
+
+ void copyBufferToImage(VkBuffer srcBuffer,
+ VkImage dstImage,
+ VkImageLayout dstImageLayout,
+ uint32_t regionCount,
+ const VkBufferImageCopy* pRegions) const noexcept
+ {
+ _api.vkCmdCopyBufferToImage(_commandBuffer,
+ srcBuffer,
+ dstImage,
+ dstImageLayout,
+ regionCount,
+ pRegions);
+ }
+
+ void copyImageToBuffer(VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkBuffer dstBuffer,
+ uint32_t regionCount,
+ const VkBufferImageCopy* pRegions) const noexcept
+ {
+ _api.vkCmdCopyImageToBuffer(_commandBuffer,
+ srcImage,
+ srcImageLayout,
+ dstBuffer,
+ regionCount,
+ pRegions);
+ }
+
+ void updateBuffer(VkBuffer dstBuffer,
+ VkDeviceSize dstOffset,
+ VkDeviceSize dataSize,
+ const void* pData) const noexcept
+ {
+ _api.vkCmdUpdateBuffer(
+ _commandBuffer, dstBuffer, dstOffset, dataSize, pData);
+ }
+
+ void fillBuffer(VkBuffer dstBuffer,
+ VkDeviceSize dstOffset,
+ VkDeviceSize size,
+ uint32_t data) const noexcept
+ {
+ _api.vkCmdFillBuffer(_commandBuffer, dstBuffer, dstOffset, size, data);
+ }
+
+ void clearColorImage(VkImage image,
+ VkImageLayout imageLayout,
+ const VkClearColorValue& color,
+ uint32_t rangeCount,
+ const VkImageSubresourceRange* pRanges) const noexcept
+ {
+ _api.vkCmdClearColorImage(
+ _commandBuffer, image, imageLayout, &color, rangeCount, pRanges);
+ }
+
+ void clearDepthStencilImage(
+ VkImage image,
+ VkImageLayout imageLayout,
+ const VkClearDepthStencilValue& depthStencil,
+ uint32_t rangeCount,
+ const VkImageSubresourceRange* pRanges) const noexcept
+ {
+ _api.vkCmdClearDepthStencilImage(_commandBuffer,
+ image,
+ imageLayout,
+ &depthStencil,
+ rangeCount,
+ pRanges);
+ }
+
+ void resolveImage(VkImage srcImage,
+ VkImageLayout srcImageLayout,
+ VkImage dstImage,
+ VkImageLayout dstImageLayout,
+ uint32_t regionCount,
+ const VkImageResolve* pRegions) const noexcept
+ {
+ _api.vkCmdResolveImage(_commandBuffer,
+ srcImage,
+ srcImageLayout,
+ dstImage,
+ dstImageLayout,
+ regionCount,
+ pRegions);
+ }
+
+ void setEvent(VkEvent event, VkPipelineStageFlags stageMask) const noexcept
+ {
+ _api.vkCmdSetEvent(_commandBuffer, event, stageMask);
+ }
+
+ void
+ resetEvent(VkEvent event, VkPipelineStageFlags stageMask) const noexcept
+ {
+ _api.vkCmdResetEvent(_commandBuffer, event, stageMask);
+ }
+
+ void resetQueryPool(VkQueryPool queryPool,
+ uint32_t firstQuery,
+ uint32_t queryCount) const noexcept
+ {
+ _api.vkCmdResetQueryPool(_commandBuffer,
+ queryPool,
+ firstQuery,
+ queryCount);
+ }
+
+ void copyQueryPoolResults(VkQueryPool queryPool,
+ uint32_t firstQuery,
+ uint32_t queryCount,
+ VkBuffer dstBuffer,
+ VkDeviceSize dstOffset,
+ VkDeviceSize stride,
+ VkQueryResultFlags flags) const noexcept
+ {
+ _api.vkCmdCopyQueryPoolResults(_commandBuffer,
+ queryPool,
+ firstQuery,
+ queryCount,
+ dstBuffer,
+ dstOffset,
+ stride,
+ flags);
+ }
+
+ SYBOK_NODISCARD
+ RenderCommandScope
+ beginRenderPass(const VkRenderPassBeginInfo& renderPassBegin,
+ VkSubpassContents contents) const noexcept;
+};
+
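+/// Scope for commands recorded inside a render pass, ended on destruction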
+class RenderCommandScope : public CommonCommandScope
+{
+public:
+ RenderCommandScope(const VulkanApi& api,
+ VkCommandBuffer commandBuffer) noexcept
+ : CommonCommandScope{api, commandBuffer, VK_SUCCESS}
+ {}
+
+ RenderCommandScope(const RenderCommandScope&) = delete;
+ RenderCommandScope& operator=(const RenderCommandScope&) = delete;
+
+ RenderCommandScope(RenderCommandScope&& scope) noexcept
+      : CommonCommandScope{std::move(scope)}
+ {}
+
+ RenderCommandScope& operator=(RenderCommandScope&&) = delete;
+
+  ~RenderCommandScope() noexcept
+  {
+      if (_commandBuffer) { // Null if this scope was moved from
+          _api.vkCmdEndRenderPass(_commandBuffer);
+      }
+  }
+
+ void draw(uint32_t vertexCount,
+ uint32_t instanceCount,
+ uint32_t firstVertex,
+ uint32_t firstInstance) const noexcept
+ {
+ _api.vkCmdDraw(_commandBuffer,
+ vertexCount,
+ instanceCount,
+ firstVertex,
+ firstInstance);
+ }
+
+ void drawIndexed(uint32_t indexCount,
+ uint32_t instanceCount,
+ uint32_t firstIndex,
+ int32_t vertexOffset,
+ uint32_t firstInstance) const noexcept
+ {
+ _api.vkCmdDrawIndexed(_commandBuffer,
+ indexCount,
+ instanceCount,
+ firstIndex,
+ vertexOffset,
+ firstInstance);
+ }
+
+ void drawIndirect(VkBuffer buffer,
+ VkDeviceSize offset,
+ uint32_t drawCount,
+ uint32_t stride) const noexcept
+ {
+ _api.vkCmdDrawIndirect(
+ _commandBuffer, buffer, offset, drawCount, stride);
+ }
+
+ void drawIndexedIndirect(VkBuffer buffer,
+ VkDeviceSize offset,
+ uint32_t drawCount,
+ uint32_t stride) const noexcept
+ {
+ _api.vkCmdDrawIndexedIndirect(
+ _commandBuffer, buffer, offset, drawCount, stride);
+ }
+
+ void clearAttachments(uint32_t attachmentCount,
+ const VkClearAttachment& attachments,
+ uint32_t rectCount,
+ const VkClearRect* pRects) const noexcept
+ {
+ _api.vkCmdClearAttachments(
+ _commandBuffer, attachmentCount, &attachments, rectCount, pRects);
+ }
+
+ void nextSubpass(VkSubpassContents contents) const noexcept
+ {
+ _api.vkCmdNextSubpass(_commandBuffer, contents);
+ }
+};
+
+inline CommandScope
+VulkanApi::beginCommandBuffer(
+ VkCommandBuffer commandBuffer,
+ const VkCommandBufferBeginInfo beginInfo) const noexcept
+{
+ if (const VkResult r = vkBeginCommandBuffer(commandBuffer, &beginInfo)) {
+ return {*this, nullptr, r};
+ }
+
+ return {*this, commandBuffer, VK_SUCCESS};
+}
+
+inline RenderCommandScope
+CommandScope::beginRenderPass(const VkRenderPassBeginInfo& renderPassBegin,
+ VkSubpassContents contents) const noexcept
+{
+ _api.vkCmdBeginRenderPass(_commandBuffer, &renderPassBegin, contents);
+
+ return {_api, _commandBuffer};
+}
+
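+/*
+   The scope classes enforce begin/end bracketing: beginCommandBuffer()
+   yields a CommandScope that must be finished with end(), and
+   beginRenderPass() yields a RenderCommandScope that ends the pass when
+   it is destroyed.  A sketch of recording one frame, with `api`, `cmd`,
+   `beginInfo`, and `passBegin` assumed:
+
+       auto commands = api.beginCommandBuffer(cmd, beginInfo);
+       if (!commands) {
+           return commands.error();
+       }
+       {
+           const auto pass = commands.beginRenderPass(
+               passBegin, VK_SUBPASS_CONTENTS_INLINE);
+           pass.draw(4u, 1u, 0u, 0u); // Pass ends when `pass` is destroyed
+       }
+       if (const VkResult r = commands.end()) {
+           return r;
+       }
+*/
+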
+inline MappedMemory::~MappedMemory() noexcept
+{
+ if (_api && _memory) {
+ _api->vkUnmapMemory(_device, _memory);
+ }
+}
+
+} // namespace sk
+
+#endif // SYBOK_HPP