diff --git a/.github/workflows/workflow.yml b/.github/workflows/workflow.yml index ad9dd134..6ac20ffb 100644 --- a/.github/workflows/workflow.yml +++ b/.github/workflows/workflow.yml @@ -7,6 +7,71 @@ on: branches: [ main ] jobs: + + check-android-changes: + name: Check Android Changes + runs-on: ubuntu-latest + outputs: + should_build: ${{ steps.check.outputs.should_build }} + + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 # Fetch all history for all branches and tags + + - name: Check for Android changes + id: check + run: | + # For push events, check if workflow file has changed + if [ "${{ github.event_name }}" == "push" ] && [ -n "${{ github.event.before }}" ] && [ -n "${{ github.event.after }}" ]; then + if git diff --name-only ${{ github.event.before }} ${{ github.event.after }} | grep -q ".github/workflows/workflow.yml"; then + echo "Workflow file has changed, building Android" + echo "should_build=true" >> $GITHUB_OUTPUT + exit 0 + fi + fi + + # For pull requests, check the files changed in the PR + if [ "${{ github.event_name }}" == "pull_request" ]; then + echo "Checking files changed in pull request..." + + # Check if PR has android label or title contains android + if [[ "${{ contains(github.event.pull_request.labels.*.name, 'android') }}" == "true" || \ + "${{ contains(github.event.pull_request.title, 'android') }}" == "true" || \ + "${{ contains(github.event.pull_request.title, 'Android') }}" == "true" ]]; then + echo "PR has android label or title contains android" + echo "should_build=true" >> $GITHUB_OUTPUT + exit 0 + fi + + # Get the list of files changed in the PR + git fetch origin ${{ github.event.pull_request.base.ref }} --depth=1 + PR_FILES=$(git diff --name-only origin/${{ github.event.pull_request.base.ref }} ${{ github.sha }}) + else + # For pushes, check the files changed in the last commit + echo "Checking files changed in push..." 
+ + # If this is the first commit, build Android + if [ "${{ github.event.before }}" == "0000000000000000000000000000000000000000" ]; then + echo "First commit, building Android" + echo "should_build=true" >> $GITHUB_OUTPUT + exit 0 + fi + + # Get the list of files changed in the push + PR_FILES=$(git diff --name-only ${{ github.event.before }} ${{ github.event.after }}) + fi + + # Check if any Android-related files have changed + ANDROID_PATTERN="attachments/34_android.cpp|attachments/35_gltf_ktx.cpp|attachments/android/|attachments/27_shader_depth.(frag|vert)" + if echo "$PR_FILES" | grep -E "$ANDROID_PATTERN"; then + echo "Android-related files have changed" + echo "should_build=true" >> $GITHUB_OUTPUT + else + echo "No Android-related files have changed" + echo "should_build=false" >> $GITHUB_OUTPUT + fi + build: strategy: fail-fast: false @@ -14,39 +79,28 @@ jobs: os: [ubuntu-latest, windows-latest] include: - os: ubuntu-latest + ccache: ccache vulkan-install: | - # Download and install Vulkan SDK using the tar.gz method VULKAN_VERSION=$(curl -s https://vulkan.lunarg.com/sdk/latest/linux.txt) echo "Using Vulkan SDK version: $VULKAN_VERSION" - # Create a temporary directory for the SDK mkdir -p vulkan-sdk cd vulkan-sdk - # Download the SDK curl -O "https://sdk.lunarg.com/sdk/download/$VULKAN_VERSION/linux/vulkansdk-linux-x86_64-$VULKAN_VERSION.tar.xz" - # Extract the SDK - use tar with J flag for xz compression tar -xJf vulkansdk-linux-x86_64-$VULKAN_VERSION.tar.xz - # Set up environment variables echo "VULKAN_SDK=$PWD/$VULKAN_VERSION/x86_64" >> $GITHUB_ENV echo "PATH=$PWD/$VULKAN_VERSION/x86_64/bin:$PATH" >> $GITHUB_ENV echo "LD_LIBRARY_PATH=$PWD/$VULKAN_VERSION/x86_64/lib:$LD_LIBRARY_PATH" >> $GITHUB_ENV echo "VK_LAYER_PATH=$PWD/$VULKAN_VERSION/x86_64/etc/vulkan/explicit_layer.d" >> $GITHUB_ENV - # Return to the original directory cd .. 
deps-install: | - # GitHub runners already have cmake, ninja-build, and clang installed - sudo apt-get update - sudo apt-get install -y \ - libglfw3-dev \ - libglm-dev \ - libtinyobjloader-dev \ - libstb-dev + chmod +x scripts/install_dependencies_linux.sh + ./scripts/install_dependencies_linux.sh test-cmd: | - # Check if some of the expected executables were built if [ -f "00_base_code/00_base_code" ]; then echo "00_base_code built successfully" else @@ -68,44 +122,35 @@ jobs: exit 1 fi - os: windows-latest + ccache: sccache vulkan-install: | - # Download the Vulkan SDK installer - Invoke-WebRequest -Uri "https://sdk.lunarg.com/sdk/download/latest/windows/vulkan-sdk.exe" -OutFile "$env:TEMP\vulkan-sdk.exe" - - # Run the installer with silent options - Start-Process -FilePath "$env:TEMP\vulkan-sdk.exe" -ArgumentList "--accept-licenses --default-answer --confirm-command install" -Wait -NoNewWindow + if (Test-Path "C:\VulkanSDK") { + Write-Host "Using cached Vulkan SDK" + } else { + Write-Host "Downloading Vulkan SDK..." + choco install -y aria2 + aria2c --split=16 --max-connection-per-server=16 --min-split-size=1M --dir="$env:TEMP" --out="vulkan-sdk.exe" "https://sdk.lunarg.com/sdk/download/latest/windows/vulkan-sdk.exe" - # Find the actual installed SDK version - $vulkanPath = Get-ChildItem "C:\VulkanSDK" | Sort-Object -Property Name -Descending | Select-Object -First 1 -ExpandProperty FullName + Write-Host "Installing minimal Vulkan SDK components..." 
+ Start-Process -FilePath "$env:TEMP\vulkan-sdk.exe" -ArgumentList "--accept-licenses --default-answer --confirm-command install --components VulkanRT,VulkanSDK64,VulkanDXC,VulkanTools" -Wait -NoNewWindow + } + $vulkanPath = Get-ChildItem "C:\VulkanSDK" | Sort-Object -Property Name -Descending | Select-Object -First 1 -ExpandProperty FullName if (-not $vulkanPath) { $vulkanPath = "C:\VulkanSDK\latest" } - # Set environment variables with correct Windows-style paths echo "VULKAN_SDK=$vulkanPath" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append echo "$vulkanPath\Bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append echo "CMAKE_PREFIX_PATH=$vulkanPath" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append echo "Vulkan_INCLUDE_DIR=$vulkanPath\Include" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append echo "Vulkan_LIBRARY=$vulkanPath\Lib\vulkan-1.lib" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append - # Display debug information Write-Host "Vulkan SDK path: $vulkanPath" - if (Test-Path "$vulkanPath\Lib") { - Write-Host "Lib directory exists" - } else { - Write-Host "Lib directory does not exist" - } - if (Test-Path "$vulkanPath\Include") { - Write-Host "Include directory exists" - } else { - Write-Host "Include directory does not exist" - } deps-install: | - vcpkg install glfw3:x64-windows glm:x64-windows tinyobjloader:x64-windows stb:x64-windows + .\scripts\install_dependencies_windows.bat echo "CMAKE_TOOLCHAIN_FILE=$env:VCPKG_INSTALLATION_ROOT/scripts/buildsystems/vcpkg.cmake" >> $env:GITHUB_ENV test-cmd: | - # Check if some of the expected executables were built if (Test-Path "00_base_code/Release/00_base_code.exe") { echo "00_base_code built successfully" } else { @@ -132,7 +177,6 @@ jobs: steps: - uses: actions/checkout@v3 - # Cache vcpkg packages for Windows - name: Cache vcpkg packages (Windows) if: runner.os == 'Windows' uses: actions/cache@v3 @@ -141,22 +185,23 @@ jobs: ${{ env.VCPKG_INSTALLATION_ROOT 
}}/installed ${{ env.VCPKG_INSTALLATION_ROOT }}/packages ${{ env.VCPKG_INSTALLATION_ROOT }}/buildtrees - key: ${{ runner.os }}-vcpkg-${{ hashFiles('**/CMakeLists.txt') }}-${{ hashFiles('**/*.cpp') }} + ${{ env.VCPKG_INSTALLATION_ROOT }}/downloads + ${{ runner.temp }}/vcpkg-cache + key: ${{ runner.os }}-vcpkg-${{ hashFiles('scripts/install_dependencies_windows.bat', '**/CMakeLists.txt') }} restore-keys: | - ${{ runner.os }}-vcpkg-${{ hashFiles('**/CMakeLists.txt') }}- + ${{ runner.os }}-vcpkg-${{ hashFiles('scripts/install_dependencies_windows.bat') }}- ${{ runner.os }}-vcpkg- - # Cache Vulkan SDK for Windows - name: Cache Vulkan SDK (Windows) if: runner.os == 'Windows' uses: actions/cache@v3 with: path: C:\VulkanSDK - key: ${{ runner.os }}-vulkan-sdk-${{ hashFiles('**/CMakeLists.txt') }} + key: ${{ runner.os }}-vulkan-sdk-${{ hashFiles('**/CMakeLists.txt', '**/*.cpp', '**/*.h') }} restore-keys: | + ${{ runner.os }}-vulkan-sdk-${{ hashFiles('**/CMakeLists.txt') }}- ${{ runner.os }}-vulkan-sdk- - # Cache apt packages for Ubuntu - name: Cache apt packages (Ubuntu) if: runner.os == 'Linux' uses: actions/cache@v3 @@ -166,7 +211,16 @@ jobs: restore-keys: | ${{ runner.os }}-apt- - # Cache Vulkan SDK for Ubuntu + - name: Cache ccache files + uses: actions/cache@v3 + with: + path: | + ~/.ccache + ~/.cache/sccache + key: ${{ runner.os }}-${{ matrix.ccache }}-${{ github.sha }} + restore-keys: | + ${{ runner.os }}-${{ matrix.ccache }}- + - name: Cache Vulkan SDK (Ubuntu) if: runner.os == 'Linux' uses: actions/cache@v3 @@ -178,6 +232,48 @@ jobs: ${{ runner.os }}-vulkan-sdk-${{ hashFiles('**/CMakeLists.txt') }}- ${{ runner.os }}-vulkan-sdk- + - name: Install ccache (Ubuntu) + if: runner.os == 'Linux' + run: | + sudo apt-get update + sudo apt-get install -y ccache + ccache --max-size=2G + ccache -z + echo "CCACHE_DIR=$HOME/.ccache" >> $GITHUB_ENV + echo "PATH=/usr/lib/ccache:$PATH" >> $GITHUB_ENV + + - name: Cache sccache binary (Windows) + if: runner.os == 'Windows' + id: 
cache-sccache + uses: actions/cache@v3 + with: + path: ${{ runner.temp }}/sccache + key: ${{ runner.os }}-sccache-0.5.4 + + - name: Install sccache (Windows) + if: runner.os == 'Windows' + run: | + if (Test-Path "$env:RUNNER_TEMP\sccache\sccache.exe") { + Write-Host "Using cached sccache binary" + $sccachePath = "$env:RUNNER_TEMP\sccache" + } else { + Write-Host "Downloading and installing sccache..." + New-Item -ItemType Directory -Force -Path "$env:RUNNER_TEMP\sccache" + aria2c --split=8 --max-connection-per-server=8 --min-split-size=1M --dir="$env:RUNNER_TEMP" --out="sccache.tar.gz" "https://github.com/mozilla/sccache/releases/download/v0.5.4/sccache-v0.5.4-x86_64-pc-windows-msvc.tar.gz" + tar -xzf "$env:RUNNER_TEMP\sccache.tar.gz" --strip-components=1 -C "$env:RUNNER_TEMP\sccache" "sccache-v0.5.4-x86_64-pc-windows-msvc/sccache.exe" + $sccachePath = "$env:RUNNER_TEMP\sccache" + } + + echo "$sccachePath" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append + echo "SCCACHE_DIR=$HOME/.cache/sccache" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + echo "SCCACHE_CACHE_SIZE=4G" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + echo "SCCACHE_ERROR_LOG=$HOME/.cache/sccache/sccache.log" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + echo "SCCACHE_LOG=info" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + echo "RUST_LOG=sccache=info" | Out-File -FilePath $env:GITHUB_ENV -Encoding utf8 -Append + + New-Item -ItemType Directory -Force -Path "$HOME/.cache/sccache" + & "$sccachePath\sccache.exe" --version + - name: Install dependencies run: ${{ matrix.deps-install }} @@ -189,8 +285,6 @@ jobs: run: | if (Test-Path $env:VULKAN_SDK) { echo "Vulkan SDK found at: $env:VULKAN_SDK" - - # Check for critical directories and files $criticalPaths = @( "$env:VULKAN_SDK\Include", "$env:VULKAN_SDK\Lib", @@ -221,18 +315,19 @@ jobs: exit 1 } - # Cache CMake build directory for Windows - name: Cache build artifacts (Windows) if: 
runner.os == 'Windows' uses: actions/cache@v3 with: - path: ${{github.workspace}}/attachments/build - key: ${{ runner.os }}-build-${{ hashFiles('**/CMakeLists.txt') }}-${{ hashFiles('**/*.cpp') }}-${{ hashFiles('**/*.h') }} + path: | + ${{github.workspace}}/attachments/build + key: ${{ runner.os }}-build-msvc-${{ hashFiles('**/CMakeLists.txt', 'scripts/install_dependencies_windows.bat') }}-${{ hashFiles('**/*.cpp', '**/*.h', '**/*.hpp') }} restore-keys: | - ${{ runner.os }}-build-${{ hashFiles('**/CMakeLists.txt') }}- - ${{ runner.os }}-build- + ${{ runner.os }}-build-msvc-${{ hashFiles('**/CMakeLists.txt', 'scripts/install_dependencies_windows.bat') }}- + ${{ runner.os }}-build-msvc-${{ hashFiles('**/CMakeLists.txt') }}- + ${{ runner.os }}-build-msvc- - - name: Configure CMake (Windows) + - name: Configure CMake with MSVC (Windows) working-directory: ${{github.workspace}}/attachments if: runner.os == 'Windows' run: | @@ -240,27 +335,28 @@ jobs: -DVulkan_INCLUDE_DIR="$env:Vulkan_INCLUDE_DIR" ` -DVulkan_LIBRARY="$env:Vulkan_LIBRARY" ` -DCMAKE_PREFIX_PATH="$env:VULKAN_SDK" ` - -DCMAKE_TOOLCHAIN_FILE="$env:CMAKE_TOOLCHAIN_FILE" + -DCMAKE_TOOLCHAIN_FILE="$env:CMAKE_TOOLCHAIN_FILE" ` + -DCMAKE_C_COMPILER_LAUNCHER=sccache ` + -DCMAKE_CXX_COMPILER_LAUNCHER=sccache ` + -DCMAKE_CXX_FLAGS="/MP /EHsc /Zi /W3 /O2" ` + -DCMAKE_SHARED_LINKER_FLAGS="/DEBUG:FASTLINK" ` + -DCMAKE_EXE_LINKER_FLAGS="/DEBUG:FASTLINK" - # Display CMake cache to debug Vulkan detection if (Test-Path "build/CMakeCache.txt") { Write-Host "CMake cache contents:" Get-Content "build/CMakeCache.txt" | Select-String -Pattern "Vulkan" } - # Verify Vulkan Installation for Ubuntu - name: Verify Vulkan Installation (Ubuntu) if: runner.os == 'Linux' run: | if [ -d "$VULKAN_SDK" ]; then echo "Vulkan SDK found at: $VULKAN_SDK" - echo "Vulkan SDK installation verified" else echo "Vulkan SDK not found!" 
exit 1 fi - # Cache CMake build directory for Ubuntu - name: Cache build artifacts (Ubuntu) if: runner.os == 'Linux' uses: actions/cache@v3 @@ -275,18 +371,195 @@ jobs: working-directory: ${{github.workspace}}/attachments if: runner.os != 'Windows' run: | - # Use Clang for better C++20 module support - export CC=clang - export CXX=clang++ + export CC="ccache clang" + export CXX="ccache clang++" cmake -B build -G Ninja -DCMAKE_BUILD_TYPE=Release \ -DCMAKE_CXX_SCAN_FOR_MODULES=ON \ - -DCMAKE_CXX_FLAGS="-std=c++20" + -DCMAKE_CXX_FLAGS="-std=c++20" \ + -DCMAKE_C_COMPILER_LAUNCHER=ccache \ + -DCMAKE_CXX_COMPILER_LAUNCHER=ccache - name: Build working-directory: ${{github.workspace}}/attachments - run: cmake --build build --config Release + run: cmake --build build --config Release --parallel 4 + + - name: ccache statistics + if: runner.os == 'Linux' + run: ccache -s + + - name: sccache statistics + if: runner.os == 'Windows' + run: sccache -s - name: Test Build Output working-directory: ${{github.workspace}}/attachments/build run: ${{ matrix.test-cmd }} + + + android-build: + name: Android Build + runs-on: ubuntu-latest + + # We need to run a preliminary job to check for changes + needs: check-android-changes + if: needs.check-android-changes.outputs.should_build == 'true' + + steps: + - uses: actions/checkout@v3 + + - name: Cache ccache files + uses: actions/cache@v3 + with: + path: ~/.ccache + key: android-ccache-${{ hashFiles('attachments/android/app/src/main/cpp/CMakeLists.txt', 'attachments/34_android.cpp', 'attachments/35_gltf_ktx.cpp') }} + restore-keys: | + android-ccache-${{ hashFiles('attachments/android/app/src/main/cpp/CMakeLists.txt') }} + android-ccache- + + - name: Cache KTX and tinygltf + uses: actions/cache@v3 + with: + path: | + /usr/local/include/ktx + /usr/local/lib/libktx* + /usr/local/include/tinygltf + key: android-libs-${{ hashFiles('attachments/android/app/src/main/cpp/CMakeLists.txt') }} + restore-keys: | + android-libs- + + - name: Cache Gradle 
packages + uses: actions/cache@v3 + with: + path: | + ~/.gradle/caches + ~/.gradle/wrapper + ${{github.workspace}}/attachments/android/.gradle + ${{github.workspace}}/attachments/android/app/.cxx + ${{github.workspace}}/attachments/android/app/build/intermediates + ${{github.workspace}}/attachments/android/app/build/outputs + ${{github.workspace}}/attachments/android/app/build/generated + key: ${{ runner.os }}-gradle-${{ hashFiles('attachments/android/app/src/main/cpp/CMakeLists.txt', 'attachments/34_android.cpp', 'attachments/35_gltf_ktx.cpp', 'attachments/android/app/build.gradle') }} + restore-keys: | + ${{ runner.os }}-gradle-${{ hashFiles('attachments/android/app/src/main/cpp/CMakeLists.txt') }} + ${{ runner.os }}-gradle- + + - name: Install and configure ccache + run: | + sudo apt-get update + sudo apt-get install -y ccache + ccache --max-size=4G + ccache --set-config=compression=true + ccache --set-config=compression_level=9 + ccache --set-config=sloppiness=file_macro,time_macros,include_file_mtime,include_file_ctime + ccache --set-config=hash_dir=false + ccache -z + echo "CCACHE_DIR=$HOME/.ccache" >> $GITHUB_ENV + echo "PATH=/usr/lib/ccache:$PATH" >> $GITHUB_ENV + echo "CCACHE_COMPRESS=1" >> $GITHUB_ENV + echo "CCACHE_COMPRESSLEVEL=9" >> $GITHUB_ENV + echo "CCACHE_MAXSIZE=4G" >> $GITHUB_ENV + + - name: Set up Android SDK and NDK + run: | + echo "ANDROID_SDK_ROOT=$ANDROID_SDK_ROOT" >> $GITHUB_ENV + echo "ANDROID_NDK_HOME=$ANDROID_NDK_ROOT" >> $GITHUB_ENV + echo "Android SDK location: $ANDROID_SDK_ROOT" + echo "Android NDK location: $ANDROID_NDK_ROOT" + + yes | $ANDROID_SDK_ROOT/cmdline-tools/latest/bin/sdkmanager --install "cmake;4.0.2" + + - name: Install Vulkan SDK and glslangValidator + run: | + if command -v glslangValidator &> /dev/null; then + echo "glslangValidator already installed:" + glslangValidator --version + else + sudo apt-get update + sudo apt-get install -y glslang-tools + which glslangValidator + glslangValidator --version + fi + + echo 
"VULKAN_SDK=/usr" >> $GITHUB_ENV + echo "PATH=/usr/bin:$PATH" >> $GITHUB_ENV + + - name: Install KTX library + run: | + if [ -d "/usr/local/include/ktx" ] && [ -f "/usr/local/lib/libktx.so" ]; then + echo "KTX library already installed from cache" + else + echo "Installing KTX library..." + git clone --depth 1 --branch v4.1.0 https://github.com/KhronosGroup/KTX-Software.git + cd KTX-Software + mkdir build && cd build + cmake .. -DCMAKE_BUILD_TYPE=Release \ + -DKTX_FEATURE_TOOLS=OFF \ + -DKTX_FEATURE_DOC=OFF \ + -DKTX_FEATURE_LOADTEST_APPS=OFF \ + -DCMAKE_C_COMPILER_LAUNCHER=ccache \ + -DCMAKE_CXX_COMPILER_LAUNCHER=ccache + cmake --build . --config Release --parallel 4 + sudo cmake --install . + cd ../.. + fi + + - name: Install tinygltf + run: | + if [ -d "/usr/local/include/tinygltf" ]; then + echo "tinygltf library already installed from cache" + else + echo "Installing tinygltf..." + git clone --depth 1 https://github.com/syoyo/tinygltf.git + cd tinygltf + mkdir build && cd build + cmake .. -DCMAKE_BUILD_TYPE=Release \ + -DTINYGLTF_BUILD_LOADER_EXAMPLE=OFF \ + -DTINYGLTF_BUILD_GL_EXAMPLES=OFF \ + -DTINYGLTF_BUILD_VALIDATOR_EXAMPLE=OFF \ + -DCMAKE_C_COMPILER_LAUNCHER=ccache \ + -DCMAKE_CXX_COMPILER_LAUNCHER=ccache + cmake --build . --config Release --parallel 4 + sudo cmake --install . + cd ../.. + fi + + - name: Build Android Chapters + working-directory: ${{github.workspace}}/attachments/android + run: | + if [ ! -f "gradlew" ]; then + echo "Generating Gradle wrapper..." 
+ gradle wrapper + fi + + SUPPORTED_CHAPTERS=$(grep -A 20 "set(SUPPORTED_CHAPTERS" app/src/main/cpp/CMakeLists.txt | sed -n '/set(SUPPORTED_CHAPTERS/,/)/p' | grep -o '"[^"]*"' | sed 's/"//g') + + readarray -t CHAPTERS <<< "$SUPPORTED_CHAPTERS" + echo "Detected supported Android chapters: ${CHAPTERS[@]}" + + echo "org.gradle.jvmargs=-Xmx4g -XX:MaxMetaspaceSize=512m -XX:+HeapDumpOnOutOfMemoryError" > gradle.properties + echo "org.gradle.parallel=true" >> gradle.properties + echo "org.gradle.caching=true" >> gradle.properties + echo "org.gradle.configureondemand=true" >> gradle.properties + echo "android.useAndroidX=true" >> gradle.properties + echo "android.enableJetifier=false" >> gradle.properties + echo "kotlin.incremental=true" >> gradle.properties + mkdir -p build-outputs + + for ((i=0; i<${#CHAPTERS[@]}; i++)); do + chapter="${CHAPTERS[$i]}" + if [ -n "$chapter" ]; then + echo "Building $chapter chapter..." + ./gradlew assembleDebug --parallel --max-workers=4 --build-cache -Pchapter=$chapter -PabiFilters=x86_64 + + if [ -f "app/build/outputs/apk/debug/app-debug.apk" ]; then + echo "$chapter built successfully" + cp app/build/outputs/apk/debug/app-debug.apk build-outputs/${chapter}.apk + else + echo "$chapter build failed" + exit 1 + fi + fi + done + + ccache -s diff --git a/antora/modules/ROOT/nav.adoc b/antora/modules/ROOT/nav.adoc index 784fe7c9..08155af5 100644 --- a/antora/modules/ROOT/nav.adoc +++ b/antora/modules/ROOT/nav.adoc @@ -46,4 +46,10 @@ * xref:09_Generating_Mipmaps.adoc[Generating Mipmaps] * xref:10_Multisampling.adoc[Multisampling] * xref:11_Compute_Shader.adoc[Compute Shader] -* xref:90_FAQ.adoc[FAQ] \ No newline at end of file +* xref:12_Ecosystem_Utilities_and_Compatibility.adoc[Ecosystem Utilities and GPU Compatibility] +* xref:13_Vulkan_Profiles.adoc[Vulkan Profiles] +* xref:14_Android.adoc[Android] +* xref:15_GLTF_KTX2_Migration.adoc[Migrating to Modern Asset Formats: glTF and KTX2] +* xref:16_Multiple_Objects.adoc[Rendering Multiple 
Objects] +* xref:17_Multithreading.adoc[Multithreading] +* xref:90_FAQ.adoc[FAQ] diff --git a/attachments/32_ecosystem_utilities.cpp b/attachments/32_ecosystem_utilities.cpp new file mode 100644 index 00000000..afb10aa3 --- /dev/null +++ b/attachments/32_ecosystem_utilities.cpp @@ -0,0 +1,1718 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +import vulkan_hpp; +#include + +#define GLFW_INCLUDE_VULKAN // REQUIRED only for GLFW CreateWindowSurface. +#include + +#define GLM_FORCE_RADIANS +#define GLM_FORCE_DEPTH_ZERO_TO_ONE +#define GLM_ENABLE_EXPERIMENTAL +#include +#include +#include + +#define STB_IMAGE_IMPLEMENTATION +#include + +#define TINYOBJLOADER_IMPLEMENTATION +#include + +constexpr uint32_t WIDTH = 800; +constexpr uint32_t HEIGHT = 600; +constexpr uint64_t FenceTimeout = 100000000; +const std::string MODEL_PATH = "models/viking_room.obj"; +const std::string TEXTURE_PATH = "textures/viking_room.png"; +constexpr int MAX_FRAMES_IN_FLIGHT = 2; + +// Validation layers are now managed by vulkanconfig instead of being hard-coded +// See the Ecosystem Utilities chapter for details on using vulkanconfig + +// Application info structure to store feature support flags +struct AppInfo { + bool dynamicRenderingSupported = false; + bool timelineSemaphoresSupported = false; + bool synchronization2Supported = false; +}; + +struct Vertex { + glm::vec3 pos; + glm::vec3 color; + glm::vec2 texCoord; + + static vk::VertexInputBindingDescription getBindingDescription() { + return { 0, sizeof(Vertex), vk::VertexInputRate::eVertex }; + } + + static std::array getAttributeDescriptions() { + return { + vk::VertexInputAttributeDescription( 0, 0, vk::Format::eR32G32B32Sfloat, offsetof(Vertex, pos) ), + vk::VertexInputAttributeDescription( 1, 0, vk::Format::eR32G32B32Sfloat, offsetof(Vertex, color) ), + vk::VertexInputAttributeDescription( 2, 0, vk::Format::eR32G32Sfloat, offsetof(Vertex, texCoord) ) + }; + } 
+ + bool operator==(const Vertex& other) const { + return pos == other.pos && color == other.color && texCoord == other.texCoord; + } +}; + +template<> struct std::hash { + size_t operator()(Vertex const& vertex) const noexcept { + return ((hash()(vertex.pos) ^ (hash()(vertex.color) << 1)) >> 1) ^ (hash()(vertex.texCoord) << 1); + } + }; + +struct UniformBufferObject { + alignas(16) glm::mat4 model; + alignas(16) glm::mat4 view; + alignas(16) glm::mat4 proj; +}; + +class HelloTriangleApplication { +public: + void run() { + initWindow(); + initVulkan(); + mainLoop(); + cleanup(); + } + +private: + GLFWwindow* window = nullptr; + AppInfo appInfo; + + vk::raii::Context context; + vk::raii::Instance instance = nullptr; + vk::raii::DebugUtilsMessengerEXT debugMessenger = nullptr; + vk::raii::SurfaceKHR surface = nullptr; + + vk::raii::PhysicalDevice physicalDevice = nullptr; + vk::SampleCountFlagBits msaaSamples = vk::SampleCountFlagBits::e1; + vk::raii::Device device = nullptr; + + vk::raii::Queue graphicsQueue = nullptr; + vk::raii::Queue presentQueue = nullptr; + + vk::raii::SwapchainKHR swapChain = nullptr; + std::vector swapChainImages; + vk::Format swapChainImageFormat = vk::Format::eUndefined; + vk::Extent2D swapChainExtent; + std::vector swapChainImageViews; + + // Traditional render pass (fallback for non-dynamic rendering) + vk::raii::RenderPass renderPass = nullptr; + std::vector swapChainFramebuffers; + + vk::raii::DescriptorSetLayout descriptorSetLayout = nullptr; + vk::raii::PipelineLayout pipelineLayout = nullptr; + vk::raii::Pipeline graphicsPipeline = nullptr; + + vk::raii::Image colorImage = nullptr; + vk::raii::DeviceMemory colorImageMemory = nullptr; + vk::raii::ImageView colorImageView = nullptr; + + vk::raii::Image depthImage = nullptr; + vk::raii::DeviceMemory depthImageMemory = nullptr; + vk::raii::ImageView depthImageView = nullptr; + + uint32_t mipLevels = 0; + vk::raii::Image textureImage = nullptr; + vk::raii::DeviceMemory textureImageMemory 
= nullptr; + vk::raii::ImageView textureImageView = nullptr; + vk::raii::Sampler textureSampler = nullptr; + + std::vector vertices; + std::vector indices; + vk::raii::Buffer vertexBuffer = nullptr; + vk::raii::DeviceMemory vertexBufferMemory = nullptr; + vk::raii::Buffer indexBuffer = nullptr; + vk::raii::DeviceMemory indexBufferMemory = nullptr; + + std::vector uniformBuffers; + std::vector uniformBuffersMemory; + std::vector uniformBuffersMapped; + + vk::raii::DescriptorPool descriptorPool = nullptr; + std::vector descriptorSets; + + vk::raii::CommandPool commandPool = nullptr; + std::vector commandBuffers; + uint32_t graphicsIndex = 0; + + // Synchronization objects + std::vector presentCompleteSemaphore; + std::vector renderFinishedSemaphore; + std::vector inFlightFences; + vk::raii::Semaphore timelineSemaphore = nullptr; + uint64_t timelineValue = 0; + uint32_t currentFrame = 0; + + bool framebufferResized = false; + + std::vector requiredDeviceExtension = { + vk::KHRSwapchainExtensionName + }; + + void initWindow() { + glfwInit(); + + glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API); + + window = glfwCreateWindow(WIDTH, HEIGHT, "Vulkan Compatibility Example", nullptr, nullptr); + glfwSetWindowUserPointer(window, this); + glfwSetFramebufferSizeCallback(window, framebufferResizeCallback); + } + + static void framebufferResizeCallback(GLFWwindow* window, int width, int height) { + auto app = static_cast(glfwGetWindowUserPointer(window)); + app->framebufferResized = true; + } + + void initVulkan() { + createInstance(); + setupDebugMessenger(); + createSurface(); + pickPhysicalDevice(); + detectFeatureSupport(); + createLogicalDevice(); + createSwapChain(); + createImageViews(); + + // Create traditional render pass if dynamic rendering is not supported + if (!appInfo.dynamicRenderingSupported) { + createRenderPass(); + createFramebuffers(); + } + + createDescriptorSetLayout(); + createGraphicsPipeline(); + createCommandPool(); + createColorResources(); + 
createDepthResources(); + createTextureImage(); + createTextureImageView(); + createTextureSampler(); + loadModel(); + createVertexBuffer(); + createIndexBuffer(); + createUniformBuffers(); + createDescriptorPool(); + createDescriptorSets(); + createCommandBuffers(); + createSyncObjects(); + + // Print feature support summary + std::cout << "\nFeature support summary:\n"; + std::cout << "- Dynamic Rendering: " << (appInfo.dynamicRenderingSupported ? "Yes" : "No") << "\n"; + std::cout << "- Timeline Semaphores: " << (appInfo.timelineSemaphoresSupported ? "Yes" : "No") << "\n"; + std::cout << "- Synchronization2: " << (appInfo.synchronization2Supported ? "Yes" : "No") << "\n"; + } + + void mainLoop() { + while (!glfwWindowShouldClose(window)) { + glfwPollEvents(); + drawFrame(); + } + + device.waitIdle(); + } + + void cleanupSwapChain() { + swapChainFramebuffers.clear(); + swapChainImageViews.clear(); + } + + void cleanup() const { + glfwDestroyWindow(window); + glfwTerminate(); + } + + void recreateSwapChain() { + int width = 0, height = 0; + glfwGetFramebufferSize(window, &width, &height); + while (width == 0 || height == 0) { + glfwGetFramebufferSize(window, &width, &height); + glfwWaitEvents(); + } + + device.waitIdle(); + + cleanupSwapChain(); + createSwapChain(); + createImageViews(); + + // Recreate traditional render pass and framebuffers if dynamic rendering is not supported + if (!appInfo.dynamicRenderingSupported) { + createRenderPass(); + createFramebuffers(); + } + + createColorResources(); + createDepthResources(); + } + + void createInstance() { + // Validation layers are now managed by vulkanconfig instead of being hard-coded + + constexpr vk::ApplicationInfo appInfo{ + .pApplicationName = "Hello Triangle", + .applicationVersion = VK_MAKE_VERSION( 1, 0, 0 ), + .pEngineName = "No Engine", + .engineVersion = VK_MAKE_VERSION( 1, 0, 0 ), + .apiVersion = vk::ApiVersion14 + }; + + auto extensions = getRequiredExtensions(); + + vk::InstanceCreateInfo 
createInfo{ + .pApplicationInfo = &appInfo, + .enabledExtensionCount = static_cast(extensions.size()), + .ppEnabledExtensionNames = extensions.data() + }; + + instance = vk::raii::Instance(context, createInfo); + } + + void setupDebugMessenger() { + // Always set up the debug messenger + // It will only be used if validation layers are enabled via vulkanconfig + + vk::DebugUtilsMessageSeverityFlagsEXT severityFlags( + vk::DebugUtilsMessageSeverityFlagBitsEXT::eVerbose | + vk::DebugUtilsMessageSeverityFlagBitsEXT::eWarning | + vk::DebugUtilsMessageSeverityFlagBitsEXT::eError + ); + + vk::DebugUtilsMessageTypeFlagsEXT messageTypeFlags( + vk::DebugUtilsMessageTypeFlagBitsEXT::eGeneral | + vk::DebugUtilsMessageTypeFlagBitsEXT::ePerformance | + vk::DebugUtilsMessageTypeFlagBitsEXT::eValidation + ); + + vk::DebugUtilsMessengerCreateInfoEXT debugUtilsMessengerCreateInfoEXT{ + .messageSeverity = severityFlags, + .messageType = messageTypeFlags, + .pfnUserCallback = &debugCallback + }; + + try { + debugMessenger = instance.createDebugUtilsMessengerEXT(debugUtilsMessengerCreateInfoEXT); + } catch (vk::SystemError& err) { + // If the debug utils extension is not available, this will fail + // That's okay; it just means validation layers aren't enabled + std::cout << "Debug messenger not available. Validation layers may not be enabled." 
<< std::endl; + } + } + + void createSurface() { + VkSurfaceKHR _surface; + if (glfwCreateWindowSurface(*instance, window, nullptr, &_surface) != 0) { + throw std::runtime_error("failed to create window surface!"); + } + surface = vk::raii::SurfaceKHR(instance, _surface); + } + + void pickPhysicalDevice() { + std::vector devices = instance.enumeratePhysicalDevices(); + const auto devIter = std::ranges::find_if( + devices, + [&]( auto const & device ) + { + // Check if any of the queue families support graphics operations + auto queueFamilies = device.getQueueFamilyProperties(); + bool supportsGraphics = + std::ranges::any_of( queueFamilies, []( auto const & qfp ) { return !!( qfp.queueFlags & vk::QueueFlagBits::eGraphics ); } ); + + // Check if all required device extensions are available + auto availableDeviceExtensions = device.enumerateDeviceExtensionProperties(); + bool supportsAllRequiredExtensions = + std::ranges::all_of( requiredDeviceExtension, + [&availableDeviceExtensions]( auto const & requiredDeviceExtension ) + { + return std::ranges::any_of( availableDeviceExtensions, + [requiredDeviceExtension]( auto const & availableDeviceExtension ) + { return strcmp( availableDeviceExtension.extensionName, requiredDeviceExtension ) == 0; } ); + } ); + + return supportsGraphics && supportsAllRequiredExtensions; + }); + if ( devIter != devices.end() ) + { + physicalDevice = *devIter; + msaaSamples = getMaxUsableSampleCount(); + } + else + { + throw std::runtime_error("failed to find a suitable GPU!"); + } + } + + void detectFeatureSupport() { + // Get device properties to check Vulkan version + vk::PhysicalDeviceProperties deviceProperties = physicalDevice.getProperties(); + + // Get available extensions + std::vector availableExtensions = physicalDevice.enumerateDeviceExtensionProperties(); + + // Check for dynamic rendering support + if (deviceProperties.apiVersion >= VK_VERSION_1_3) { + appInfo.dynamicRenderingSupported = true; + std::cout << "Dynamic rendering 
supported via Vulkan 1.3\n"; + } else { + // Check for the extension on older Vulkan versions + for (const auto& extension : availableExtensions) { + if (strcmp(extension.extensionName, VK_KHR_DYNAMIC_RENDERING_EXTENSION_NAME) == 0) { + appInfo.dynamicRenderingSupported = true; + std::cout << "Dynamic rendering supported via extension\n"; + break; + } + } + } + + // Check for timeline semaphores support + if (deviceProperties.apiVersion >= VK_VERSION_1_2) { + appInfo.timelineSemaphoresSupported = true; + std::cout << "Timeline semaphores supported via Vulkan 1.2\n"; + } else { + // Check for the extension on older Vulkan versions + for (const auto& extension : availableExtensions) { + if (strcmp(extension.extensionName, VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME) == 0) { + appInfo.timelineSemaphoresSupported = true; + std::cout << "Timeline semaphores supported via extension\n"; + break; + } + } + } + + // Check for synchronization2 support + if (deviceProperties.apiVersion >= VK_VERSION_1_3) { + appInfo.synchronization2Supported = true; + std::cout << "Synchronization2 supported via Vulkan 1.3\n"; + } else { + // Check for the extension on older Vulkan versions + for (const auto& extension : availableExtensions) { + if (strcmp(extension.extensionName, VK_KHR_SYNCHRONIZATION_2_EXTENSION_NAME) == 0) { + appInfo.synchronization2Supported = true; + std::cout << "Synchronization2 supported via extension\n"; + break; + } + } + } + + // Add required extensions based on feature support + if (appInfo.dynamicRenderingSupported && deviceProperties.apiVersion < VK_VERSION_1_3) { + requiredDeviceExtension.push_back(VK_KHR_DYNAMIC_RENDERING_EXTENSION_NAME); + } + + if (appInfo.timelineSemaphoresSupported && deviceProperties.apiVersion < VK_VERSION_1_2) { + requiredDeviceExtension.push_back(VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME); + } + + if (appInfo.synchronization2Supported && deviceProperties.apiVersion < VK_VERSION_1_3) { + 
requiredDeviceExtension.push_back(VK_KHR_SYNCHRONIZATION_2_EXTENSION_NAME); + } + } + + void createLogicalDevice() { + // find the index of the first queue family that supports graphics + std::vector queueFamilyProperties = physicalDevice.getQueueFamilyProperties(); + + // get the first index into queueFamilyProperties which supports graphics + auto graphicsQueueFamilyProperty = std::ranges::find_if( queueFamilyProperties, []( auto const & qfp ) + { return (qfp.queueFlags & vk::QueueFlagBits::eGraphics) != static_cast(0); } ); + + graphicsIndex = static_cast( std::distance( queueFamilyProperties.begin(), graphicsQueueFamilyProperty ) ); + + // determine a queueFamilyIndex that supports present + // first check if the graphicsIndex is good enough + auto presentIndex = physicalDevice.getSurfaceSupportKHR( graphicsIndex, *surface ) + ? graphicsIndex + : ~0; + if ( presentIndex == queueFamilyProperties.size() ) + { + // the graphicsIndex doesn't support present -> look for another family index that supports both + // graphics and present + for ( size_t i = 0; i < queueFamilyProperties.size(); i++ ) + { + if ( ( queueFamilyProperties[i].queueFlags & vk::QueueFlagBits::eGraphics ) && + physicalDevice.getSurfaceSupportKHR( static_cast( i ), *surface ) ) + { + graphicsIndex = static_cast( i ); + presentIndex = graphicsIndex; + break; + } + } + if ( presentIndex == queueFamilyProperties.size() ) + { + // there's nothing like a single family index that supports both graphics and present -> look for another + // family index that supports present + for ( size_t i = 0; i < queueFamilyProperties.size(); i++ ) + { + if ( physicalDevice.getSurfaceSupportKHR( static_cast( i ), *surface ) ) + { + presentIndex = static_cast( i ); + break; + } + } + } + } + if ( ( graphicsIndex == queueFamilyProperties.size() ) || ( presentIndex == queueFamilyProperties.size() ) ) + { + throw std::runtime_error( "Could not find a queue for graphics or present -> terminating" ); + } + + // Create 
device with appropriate features + auto features = physicalDevice.getFeatures2(); + + // Setup feature chain based on detected support + void* pNext = nullptr; + + // Add dynamic rendering if supported + vk::PhysicalDeviceVulkan13Features vulkan13Features; + vk::PhysicalDeviceDynamicRenderingFeatures dynamicRenderingFeatures; + + if (appInfo.dynamicRenderingSupported) { + if (appInfo.synchronization2Supported) { + vulkan13Features.dynamicRendering = vk::True; + vulkan13Features.synchronization2 = vk::True; + vulkan13Features.pNext = pNext; + pNext = &vulkan13Features; + } else { + dynamicRenderingFeatures.dynamicRendering = vk::True; + dynamicRenderingFeatures.pNext = pNext; + pNext = &dynamicRenderingFeatures; + } + } + + // Add timeline semaphores if supported + vk::PhysicalDeviceTimelineSemaphoreFeatures timelineSemaphoreFeatures; + if (appInfo.timelineSemaphoresSupported) { + timelineSemaphoreFeatures.timelineSemaphore = vk::True; + timelineSemaphoreFeatures.pNext = pNext; + pNext = &timelineSemaphoreFeatures; + } + + features.pNext = pNext; + + // create a Device + float queuePriority = 0.0f; + vk::DeviceQueueCreateInfo deviceQueueCreateInfo{ .queueFamilyIndex = graphicsIndex, .queueCount = 1, .pQueuePriorities = &queuePriority }; + vk::DeviceCreateInfo deviceCreateInfo{ + .pNext = &features, + .queueCreateInfoCount = 1, + .pQueueCreateInfos = &deviceQueueCreateInfo, + .enabledExtensionCount = static_cast(requiredDeviceExtension.size()), + .ppEnabledExtensionNames = requiredDeviceExtension.data() + }; + + device = vk::raii::Device( physicalDevice, deviceCreateInfo ); + graphicsQueue = vk::raii::Queue( device, graphicsIndex, 0 ); + presentQueue = vk::raii::Queue( device, presentIndex, 0 ); + } + + void createSwapChain() { + auto surfaceCapabilities = physicalDevice.getSurfaceCapabilitiesKHR(surface); + swapChainImageFormat = chooseSwapSurfaceFormat(physicalDevice.getSurfaceFormatsKHR( surface )); + swapChainExtent = chooseSwapExtent(surfaceCapabilities); + auto 
minImageCount = std::max( 3u, surfaceCapabilities.minImageCount ); + minImageCount = ( surfaceCapabilities.maxImageCount > 0 && minImageCount > surfaceCapabilities.maxImageCount ) ? surfaceCapabilities.maxImageCount : minImageCount; + vk::SwapchainCreateInfoKHR swapChainCreateInfo{ + .surface = surface, .minImageCount = minImageCount, + .imageFormat = swapChainImageFormat, .imageColorSpace = vk::ColorSpaceKHR::eSrgbNonlinear, + .imageExtent = swapChainExtent, .imageArrayLayers =1, + .imageUsage = vk::ImageUsageFlagBits::eColorAttachment, .imageSharingMode = vk::SharingMode::eExclusive, + .preTransform = surfaceCapabilities.currentTransform, .compositeAlpha = vk::CompositeAlphaFlagBitsKHR::eOpaque, + .presentMode = chooseSwapPresentMode(physicalDevice.getSurfacePresentModesKHR(surface)), + .clipped = true }; + + swapChain = vk::raii::SwapchainKHR(device, swapChainCreateInfo); + swapChainImages = swapChain.getImages(); + } + + void createImageViews() { + vk::ImageViewCreateInfo imageViewCreateInfo{ + .viewType = vk::ImageViewType::e2D, + .format = swapChainImageFormat, + .subresourceRange = { vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 } + }; + for ( auto image : swapChainImages ) + { + imageViewCreateInfo.image = image; + swapChainImageViews.emplace_back( device, imageViewCreateInfo ); + } + } + + void createRenderPass() { + if (appInfo.dynamicRenderingSupported) { + // No render pass needed with dynamic rendering + std::cout << "Using dynamic rendering, skipping render pass creation\n"; + return; + } + + std::cout << "Creating traditional render pass\n"; + + // Color attachment description + vk::AttachmentDescription colorAttachment{ + .format = swapChainImageFormat, + .samples = msaaSamples, + .loadOp = vk::AttachmentLoadOp::eClear, + .storeOp = vk::AttachmentStoreOp::eStore, + .stencilLoadOp = vk::AttachmentLoadOp::eDontCare, + .stencilStoreOp = vk::AttachmentStoreOp::eDontCare, + .initialLayout = vk::ImageLayout::eUndefined, + .finalLayout = 
vk::ImageLayout::eColorAttachmentOptimal + }; + + vk::AttachmentDescription depthAttachment{ + .format = findDepthFormat(), + .samples = msaaSamples, + .loadOp = vk::AttachmentLoadOp::eClear, + .storeOp = vk::AttachmentStoreOp::eDontCare, + .stencilLoadOp = vk::AttachmentLoadOp::eDontCare, + .stencilStoreOp = vk::AttachmentStoreOp::eDontCare, + .initialLayout = vk::ImageLayout::eUndefined, + .finalLayout = vk::ImageLayout::eDepthStencilAttachmentOptimal + }; + + vk::AttachmentDescription colorAttachmentResolve{ + .format = swapChainImageFormat, + .samples = vk::SampleCountFlagBits::e1, + .loadOp = vk::AttachmentLoadOp::eDontCare, + .storeOp = vk::AttachmentStoreOp::eStore, + .stencilLoadOp = vk::AttachmentLoadOp::eDontCare, + .stencilStoreOp = vk::AttachmentStoreOp::eDontCare, + .initialLayout = vk::ImageLayout::eUndefined, + .finalLayout = vk::ImageLayout::ePresentSrcKHR + }; + + // Subpass references + vk::AttachmentReference colorAttachmentRef{ + .attachment = 0, + .layout = vk::ImageLayout::eColorAttachmentOptimal + }; + + vk::AttachmentReference depthAttachmentRef{ + .attachment = 1, + .layout = vk::ImageLayout::eDepthStencilAttachmentOptimal + }; + + vk::AttachmentReference colorAttachmentResolveRef{ + .attachment = 2, + .layout = vk::ImageLayout::eColorAttachmentOptimal + }; + + // Subpass description + vk::SubpassDescription subpass{ + .pipelineBindPoint = vk::PipelineBindPoint::eGraphics, + .colorAttachmentCount = 1, + .pColorAttachments = &colorAttachmentRef, + .pResolveAttachments = &colorAttachmentResolveRef, + .pDepthStencilAttachment = &depthAttachmentRef + }; + + // Dependency to ensure proper image layout transitions + vk::SubpassDependency dependency{ + .srcSubpass = VK_SUBPASS_EXTERNAL, + .dstSubpass = 0, + .srcStageMask = vk::PipelineStageFlagBits::eColorAttachmentOutput | vk::PipelineStageFlagBits::eEarlyFragmentTests, + .dstStageMask = vk::PipelineStageFlagBits::eColorAttachmentOutput | vk::PipelineStageFlagBits::eEarlyFragmentTests, + 
.srcAccessMask = vk::AccessFlagBits::eNone, + .dstAccessMask = vk::AccessFlagBits::eColorAttachmentWrite | vk::AccessFlagBits::eDepthStencilAttachmentWrite + }; + + // Create the render pass + std::array attachments = { colorAttachment, depthAttachment, colorAttachmentResolve }; + vk::RenderPassCreateInfo renderPassInfo{ + .attachmentCount = static_cast(attachments.size()), + .pAttachments = attachments.data(), + .subpassCount = 1, + .pSubpasses = &subpass, + .dependencyCount = 1, + .pDependencies = &dependency + }; + + renderPass = vk::raii::RenderPass(device, renderPassInfo); + } + + void createFramebuffers() { + if (appInfo.dynamicRenderingSupported) { + // No framebuffers needed with dynamic rendering + std::cout << "Using dynamic rendering, skipping framebuffer creation\n"; + return; + } + + std::cout << "Creating traditional framebuffers\n"; + + swapChainFramebuffers.clear(); + + for (size_t i = 0; i < swapChainImageViews.size(); i++) { + std::array attachments = { + *colorImageView, + *depthImageView, + *swapChainImageViews[i] + }; + + vk::FramebufferCreateInfo framebufferInfo{ + .renderPass = *renderPass, + .attachmentCount = static_cast(attachments.size()), + .pAttachments = attachments.data(), + .width = swapChainExtent.width, + .height = swapChainExtent.height, + .layers = 1 + }; + + swapChainFramebuffers.emplace_back(device, framebufferInfo); + } + } + + void createDescriptorSetLayout() { + std::array bindings = { + vk::DescriptorSetLayoutBinding( 0, vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex, nullptr), + vk::DescriptorSetLayoutBinding( 1, vk::DescriptorType::eCombinedImageSampler, 1, vk::ShaderStageFlagBits::eFragment, nullptr) + }; + + vk::DescriptorSetLayoutCreateInfo layoutInfo{ .bindingCount = static_cast(bindings.size()), .pBindings = bindings.data() }; + descriptorSetLayout = vk::raii::DescriptorSetLayout(device, layoutInfo); + } + + void createGraphicsPipeline() { + vk::raii::ShaderModule shaderModule = 
createShaderModule(readFile("shaders/slang.spv")); + + vk::PipelineShaderStageCreateInfo vertShaderStageInfo{ .stage = vk::ShaderStageFlagBits::eVertex, .module = shaderModule, .pName = "vertMain" }; + vk::PipelineShaderStageCreateInfo fragShaderStageInfo{ .stage = vk::ShaderStageFlagBits::eFragment, .module = shaderModule, .pName = "fragMain" }; + vk::PipelineShaderStageCreateInfo shaderStages[] = {vertShaderStageInfo, fragShaderStageInfo}; + + auto bindingDescription = Vertex::getBindingDescription(); + auto attributeDescriptions = Vertex::getAttributeDescriptions(); + vk::PipelineVertexInputStateCreateInfo vertexInputInfo{ + .vertexBindingDescriptionCount = 1, + .pVertexBindingDescriptions = &bindingDescription, + .vertexAttributeDescriptionCount = static_cast(attributeDescriptions.size()), + .pVertexAttributeDescriptions = attributeDescriptions.data() + }; + vk::PipelineInputAssemblyStateCreateInfo inputAssembly{ + .topology = vk::PrimitiveTopology::eTriangleList, + .primitiveRestartEnable = vk::False + }; + vk::PipelineViewportStateCreateInfo viewportState{ + .viewportCount = 1, + .scissorCount = 1 + }; + vk::PipelineRasterizationStateCreateInfo rasterizer{ + .depthClampEnable = vk::False, + .rasterizerDiscardEnable = vk::False, + .polygonMode = vk::PolygonMode::eFill, + .cullMode = vk::CullModeFlagBits::eBack, + .frontFace = vk::FrontFace::eCounterClockwise, + .depthBiasEnable = vk::False + }; + rasterizer.lineWidth = 1.0f; + vk::PipelineMultisampleStateCreateInfo multisampling{ + .rasterizationSamples = msaaSamples, + .sampleShadingEnable = vk::False + }; + vk::PipelineDepthStencilStateCreateInfo depthStencil{ + .depthTestEnable = vk::True, + .depthWriteEnable = vk::True, + .depthCompareOp = vk::CompareOp::eLess, + .depthBoundsTestEnable = vk::False, + .stencilTestEnable = vk::False + }; + vk::PipelineColorBlendAttachmentState colorBlendAttachment; + colorBlendAttachment.colorWriteMask = vk::ColorComponentFlagBits::eR | vk::ColorComponentFlagBits::eG | 
vk::ColorComponentFlagBits::eB | vk::ColorComponentFlagBits::eA; + colorBlendAttachment.blendEnable = vk::False; + + vk::PipelineColorBlendStateCreateInfo colorBlending{ + .logicOpEnable = vk::False, + .logicOp = vk::LogicOp::eCopy, + .attachmentCount = 1, + .pAttachments = &colorBlendAttachment + }; + + std::vector dynamicStates = { + vk::DynamicState::eViewport, + vk::DynamicState::eScissor + }; + vk::PipelineDynamicStateCreateInfo dynamicState{ .dynamicStateCount = static_cast(dynamicStates.size()), .pDynamicStates = dynamicStates.data() }; + + vk::PipelineLayoutCreateInfo pipelineLayoutInfo{ .setLayoutCount = 1, .pSetLayouts = &*descriptorSetLayout, .pushConstantRangeCount = 0 }; + + pipelineLayout = vk::raii::PipelineLayout(device, pipelineLayoutInfo); + + vk::GraphicsPipelineCreateInfo pipelineInfo{ + .stageCount = 2, + .pStages = shaderStages, + .pVertexInputState = &vertexInputInfo, + .pInputAssemblyState = &inputAssembly, + .pViewportState = &viewportState, + .pRasterizationState = &rasterizer, + .pMultisampleState = &multisampling, + .pDepthStencilState = &depthStencil, + .pColorBlendState = &colorBlending, + .pDynamicState = &dynamicState, + .layout = pipelineLayout + }; + + // Configure pipeline based on dynamic rendering support + vk::PipelineRenderingCreateInfo pipelineRenderingCreateInfo; + if (appInfo.dynamicRenderingSupported) { + std::cout << "Configuring pipeline for dynamic rendering\n"; + pipelineRenderingCreateInfo.colorAttachmentCount = 1; + pipelineRenderingCreateInfo.pColorAttachmentFormats = &swapChainImageFormat; + pipelineRenderingCreateInfo.depthAttachmentFormat = findDepthFormat(); + + pipelineInfo.pNext = &pipelineRenderingCreateInfo; + pipelineInfo.renderPass = nullptr; + } else { + std::cout << "Configuring pipeline for traditional render pass\n"; + pipelineInfo.pNext = nullptr; + pipelineInfo.renderPass = *renderPass; + pipelineInfo.subpass = 0; + } + + graphicsPipeline = vk::raii::Pipeline(device, nullptr, pipelineInfo); + } + + 
void createCommandPool() { + vk::CommandPoolCreateInfo poolInfo{ + .flags = vk::CommandPoolCreateFlagBits::eResetCommandBuffer, + .queueFamilyIndex = graphicsIndex + }; + commandPool = vk::raii::CommandPool(device, poolInfo); + } + + void createColorResources() { + vk::Format colorFormat = swapChainImageFormat; + + createImage(swapChainExtent.width, swapChainExtent.height, 1, msaaSamples, colorFormat, vk::ImageTiling::eOptimal, vk::ImageUsageFlagBits::eTransientAttachment | vk::ImageUsageFlagBits::eColorAttachment, vk::MemoryPropertyFlagBits::eDeviceLocal, colorImage, colorImageMemory); + colorImageView = createImageView(colorImage, colorFormat, vk::ImageAspectFlagBits::eColor, 1); + } + + void createDepthResources() { + vk::Format depthFormat = findDepthFormat(); + + createImage(swapChainExtent.width, swapChainExtent.height, 1, msaaSamples, depthFormat, vk::ImageTiling::eOptimal, vk::ImageUsageFlagBits::eDepthStencilAttachment, vk::MemoryPropertyFlagBits::eDeviceLocal, depthImage, depthImageMemory); + depthImageView = createImageView(depthImage, depthFormat, vk::ImageAspectFlagBits::eDepth, 1); + } + + vk::Format findSupportedFormat(const std::vector& candidates, vk::ImageTiling tiling, vk::FormatFeatureFlags features) const { + for (const auto format : candidates) { + vk::FormatProperties props = physicalDevice.getFormatProperties(format); + + if (tiling == vk::ImageTiling::eLinear && (props.linearTilingFeatures & features) == features) { + return format; + } + if (tiling == vk::ImageTiling::eOptimal && (props.optimalTilingFeatures & features) == features) { + return format; + } + } + + throw std::runtime_error("failed to find supported format!"); + } + + [[nodiscard]] vk::Format findDepthFormat() const { + return findSupportedFormat( + {vk::Format::eD32Sfloat, vk::Format::eD32SfloatS8Uint, vk::Format::eD24UnormS8Uint}, + vk::ImageTiling::eOptimal, + vk::FormatFeatureFlagBits::eDepthStencilAttachment + ); + } + + static bool hasStencilComponent(vk::Format format) 
{ + return format == vk::Format::eD32SfloatS8Uint || format == vk::Format::eD24UnormS8Uint; + } + + void createTextureImage() { + int texWidth, texHeight, texChannels; + stbi_uc* pixels = stbi_load(TEXTURE_PATH.c_str(), &texWidth, &texHeight, &texChannels, STBI_rgb_alpha); + vk::DeviceSize imageSize = texWidth * texHeight * 4; + mipLevels = static_cast(std::floor(std::log2(std::max(texWidth, texHeight)))) + 1; + + if (!pixels) { + throw std::runtime_error("failed to load texture image!"); + } + + vk::raii::Buffer stagingBuffer({}); + vk::raii::DeviceMemory stagingBufferMemory({}); + createBuffer(imageSize, vk::BufferUsageFlagBits::eTransferSrc, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent, stagingBuffer, stagingBufferMemory); + + void* data = stagingBufferMemory.mapMemory(0, imageSize); + memcpy(data, pixels, imageSize); + stagingBufferMemory.unmapMemory(); + + stbi_image_free(pixels); + + createImage(texWidth, texHeight, mipLevels, vk::SampleCountFlagBits::e1, vk::Format::eR8G8B8A8Srgb, vk::ImageTiling::eOptimal, vk::ImageUsageFlagBits::eTransferSrc | vk::ImageUsageFlagBits::eTransferDst | vk::ImageUsageFlagBits::eSampled, vk::MemoryPropertyFlagBits::eDeviceLocal, textureImage, textureImageMemory); + + transitionImageLayout(textureImage, vk::ImageLayout::eUndefined, vk::ImageLayout::eTransferDstOptimal, mipLevels); + copyBufferToImage(stagingBuffer, textureImage, static_cast(texWidth), static_cast(texHeight)); + + generateMipmaps(textureImage, vk::Format::eR8G8B8A8Srgb, texWidth, texHeight, mipLevels); + } + + void generateMipmaps(vk::raii::Image& image, vk::Format imageFormat, int32_t texWidth, int32_t texHeight, uint32_t mipLevels) { + // Check if image format supports linear blit-ing + vk::FormatProperties formatProperties = physicalDevice.getFormatProperties(imageFormat); + + if (!(formatProperties.optimalTilingFeatures & vk::FormatFeatureFlagBits::eSampledImageFilterLinear)) { + throw std::runtime_error("texture image 
format does not support linear blitting!"); + } + + std::unique_ptr commandBuffer = beginSingleTimeCommands(); + + vk::ImageMemoryBarrier barrier = { .srcAccessMask = vk::AccessFlagBits::eTransferWrite, .dstAccessMask =vk::AccessFlagBits::eTransferRead + , .oldLayout = vk::ImageLayout::eTransferDstOptimal, .newLayout = vk::ImageLayout::eTransferSrcOptimal + , .srcQueueFamilyIndex = vk::QueueFamilyIgnored, .dstQueueFamilyIndex = vk::QueueFamilyIgnored, .image = image }; + barrier.subresourceRange.aspectMask = vk::ImageAspectFlagBits::eColor; + barrier.subresourceRange.baseArrayLayer = 0; + barrier.subresourceRange.layerCount = 1; + barrier.subresourceRange.levelCount = 1; + + int32_t mipWidth = texWidth; + int32_t mipHeight = texHeight; + + for (uint32_t i = 1; i < mipLevels; i++) { + barrier.subresourceRange.baseMipLevel = i - 1; + barrier.oldLayout = vk::ImageLayout::eTransferDstOptimal; + barrier.newLayout = vk::ImageLayout::eTransferSrcOptimal; + barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite; + barrier.dstAccessMask = vk::AccessFlagBits::eTransferRead; + + commandBuffer->pipelineBarrier(vk::PipelineStageFlagBits::eTransfer, vk::PipelineStageFlagBits::eTransfer, {}, {}, {}, barrier); + + vk::ArrayWrapper1D offsets, dstOffsets; + offsets[0] = vk::Offset3D(0, 0, 0); + offsets[1] = vk::Offset3D(mipWidth, mipHeight, 1); + dstOffsets[0] = vk::Offset3D(0, 0, 0); + dstOffsets[1] = vk::Offset3D(mipWidth > 1 ? mipWidth / 2 : 1, mipHeight > 1 ? 
mipHeight / 2 : 1, 1); + vk::ImageBlit blit = { .srcSubresource = {}, .srcOffsets = offsets, + .dstSubresource = {}, .dstOffsets = dstOffsets }; + blit.srcSubresource = vk::ImageSubresourceLayers( vk::ImageAspectFlagBits::eColor, i - 1, 0, 1); + blit.dstSubresource = vk::ImageSubresourceLayers( vk::ImageAspectFlagBits::eColor, i, 0, 1); + + commandBuffer->blitImage(image, vk::ImageLayout::eTransferSrcOptimal, image, vk::ImageLayout::eTransferDstOptimal, { blit }, vk::Filter::eLinear); + + barrier.oldLayout = vk::ImageLayout::eTransferSrcOptimal; + barrier.newLayout = vk::ImageLayout::eShaderReadOnlyOptimal; + barrier.srcAccessMask = vk::AccessFlagBits::eTransferRead; + barrier.dstAccessMask = vk::AccessFlagBits::eShaderRead; + + commandBuffer->pipelineBarrier(vk::PipelineStageFlagBits::eTransfer, vk::PipelineStageFlagBits::eFragmentShader, {}, {}, {}, barrier); + + if (mipWidth > 1) mipWidth /= 2; + if (mipHeight > 1) mipHeight /= 2; + } + + barrier.subresourceRange.baseMipLevel = mipLevels - 1; + barrier.oldLayout = vk::ImageLayout::eTransferDstOptimal; + barrier.newLayout = vk::ImageLayout::eShaderReadOnlyOptimal; + barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite; + barrier.dstAccessMask = vk::AccessFlagBits::eShaderRead; + + commandBuffer->pipelineBarrier(vk::PipelineStageFlagBits::eTransfer, vk::PipelineStageFlagBits::eFragmentShader, {}, {}, {}, barrier); + + endSingleTimeCommands(*commandBuffer); + } + + vk::SampleCountFlagBits getMaxUsableSampleCount() { + vk::PhysicalDeviceProperties physicalDeviceProperties = physicalDevice.getProperties(); + + vk::SampleCountFlags counts = physicalDeviceProperties.limits.framebufferColorSampleCounts & physicalDeviceProperties.limits.framebufferDepthSampleCounts; + if (counts & vk::SampleCountFlagBits::e64) { return vk::SampleCountFlagBits::e64; } + if (counts & vk::SampleCountFlagBits::e32) { return vk::SampleCountFlagBits::e32; } + if (counts & vk::SampleCountFlagBits::e16) { return 
vk::SampleCountFlagBits::e16; } + if (counts & vk::SampleCountFlagBits::e8) { return vk::SampleCountFlagBits::e8; } + if (counts & vk::SampleCountFlagBits::e4) { return vk::SampleCountFlagBits::e4; } + if (counts & vk::SampleCountFlagBits::e2) { return vk::SampleCountFlagBits::e2; } + + return vk::SampleCountFlagBits::e1; + } + + void createTextureImageView() { + textureImageView = createImageView(textureImage, vk::Format::eR8G8B8A8Srgb, vk::ImageAspectFlagBits::eColor, mipLevels); + } + + void createTextureSampler() { + vk::PhysicalDeviceProperties properties = physicalDevice.getProperties(); + vk::SamplerCreateInfo samplerInfo { + .magFilter = vk::Filter::eLinear, + .minFilter = vk::Filter::eLinear, + .mipmapMode = vk::SamplerMipmapMode::eLinear, + .addressModeU = vk::SamplerAddressMode::eRepeat, + .addressModeV = vk::SamplerAddressMode::eRepeat, + .addressModeW = vk::SamplerAddressMode::eRepeat, + .mipLodBias = 0.0f, + .anisotropyEnable = vk::True, + .maxAnisotropy = properties.limits.maxSamplerAnisotropy, + .compareEnable = vk::False, + .compareOp = vk::CompareOp::eAlways + }; + textureSampler = vk::raii::Sampler(device, samplerInfo); + } + + [[nodiscard]] vk::raii::ImageView createImageView(const vk::raii::Image& image, vk::Format format, vk::ImageAspectFlags aspectFlags, uint32_t mipLevels) const { + vk::ImageViewCreateInfo viewInfo{ + .image = image, + .viewType = vk::ImageViewType::e2D, + .format = format, + .subresourceRange = { aspectFlags, 0, mipLevels, 0, 1 } + }; + return vk::raii::ImageView( device, viewInfo ); + } + + void createImage(uint32_t width, uint32_t height, uint32_t mipLevels, vk::SampleCountFlagBits numSamples, vk::Format format, vk::ImageTiling tiling, vk::ImageUsageFlags usage, vk::MemoryPropertyFlags properties, vk::raii::Image& image, vk::raii::DeviceMemory& imageMemory) { + vk::ImageCreateInfo imageInfo{ + .imageType = vk::ImageType::e2D, + .format = format, + .extent = {width, height, 1}, + .mipLevels = mipLevels, + .arrayLayers = 1, 
+ .samples = numSamples, + .tiling = tiling, + .usage = usage, + .sharingMode = vk::SharingMode::eExclusive, + .initialLayout = vk::ImageLayout::eUndefined + }; + image = vk::raii::Image(device, imageInfo); + + vk::MemoryRequirements memRequirements = image.getMemoryRequirements(); + vk::MemoryAllocateInfo allocInfo{ + .allocationSize = memRequirements.size, + .memoryTypeIndex = findMemoryType(memRequirements.memoryTypeBits, properties) + }; + imageMemory = vk::raii::DeviceMemory(device, allocInfo); + image.bindMemory(imageMemory, 0); + } + + void transitionImageLayout(const vk::raii::Image& image, const vk::ImageLayout oldLayout, const vk::ImageLayout newLayout, uint32_t mipLevels) { + const auto commandBuffer = beginSingleTimeCommands(); + + if (appInfo.synchronization2Supported) { + // Use Synchronization2 API + vk::ImageMemoryBarrier2 barrier{ + .srcStageMask = vk::PipelineStageFlagBits2::eAllCommands, + .dstStageMask = vk::PipelineStageFlagBits2::eAllCommands, + .oldLayout = oldLayout, + .newLayout = newLayout, + .image = image, + .subresourceRange = { vk::ImageAspectFlagBits::eColor, 0, mipLevels, 0, 1 } + }; + + if (oldLayout == vk::ImageLayout::eUndefined && newLayout == vk::ImageLayout::eTransferDstOptimal) { + barrier.srcAccessMask = vk::AccessFlagBits2::eNone; + barrier.dstAccessMask = vk::AccessFlagBits2::eTransferWrite; + barrier.srcStageMask = vk::PipelineStageFlagBits2::eTopOfPipe; + barrier.dstStageMask = vk::PipelineStageFlagBits2::eTransfer; + } else if (oldLayout == vk::ImageLayout::eTransferDstOptimal && newLayout == vk::ImageLayout::eShaderReadOnlyOptimal) { + barrier.srcAccessMask = vk::AccessFlagBits2::eTransferWrite; + barrier.dstAccessMask = vk::AccessFlagBits2::eShaderRead; + barrier.srcStageMask = vk::PipelineStageFlagBits2::eTransfer; + barrier.dstStageMask = vk::PipelineStageFlagBits2::eFragmentShader; + } else { + throw std::invalid_argument("unsupported layout transition!"); + } + + vk::DependencyInfo dependencyInfo{ + 
.imageMemoryBarrierCount = 1, + .pImageMemoryBarriers = &barrier + }; + + commandBuffer->pipelineBarrier2(dependencyInfo); + } else { + // Use traditional synchronization API + vk::ImageMemoryBarrier barrier{ + .oldLayout = oldLayout, + .newLayout = newLayout, + .image = image, + .subresourceRange = { vk::ImageAspectFlagBits::eColor, 0, mipLevels, 0, 1 } + }; + + vk::PipelineStageFlags sourceStage; + vk::PipelineStageFlags destinationStage; + + if (oldLayout == vk::ImageLayout::eUndefined && newLayout == vk::ImageLayout::eTransferDstOptimal) { + barrier.srcAccessMask = {}; + barrier.dstAccessMask = vk::AccessFlagBits::eTransferWrite; + + sourceStage = vk::PipelineStageFlagBits::eTopOfPipe; + destinationStage = vk::PipelineStageFlagBits::eTransfer; + } else if (oldLayout == vk::ImageLayout::eTransferDstOptimal && newLayout == vk::ImageLayout::eShaderReadOnlyOptimal) { + barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite; + barrier.dstAccessMask = vk::AccessFlagBits::eShaderRead; + + sourceStage = vk::PipelineStageFlagBits::eTransfer; + destinationStage = vk::PipelineStageFlagBits::eFragmentShader; + } else { + throw std::invalid_argument("unsupported layout transition!"); + } + commandBuffer->pipelineBarrier(sourceStage, destinationStage, {}, {}, nullptr, barrier); + } + + endSingleTimeCommands(*commandBuffer); + } + + void copyBufferToImage(const vk::raii::Buffer& buffer, const vk::raii::Image& image, uint32_t width, uint32_t height) { + std::unique_ptr commandBuffer = beginSingleTimeCommands(); + vk::BufferImageCopy region{ + .bufferOffset = 0, + .bufferRowLength = 0, + .bufferImageHeight = 0, + .imageSubresource = { vk::ImageAspectFlagBits::eColor, 0, 0, 1 }, + .imageOffset = {0, 0, 0}, + .imageExtent = {width, height, 1} + }; + commandBuffer->copyBufferToImage(buffer, image, vk::ImageLayout::eTransferDstOptimal, {region}); + endSingleTimeCommands(*commandBuffer); + } + + void loadModel() { + tinyobj::attrib_t attrib; + std::vector shapes; + std::vector 
materials; + std::string warn, err; + + if (!LoadObj(&attrib, &shapes, &materials, &warn, &err, MODEL_PATH.c_str())) { + throw std::runtime_error(warn + err); + } + + std::unordered_map uniqueVertices{}; + + for (const auto& shape : shapes) { + for (const auto& index : shape.mesh.indices) { + Vertex vertex{}; + + vertex.pos = { + attrib.vertices[3 * index.vertex_index + 0], + attrib.vertices[3 * index.vertex_index + 1], + attrib.vertices[3 * index.vertex_index + 2] + }; + + vertex.texCoord = { + attrib.texcoords[2 * index.texcoord_index + 0], + 1.0f - attrib.texcoords[2 * index.texcoord_index + 1] + }; + + vertex.color = {1.0f, 1.0f, 1.0f}; + + if (!uniqueVertices.contains(vertex)) { + uniqueVertices[vertex] = static_cast(vertices.size()); + vertices.push_back(vertex); + } + + indices.push_back(uniqueVertices[vertex]); + } + } + } + + void createVertexBuffer() { + vk::DeviceSize bufferSize = sizeof(vertices[0]) * vertices.size(); + vk::raii::Buffer stagingBuffer({}); + vk::raii::DeviceMemory stagingBufferMemory({}); + createBuffer(bufferSize, vk::BufferUsageFlagBits::eTransferSrc, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent, stagingBuffer, stagingBufferMemory); + + void* dataStaging = stagingBufferMemory.mapMemory(0, bufferSize); + memcpy(dataStaging, vertices.data(), bufferSize); + stagingBufferMemory.unmapMemory(); + + createBuffer(bufferSize, vk::BufferUsageFlagBits::eTransferDst | vk::BufferUsageFlagBits::eVertexBuffer, vk::MemoryPropertyFlagBits::eDeviceLocal, vertexBuffer, vertexBufferMemory); + + copyBuffer(stagingBuffer, vertexBuffer, bufferSize); + } + + void createIndexBuffer() { + vk::DeviceSize bufferSize = sizeof(indices[0]) * indices.size(); + + vk::raii::Buffer stagingBuffer({}); + vk::raii::DeviceMemory stagingBufferMemory({}); + createBuffer(bufferSize, vk::BufferUsageFlagBits::eTransferSrc, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent, stagingBuffer, 
stagingBufferMemory); + + void* data = stagingBufferMemory.mapMemory(0, bufferSize); + memcpy(data, indices.data(), bufferSize); + stagingBufferMemory.unmapMemory(); + + createBuffer(bufferSize, vk::BufferUsageFlagBits::eTransferDst | vk::BufferUsageFlagBits::eIndexBuffer, vk::MemoryPropertyFlagBits::eDeviceLocal, indexBuffer, indexBufferMemory); + + copyBuffer(stagingBuffer, indexBuffer, bufferSize); + } + + void createUniformBuffers() { + uniformBuffers.clear(); + uniformBuffersMemory.clear(); + uniformBuffersMapped.clear(); + + for (size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) { + vk::DeviceSize bufferSize = sizeof(UniformBufferObject); + vk::raii::Buffer buffer({}); + vk::raii::DeviceMemory bufferMem({}); + createBuffer(bufferSize, vk::BufferUsageFlagBits::eUniformBuffer, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent, buffer, bufferMem); + uniformBuffers.emplace_back(std::move(buffer)); + uniformBuffersMemory.emplace_back(std::move(bufferMem)); + uniformBuffersMapped.emplace_back(uniformBuffersMemory[i].mapMemory(0, bufferSize)); + } + } + + void createDescriptorPool() { + std::array poolSize { + vk::DescriptorPoolSize(vk::DescriptorType::eUniformBuffer, MAX_FRAMES_IN_FLIGHT), + vk::DescriptorPoolSize(vk::DescriptorType::eCombinedImageSampler, MAX_FRAMES_IN_FLIGHT) + }; + vk::DescriptorPoolCreateInfo poolInfo{ + .flags = vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet, + .maxSets = MAX_FRAMES_IN_FLIGHT, + .poolSizeCount = static_cast(poolSize.size()), + .pPoolSizes = poolSize.data() + }; + descriptorPool = vk::raii::DescriptorPool(device, poolInfo); + } + + void createDescriptorSets() { + std::vector layouts(MAX_FRAMES_IN_FLIGHT, descriptorSetLayout); + vk::DescriptorSetAllocateInfo allocInfo{ + .descriptorPool = descriptorPool, + .descriptorSetCount = static_cast(layouts.size()), + .pSetLayouts = layouts.data() + }; + + descriptorSets.clear(); + descriptorSets = device.allocateDescriptorSets(allocInfo); + + for 
(size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) { + vk::DescriptorBufferInfo bufferInfo{ + .buffer = uniformBuffers[i], + .offset = 0, + .range = sizeof(UniformBufferObject) + }; + vk::DescriptorImageInfo imageInfo{ + .sampler = textureSampler, + .imageView = textureImageView, + .imageLayout = vk::ImageLayout::eShaderReadOnlyOptimal + }; + std::array descriptorWrites{ + vk::WriteDescriptorSet{ + .dstSet = descriptorSets[i], + .dstBinding = 0, + .dstArrayElement = 0, + .descriptorCount = 1, + .descriptorType = vk::DescriptorType::eUniformBuffer, + .pBufferInfo = &bufferInfo + }, + vk::WriteDescriptorSet{ + .dstSet = descriptorSets[i], + .dstBinding = 1, + .dstArrayElement = 0, + .descriptorCount = 1, + .descriptorType = vk::DescriptorType::eCombinedImageSampler, + .pImageInfo = &imageInfo + } + }; + device.updateDescriptorSets(descriptorWrites, {}); + } + } + + void createBuffer(vk::DeviceSize size, vk::BufferUsageFlags usage, vk::MemoryPropertyFlags properties, vk::raii::Buffer& buffer, vk::raii::DeviceMemory& bufferMemory) { + vk::BufferCreateInfo bufferInfo{ + .size = size, + .usage = usage, + .sharingMode = vk::SharingMode::eExclusive + }; + buffer = vk::raii::Buffer(device, bufferInfo); + vk::MemoryRequirements memRequirements = buffer.getMemoryRequirements(); + vk::MemoryAllocateInfo allocInfo{ + .allocationSize = memRequirements.size, + .memoryTypeIndex = findMemoryType(memRequirements.memoryTypeBits, properties) + }; + bufferMemory = vk::raii::DeviceMemory(device, allocInfo); + buffer.bindMemory(bufferMemory, 0); + } + + std::unique_ptr beginSingleTimeCommands() { + vk::CommandBufferAllocateInfo allocInfo{ + .commandPool = commandPool, + .level = vk::CommandBufferLevel::ePrimary, + .commandBufferCount = 1 + }; + std::unique_ptr commandBuffer = std::make_unique(std::move(vk::raii::CommandBuffers(device, allocInfo).front())); + + vk::CommandBufferBeginInfo beginInfo{ + .flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit + }; + 
commandBuffer->begin(beginInfo); + + return commandBuffer; + } + + void endSingleTimeCommands(const vk::raii::CommandBuffer& commandBuffer) const { + commandBuffer.end(); + + vk::SubmitInfo submitInfo{ .commandBufferCount = 1, .pCommandBuffers = &*commandBuffer }; + graphicsQueue.submit(submitInfo, nullptr); + graphicsQueue.waitIdle(); + } + + void copyBuffer(vk::raii::Buffer & srcBuffer, vk::raii::Buffer & dstBuffer, vk::DeviceSize size) { + vk::CommandBufferAllocateInfo allocInfo{ .commandPool = commandPool, .level = vk::CommandBufferLevel::ePrimary, .commandBufferCount = 1 }; + vk::raii::CommandBuffer commandCopyBuffer = std::move(device.allocateCommandBuffers(allocInfo).front()); + commandCopyBuffer.begin(vk::CommandBufferBeginInfo{ .flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit }); + commandCopyBuffer.copyBuffer(*srcBuffer, *dstBuffer, vk::BufferCopy{ .size = size }); + commandCopyBuffer.end(); + graphicsQueue.submit(vk::SubmitInfo{ .commandBufferCount = 1, .pCommandBuffers = &*commandCopyBuffer }, nullptr); + graphicsQueue.waitIdle(); + } + + uint32_t findMemoryType(uint32_t typeFilter, vk::MemoryPropertyFlags properties) { + vk::PhysicalDeviceMemoryProperties memProperties = physicalDevice.getMemoryProperties(); + + for (uint32_t i = 0; i < memProperties.memoryTypeCount; i++) { + if ((typeFilter & (1 << i)) && (memProperties.memoryTypes[i].propertyFlags & properties) == properties) { + return i; + } + } + + throw std::runtime_error("failed to find suitable memory type!"); + } + + void createCommandBuffers() { + commandBuffers.clear(); + vk::CommandBufferAllocateInfo allocInfo{ .commandPool = commandPool, .level = vk::CommandBufferLevel::ePrimary, + .commandBufferCount = MAX_FRAMES_IN_FLIGHT }; + commandBuffers = vk::raii::CommandBuffers(device, allocInfo); + } + + void recordCommandBuffer(uint32_t imageIndex) { + commandBuffers[currentFrame].begin({}); + + vk::ClearValue clearColor = vk::ClearColorValue(0.0f, 0.0f, 0.0f, 1.0f); + vk::ClearValue 
clearDepth = vk::ClearDepthStencilValue(1.0f, 0); + std::array clearValues = { clearColor, clearDepth }; + + if (appInfo.dynamicRenderingSupported) { + // Transition attachments to the correct layout + if (appInfo.synchronization2Supported) { + // Use Synchronization2 API for image transitions + vk::ImageMemoryBarrier2 colorBarrier{ + .srcStageMask = vk::PipelineStageFlagBits2::eTopOfPipe, + .srcAccessMask = vk::AccessFlagBits2::eNone, + .dstStageMask = vk::PipelineStageFlagBits2::eColorAttachmentOutput, + .dstAccessMask = vk::AccessFlagBits2::eColorAttachmentWrite, + .oldLayout = vk::ImageLayout::eUndefined, + .newLayout = vk::ImageLayout::eColorAttachmentOptimal, + .image = *colorImage, + .subresourceRange = { vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 } + }; + + vk::ImageMemoryBarrier2 depthBarrier{ + .srcStageMask = vk::PipelineStageFlagBits2::eEarlyFragmentTests | vk::PipelineStageFlagBits2::eLateFragmentTests, + .srcAccessMask = vk::AccessFlagBits2::eNone, + .dstStageMask = vk::PipelineStageFlagBits2::eEarlyFragmentTests | vk::PipelineStageFlagBits2::eLateFragmentTests, + .dstAccessMask = vk::AccessFlagBits2::eDepthStencilAttachmentWrite, + .oldLayout = vk::ImageLayout::eUndefined, + .newLayout = vk::ImageLayout::eDepthStencilAttachmentOptimal, + .image = *depthImage, + .subresourceRange = { vk::ImageAspectFlagBits::eDepth, 0, 1, 0, 1 } + }; + + vk::ImageMemoryBarrier2 swapchainBarrier{ + .srcStageMask = vk::PipelineStageFlagBits2::eTopOfPipe, + .srcAccessMask = vk::AccessFlagBits2::eNone, + .dstStageMask = vk::PipelineStageFlagBits2::eColorAttachmentOutput, + .dstAccessMask = vk::AccessFlagBits2::eColorAttachmentWrite, + .oldLayout = vk::ImageLayout::eUndefined, + .newLayout = vk::ImageLayout::eColorAttachmentOptimal, + .image = swapChainImages[imageIndex], + .subresourceRange = { vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 } + }; + + std::array barriers = { colorBarrier, depthBarrier, swapchainBarrier }; + vk::DependencyInfo dependencyInfo{ + 
.imageMemoryBarrierCount = static_cast(barriers.size()), + .pImageMemoryBarriers = barriers.data() + }; + + commandBuffers[currentFrame].pipelineBarrier2(dependencyInfo); + } else { + // Use traditional synchronization API + vk::ImageMemoryBarrier colorBarrier{ + .srcAccessMask = vk::AccessFlagBits::eNone, + .dstAccessMask = vk::AccessFlagBits::eColorAttachmentWrite, + .oldLayout = vk::ImageLayout::eUndefined, + .newLayout = vk::ImageLayout::eColorAttachmentOptimal, + .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .image = *colorImage, + .subresourceRange = { vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 } + }; + + vk::ImageMemoryBarrier depthBarrier{ + .srcAccessMask = vk::AccessFlagBits::eNone, + .dstAccessMask = vk::AccessFlagBits::eDepthStencilAttachmentWrite, + .oldLayout = vk::ImageLayout::eUndefined, + .newLayout = vk::ImageLayout::eDepthStencilAttachmentOptimal, + .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .image = *depthImage, + .subresourceRange = { vk::ImageAspectFlagBits::eDepth, 0, 1, 0, 1 } + }; + + vk::ImageMemoryBarrier swapchainBarrier{ + .srcAccessMask = vk::AccessFlagBits::eNone, + .dstAccessMask = vk::AccessFlagBits::eColorAttachmentWrite, + .oldLayout = vk::ImageLayout::eUndefined, + .newLayout = vk::ImageLayout::eColorAttachmentOptimal, + .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .image = swapChainImages[imageIndex], + .subresourceRange = { vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 } + }; + + std::array barriers = { colorBarrier, depthBarrier, swapchainBarrier }; + commandBuffers[currentFrame].pipelineBarrier( + vk::PipelineStageFlagBits::eTopOfPipe, + vk::PipelineStageFlagBits::eColorAttachmentOutput | vk::PipelineStageFlagBits::eEarlyFragmentTests, + vk::DependencyFlagBits::eByRegion, + {}, + {}, + barriers + ); + } + + // Setup rendering attachments + 
vk::RenderingAttachmentInfo colorAttachment{ + .imageView = *colorImageView, + .imageLayout = vk::ImageLayout::eColorAttachmentOptimal, + .resolveMode = vk::ResolveModeFlagBits::eAverage, + .resolveImageView = *swapChainImageViews[imageIndex], + .resolveImageLayout = vk::ImageLayout::eColorAttachmentOptimal, + .loadOp = vk::AttachmentLoadOp::eClear, + .storeOp = vk::AttachmentStoreOp::eStore, + .clearValue = clearColor + }; + + vk::RenderingAttachmentInfo depthAttachment{ + .imageView = *depthImageView, + .imageLayout = vk::ImageLayout::eDepthStencilAttachmentOptimal, + .loadOp = vk::AttachmentLoadOp::eClear, + .storeOp = vk::AttachmentStoreOp::eDontCare, + .clearValue = clearDepth + }; + + vk::RenderingInfo renderingInfo{ + .renderArea = {{0, 0}, swapChainExtent}, + .layerCount = 1, + .colorAttachmentCount = 1, + .pColorAttachments = &colorAttachment, + .pDepthAttachment = &depthAttachment + }; + + commandBuffers[currentFrame].beginRendering(renderingInfo); + } else { + // Use traditional render pass + std::cout << "Recording command buffer with traditional render pass\n"; + + vk::RenderPassBeginInfo renderPassInfo{ + .renderPass = *renderPass, + .framebuffer = *swapChainFramebuffers[imageIndex], + .renderArea = {{0, 0}, swapChainExtent}, + .clearValueCount = static_cast(clearValues.size()), + .pClearValues = clearValues.data() + }; + + commandBuffers[currentFrame].beginRenderPass(renderPassInfo, vk::SubpassContents::eInline); + } + + // Common rendering commands + commandBuffers[currentFrame].bindPipeline(vk::PipelineBindPoint::eGraphics, *graphicsPipeline); + commandBuffers[currentFrame].setViewport(0, vk::Viewport(0.0f, 0.0f, static_cast(swapChainExtent.width), static_cast(swapChainExtent.height), 0.0f, 1.0f)); + commandBuffers[currentFrame].setScissor(0, vk::Rect2D(vk::Offset2D(0, 0), swapChainExtent)); + commandBuffers[currentFrame].bindVertexBuffers(0, *vertexBuffer, {0}); + commandBuffers[currentFrame].bindIndexBuffer(*indexBuffer, 0, 
vk::IndexType::eUint32); + commandBuffers[currentFrame].bindDescriptorSets(vk::PipelineBindPoint::eGraphics, pipelineLayout, 0, *descriptorSets[currentFrame], nullptr); + commandBuffers[currentFrame].drawIndexed(indices.size(), 1, 0, 0, 0); + + if (appInfo.dynamicRenderingSupported) { + commandBuffers[currentFrame].endRendering(); + + // Transition swapchain image to present layout + if (appInfo.synchronization2Supported) { + vk::ImageMemoryBarrier2 barrier{ + .srcStageMask = vk::PipelineStageFlagBits2::eColorAttachmentOutput, + .srcAccessMask = vk::AccessFlagBits2::eColorAttachmentWrite, + .dstStageMask = vk::PipelineStageFlagBits2::eBottomOfPipe, + .dstAccessMask = vk::AccessFlagBits2::eNone, + .oldLayout = vk::ImageLayout::eColorAttachmentOptimal, + .newLayout = vk::ImageLayout::ePresentSrcKHR, + .image = swapChainImages[imageIndex], + .subresourceRange = { vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 } + }; + + vk::DependencyInfo dependencyInfo{ + .imageMemoryBarrierCount = 1, + .pImageMemoryBarriers = &barrier + }; + + commandBuffers[currentFrame].pipelineBarrier2(dependencyInfo); + } else { + vk::ImageMemoryBarrier barrier{ + .srcAccessMask = vk::AccessFlagBits::eColorAttachmentWrite, + .dstAccessMask = vk::AccessFlagBits::eNone, + .oldLayout = vk::ImageLayout::eColorAttachmentOptimal, + .newLayout = vk::ImageLayout::ePresentSrcKHR, + .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .image = swapChainImages[imageIndex], + .subresourceRange = { vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 } + }; + + commandBuffers[currentFrame].pipelineBarrier( + vk::PipelineStageFlagBits::eColorAttachmentOutput, + vk::PipelineStageFlagBits::eBottomOfPipe, + vk::DependencyFlagBits::eByRegion, + {}, + {}, + { barrier } + ); + } + } else { + commandBuffers[currentFrame].endRenderPass(); + } + + commandBuffers[currentFrame].end(); + } + + void createSyncObjects() { + presentCompleteSemaphore.clear(); + 
renderFinishedSemaphore.clear(); + inFlightFences.clear(); + + if (appInfo.timelineSemaphoresSupported) { + // Create timeline semaphore + std::cout << "Creating timeline semaphores\n"; + vk::SemaphoreTypeCreateInfo timelineCreateInfo{ + .semaphoreType = vk::SemaphoreType::eTimeline, + .initialValue = 0 + }; + + vk::SemaphoreCreateInfo semaphoreInfo{ + .pNext = &timelineCreateInfo + }; + + timelineSemaphore = vk::raii::Semaphore(device, semaphoreInfo); + + // Still need binary semaphores for swapchain operations + for (size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) { + presentCompleteSemaphore.emplace_back(device, vk::SemaphoreCreateInfo()); + renderFinishedSemaphore.emplace_back(device, vk::SemaphoreCreateInfo()); + } + } else { + // Create binary semaphores and fences + std::cout << "Creating binary semaphores and fences\n"; + for (size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) { + presentCompleteSemaphore.emplace_back(device, vk::SemaphoreCreateInfo()); + renderFinishedSemaphore.emplace_back(device, vk::SemaphoreCreateInfo()); + } + } + + for (size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) { + inFlightFences.emplace_back(device, vk::FenceCreateInfo{ .flags = vk::FenceCreateFlagBits::eSignaled }); + } + } + + void updateUniformBuffer(uint32_t currentImage) const { + static auto startTime = std::chrono::high_resolution_clock::now(); + + auto currentTime = std::chrono::high_resolution_clock::now(); + float time = std::chrono::duration(currentTime - startTime).count(); + + UniformBufferObject ubo{}; + ubo.model = rotate(glm::mat4(1.0f), time * glm::radians(90.0f), glm::vec3(0.0f, 0.0f, 1.0f)); + ubo.view = lookAt(glm::vec3(2.0f, 2.0f, 2.0f), glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 0.0f, 1.0f)); + ubo.proj = glm::perspective(glm::radians(45.0f), static_cast(swapChainExtent.width) / static_cast(swapChainExtent.height), 0.1f, 10.0f); + ubo.proj[1][1] *= -1; + + memcpy(uniformBuffersMapped[currentImage], &ubo, sizeof(ubo)); + } + + void drawFrame() { + while 
(vk::Result::eTimeout == device.waitForFences(*inFlightFences[currentFrame], vk::True, FenceTimeout)) + ; + auto [result, imageIndex] = swapChain.acquireNextImage(UINT64_MAX, *presentCompleteSemaphore[currentFrame], nullptr); + + if (result == vk::Result::eErrorOutOfDateKHR) { + recreateSwapChain(); + return; + } + if (result != vk::Result::eSuccess && result != vk::Result::eSuboptimalKHR) { + throw std::runtime_error("failed to acquire swap chain image!"); + } + updateUniformBuffer(currentFrame); + + device.resetFences(*inFlightFences[currentFrame]); + commandBuffers[currentFrame].reset(); + recordCommandBuffer(imageIndex); + + if (appInfo.timelineSemaphoresSupported) { + // Use timeline semaphores for GPU synchronization + uint64_t waitValue = timelineValue; + uint64_t signalValue = ++timelineValue; + + vk::TimelineSemaphoreSubmitInfo timelineInfo{ + .waitSemaphoreValueCount = 0, // We'll still use binary semaphore for swapchain + .signalSemaphoreValueCount = 1, + .pSignalSemaphoreValues = &signalValue + }; + + std::array waitSemaphores = { *presentCompleteSemaphore[currentFrame], *timelineSemaphore }; + std::array waitStages = { vk::PipelineStageFlagBits::eColorAttachmentOutput, vk::PipelineStageFlagBits::eVertexInput }; + std::array waitValues = { 0, waitValue }; // Binary semaphore value is ignored + + std::array signalSemaphores = { *renderFinishedSemaphore[currentFrame], *timelineSemaphore }; + std::array signalValues = { 0, signalValue }; // Binary semaphore value is ignored + + timelineInfo.waitSemaphoreValueCount = 1; // Only for the timeline semaphore + timelineInfo.pWaitSemaphoreValues = &waitValues[1]; + timelineInfo.signalSemaphoreValueCount = 1; // Only for the timeline semaphore + timelineInfo.pSignalSemaphoreValues = &signalValues[1]; + + vk::SubmitInfo submitInfo{ + .pNext = &timelineInfo, + .waitSemaphoreCount = 1, // Only wait on the binary semaphore + .pWaitSemaphores = &waitSemaphores[0], + .pWaitDstStageMask = &waitStages[0], + 
.commandBufferCount = 1, + .pCommandBuffers = &*commandBuffers[currentFrame], + .signalSemaphoreCount = 2, // Signal both semaphores + .pSignalSemaphores = signalSemaphores.data() + }; + + graphicsQueue.submit(submitInfo, *inFlightFences[currentFrame]); + } else { + // Use traditional binary semaphores + vk::PipelineStageFlags waitDestinationStageMask(vk::PipelineStageFlagBits::eColorAttachmentOutput); + const vk::SubmitInfo submitInfo{ + .waitSemaphoreCount = 1, + .pWaitSemaphores = &*presentCompleteSemaphore[currentFrame], + .pWaitDstStageMask = &waitDestinationStageMask, + .commandBufferCount = 1, + .pCommandBuffers = &*commandBuffers[currentFrame], + .signalSemaphoreCount = 1, + .pSignalSemaphores = &*renderFinishedSemaphore[currentFrame] + }; + graphicsQueue.submit(submitInfo, *inFlightFences[currentFrame]); + } + + const vk::PresentInfoKHR presentInfoKHR{ + .waitSemaphoreCount = 1, + .pWaitSemaphores = &*renderFinishedSemaphore[currentFrame], + .swapchainCount = 1, + .pSwapchains = &*swapChain, + .pImageIndices = &imageIndex + }; + result = presentQueue.presentKHR(presentInfoKHR); + if (result == vk::Result::eErrorOutOfDateKHR || result == vk::Result::eSuboptimalKHR || framebufferResized) { + framebufferResized = false; + recreateSwapChain(); + } else if (result != vk::Result::eSuccess) { + throw std::runtime_error("failed to present swap chain image!"); + } + currentFrame = (currentFrame + 1) % MAX_FRAMES_IN_FLIGHT; + } + + [[nodiscard]] vk::raii::ShaderModule createShaderModule(const std::vector& code) const { + vk::ShaderModuleCreateInfo createInfo{ .codeSize = code.size(), .pCode = reinterpret_cast(code.data()) }; + vk::raii::ShaderModule shaderModule{ device, createInfo }; + + return shaderModule; + } + + static vk::Format chooseSwapSurfaceFormat(const std::vector& availableFormats) { + return (availableFormats[0].format == vk::Format::eUndefined) ? 
vk::Format::eB8G8R8A8Unorm : availableFormats[0].format; + } + + static vk::PresentModeKHR chooseSwapPresentMode(const std::vector& availablePresentModes) { + return std::ranges::any_of(availablePresentModes, + [](const vk::PresentModeKHR value) { return vk::PresentModeKHR::eMailbox == value; } ) ? vk::PresentModeKHR::eMailbox : vk::PresentModeKHR::eFifo; + } + + [[nodiscard]] vk::Extent2D chooseSwapExtent(const vk::SurfaceCapabilitiesKHR& capabilities) const { + if (capabilities.currentExtent.width != std::numeric_limits::max()) { + return capabilities.currentExtent; + } + int width, height; + glfwGetFramebufferSize(window, &width, &height); + + return { + std::clamp(width, capabilities.minImageExtent.width, capabilities.maxImageExtent.width), + std::clamp(height, capabilities.minImageExtent.height, capabilities.maxImageExtent.height) + }; + } + + [[nodiscard]] std::vector getRequiredExtensions() const { + // Get the required extensions from GLFW + uint32_t glfwExtensionCount = 0; + auto glfwExtensions = glfwGetRequiredInstanceExtensions(&glfwExtensionCount); + std::vector extensions(glfwExtensions, glfwExtensions + glfwExtensionCount); + + // Check if the debug utils extension is available + std::vector props = context.enumerateInstanceExtensionProperties(); + bool debugUtilsAvailable = std::ranges::any_of(props, + [](vk::ExtensionProperties const & ep) { + return strcmp(ep.extensionName, vk::EXTDebugUtilsExtensionName) == 0; + }); + + // Always include the debug utils extension if available + // This allows validation layers to be enabled via vulkanconfig + if (debugUtilsAvailable) { + extensions.push_back(vk::EXTDebugUtilsExtensionName); + } else { + std::cout << "VK_EXT_debug_utils extension not available. Validation layers may not work." 
<< std::endl; + } + + return extensions; + } + + static VKAPI_ATTR vk::Bool32 VKAPI_CALL debugCallback(vk::DebugUtilsMessageSeverityFlagBitsEXT severity, vk::DebugUtilsMessageTypeFlagsEXT type, const vk::DebugUtilsMessengerCallbackDataEXT* pCallbackData, void*) { + if (severity == vk::DebugUtilsMessageSeverityFlagBitsEXT::eError || severity == vk::DebugUtilsMessageSeverityFlagBitsEXT::eWarning) { + std::cerr << "validation layer: type " << to_string(type) << " msg: " << pCallbackData->pMessage << std::endl; + } + + return vk::False; + } + + static std::vector readFile(const std::string& filename) { + std::ifstream file(filename, std::ios::ate | std::ios::binary); + + if (!file.is_open()) { + throw std::runtime_error("failed to open file!"); + } + std::vector buffer(file.tellg()); + file.seekg(0, std::ios::beg); + file.read(buffer.data(), static_cast(buffer.size())); + file.close(); + + return buffer; + } +}; + +int main() { + try { + HelloTriangleApplication app; + app.run(); + } catch (const std::exception& e) { + std::cerr << e.what() << std::endl; + return EXIT_FAILURE; + } + + return EXIT_SUCCESS; +} diff --git a/attachments/33_vulkan_profiles.cpp b/attachments/33_vulkan_profiles.cpp new file mode 100644 index 00000000..44199bfe --- /dev/null +++ b/attachments/33_vulkan_profiles.cpp @@ -0,0 +1,1749 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +import vulkan_hpp; +#include +#include + +#define GLFW_INCLUDE_VULKAN // REQUIRED only for GLFW CreateWindowSurface. 
+#include + +#define GLM_FORCE_RADIANS +#define GLM_FORCE_DEPTH_ZERO_TO_ONE +#define GLM_ENABLE_EXPERIMENTAL +#include +#include +#include + +#define STB_IMAGE_IMPLEMENTATION +#include + +#define TINYOBJLOADER_IMPLEMENTATION +#include + +constexpr uint32_t WIDTH = 800; +constexpr uint32_t HEIGHT = 600; +constexpr uint64_t FenceTimeout = 100000000; +const std::string MODEL_PATH = "models/viking_room.obj"; +const std::string TEXTURE_PATH = "textures/viking_room.png"; +constexpr int MAX_FRAMES_IN_FLIGHT = 2; + +// Application info structure to store profile support flags +struct AppInfo { + bool profileSupported = false; + VpProfileProperties profile; +}; + +// Moved struct definitions inside the class + +struct Vertex { + glm::vec3 pos; + glm::vec3 color; + glm::vec2 texCoord; + + static vk::VertexInputBindingDescription getBindingDescription() { + return { 0, sizeof(Vertex), vk::VertexInputRate::eVertex }; + } + + static std::array getAttributeDescriptions() { + return { + vk::VertexInputAttributeDescription( 0, 0, vk::Format::eR32G32B32Sfloat, offsetof(Vertex, pos) ), + vk::VertexInputAttributeDescription( 1, 0, vk::Format::eR32G32B32Sfloat, offsetof(Vertex, color) ), + vk::VertexInputAttributeDescription( 2, 0, vk::Format::eR32G32Sfloat, offsetof(Vertex, texCoord) ) + }; + } + + bool operator==(const Vertex& other) const { + return pos == other.pos && color == other.color && texCoord == other.texCoord; + } +}; + +template<> struct std::hash { + size_t operator()(Vertex const& vertex) const noexcept { + return ((hash()(vertex.pos) ^ (hash()(vertex.color) << 1)) >> 1) ^ (hash()(vertex.texCoord) << 1); + } + }; + +struct UniformBufferObject { + alignas(16) glm::mat4 model; + alignas(16) glm::mat4 view; + alignas(16) glm::mat4 proj; +}; + +class HelloTriangleApplication { +public: + void run() { + initWindow(); + initVulkan(); + mainLoop(); + cleanup(); + } + +private: + GLFWwindow* window = nullptr; + vk::raii::Context context; + vk::raii::Instance instance = 
nullptr; + vk::raii::DebugUtilsMessengerEXT debugMessenger = nullptr; + vk::raii::SurfaceKHR surface = nullptr; + vk::raii::PhysicalDevice physicalDevice = nullptr; + vk::raii::Device device = nullptr; + vk::raii::Queue graphicsQueue = nullptr; + vk::raii::Queue presentQueue = nullptr; + vk::raii::SwapchainKHR swapChain = nullptr; + std::vector swapChainImages; + vk::Format swapChainImageFormat = {}; + vk::Extent2D swapChainExtent; + std::vector swapChainImageViews; + vk::raii::RenderPass renderPass = nullptr; + vk::raii::DescriptorSetLayout descriptorSetLayout = nullptr; + vk::raii::PipelineLayout pipelineLayout = nullptr; + vk::raii::Pipeline graphicsPipeline = nullptr; + std::vector swapChainFramebuffers; + vk::raii::CommandPool commandPool = nullptr; + std::vector commandBuffers; + std::vector imageAvailableSemaphores; + std::vector renderFinishedSemaphores; + std::vector inFlightFences; + std::vector presentCompleteSemaphore; + uint32_t currentFrame = 0; + bool framebufferResized = false; + vk::raii::Buffer vertexBuffer = nullptr; + vk::raii::DeviceMemory vertexBufferMemory = nullptr; + vk::raii::Buffer indexBuffer = nullptr; + vk::raii::DeviceMemory indexBufferMemory = nullptr; + std::vector uniformBuffers; + std::vector uniformBuffersMemory; + std::vector uniformBuffersMapped; + vk::raii::DescriptorPool descriptorPool = nullptr; + std::vector descriptorSets; + vk::raii::Image textureImage = nullptr; + vk::raii::DeviceMemory textureImageMemory = nullptr; + vk::raii::ImageView textureImageView = nullptr; + vk::raii::Sampler textureSampler = nullptr; + vk::raii::Image depthImage = nullptr; + vk::raii::DeviceMemory depthImageMemory = nullptr; + vk::raii::ImageView depthImageView = nullptr; + std::vector vertices; + std::vector indices; + vk::SampleCountFlagBits msaaSamples = vk::SampleCountFlagBits::e1; + vk::raii::Image colorImage = nullptr; + vk::raii::DeviceMemory colorImageMemory = nullptr; + vk::raii::ImageView colorImageView = nullptr; + + // Application 
info to store profile support + AppInfo appInfo = {}; + + struct SwapChainSupportDetails { + vk::SurfaceCapabilitiesKHR capabilities; + std::vector formats; + std::vector presentModes; + }; + + struct QueueFamilyIndices { + std::optional graphicsFamily; + std::optional presentFamily; + + bool isComplete() const { + return graphicsFamily.has_value() && presentFamily.has_value(); + } + }; + + const std::vector requiredDeviceExtension = { + VK_KHR_SWAPCHAIN_EXTENSION_NAME + }; + + void initWindow() { + glfwInit(); + glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API); + window = glfwCreateWindow(WIDTH, HEIGHT, "Vulkan Profiles Demo", nullptr, nullptr); + glfwSetWindowUserPointer(window, this); + glfwSetFramebufferSizeCallback(window, framebufferResizeCallback); + } + + static void framebufferResizeCallback(GLFWwindow* window, int, int) { + auto app = reinterpret_cast(glfwGetWindowUserPointer(window)); + app->framebufferResized = true; + } + + void initVulkan() { + createInstance(); + setupDebugMessenger(); + createSurface(); + pickPhysicalDevice(); + checkFeatureSupport(); + createLogicalDevice(); + createSwapChain(); + createImageViews(); + + // Create render pass only if not using dynamic rendering + if (!appInfo.profileSupported) { + createRenderPass(); + } + + createDescriptorSetLayout(); + createGraphicsPipeline(); + + // Create framebuffers only if not using dynamic rendering + if (!appInfo.profileSupported) { + createFramebuffers(); + } + + createCommandPool(); + createColorResources(); + createDepthResources(); + createTextureImage(); + createTextureImageView(); + createTextureSampler(); + loadModel(); + createVertexBuffer(); + createIndexBuffer(); + createUniformBuffers(); + createDescriptorPool(); + createDescriptorSets(); + createCommandBuffers(); + createSyncObjects(); + } + + void mainLoop() { + while (!glfwWindowShouldClose(window)) { + glfwPollEvents(); + drawFrame(); + } + + device.waitIdle(); + } + + void cleanupSwapChain() { + colorImageView = nullptr; + 
colorImage = nullptr; + colorImageMemory = nullptr; + + depthImageView = nullptr; + depthImage = nullptr; + depthImageMemory = nullptr; + + for (auto& framebuffer : swapChainFramebuffers) { + framebuffer = nullptr; + } + + for (auto& imageView : swapChainImageViews) { + imageView = nullptr; + } + + swapChain = nullptr; + } + + void cleanup() { + glfwDestroyWindow(window); + glfwTerminate(); + } + + void recreateSwapChain() { + int width = 0, height = 0; + glfwGetFramebufferSize(window, &width, &height); + while (width == 0 || height == 0) { + glfwGetFramebufferSize(window, &width, &height); + glfwWaitEvents(); + } + + device.waitIdle(); + + cleanupSwapChain(); + + createSwapChain(); + createImageViews(); + + // Recreate traditional render pass and framebuffers if not using profiles + if (!appInfo.profileSupported) { + createRenderPass(); + createFramebuffers(); + } + + createColorResources(); + createDepthResources(); + } + + void createInstance() { + + constexpr vk::ApplicationInfo appInfo{ + .pApplicationName = "Vulkan Profiles Demo", + .applicationVersion = VK_MAKE_VERSION(1, 0, 0), + .pEngineName = "No Engine", + .engineVersion = VK_MAKE_VERSION(1, 0, 0), + .apiVersion = vk::ApiVersion14 + }; + + auto extensions = getRequiredExtensions(); + + vk::InstanceCreateInfo createInfo{ + .pApplicationInfo = &appInfo, + .enabledExtensionCount = static_cast(extensions.size()), + .ppEnabledExtensionNames = extensions.data() + }; + + instance = vk::raii::Instance(context, createInfo); + + } + + void setupDebugMessenger() { + // Always set up the debug messenger + // It will only be used if validation layers are enabled via vulkanconfig + + vk::DebugUtilsMessageSeverityFlagsEXT severityFlags( + vk::DebugUtilsMessageSeverityFlagBitsEXT::eVerbose | + vk::DebugUtilsMessageSeverityFlagBitsEXT::eWarning | + vk::DebugUtilsMessageSeverityFlagBitsEXT::eError + ); + + vk::DebugUtilsMessageTypeFlagsEXT messageTypeFlags( + vk::DebugUtilsMessageTypeFlagBitsEXT::eGeneral | + 
vk::DebugUtilsMessageTypeFlagBitsEXT::ePerformance | + vk::DebugUtilsMessageTypeFlagBitsEXT::eValidation + ); + + vk::DebugUtilsMessengerCreateInfoEXT debugUtilsMessengerCreateInfoEXT{ + .messageSeverity = severityFlags, + .messageType = messageTypeFlags, + .pfnUserCallback = &debugCallback + }; + + try { + debugMessenger = instance.createDebugUtilsMessengerEXT(debugUtilsMessengerCreateInfoEXT); + } catch (vk::SystemError& err) { + // If the debug utils extension is not available, this will fail + // That's okay, it just means validation layers aren't enabled + std::cout << "Debug messenger not available. Validation layers may not be enabled." << std::endl; + } + } + + void createSurface() { + VkSurfaceKHR _surface; + if (glfwCreateWindowSurface(*instance, window, nullptr, &_surface) != 0) { + throw std::runtime_error("failed to create window surface!"); + } + surface = vk::raii::SurfaceKHR(instance, _surface); + } + + void pickPhysicalDevice() { + std::vector devices = instance.enumeratePhysicalDevices(); + const auto devIter = std::ranges::find_if( + devices, + [&](auto const & device) + { + // Check if any of the queue families support graphics operations + auto queueFamilies = device.getQueueFamilyProperties(); + bool supportsGraphics = + std::ranges::any_of(queueFamilies, [](auto const & qfp) { return !!(qfp.queueFlags & vk::QueueFlagBits::eGraphics); }); + + // Check if all required device extensions are available + auto availableDeviceExtensions = device.enumerateDeviceExtensionProperties(); + bool supportsAllRequiredExtensions = + std::ranges::all_of(requiredDeviceExtension, + [&availableDeviceExtensions](auto const & requiredDeviceExtension) + { + return std::ranges::any_of(availableDeviceExtensions, + [requiredDeviceExtension](auto const & availableDeviceExtension) + { return strcmp(availableDeviceExtension.extensionName, requiredDeviceExtension) == 0; }); + }); + + return supportsGraphics && supportsAllRequiredExtensions; + }); + + if (devIter != 
devices.end()) { + physicalDevice = *devIter; + msaaSamples = getMaxUsableSampleCount(); + + // Print device information + vk::PhysicalDeviceProperties deviceProperties = physicalDevice.getProperties(); + std::cout << "Selected GPU: " << deviceProperties.deviceName << std::endl; + std::cout << "API Version: " << VK_VERSION_MAJOR(deviceProperties.apiVersion) << "." + << VK_VERSION_MINOR(deviceProperties.apiVersion) << "." + << VK_VERSION_PATCH(deviceProperties.apiVersion) << std::endl; + } else { + throw std::runtime_error("failed to find a suitable GPU!"); + } + } + + void checkFeatureSupport() { + // Define the KHR roadmap 2022 profile - more widely supported than 2024 + appInfo.profile = { + VP_KHR_ROADMAP_2022_NAME, + VP_KHR_ROADMAP_2022_SPEC_VERSION + }; + + // Check if the profile is supported + VkBool32 supported = VK_FALSE; + VkResult result = vpGetPhysicalDeviceProfileSupport( + *instance, + *physicalDevice, + &appInfo.profile, + &supported + ); + + if (result == VK_SUCCESS && supported == VK_TRUE) { + appInfo.profileSupported = true; + std::cout << "Using KHR roadmap 2022 profile" << std::endl; + } else { + appInfo.profileSupported = false; + std::cout << "Falling back to traditional rendering (profile not supported)" << std::endl; + + // If we wanted to implement fallback, we would call detectFeatureSupport() here + // But for this example, we'll just use traditional rendering if the profile isn't supported + } + } + + void createLogicalDevice() { + QueueFamilyIndices indices = findQueueFamilies(physicalDevice); + + std::vector queueCreateInfos; + std::set uniqueQueueFamilies = {indices.graphicsFamily.value(), indices.presentFamily.value()}; + + float queuePriority = 1.0f; + for (uint32_t queueFamily : uniqueQueueFamilies) { + vk::DeviceQueueCreateInfo queueCreateInfo{ + .queueFamilyIndex = queueFamily, + .queueCount = 1, + .pQueuePriorities = &queuePriority + }; + queueCreateInfos.push_back(queueCreateInfo); + } + + if (appInfo.profileSupported) { + // 
Create device with Best Practices profile + + // Enable required features + vk::PhysicalDeviceFeatures2 features2; + vk::PhysicalDeviceFeatures deviceFeatures{}; + deviceFeatures.samplerAnisotropy = VK_TRUE; + deviceFeatures.sampleRateShading = VK_TRUE; + features2.features = deviceFeatures; + + // Enable dynamic rendering + vk::PhysicalDeviceDynamicRenderingFeatures dynamicRenderingFeatures; + dynamicRenderingFeatures.dynamicRendering = VK_TRUE; + features2.pNext = &dynamicRenderingFeatures; + + // Create a vk::DeviceCreateInfo with the required features + vk::DeviceCreateInfo vkDeviceCreateInfo{ + .pNext = &features2, + .queueCreateInfoCount = static_cast(queueCreateInfos.size()), + .pQueueCreateInfos = queueCreateInfos.data(), + .enabledExtensionCount = static_cast(requiredDeviceExtension.size()), + .ppEnabledExtensionNames = requiredDeviceExtension.data() + }; + + // Create the device with the vk::DeviceCreateInfo + device = vk::raii::Device(physicalDevice, vkDeviceCreateInfo); + + std::cout << "Created logical device using KHR roadmap 2022 profile" << std::endl; + } else { + // Fallback to manual device creation + vk::PhysicalDeviceFeatures deviceFeatures{}; + deviceFeatures.samplerAnisotropy = VK_TRUE; + deviceFeatures.sampleRateShading = VK_TRUE; + + vk::DeviceCreateInfo createInfo{ + .queueCreateInfoCount = static_cast(queueCreateInfos.size()), + .pQueueCreateInfos = queueCreateInfos.data(), + .enabledExtensionCount = static_cast(requiredDeviceExtension.size()), + .ppEnabledExtensionNames = requiredDeviceExtension.data(), + .pEnabledFeatures = &deviceFeatures + }; + + device = vk::raii::Device(physicalDevice, createInfo); + + std::cout << "Created logical device using manual feature selection" << std::endl; + } + + graphicsQueue = device.getQueue(indices.graphicsFamily.value(), 0); + presentQueue = device.getQueue(indices.presentFamily.value(), 0); + } + + void createSwapChain() { + SwapChainSupportDetails swapChainSupport = 
querySwapChainSupport(physicalDevice); + + vk::SurfaceFormatKHR surfaceFormat = chooseSwapSurfaceFormat(swapChainSupport.formats); + vk::PresentModeKHR presentMode = chooseSwapPresentMode(swapChainSupport.presentModes); + vk::Extent2D extent = chooseSwapExtent(swapChainSupport.capabilities); + + uint32_t imageCount = swapChainSupport.capabilities.minImageCount + 1; + if (swapChainSupport.capabilities.maxImageCount > 0 && imageCount > swapChainSupport.capabilities.maxImageCount) { + imageCount = swapChainSupport.capabilities.maxImageCount; + } + + vk::SwapchainCreateInfoKHR createInfo{ + .surface = *surface, + .minImageCount = imageCount, + .imageFormat = surfaceFormat.format, + .imageColorSpace = surfaceFormat.colorSpace, + .imageExtent = extent, + .imageArrayLayers = 1, + .imageUsage = vk::ImageUsageFlagBits::eColorAttachment + }; + + QueueFamilyIndices indices = findQueueFamilies(physicalDevice); + uint32_t queueFamilyIndices[] = {indices.graphicsFamily.value(), indices.presentFamily.value()}; + + if (indices.graphicsFamily != indices.presentFamily) { + createInfo.imageSharingMode = vk::SharingMode::eConcurrent; + createInfo.queueFamilyIndexCount = 2; + createInfo.pQueueFamilyIndices = queueFamilyIndices; + } else { + createInfo.imageSharingMode = vk::SharingMode::eExclusive; + } + + createInfo.preTransform = swapChainSupport.capabilities.currentTransform; + createInfo.compositeAlpha = vk::CompositeAlphaFlagBitsKHR::eOpaque; + createInfo.presentMode = presentMode; + createInfo.clipped = VK_TRUE; + + swapChain = device.createSwapchainKHR(createInfo); + swapChainImages = swapChain.getImages(); + swapChainImageFormat = surfaceFormat.format; + swapChainExtent = extent; + } + + void createImageViews() { + swapChainImageViews.reserve(swapChainImages.size()); + + for (const auto& image : swapChainImages) { + swapChainImageViews.push_back(createImageView(image, swapChainImageFormat, vk::ImageAspectFlagBits::eColor, 1)); + } + } + + void createRenderPass() { + // This is 
only called if the Best Practices profile is not supported + // or if dynamic rendering is not available + vk::AttachmentDescription colorAttachment{ + .format = swapChainImageFormat, + .samples = msaaSamples, + .loadOp = vk::AttachmentLoadOp::eClear, + .storeOp = vk::AttachmentStoreOp::eStore, + .stencilLoadOp = vk::AttachmentLoadOp::eDontCare, + .stencilStoreOp = vk::AttachmentStoreOp::eDontCare, + .initialLayout = vk::ImageLayout::eUndefined, + .finalLayout = vk::ImageLayout::eColorAttachmentOptimal + }; + + vk::AttachmentDescription depthAttachment{ + .format = findDepthFormat(), + .samples = msaaSamples, + .loadOp = vk::AttachmentLoadOp::eClear, + .storeOp = vk::AttachmentStoreOp::eDontCare, + .stencilLoadOp = vk::AttachmentLoadOp::eDontCare, + .stencilStoreOp = vk::AttachmentStoreOp::eDontCare, + .initialLayout = vk::ImageLayout::eUndefined, + .finalLayout = vk::ImageLayout::eDepthStencilAttachmentOptimal + }; + + vk::AttachmentDescription colorAttachmentResolve{ + .format = swapChainImageFormat, + .samples = vk::SampleCountFlagBits::e1, + .loadOp = vk::AttachmentLoadOp::eDontCare, + .storeOp = vk::AttachmentStoreOp::eStore, + .stencilLoadOp = vk::AttachmentLoadOp::eDontCare, + .stencilStoreOp = vk::AttachmentStoreOp::eDontCare, + .initialLayout = vk::ImageLayout::eUndefined, + .finalLayout = vk::ImageLayout::ePresentSrcKHR + }; + + vk::AttachmentReference colorAttachmentRef{ + .attachment = 0, + .layout = vk::ImageLayout::eColorAttachmentOptimal + }; + + vk::AttachmentReference depthAttachmentRef{ + .attachment = 1, + .layout = vk::ImageLayout::eDepthStencilAttachmentOptimal + }; + + vk::AttachmentReference colorAttachmentResolveRef{ + .attachment = 2, + .layout = vk::ImageLayout::eColorAttachmentOptimal + }; + + vk::SubpassDescription subpass{ + .pipelineBindPoint = vk::PipelineBindPoint::eGraphics, + .colorAttachmentCount = 1, + .pColorAttachments = &colorAttachmentRef, + .pResolveAttachments = &colorAttachmentResolveRef, + .pDepthStencilAttachment = 
&depthAttachmentRef + }; + + vk::SubpassDependency dependency{ + .srcSubpass = VK_SUBPASS_EXTERNAL, + .dstSubpass = 0, + .srcStageMask = vk::PipelineStageFlagBits::eColorAttachmentOutput | vk::PipelineStageFlagBits::eEarlyFragmentTests, + .dstStageMask = vk::PipelineStageFlagBits::eColorAttachmentOutput | vk::PipelineStageFlagBits::eEarlyFragmentTests, + .srcAccessMask = vk::AccessFlagBits::eNone, + .dstAccessMask = vk::AccessFlagBits::eColorAttachmentWrite | vk::AccessFlagBits::eDepthStencilAttachmentWrite + }; + + std::array attachments = {colorAttachment, depthAttachment, colorAttachmentResolve}; + vk::RenderPassCreateInfo renderPassInfo{ + .attachmentCount = static_cast(attachments.size()), + .pAttachments = attachments.data(), + .subpassCount = 1, + .pSubpasses = &subpass, + .dependencyCount = 1, + .pDependencies = &dependency + }; + + renderPass = device.createRenderPass(renderPassInfo); + } + + void createDescriptorSetLayout() { + vk::DescriptorSetLayoutBinding uboLayoutBinding{ + .binding = 0, + .descriptorType = vk::DescriptorType::eUniformBuffer, + .descriptorCount = 1, + .stageFlags = vk::ShaderStageFlagBits::eVertex + }; + + vk::DescriptorSetLayoutBinding samplerLayoutBinding{ + .binding = 1, + .descriptorType = vk::DescriptorType::eCombinedImageSampler, + .descriptorCount = 1, + .stageFlags = vk::ShaderStageFlagBits::eFragment + }; + + std::array bindings = {uboLayoutBinding, samplerLayoutBinding}; + vk::DescriptorSetLayoutCreateInfo layoutInfo{ + .bindingCount = static_cast(bindings.size()), + .pBindings = bindings.data() + }; + + descriptorSetLayout = device.createDescriptorSetLayout(layoutInfo); + } + + void createGraphicsPipeline() { + auto vertShaderCode = readFile("shaders/vert.spv"); + auto fragShaderCode = readFile("shaders/frag.spv"); + + vk::raii::ShaderModule vertShaderModule = createShaderModule(vertShaderCode); + vk::raii::ShaderModule fragShaderModule = createShaderModule(fragShaderCode); + + vk::PipelineShaderStageCreateInfo 
vertShaderStageInfo{ + .stage = vk::ShaderStageFlagBits::eVertex, + .module = *vertShaderModule, + .pName = "main" + }; + + vk::PipelineShaderStageCreateInfo fragShaderStageInfo{ + .stage = vk::ShaderStageFlagBits::eFragment, + .module = *fragShaderModule, + .pName = "main" + }; + + vk::PipelineShaderStageCreateInfo shaderStages[] = {vertShaderStageInfo, fragShaderStageInfo}; + + auto bindingDescription = Vertex::getBindingDescription(); + auto attributeDescriptions = Vertex::getAttributeDescriptions(); + + vk::PipelineVertexInputStateCreateInfo vertexInputInfo{ + .vertexBindingDescriptionCount = 1, + .pVertexBindingDescriptions = &bindingDescription, + .vertexAttributeDescriptionCount = static_cast(attributeDescriptions.size()), + .pVertexAttributeDescriptions = attributeDescriptions.data() + }; + + vk::PipelineInputAssemblyStateCreateInfo inputAssembly{ + .topology = vk::PrimitiveTopology::eTriangleList, + .primitiveRestartEnable = VK_FALSE + }; + + vk::PipelineViewportStateCreateInfo viewportState{ + .viewportCount = 1, + .scissorCount = 1 + }; + + vk::PipelineRasterizationStateCreateInfo rasterizer{ + .depthClampEnable = VK_FALSE, + .rasterizerDiscardEnable = VK_FALSE, + .polygonMode = vk::PolygonMode::eFill, + .cullMode = vk::CullModeFlagBits::eBack, + .frontFace = vk::FrontFace::eCounterClockwise, + .depthBiasEnable = VK_FALSE, + .lineWidth = 1.0f + }; + + vk::PipelineMultisampleStateCreateInfo multisampling{ + .rasterizationSamples = msaaSamples, + .sampleShadingEnable = VK_TRUE, + .minSampleShading = 0.2f + }; + + vk::PipelineDepthStencilStateCreateInfo depthStencil{ + .depthTestEnable = VK_TRUE, + .depthWriteEnable = VK_TRUE, + .depthCompareOp = vk::CompareOp::eLess, + .depthBoundsTestEnable = VK_FALSE, + .stencilTestEnable = VK_FALSE + }; + + vk::PipelineColorBlendAttachmentState colorBlendAttachment{ + .blendEnable = VK_FALSE, + .colorWriteMask = vk::ColorComponentFlagBits::eR | vk::ColorComponentFlagBits::eG | vk::ColorComponentFlagBits::eB | 
vk::ColorComponentFlagBits::eA + }; + + vk::PipelineColorBlendStateCreateInfo colorBlending{ + .logicOpEnable = VK_FALSE, + .logicOp = vk::LogicOp::eCopy, + .attachmentCount = 1, + .pAttachments = &colorBlendAttachment + }; + + std::vector dynamicStates = { + vk::DynamicState::eViewport, + vk::DynamicState::eScissor + }; + + vk::PipelineDynamicStateCreateInfo dynamicState{ + .dynamicStateCount = static_cast(dynamicStates.size()), + .pDynamicStates = dynamicStates.data() + }; + + vk::PipelineLayoutCreateInfo pipelineLayoutInfo{ + .setLayoutCount = 1, + .pSetLayouts = &*descriptorSetLayout + }; + + pipelineLayout = device.createPipelineLayout(pipelineLayoutInfo); + + vk::GraphicsPipelineCreateInfo pipelineInfo{ + .stageCount = 2, + .pStages = shaderStages, + .pVertexInputState = &vertexInputInfo, + .pInputAssemblyState = &inputAssembly, + .pViewportState = &viewportState, + .pRasterizationState = &rasterizer, + .pMultisampleState = &multisampling, + .pDepthStencilState = &depthStencil, + .pColorBlendState = &colorBlending, + .pDynamicState = &dynamicState, + .layout = *pipelineLayout + }; + + // Configure pipeline based on whether we're using the KHR roadmap 2022 profile + if (appInfo.profileSupported) { + // With the KHR roadmap 2022 profile, we can use dynamic rendering + vk::Format colorFormat = swapChainImageFormat; + vk::Format depthFormat = findDepthFormat(); + + vk::PipelineRenderingCreateInfo renderingInfo{ + .colorAttachmentCount = 1, + .pColorAttachmentFormats = &colorFormat, + .depthAttachmentFormat = depthFormat + }; + + pipelineInfo.pNext = &renderingInfo; + pipelineInfo.renderPass = nullptr; + + std::cout << "Creating pipeline with dynamic rendering (KHR roadmap 2022 profile)" << std::endl; + } else { + // Without the profile, use traditional render pass if dynamic rendering is not available + pipelineInfo.pNext = nullptr; + pipelineInfo.renderPass = *renderPass; + pipelineInfo.subpass = 0; + + std::cout << "Creating pipeline with traditional render 
pass (fallback)" << std::endl; + } + + graphicsPipeline = device.createGraphicsPipeline(nullptr, pipelineInfo); + } + + void createFramebuffers() { + // This is only called if the Best Practices profile is not supported + // or if dynamic rendering is not available + swapChainFramebuffers.reserve(swapChainImageViews.size()); + + for (size_t i = 0; i < swapChainImageViews.size(); i++) { + std::array attachments = { + *colorImageView, + *depthImageView, + *swapChainImageViews[i] + }; + + vk::FramebufferCreateInfo framebufferInfo{ + .renderPass = *renderPass, + .attachmentCount = static_cast(attachments.size()), + .pAttachments = attachments.data(), + .width = swapChainExtent.width, + .height = swapChainExtent.height, + .layers = 1 + }; + + swapChainFramebuffers.push_back(device.createFramebuffer(framebufferInfo)); + } + } + + void createCommandPool() { + QueueFamilyIndices queueFamilyIndices = findQueueFamilies(physicalDevice); + + vk::CommandPoolCreateInfo poolInfo{ + .flags = vk::CommandPoolCreateFlagBits::eResetCommandBuffer, + .queueFamilyIndex = queueFamilyIndices.graphicsFamily.value() + }; + + commandPool = device.createCommandPool(poolInfo); + } + + void createColorResources() { + vk::Format colorFormat = swapChainImageFormat; + + createImage(swapChainExtent.width, swapChainExtent.height, 1, msaaSamples, colorFormat, vk::ImageTiling::eOptimal, vk::ImageUsageFlagBits::eTransientAttachment | vk::ImageUsageFlagBits::eColorAttachment, vk::MemoryPropertyFlagBits::eDeviceLocal, colorImage, colorImageMemory); + colorImageView = createImageView(*colorImage, colorFormat, vk::ImageAspectFlagBits::eColor, 1); + } + + void createDepthResources() { + vk::Format depthFormat = findDepthFormat(); + + createImage(swapChainExtent.width, swapChainExtent.height, 1, msaaSamples, depthFormat, vk::ImageTiling::eOptimal, vk::ImageUsageFlagBits::eDepthStencilAttachment, vk::MemoryPropertyFlagBits::eDeviceLocal, depthImage, depthImageMemory); + depthImageView = 
createImageView(*depthImage, depthFormat, vk::ImageAspectFlagBits::eDepth, 1); + } + + vk::Format findSupportedFormat(const std::vector& candidates, vk::ImageTiling tiling, vk::FormatFeatureFlags features) { + for (vk::Format format : candidates) { + vk::FormatProperties props = physicalDevice.getFormatProperties(format); + + if (tiling == vk::ImageTiling::eLinear && (props.linearTilingFeatures & features) == features) { + return format; + } else if (tiling == vk::ImageTiling::eOptimal && (props.optimalTilingFeatures & features) == features) { + return format; + } + } + + throw std::runtime_error("failed to find supported format!"); + } + + vk::Format findDepthFormat() { + return findSupportedFormat( + {vk::Format::eD32Sfloat, vk::Format::eD32SfloatS8Uint, vk::Format::eD24UnormS8Uint}, + vk::ImageTiling::eOptimal, + vk::FormatFeatureFlagBits::eDepthStencilAttachment + ); + } + + bool hasStencilComponent(vk::Format format) { + return format == vk::Format::eD32SfloatS8Uint || format == vk::Format::eD24UnormS8Uint; + } + + void createTextureImage() { + int texWidth, texHeight, texChannels; + stbi_uc* pixels = stbi_load(TEXTURE_PATH.c_str(), &texWidth, &texHeight, &texChannels, STBI_rgb_alpha); + vk::DeviceSize imageSize = texWidth * texHeight * 4; + uint32_t mipLevels = static_cast(std::floor(std::log2(std::max(texWidth, texHeight)))) + 1; + + if (!pixels) { + throw std::runtime_error("failed to load texture image!"); + } + + vk::raii::Buffer stagingBuffer = nullptr; + vk::raii::DeviceMemory stagingBufferMemory = nullptr; + + createBuffer(imageSize, vk::BufferUsageFlagBits::eTransferSrc, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent, stagingBuffer, stagingBufferMemory); + + void* data = stagingBufferMemory.mapMemory(0, imageSize); + memcpy(data, pixels, static_cast(imageSize)); + stagingBufferMemory.unmapMemory(); + + stbi_image_free(pixels); + + createImage(texWidth, texHeight, mipLevels, vk::SampleCountFlagBits::e1, 
vk::Format::eR8G8B8A8Srgb, vk::ImageTiling::eOptimal, vk::ImageUsageFlagBits::eTransferSrc | vk::ImageUsageFlagBits::eTransferDst | vk::ImageUsageFlagBits::eSampled, vk::MemoryPropertyFlagBits::eDeviceLocal, textureImage, textureImageMemory); + + transitionImageLayout(*textureImage, vk::Format::eR8G8B8A8Srgb, vk::ImageLayout::eUndefined, vk::ImageLayout::eTransferDstOptimal, mipLevels); + copyBufferToImage(*stagingBuffer, *textureImage, static_cast(texWidth), static_cast(texHeight)); + + generateMipmaps(*textureImage, vk::Format::eR8G8B8A8Srgb, texWidth, texHeight, mipLevels); + } + + void generateMipmaps(vk::Image image, vk::Format imageFormat, int32_t texWidth, int32_t texHeight, uint32_t mipLevels) { + vk::FormatProperties formatProperties = physicalDevice.getFormatProperties(imageFormat); + + if (!(formatProperties.optimalTilingFeatures & vk::FormatFeatureFlagBits::eSampledImageFilterLinear)) { + throw std::runtime_error("texture image format does not support linear blitting!"); + } + + vk::raii::CommandBuffer commandBuffer = beginSingleTimeCommands(); + + vk::ImageMemoryBarrier barrier{ + .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .image = image, + .subresourceRange = { + .aspectMask = vk::ImageAspectFlagBits::eColor, + .levelCount = 1, + .baseArrayLayer = 0, + .layerCount = 1, + } + }; + + int32_t mipWidth = texWidth; + int32_t mipHeight = texHeight; + + for (uint32_t i = 1; i < mipLevels; i++) { + barrier.subresourceRange.baseMipLevel = i - 1; + barrier.oldLayout = vk::ImageLayout::eTransferDstOptimal; + barrier.newLayout = vk::ImageLayout::eTransferSrcOptimal; + barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite; + barrier.dstAccessMask = vk::AccessFlagBits::eTransferRead; + + commandBuffer.pipelineBarrier( + vk::PipelineStageFlagBits::eTransfer, + vk::PipelineStageFlagBits::eTransfer, + {}, + std::array{}, + std::array{}, + std::array{barrier}); + + vk::ImageBlit blit{ + .srcSubresource = { + 
.aspectMask = vk::ImageAspectFlagBits::eColor, + .mipLevel = i - 1, + .baseArrayLayer = 0, + .layerCount = 1 + }, + .srcOffsets = std::array{ + vk::Offset3D{0, 0, 0}, + vk::Offset3D{mipWidth, mipHeight, 1} + }, + .dstSubresource = { + .aspectMask = vk::ImageAspectFlagBits::eColor, + .mipLevel = i, + .baseArrayLayer = 0, + .layerCount = 1 + }, + .dstOffsets = std::array{ + vk::Offset3D{0, 0, 0}, + vk::Offset3D{mipWidth > 1 ? mipWidth / 2 : 1, mipHeight > 1 ? mipHeight / 2 : 1, 1} + } + }; + + commandBuffer.blitImage( + image, vk::ImageLayout::eTransferSrcOptimal, + image, vk::ImageLayout::eTransferDstOptimal, + std::array{blit}, + vk::Filter::eLinear); + + barrier.oldLayout = vk::ImageLayout::eTransferSrcOptimal; + barrier.newLayout = vk::ImageLayout::eShaderReadOnlyOptimal; + barrier.srcAccessMask = vk::AccessFlagBits::eTransferRead; + barrier.dstAccessMask = vk::AccessFlagBits::eShaderRead; + + commandBuffer.pipelineBarrier( + vk::PipelineStageFlagBits::eTransfer, + vk::PipelineStageFlagBits::eFragmentShader, + {}, + std::array{}, + std::array{}, + std::array{barrier}); + + if (mipWidth > 1) mipWidth /= 2; + if (mipHeight > 1) mipHeight /= 2; + } + + barrier.subresourceRange.baseMipLevel = mipLevels - 1; + barrier.oldLayout = vk::ImageLayout::eTransferDstOptimal; + barrier.newLayout = vk::ImageLayout::eShaderReadOnlyOptimal; + barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite; + barrier.dstAccessMask = vk::AccessFlagBits::eShaderRead; + + commandBuffer.pipelineBarrier( + vk::PipelineStageFlagBits::eTransfer, + vk::PipelineStageFlagBits::eFragmentShader, + {}, + std::array{}, + std::array{}, + std::array{barrier}); + + endSingleTimeCommands(commandBuffer); + } + + vk::raii::ImageView createImageView(vk::Image image, vk::Format format, vk::ImageAspectFlags aspectFlags, uint32_t mipLevels) { + vk::ImageViewCreateInfo viewInfo{ + .image = image, + .viewType = vk::ImageViewType::e2D, + .format = format, + .subresourceRange = { + .aspectMask = aspectFlags, + 
.baseMipLevel = 0, + .levelCount = mipLevels, + .baseArrayLayer = 0, + .layerCount = 1 + } + }; + + return device.createImageView(viewInfo); + } + + void createTextureImageView() { + textureImageView = createImageView(*textureImage, vk::Format::eR8G8B8A8Srgb, vk::ImageAspectFlagBits::eColor, 1); + } + + void createTextureSampler() { + vk::PhysicalDeviceProperties properties = physicalDevice.getProperties(); + + vk::SamplerCreateInfo samplerInfo{ + .magFilter = vk::Filter::eLinear, + .minFilter = vk::Filter::eLinear, + .mipmapMode = vk::SamplerMipmapMode::eLinear, + .addressModeU = vk::SamplerAddressMode::eRepeat, + .addressModeV = vk::SamplerAddressMode::eRepeat, + .addressModeW = vk::SamplerAddressMode::eRepeat, + .mipLodBias = 0.0f, + .anisotropyEnable = VK_TRUE, + .maxAnisotropy = properties.limits.maxSamplerAnisotropy, + .compareEnable = VK_FALSE, + .compareOp = vk::CompareOp::eAlways, + .minLod = 0.0f, + .maxLod = 0.0f, + .borderColor = vk::BorderColor::eIntOpaqueBlack, + .unnormalizedCoordinates = VK_FALSE + }; + + textureSampler = device.createSampler(samplerInfo); + } + + void createVertexBuffer() { + vk::DeviceSize bufferSize = sizeof(vertices[0]) * vertices.size(); + + vk::raii::Buffer stagingBuffer = nullptr; + vk::raii::DeviceMemory stagingBufferMemory = nullptr; + createBuffer(bufferSize, vk::BufferUsageFlagBits::eTransferSrc, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent, stagingBuffer, stagingBufferMemory); + + void* data = stagingBufferMemory.mapMemory(0, bufferSize); + memcpy(data, vertices.data(), (size_t) bufferSize); + stagingBufferMemory.unmapMemory(); + + createBuffer(bufferSize, vk::BufferUsageFlagBits::eTransferDst | vk::BufferUsageFlagBits::eVertexBuffer, vk::MemoryPropertyFlagBits::eDeviceLocal, vertexBuffer, vertexBufferMemory); + + copyBuffer(*stagingBuffer, *vertexBuffer, bufferSize); + } + + void createIndexBuffer() { + vk::DeviceSize bufferSize = sizeof(indices[0]) * indices.size(); + + 
vk::raii::Buffer stagingBuffer = nullptr; + vk::raii::DeviceMemory stagingBufferMemory = nullptr; + createBuffer(bufferSize, vk::BufferUsageFlagBits::eTransferSrc, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent, stagingBuffer, stagingBufferMemory); + + void* data = stagingBufferMemory.mapMemory(0, bufferSize); + memcpy(data, indices.data(), (size_t) bufferSize); + stagingBufferMemory.unmapMemory(); + + createBuffer(bufferSize, vk::BufferUsageFlagBits::eTransferDst | vk::BufferUsageFlagBits::eIndexBuffer, vk::MemoryPropertyFlagBits::eDeviceLocal, indexBuffer, indexBufferMemory); + + copyBuffer(*stagingBuffer, *indexBuffer, bufferSize); + } + + void createUniformBuffers() { + vk::DeviceSize bufferSize = sizeof(UniformBufferObject); + + // Reserve space but don't resize, as RAII objects can't be default-constructed + uniformBuffers.reserve(MAX_FRAMES_IN_FLIGHT); + uniformBuffersMemory.reserve(MAX_FRAMES_IN_FLIGHT); + uniformBuffersMapped.resize(MAX_FRAMES_IN_FLIGHT); + + for (size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) { + vk::raii::Buffer buffer = nullptr; + vk::raii::DeviceMemory bufferMemory = nullptr; + createBuffer(bufferSize, vk::BufferUsageFlagBits::eUniformBuffer, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent, buffer, bufferMemory); + + uniformBuffers.push_back(std::move(buffer)); + uniformBuffersMemory.push_back(std::move(bufferMemory)); + uniformBuffersMapped[i] = uniformBuffersMemory[i].mapMemory(0, bufferSize); + } + } + + void createDescriptorPool() { + std::array poolSizes{}; + poolSizes[0].type = vk::DescriptorType::eUniformBuffer; + poolSizes[0].descriptorCount = static_cast(MAX_FRAMES_IN_FLIGHT); + poolSizes[1].type = vk::DescriptorType::eCombinedImageSampler; + poolSizes[1].descriptorCount = static_cast(MAX_FRAMES_IN_FLIGHT); + + vk::DescriptorPoolCreateInfo poolInfo{ + .flags = vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet, + .maxSets = 
static_cast(MAX_FRAMES_IN_FLIGHT), + .poolSizeCount = static_cast(poolSizes.size()), + .pPoolSizes = poolSizes.data() + }; + + descriptorPool = device.createDescriptorPool(poolInfo); + } + + void createDescriptorSets() { + std::vector layouts(MAX_FRAMES_IN_FLIGHT, *descriptorSetLayout); + vk::DescriptorSetAllocateInfo allocInfo{ + .descriptorPool = *descriptorPool, + .descriptorSetCount = static_cast(MAX_FRAMES_IN_FLIGHT), + .pSetLayouts = layouts.data() + }; + + descriptorSets = device.allocateDescriptorSets(allocInfo); + + for (size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) { + vk::DescriptorBufferInfo bufferInfo{ + .buffer = *uniformBuffers[i], + .offset = 0, + .range = sizeof(UniformBufferObject) + }; + + vk::DescriptorImageInfo imageInfo{ + .sampler = *textureSampler, + .imageView = *textureImageView, + .imageLayout = vk::ImageLayout::eShaderReadOnlyOptimal + }; + + std::array descriptorWrites{}; + + descriptorWrites[0].dstSet = *descriptorSets[i]; + descriptorWrites[0].dstBinding = 0; + descriptorWrites[0].dstArrayElement = 0; + descriptorWrites[0].descriptorType = vk::DescriptorType::eUniformBuffer; + descriptorWrites[0].descriptorCount = 1; + descriptorWrites[0].pBufferInfo = &bufferInfo; + + descriptorWrites[1].dstSet = *descriptorSets[i]; + descriptorWrites[1].dstBinding = 1; + descriptorWrites[1].dstArrayElement = 0; + descriptorWrites[1].descriptorType = vk::DescriptorType::eCombinedImageSampler; + descriptorWrites[1].descriptorCount = 1; + descriptorWrites[1].pImageInfo = &imageInfo; + + device.updateDescriptorSets(descriptorWrites, nullptr); + } + } + + void createBuffer(vk::DeviceSize size, vk::BufferUsageFlags usage, vk::MemoryPropertyFlags properties, vk::raii::Buffer& buffer, vk::raii::DeviceMemory& bufferMemory) { + vk::BufferCreateInfo bufferInfo{ + .size = size, + .usage = usage, + .sharingMode = vk::SharingMode::eExclusive + }; + + buffer = device.createBuffer(bufferInfo); + + vk::MemoryRequirements memRequirements = 
buffer.getMemoryRequirements(); + + vk::MemoryAllocateInfo allocInfo{ + .allocationSize = memRequirements.size, + .memoryTypeIndex = findMemoryType(memRequirements.memoryTypeBits, properties) + }; + + bufferMemory = device.allocateMemory(allocInfo); + buffer.bindMemory(*bufferMemory, 0); + } + + void copyBuffer(vk::Buffer srcBuffer, vk::Buffer dstBuffer, vk::DeviceSize size) { + vk::raii::CommandBuffer commandBuffer = beginSingleTimeCommands(); + + vk::BufferCopy copyRegion{ + .size = size + }; + commandBuffer.copyBuffer(srcBuffer, dstBuffer, copyRegion); + + endSingleTimeCommands(commandBuffer); + } + + void copyBufferToImage(vk::Buffer buffer, vk::Image image, uint32_t width, uint32_t height) { + vk::raii::CommandBuffer commandBuffer = beginSingleTimeCommands(); + + vk::BufferImageCopy region{ + .bufferOffset = 0, + .bufferRowLength = 0, + .bufferImageHeight = 0, + .imageSubresource = { + .aspectMask = vk::ImageAspectFlagBits::eColor, + .mipLevel = 0, + .baseArrayLayer = 0, + .layerCount = 1 + }, + .imageOffset = {0, 0, 0}, + .imageExtent = { + width, + height, + 1 + } + }; + + commandBuffer.copyBufferToImage(buffer, image, vk::ImageLayout::eTransferDstOptimal, region); + + endSingleTimeCommands(commandBuffer); + } + + void createImage(uint32_t width, uint32_t height, uint32_t mipLevels, vk::SampleCountFlagBits numSamples, vk::Format format, vk::ImageTiling tiling, vk::ImageUsageFlags usage, vk::MemoryPropertyFlags properties, vk::raii::Image& image, vk::raii::DeviceMemory& imageMemory) { + vk::ImageCreateInfo imageInfo{ + .imageType = vk::ImageType::e2D, + .format = format, + .extent = { + .width = width, + .height = height, + .depth = 1 + }, + .mipLevels = mipLevels, + .arrayLayers = 1, + .samples = numSamples, + .tiling = tiling, + .usage = usage, + .sharingMode = vk::SharingMode::eExclusive, + .initialLayout = vk::ImageLayout::eUndefined + }; + + image = device.createImage(imageInfo); + + vk::MemoryRequirements memRequirements = image.getMemoryRequirements(); 
+ + vk::MemoryAllocateInfo allocInfo{ + .allocationSize = memRequirements.size, + .memoryTypeIndex = findMemoryType(memRequirements.memoryTypeBits, properties) + }; + + imageMemory = device.allocateMemory(allocInfo); + image.bindMemory(*imageMemory, 0); + } + + void transitionImageLayout(vk::Image image, vk::Format format, vk::ImageLayout oldLayout, vk::ImageLayout newLayout, uint32_t mipLevels) { + vk::raii::CommandBuffer commandBuffer = beginSingleTimeCommands(); + + vk::ImageMemoryBarrier barrier{ + .oldLayout = oldLayout, + .newLayout = newLayout, + .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .image = image, + .subresourceRange = { + .baseMipLevel = 0, + .levelCount = mipLevels, + .baseArrayLayer = 0, + .layerCount = 1 + } + }; + + if (newLayout == vk::ImageLayout::eDepthStencilAttachmentOptimal) { + barrier.subresourceRange.aspectMask = vk::ImageAspectFlagBits::eDepth; + + if (hasStencilComponent(format)) { + barrier.subresourceRange.aspectMask |= vk::ImageAspectFlagBits::eStencil; + } + } else { + barrier.subresourceRange.aspectMask = vk::ImageAspectFlagBits::eColor; + } + + vk::PipelineStageFlags sourceStage; + vk::PipelineStageFlags destinationStage; + + if (oldLayout == vk::ImageLayout::eUndefined && newLayout == vk::ImageLayout::eTransferDstOptimal) { + barrier.srcAccessMask = vk::AccessFlagBits::eNone; + barrier.dstAccessMask = vk::AccessFlagBits::eTransferWrite; + + sourceStage = vk::PipelineStageFlagBits::eTopOfPipe; + destinationStage = vk::PipelineStageFlagBits::eTransfer; + } else if (oldLayout == vk::ImageLayout::eTransferDstOptimal && newLayout == vk::ImageLayout::eShaderReadOnlyOptimal) { + barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite; + barrier.dstAccessMask = vk::AccessFlagBits::eShaderRead; + + sourceStage = vk::PipelineStageFlagBits::eTransfer; + destinationStage = vk::PipelineStageFlagBits::eFragmentShader; + } else if (oldLayout == vk::ImageLayout::eUndefined && newLayout 
== vk::ImageLayout::eDepthStencilAttachmentOptimal) { + barrier.srcAccessMask = vk::AccessFlagBits::eNone; + barrier.dstAccessMask = vk::AccessFlagBits::eDepthStencilAttachmentRead | vk::AccessFlagBits::eDepthStencilAttachmentWrite; + + sourceStage = vk::PipelineStageFlagBits::eTopOfPipe; + destinationStage = vk::PipelineStageFlagBits::eEarlyFragmentTests; + } else { + throw std::invalid_argument("unsupported layout transition!"); + } + + commandBuffer.pipelineBarrier( + sourceStage, + destinationStage, + {}, + std::array{}, + std::array{}, + std::array{barrier} + ); + + endSingleTimeCommands(commandBuffer); + } + + vk::raii::CommandBuffer beginSingleTimeCommands() { + vk::CommandBufferAllocateInfo allocInfo{ + .commandPool = *commandPool, + .level = vk::CommandBufferLevel::ePrimary, + .commandBufferCount = 1 + }; + + vk::raii::CommandBuffer commandBuffer = std::move(device.allocateCommandBuffers(allocInfo).front()); + + vk::CommandBufferBeginInfo beginInfo{ + .flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit + }; + + commandBuffer.begin(beginInfo); + + return commandBuffer; + } + + void endSingleTimeCommands(vk::raii::CommandBuffer& commandBuffer) { + commandBuffer.end(); + + vk::SubmitInfo submitInfo{ + .commandBufferCount = 1, + .pCommandBuffers = &*commandBuffer + }; + + graphicsQueue.submit(submitInfo, nullptr); + graphicsQueue.waitIdle(); + } + + void loadModel() { + tinyobj::attrib_t attrib; + std::vector shapes; + std::vector materials; + std::string warn, err; + + if (!tinyobj::LoadObj(&attrib, &shapes, &materials, &warn, &err, MODEL_PATH.c_str())) { + throw std::runtime_error(warn + err); + } + + std::unordered_map uniqueVertices{}; + + for (const auto& shape : shapes) { + for (const auto& index : shape.mesh.indices) { + Vertex vertex{}; + + vertex.pos = { + attrib.vertices[3 * index.vertex_index + 0], + attrib.vertices[3 * index.vertex_index + 1], + attrib.vertices[3 * index.vertex_index + 2] + }; + + vertex.texCoord = { + attrib.texcoords[2 * 
index.texcoord_index + 0], + 1.0f - attrib.texcoords[2 * index.texcoord_index + 1] + }; + + vertex.color = {1.0f, 1.0f, 1.0f}; + + if (uniqueVertices.count(vertex) == 0) { + uniqueVertices[vertex] = static_cast(vertices.size()); + vertices.push_back(vertex); + } + + indices.push_back(uniqueVertices[vertex]); + } + } + } + + void createCommandBuffers() { + commandBuffers.reserve(MAX_FRAMES_IN_FLIGHT); + + vk::CommandBufferAllocateInfo allocInfo{ + .commandPool = *commandPool, + .level = vk::CommandBufferLevel::ePrimary, + .commandBufferCount = static_cast(MAX_FRAMES_IN_FLIGHT) + }; + + commandBuffers = device.allocateCommandBuffers(allocInfo); + } + + void recordCommandBuffer(uint32_t imageIndex) { + commandBuffers[currentFrame].begin({}); + + // Transition the swapchain image to the correct layout for rendering + vk::ImageMemoryBarrier imageBarrier{ + .srcAccessMask = vk::AccessFlagBits::eNone, + .dstAccessMask = vk::AccessFlagBits::eColorAttachmentWrite, + .oldLayout = vk::ImageLayout::eUndefined, + .newLayout = vk::ImageLayout::eColorAttachmentOptimal, + .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .image = swapChainImages[imageIndex], + .subresourceRange = { + .aspectMask = vk::ImageAspectFlagBits::eColor, + .baseMipLevel = 0, + .levelCount = 1, + .baseArrayLayer = 0, + .layerCount = 1 + } + }; + + commandBuffers[currentFrame].pipelineBarrier( + vk::PipelineStageFlagBits::eTopOfPipe, + vk::PipelineStageFlagBits::eColorAttachmentOutput, + vk::DependencyFlagBits::eByRegion, + std::array{}, + std::array{}, + std::array{imageBarrier} + ); + + // Clear values for color and depth + vk::ClearValue clearColor{}; + clearColor.color = vk::ClearColorValue(std::array{0.0f, 0.0f, 0.0f, 1.0f}); + + vk::ClearValue clearDepth{}; + clearDepth.depthStencil = vk::ClearDepthStencilValue{1.0f, 0}; + + std::array clearValues = {clearColor, clearDepth}; + + // Use different rendering approach based on profile support + if 
(appInfo.profileSupported) { + // Use dynamic rendering with the KHR roadmap 2022 profile + vk::RenderingAttachmentInfo colorAttachment{ + .imageView = *colorImageView, + .imageLayout = vk::ImageLayout::eColorAttachmentOptimal, + .resolveMode = vk::ResolveModeFlagBits::eAverage, + .resolveImageView = *swapChainImageViews[imageIndex], + .resolveImageLayout = vk::ImageLayout::eColorAttachmentOptimal, + .loadOp = vk::AttachmentLoadOp::eClear, + .storeOp = vk::AttachmentStoreOp::eStore, + .clearValue = clearColor + }; + + vk::RenderingAttachmentInfo depthAttachment{ + .imageView = *depthImageView, + .imageLayout = vk::ImageLayout::eDepthStencilAttachmentOptimal, + .loadOp = vk::AttachmentLoadOp::eClear, + .storeOp = vk::AttachmentStoreOp::eDontCare, + .clearValue = clearDepth + }; + + vk::RenderingInfo renderingInfo{ + .renderArea = {{0, 0}, swapChainExtent}, + .layerCount = 1, + .colorAttachmentCount = 1, + .pColorAttachments = &colorAttachment, + .pDepthAttachment = &depthAttachment + }; + + commandBuffers[currentFrame].beginRendering(renderingInfo); + + } else { + // Use traditional render pass if not using the KHR roadmap 2022 profile + vk::RenderPassBeginInfo renderPassInfo{ + .renderPass = *renderPass, + .framebuffer = *swapChainFramebuffers[imageIndex], + .renderArea = {{0, 0}, swapChainExtent}, + .clearValueCount = static_cast(clearValues.size()), + .pClearValues = clearValues.data() + }; + + commandBuffers[currentFrame].beginRenderPass(renderPassInfo, vk::SubpassContents::eInline); + + } + + commandBuffers[currentFrame].bindPipeline(vk::PipelineBindPoint::eGraphics, *graphicsPipeline); + + vk::Viewport viewport{ + .x = 0.0f, + .y = 0.0f, + .width = static_cast(swapChainExtent.width), + .height = static_cast(swapChainExtent.height), + .minDepth = 0.0f, + .maxDepth = 1.0f + }; + commandBuffers[currentFrame].setViewport(0, viewport); + + vk::Rect2D scissor{ + .offset = {0, 0}, + .extent = swapChainExtent + }; + commandBuffers[currentFrame].setScissor(0, scissor); 
+ + commandBuffers[currentFrame].bindVertexBuffers(0, *vertexBuffer, {0}); + commandBuffers[currentFrame].bindIndexBuffer(*indexBuffer, 0, vk::IndexType::eUint32); + commandBuffers[currentFrame].bindDescriptorSets(vk::PipelineBindPoint::eGraphics, *pipelineLayout, 0, *descriptorSets[currentFrame], nullptr); + commandBuffers[currentFrame].drawIndexed(static_cast(indices.size()), 1, 0, 0, 0); + + if (appInfo.profileSupported) { + commandBuffers[currentFrame].endRendering(); + + // Transition the swapchain image to the correct layout for presentation + vk::ImageMemoryBarrier barrier{ + .srcAccessMask = vk::AccessFlagBits::eColorAttachmentWrite, + .dstAccessMask = vk::AccessFlagBits::eNone, + .oldLayout = vk::ImageLayout::eColorAttachmentOptimal, + .newLayout = vk::ImageLayout::ePresentSrcKHR, + .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .image = swapChainImages[imageIndex], + .subresourceRange = { + .aspectMask = vk::ImageAspectFlagBits::eColor, + .baseMipLevel = 0, + .levelCount = 1, + .baseArrayLayer = 0, + .layerCount = 1 + } + }; + + commandBuffers[currentFrame].pipelineBarrier( + vk::PipelineStageFlagBits::eColorAttachmentOutput, + vk::PipelineStageFlagBits::eBottomOfPipe, + vk::DependencyFlagBits::eByRegion, + std::array{}, + std::array{}, + std::array{barrier} + ); + } else { + commandBuffers[currentFrame].endRenderPass(); + // Traditional render pass already transitions the image to the correct layout + } + + commandBuffers[currentFrame].end(); + } + + void createSyncObjects() { + imageAvailableSemaphores.reserve(MAX_FRAMES_IN_FLIGHT); + renderFinishedSemaphores.reserve(MAX_FRAMES_IN_FLIGHT); + inFlightFences.reserve(MAX_FRAMES_IN_FLIGHT); + presentCompleteSemaphore.reserve(swapChainImages.size()); + + vk::SemaphoreCreateInfo semaphoreInfo{}; + vk::FenceCreateInfo fenceInfo{ + .flags = vk::FenceCreateFlagBits::eSignaled + }; + + for (size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) { + 
imageAvailableSemaphores.push_back(device.createSemaphore(semaphoreInfo)); + renderFinishedSemaphores.push_back(device.createSemaphore(semaphoreInfo)); + inFlightFences.push_back(device.createFence(fenceInfo)); + } + + for (size_t i = 0; i < swapChainImages.size(); i++) { + presentCompleteSemaphore.push_back(device.createSemaphore(semaphoreInfo)); + } + } + + void updateUniformBuffer(uint32_t currentImage) { + static auto startTime = std::chrono::high_resolution_clock::now(); + + auto currentTime = std::chrono::high_resolution_clock::now(); + float time = std::chrono::duration(currentTime - startTime).count(); + + UniformBufferObject ubo{}; + ubo.model = glm::rotate(glm::mat4(1.0f), time * glm::radians(90.0f), glm::vec3(0.0f, 0.0f, 1.0f)); + ubo.view = glm::lookAt(glm::vec3(2.0f, 2.0f, 2.0f), glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 0.0f, 1.0f)); + ubo.proj = glm::perspective(glm::radians(45.0f), swapChainExtent.width / (float) swapChainExtent.height, 0.1f, 10.0f); + ubo.proj[1][1] *= -1; + + memcpy(uniformBuffersMapped[currentImage], &ubo, sizeof(ubo)); + } + + void drawFrame() { + static_cast(device.waitForFences({*inFlightFences[currentFrame]}, VK_TRUE, FenceTimeout)); + + uint32_t imageIndex; + try { + auto [result, idx] = swapChain.acquireNextImage(FenceTimeout, *imageAvailableSemaphores[currentFrame]); + imageIndex = idx; + } catch (vk::OutOfDateKHRError&) { + recreateSwapChain(); + return; + } + + updateUniformBuffer(currentFrame); + + device.resetFences({*inFlightFences[currentFrame]}); + + commandBuffers[currentFrame].reset(); + recordCommandBuffer(imageIndex); + + vk::PipelineStageFlags waitDestinationStageMask(vk::PipelineStageFlagBits::eColorAttachmentOutput); + const vk::SubmitInfo submitInfo{ + .waitSemaphoreCount = 1, + .pWaitSemaphores = &*imageAvailableSemaphores[currentFrame], + .pWaitDstStageMask = &waitDestinationStageMask, + .commandBufferCount = 1, + .pCommandBuffers = &*commandBuffers[currentFrame], + .signalSemaphoreCount = 1, + 
.pSignalSemaphores = &*presentCompleteSemaphore[imageIndex] + }; + graphicsQueue.submit(submitInfo, *inFlightFences[currentFrame]); + + const vk::PresentInfoKHR presentInfoKHR{ + .waitSemaphoreCount = 1, + .pWaitSemaphores = &*presentCompleteSemaphore[imageIndex], + .swapchainCount = 1, + .pSwapchains = &*swapChain, + .pImageIndices = &imageIndex + }; + + vk::Result result; + try { + result = presentQueue.presentKHR(presentInfoKHR); + } catch (vk::OutOfDateKHRError&) { + result = vk::Result::eErrorOutOfDateKHR; + } + + if (result == vk::Result::eErrorOutOfDateKHR || result == vk::Result::eSuboptimalKHR || framebufferResized) { + framebufferResized = false; + recreateSwapChain(); + } else if (result != vk::Result::eSuccess) { + throw std::runtime_error("failed to present swap chain image!"); + } + + currentFrame = (currentFrame + 1) % MAX_FRAMES_IN_FLIGHT; + } + + vk::SampleCountFlagBits getMaxUsableSampleCount() { + vk::PhysicalDeviceProperties physicalDeviceProperties = physicalDevice.getProperties(); + + vk::SampleCountFlags counts = physicalDeviceProperties.limits.framebufferColorSampleCounts & physicalDeviceProperties.limits.framebufferDepthSampleCounts; + if (counts & vk::SampleCountFlagBits::e64) { return vk::SampleCountFlagBits::e64; } + if (counts & vk::SampleCountFlagBits::e32) { return vk::SampleCountFlagBits::e32; } + if (counts & vk::SampleCountFlagBits::e16) { return vk::SampleCountFlagBits::e16; } + if (counts & vk::SampleCountFlagBits::e8) { return vk::SampleCountFlagBits::e8; } + if (counts & vk::SampleCountFlagBits::e4) { return vk::SampleCountFlagBits::e4; } + if (counts & vk::SampleCountFlagBits::e2) { return vk::SampleCountFlagBits::e2; } + + return vk::SampleCountFlagBits::e1; + } + + uint32_t findMemoryType(uint32_t typeFilter, vk::MemoryPropertyFlags properties) { + vk::PhysicalDeviceMemoryProperties memProperties = physicalDevice.getMemoryProperties(); + + for (uint32_t i = 0; i < memProperties.memoryTypeCount; i++) { + if ((typeFilter & (1 
<< i)) && (memProperties.memoryTypes[i].propertyFlags & properties) == properties) { + return i; + } + } + + throw std::runtime_error("failed to find suitable memory type!"); + } + + vk::SurfaceFormatKHR chooseSwapSurfaceFormat(const std::vector& availableFormats) { + return (availableFormats[0].format == vk::Format::eUndefined) + ? vk::SurfaceFormatKHR{vk::Format::eB8G8R8A8Unorm, availableFormats[0].colorSpace} + : availableFormats[0]; + } + + vk::PresentModeKHR chooseSwapPresentMode(const std::vector& availablePresentModes) { + return std::ranges::any_of(availablePresentModes, + [](const vk::PresentModeKHR value) { return vk::PresentModeKHR::eMailbox == value; } ) ? vk::PresentModeKHR::eMailbox : vk::PresentModeKHR::eFifo; + } + + vk::Extent2D chooseSwapExtent(const vk::SurfaceCapabilitiesKHR& capabilities) { + if (capabilities.currentExtent.width != std::numeric_limits::max()) { + return capabilities.currentExtent; + } + int width, height; + glfwGetFramebufferSize(window, &width, &height); + + return { + std::clamp(width, capabilities.minImageExtent.width, capabilities.maxImageExtent.width), + std::clamp(height, capabilities.minImageExtent.height, capabilities.maxImageExtent.height) + }; + } + + std::vector getRequiredExtensions() { + // Get the required extensions from GLFW + uint32_t glfwExtensionCount = 0; + auto glfwExtensions = glfwGetRequiredInstanceExtensions(&glfwExtensionCount); + std::vector extensions(glfwExtensions, glfwExtensions + glfwExtensionCount); + + // Check if the debug utils extension is available + std::vector props = context.enumerateInstanceExtensionProperties(); + bool debugUtilsAvailable = std::ranges::any_of(props, + [](vk::ExtensionProperties const & ep) { + return strcmp(ep.extensionName, vk::EXTDebugUtilsExtensionName) == 0; + }); + + // Always include the debug utils extension if available + // This allows validation layers to be enabled via vulkanconfig + if (debugUtilsAvailable) { + 
extensions.push_back(vk::EXTDebugUtilsExtensionName); + } else { + std::cout << "VK_EXT_debug_utils extension not available. Validation layers may not work." << std::endl; + } + + return extensions; + } + + static VKAPI_ATTR vk::Bool32 VKAPI_CALL debugCallback(vk::DebugUtilsMessageSeverityFlagBitsEXT severity, vk::DebugUtilsMessageTypeFlagsEXT type, const vk::DebugUtilsMessengerCallbackDataEXT* pCallbackData, void*) { + if (severity == vk::DebugUtilsMessageSeverityFlagBitsEXT::eError || severity == vk::DebugUtilsMessageSeverityFlagBitsEXT::eWarning) { + std::cerr << "validation layer: type " << to_string(type) << " msg: " << pCallbackData->pMessage << std::endl; + } + + return vk::False; + } + + vk::raii::ShaderModule createShaderModule(const std::vector& code) { + vk::ShaderModuleCreateInfo createInfo{ .codeSize = code.size(), .pCode = reinterpret_cast(code.data()) }; + vk::raii::ShaderModule shaderModule{ device, createInfo }; + + return shaderModule; + } + + static std::vector readFile(const std::string& filename) { + std::ifstream file(filename, std::ios::ate | std::ios::binary); + + if (!file.is_open()) { + throw std::runtime_error("failed to open file!"); + } + std::vector buffer(file.tellg()); + file.seekg(0, std::ios::beg); + file.read(buffer.data(), static_cast(buffer.size())); + file.close(); + + return buffer; + } + + SwapChainSupportDetails querySwapChainSupport(vk::raii::PhysicalDevice device) { + SwapChainSupportDetails details; + details.capabilities = device.getSurfaceCapabilitiesKHR(*surface); + details.formats = device.getSurfaceFormatsKHR(*surface); + details.presentModes = device.getSurfacePresentModesKHR(*surface); + + return details; + } + + QueueFamilyIndices findQueueFamilies(vk::raii::PhysicalDevice device) { + QueueFamilyIndices indices; + + std::vector queueFamilies = device.getQueueFamilyProperties(); + + uint32_t i = 0; + for (const auto& queueFamily : queueFamilies) { + if (queueFamily.queueFlags & vk::QueueFlagBits::eGraphics) { + 
indices.graphicsFamily = i; + } + + vk::Bool32 presentSupport = device.getSurfaceSupportKHR(i, *surface); + + if (presentSupport) { + indices.presentFamily = i; + } + + if (indices.isComplete()) { + break; + } + + i++; + } + + return indices; + } +}; + +int main() { + try { + HelloTriangleApplication app; + app.run(); + } catch (const std::exception& e) { + std::cerr << e.what() << std::endl; + return EXIT_FAILURE; + } + + return EXIT_SUCCESS; +} diff --git a/attachments/34_android.cpp b/attachments/34_android.cpp new file mode 100644 index 00000000..3e74c805 --- /dev/null +++ b/attachments/34_android.cpp @@ -0,0 +1,1763 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +import vulkan_hpp; +#include +#if defined(__ANDROID__) +#include +#include +#endif +#include + +// Platform detection +#if defined(__ANDROID__) + #define PLATFORM_ANDROID 1 +#else + #define PLATFORM_DESKTOP 1 +#endif + + +#define STB_IMAGE_IMPLEMENTATION +#include + +#define TINYOBJLOADER_IMPLEMENTATION +#include + +// Platform-specific includes +#if PLATFORM_ANDROID + // Android-specific includes + #include + #include + #include + #include + + // Declare and implement app_dummy function from native_app_glue + extern "C" void app_dummy() { + // This is a dummy function that does nothing + // It's used to prevent the linker from stripping out the native_app_glue code + } + + // Define AAssetManager type for Android + typedef AAssetManager AssetManagerType; + + // Define logging macros for Android + #define LOGI(...) ((void)__android_log_print(ANDROID_LOG_INFO, "VulkanTutorial", __VA_ARGS__)) + #define LOGW(...) ((void)__android_log_print(ANDROID_LOG_WARN, "VulkanTutorial", __VA_ARGS__)) + #define LOGE(...) 
((void)__android_log_print(ANDROID_LOG_ERROR, "VulkanTutorial", __VA_ARGS__)) + #define LOG_INFO(msg) LOGI("%s", msg) + #define LOG_ERROR(msg) LOGE("%s", msg) +#else + // Define AAssetManager type for non-Android platforms + typedef void AssetManagerType; + // Desktop-specific includes + #define GLFW_INCLUDE_VULKAN + #include + + // Define logging macros for Desktop + #define LOGI(...) printf(__VA_ARGS__); printf("\n") + #define LOGW(...) printf(__VA_ARGS__); printf("\n") + #define LOGE(...) fprintf(stderr, __VA_ARGS__); fprintf(stderr, "\n") + #define LOG_INFO(msg) std::cout << msg << std::endl + #define LOG_ERROR(msg) std::cerr << msg << std::endl +#endif + +#define GLM_FORCE_RADIANS +#define GLM_FORCE_DEPTH_ZERO_TO_ONE +#define GLM_ENABLE_EXPERIMENTAL +#define GLM_FORCE_CXX11 +#include +#include +#include + +constexpr uint32_t WIDTH = 800; +constexpr uint32_t HEIGHT = 600; +constexpr uint64_t FenceTimeout = 100000000; +const std::string MODEL_PATH = "models/viking_room.obj"; +const std::string TEXTURE_PATH = "textures/viking_room.png"; +constexpr int MAX_FRAMES_IN_FLIGHT = 2; + +#if PLATFORM_ANDROID +// Define VpProfileProperties structure if not already defined +#ifndef VP_PROFILE_PROPERTIES_DEFINED +#define VP_PROFILE_PROPERTIES_DEFINED +struct VpProfileProperties { + char name[256]; + uint32_t specVersion; +}; +#endif + +// Define Vulkan Profile constants +#ifndef VP_KHR_ROADMAP_2022_NAME +#define VP_KHR_ROADMAP_2022_NAME "VP_KHR_roadmap_2022" +#endif + +#ifndef VP_KHR_ROADMAP_2022_SPEC_VERSION +#define VP_KHR_ROADMAP_2022_SPEC_VERSION 1 +#endif +#endif + +// Application info structure to store profile support flags +struct AppInfo { + bool profileSupported = false; + VpProfileProperties profile; +}; + +struct Vertex { + glm::vec3 pos; + glm::vec3 color; + glm::vec2 texCoord; + + static vk::VertexInputBindingDescription getBindingDescription() { + return { 0, sizeof(Vertex), vk::VertexInputRate::eVertex }; + } + + static std::array getAttributeDescriptions() 
{ + return { + vk::VertexInputAttributeDescription( 0, 0, vk::Format::eR32G32B32Sfloat, offsetof(Vertex, pos) ), + vk::VertexInputAttributeDescription( 1, 0, vk::Format::eR32G32B32Sfloat, offsetof(Vertex, color) ), + vk::VertexInputAttributeDescription( 2, 0, vk::Format::eR32G32Sfloat, offsetof(Vertex, texCoord) ) + }; + } + + bool operator==(const Vertex& other) const { + return pos == other.pos && color == other.color && texCoord == other.texCoord; + } +}; + +template<> struct std::hash { + size_t operator()(Vertex const& vertex) const noexcept { + return ((hash()(vertex.pos) ^ (hash()(vertex.color) << 1)) >> 1) ^ (hash()(vertex.texCoord) << 1); + } +}; + +struct UniformBufferObject { + alignas(16) glm::mat4 model; + alignas(16) glm::mat4 view; + alignas(16) glm::mat4 proj; +}; + +// Cross-platform file reading function +std::vector readFile(const std::string& filename, std::optional assetManager = std::nullopt) { +#if PLATFORM_ANDROID + // On Android, use asset manager if provided + if (assetManager.has_value() && *assetManager != nullptr) { + // Open the asset + AAsset* asset = AAssetManager_open(*assetManager, filename.c_str(), AASSET_MODE_BUFFER); + if (!asset) { + LOGE("Failed to open asset: %s", filename.c_str()); + throw std::runtime_error("Failed to open file: " + filename); + } + + // Get the file size + off_t fileSize = AAsset_getLength(asset); + std::vector buffer(fileSize); + + // Read the file data + AAsset_read(asset, buffer.data(), fileSize); + + // Close the asset + AAsset_close(asset); + + return buffer; + } +#endif + + // Desktop version or Android fallback to filesystem + std::ifstream file(filename, std::ios::ate | std::ios::binary); + + if (!file.is_open()) { + throw std::runtime_error("Failed to open file: " + filename); + } + + size_t fileSize = static_cast(file.tellg()); + std::vector buffer(fileSize); + + file.seekg(0); + file.read(buffer.data(), fileSize); + file.close(); + + return buffer; +} + +// Cross-platform application class 
+class HelloTriangleApplication { +public: +#if PLATFORM_DESKTOP + // Desktop constructor + HelloTriangleApplication() { + // No Android-specific initialization needed + } +#else + // Android constructor + HelloTriangleApplication(android_app* app) : androidApp(app) { + androidApp->userData = this; + androidApp->onAppCmd = handleAppCommand; + // Note: onInputEvent is no longer a member of android_app in the current NDK version + // Input events are now handled differently + + // Get the asset manager + assetManager = androidApp->activity->assetManager; + } +#endif + + void run() { +#if PLATFORM_DESKTOP + // Desktop main loop + initWindow(); + initVulkan(); + mainLoop(); + cleanup(); +#else + // Android main loop is handled by Android + while (!initialized) { + // Wait for app to initialize + int events; + android_poll_source* source; + if (ALooper_pollOnce(0, nullptr, &events, (void**)&source) >= 0) { + if (source != nullptr) { + source->process(androidApp, source); + } + } + } +#endif + } + +#if PLATFORM_DESKTOP + // Initialize window (Desktop only) + void initWindow() { + glfwInit(); + glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API); + glfwWindowHint(GLFW_RESIZABLE, GLFW_TRUE); + window = glfwCreateWindow(WIDTH, HEIGHT, "Vulkan Cross-Platform", nullptr, nullptr); + glfwSetWindowUserPointer(window, this); + glfwSetFramebufferSizeCallback(window, framebufferResizeCallback); + + LOG_INFO("Desktop window created"); + } + + // Desktop main loop + void mainLoop() { + while (!glfwWindowShouldClose(window)) { + glfwPollEvents(); + drawFrame(); + } + + device.waitIdle(); + } + + // Desktop framebuffer resize callback + static void framebufferResizeCallback(GLFWwindow* window, int, int) { + auto app = reinterpret_cast(glfwGetWindowUserPointer(window)); + app->framebufferResized = true; + } +#endif + + void cleanup() { + if (initialized) { + // Wait for device to finish operations + if (*device) { + device.waitIdle(); + } + + // Cleanup resources + cleanupSwapChain(); + + 
initialized = false; + } + } + +private: +#if PLATFORM_ANDROID + // Android-specific members + android_app* androidApp = nullptr; + AssetManagerType* assetManager = nullptr; +#else + // Desktop-specific members + GLFWwindow* window = nullptr; +#endif + bool initialized = false; + bool framebufferResized = false; + + // Vulkan objects + vk::raii::Context context; + vk::raii::Instance instance = nullptr; + vk::raii::SurfaceKHR surface = nullptr; + vk::raii::PhysicalDevice physicalDevice = nullptr; + vk::raii::Device device = nullptr; + vk::raii::Queue graphicsQueue = nullptr; + vk::raii::Queue presentQueue = nullptr; + vk::raii::SwapchainKHR swapChain = nullptr; + std::vector swapChainImages; + vk::Format swapChainImageFormat = {}; + vk::Extent2D swapChainExtent; + std::vector swapChainImageViews; + vk::raii::RenderPass renderPass = nullptr; + vk::raii::DescriptorSetLayout descriptorSetLayout = nullptr; + vk::raii::PipelineLayout pipelineLayout = nullptr; + vk::raii::Pipeline graphicsPipeline = nullptr; + std::vector swapChainFramebuffers; + vk::raii::CommandPool commandPool = nullptr; + std::vector commandBuffers; + vk::raii::Buffer vertexBuffer = nullptr; + vk::raii::DeviceMemory vertexBufferMemory = nullptr; + vk::raii::Buffer indexBuffer = nullptr; + vk::raii::DeviceMemory indexBufferMemory = nullptr; + vk::raii::Image textureImage = nullptr; + vk::raii::DeviceMemory textureImageMemory = nullptr; + vk::raii::ImageView textureImageView = nullptr; + vk::raii::Sampler textureSampler = nullptr; + std::vector uniformBuffers; + std::vector uniformBuffersMemory; + vk::raii::DescriptorPool descriptorPool = nullptr; + std::vector descriptorSets; + std::vector imageAvailableSemaphores; + std::vector renderFinishedSemaphores; + std::vector inFlightFences; + uint32_t currentFrame = 0; + + // Application info + AppInfo appInfo; + + // Model data + std::vector vertices; + std::vector indices; + + // Queue family indices + struct QueueFamilyIndices { + std::optional 
graphicsFamily; + std::optional presentFamily; + + bool isComplete() const { + return graphicsFamily.has_value() && presentFamily.has_value(); + } + }; + + // Swap chain support details + struct SwapChainSupportDetails { + vk::SurfaceCapabilitiesKHR capabilities; + std::vector formats; + std::vector presentModes; + }; + + // Required device extensions + const std::vector deviceExtensions = { + VK_KHR_SWAPCHAIN_EXTENSION_NAME + }; + + // Initialize Vulkan + void initVulkan() { + createInstance(); + createSurface(); + pickPhysicalDevice(); + checkFeatureSupport(); + createLogicalDevice(); + createSwapChain(); + createImageViews(); + createRenderPass(); + createDescriptorSetLayout(); + createGraphicsPipeline(); + createFramebuffers(); + createCommandPool(); + createTextureImage(); + createTextureImageView(); + createTextureSampler(); + loadModel(); + createVertexBuffer(); + createIndexBuffer(); + createUniformBuffers(); + createDescriptorPool(); + createDescriptorSets(); + createCommandBuffers(); + createSyncObjects(); + + initialized = true; + } + + // Create Vulkan instance + void createInstance() { + // Application info + vk::ApplicationInfo appInfo{ + .pApplicationName = "Vulkan Android", + .applicationVersion = VK_MAKE_VERSION(1, 0, 0), + .pEngineName = "No Engine", + .engineVersion = VK_MAKE_VERSION(1, 0, 0), + .apiVersion = VK_API_VERSION_1_3 + }; + + // Get required extensions + std::vector extensions = getRequiredExtensions(); + + // Create instance + vk::InstanceCreateInfo createInfo{ + .pApplicationInfo = &appInfo, + .enabledExtensionCount = static_cast(extensions.size()), + .ppEnabledExtensionNames = extensions.data() + }; + + instance = vk::raii::Instance(context, createInfo); + LOGI("Vulkan instance created"); + } + + // Create platform-specific surface + void createSurface() { + VkSurfaceKHR _surface; + +#if PLATFORM_ANDROID + // Create Android surface + VkAndroidSurfaceCreateInfoKHR createInfo = { + .sType = 
VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR, + .pNext = nullptr, + .flags = 0, + .window = androidApp->window + }; + + VkResult result = vkCreateAndroidSurfaceKHR( + *instance, + &createInfo, + nullptr, + &_surface + ); + + if (result != VK_SUCCESS) { + throw std::runtime_error("Failed to create Android surface"); + } + + LOG_INFO("Android surface created"); +#else + // Create desktop surface using GLFW + if (glfwCreateWindowSurface(*instance, window, nullptr, &_surface) != 0) { + throw std::runtime_error("Failed to create window surface"); + } + + LOG_INFO("Desktop surface created"); +#endif + + surface = vk::raii::SurfaceKHR(instance, _surface); + } + + // Pick physical device + void pickPhysicalDevice() { + std::vector devices = instance.enumeratePhysicalDevices(); + const auto devIter = std::ranges::find_if( + devices, + [&](auto const& device) { + // Check if any of the queue families support graphics operations + auto queueFamilies = device.getQueueFamilyProperties(); + bool supportsGraphics = + std::ranges::any_of(queueFamilies, [](auto const& qfp) { return !!(qfp.queueFlags & vk::QueueFlagBits::eGraphics); }); + + // Check if all required device extensions are available + auto availableDeviceExtensions = device.enumerateDeviceExtensionProperties(); + bool supportsAllRequiredExtensions = + std::ranges::all_of(deviceExtensions, + [&availableDeviceExtensions](auto const& requiredDeviceExtension) { + return std::ranges::any_of(availableDeviceExtensions, + [requiredDeviceExtension](auto const& availableDeviceExtension) { + return strcmp(availableDeviceExtension.extensionName, requiredDeviceExtension) == 0; + }); + }); + + return supportsGraphics && supportsAllRequiredExtensions; + }); + + if (devIter != devices.end()) { + physicalDevice = *devIter; + + // Print device information + vk::PhysicalDeviceProperties deviceProperties = physicalDevice.getProperties(); + LOGI("Selected GPU: %s", deviceProperties.deviceName.data()); + } else { + throw 
std::runtime_error("Failed to find a suitable GPU"); + } + } + + // Check feature support + void checkFeatureSupport() { + // Define the KHR roadmap 2022 profile + appInfo.profile = { + VP_KHR_ROADMAP_2022_NAME, + VP_KHR_ROADMAP_2022_SPEC_VERSION + }; + + // Check if the profile is supported + VkBool32 supported = VK_FALSE; + +#ifdef PLATFORM_ANDROID + // Create a vp::ProfileDesc from our VpProfileProperties + vp::ProfileDesc profileDesc = { + appInfo.profile.name, + appInfo.profile.specVersion + }; + + // Use vp::GetProfileSupport instead of vpGetPhysicalDeviceProfileSupport + bool result = vp::GetProfileSupport( + *physicalDevice, // Pass the physical device directly + &profileDesc, // Pass the profile description + &supported // Output parameter for support status + ); +#else + VkResult vk_result = vpGetPhysicalDeviceProfileSupport( + *instance, + *physicalDevice, + &appInfo.profile, + &supported + ); + bool result = vk_result == VK_SUCCESS; +#endif + + if (result && supported == VK_TRUE) { + appInfo.profileSupported = true; + LOGI("Using KHR roadmap 2022 profile"); + } else { + appInfo.profileSupported = false; + LOGI("Falling back to traditional rendering (profile not supported)"); + } + } + + // Create logical device + void createLogicalDevice() { + QueueFamilyIndices indices = findQueueFamilies(physicalDevice); + + std::vector queueCreateInfos; + std::set uniqueQueueFamilies = {indices.graphicsFamily.value(), indices.presentFamily.value()}; + + float queuePriority = 1.0f; + for (uint32_t queueFamily : uniqueQueueFamilies) { + vk::DeviceQueueCreateInfo queueCreateInfo{ + .queueFamilyIndex = queueFamily, + .queueCount = 1, + .pQueuePriorities = &queuePriority + }; + queueCreateInfos.push_back(queueCreateInfo); + } + + if (appInfo.profileSupported) { + // Enable required features + vk::PhysicalDeviceFeatures2 features2; + vk::PhysicalDeviceFeatures deviceFeatures{}; + deviceFeatures.samplerAnisotropy = VK_TRUE; + deviceFeatures.sampleRateShading = VK_TRUE; + 
features2.features = deviceFeatures; + + // Enable dynamic rendering + vk::PhysicalDeviceDynamicRenderingFeatures dynamicRenderingFeatures; + dynamicRenderingFeatures.dynamicRendering = VK_TRUE; + features2.pNext = &dynamicRenderingFeatures; + + // Create a vk::DeviceCreateInfo with the required features + vk::DeviceCreateInfo vkDeviceCreateInfo{ + .pNext = &features2, + .queueCreateInfoCount = static_cast(queueCreateInfos.size()), + .pQueueCreateInfos = queueCreateInfos.data(), + .enabledExtensionCount = static_cast(deviceExtensions.size()), + .ppEnabledExtensionNames = deviceExtensions.data() + }; + + // Create the device with the vk::DeviceCreateInfo + device = vk::raii::Device(physicalDevice, vkDeviceCreateInfo); + } else { + // Fallback to manual device creation + vk::PhysicalDeviceFeatures deviceFeatures{}; + deviceFeatures.samplerAnisotropy = VK_TRUE; + deviceFeatures.sampleRateShading = VK_TRUE; + + vk::DeviceCreateInfo createInfo{ + .queueCreateInfoCount = static_cast(queueCreateInfos.size()), + .pQueueCreateInfos = queueCreateInfos.data(), + .enabledExtensionCount = static_cast(deviceExtensions.size()), + .ppEnabledExtensionNames = deviceExtensions.data(), + .pEnabledFeatures = &deviceFeatures + }; + + device = vk::raii::Device(physicalDevice, createInfo); + } + + graphicsQueue = device.getQueue(indices.graphicsFamily.value(), 0); + presentQueue = device.getQueue(indices.presentFamily.value(), 0); + } + + // Create swap chain + void createSwapChain() { + SwapChainSupportDetails swapChainSupport = querySwapChainSupport(physicalDevice); + + vk::SurfaceFormatKHR surfaceFormat = chooseSwapSurfaceFormat(swapChainSupport.formats); + vk::PresentModeKHR presentMode = chooseSwapPresentMode(swapChainSupport.presentModes); + vk::Extent2D extent = chooseSwapExtent(swapChainSupport.capabilities); + + uint32_t imageCount = swapChainSupport.capabilities.minImageCount + 1; + if (swapChainSupport.capabilities.maxImageCount > 0 && imageCount > 
swapChainSupport.capabilities.maxImageCount) { + imageCount = swapChainSupport.capabilities.maxImageCount; + } + + vk::SwapchainCreateInfoKHR createInfo{ + .surface = *surface, + .minImageCount = imageCount, + .imageFormat = surfaceFormat.format, + .imageColorSpace = surfaceFormat.colorSpace, + .imageExtent = extent, + .imageArrayLayers = 1, + .imageUsage = vk::ImageUsageFlagBits::eColorAttachment + }; + + QueueFamilyIndices indices = findQueueFamilies(physicalDevice); + uint32_t queueFamilyIndices[] = {indices.graphicsFamily.value(), indices.presentFamily.value()}; + + if (indices.graphicsFamily != indices.presentFamily) { + createInfo.imageSharingMode = vk::SharingMode::eConcurrent; + createInfo.queueFamilyIndexCount = 2; + createInfo.pQueueFamilyIndices = queueFamilyIndices; + } else { + createInfo.imageSharingMode = vk::SharingMode::eExclusive; + } + + createInfo.preTransform = swapChainSupport.capabilities.currentTransform; + createInfo.compositeAlpha = vk::CompositeAlphaFlagBitsKHR::eOpaque; + createInfo.presentMode = presentMode; + createInfo.clipped = VK_TRUE; + + swapChain = device.createSwapchainKHR(createInfo); + swapChainImages = swapChain.getImages(); + swapChainImageFormat = surfaceFormat.format; + swapChainExtent = extent; + } + + // Create image views + void createImageViews() { + swapChainImageViews.reserve(swapChainImages.size()); + + for (const auto& image : swapChainImages) { + vk::ImageViewCreateInfo createInfo{ + .image = image, + .viewType = vk::ImageViewType::e2D, + .format = swapChainImageFormat, + .components = { + .r = vk::ComponentSwizzle::eIdentity, + .g = vk::ComponentSwizzle::eIdentity, + .b = vk::ComponentSwizzle::eIdentity, + .a = vk::ComponentSwizzle::eIdentity + }, + .subresourceRange = { + .aspectMask = vk::ImageAspectFlagBits::eColor, + .baseMipLevel = 0, + .levelCount = 1, + .baseArrayLayer = 0, + .layerCount = 1 + } + }; + + swapChainImageViews.push_back(device.createImageView(createInfo)); + } + } + + // Create render pass + 
void createRenderPass() { + vk::AttachmentDescription colorAttachment{ + .format = swapChainImageFormat, + .samples = vk::SampleCountFlagBits::e1, + .loadOp = vk::AttachmentLoadOp::eClear, + .storeOp = vk::AttachmentStoreOp::eStore, + .stencilLoadOp = vk::AttachmentLoadOp::eDontCare, + .stencilStoreOp = vk::AttachmentStoreOp::eDontCare, + .initialLayout = vk::ImageLayout::eUndefined, + .finalLayout = vk::ImageLayout::ePresentSrcKHR + }; + + vk::AttachmentReference colorAttachmentRef{ + .attachment = 0, + .layout = vk::ImageLayout::eColorAttachmentOptimal + }; + + vk::SubpassDescription subpass{ + .pipelineBindPoint = vk::PipelineBindPoint::eGraphics, + .colorAttachmentCount = 1, + .pColorAttachments = &colorAttachmentRef + }; + + vk::SubpassDependency dependency{ + .srcSubpass = VK_SUBPASS_EXTERNAL, + .dstSubpass = 0, + .srcStageMask = vk::PipelineStageFlagBits::eColorAttachmentOutput, + .dstStageMask = vk::PipelineStageFlagBits::eColorAttachmentOutput, + .srcAccessMask = vk::AccessFlagBits::eNone, + .dstAccessMask = vk::AccessFlagBits::eColorAttachmentWrite + }; + + vk::RenderPassCreateInfo renderPassInfo{ + .attachmentCount = 1, + .pAttachments = &colorAttachment, + .subpassCount = 1, + .pSubpasses = &subpass, + .dependencyCount = 1, + .pDependencies = &dependency + }; + + renderPass = device.createRenderPass(renderPassInfo); + } + + // Create descriptor set layout + void createDescriptorSetLayout() { + vk::DescriptorSetLayoutBinding uboLayoutBinding{ + .binding = 0, + .descriptorType = vk::DescriptorType::eUniformBuffer, + .descriptorCount = 1, + .stageFlags = vk::ShaderStageFlagBits::eVertex + }; + + vk::DescriptorSetLayoutBinding samplerLayoutBinding{ + .binding = 1, + .descriptorType = vk::DescriptorType::eCombinedImageSampler, + .descriptorCount = 1, + .stageFlags = vk::ShaderStageFlagBits::eFragment + }; + + std::array bindings = {uboLayoutBinding, samplerLayoutBinding}; + + vk::DescriptorSetLayoutCreateInfo layoutInfo{ + .bindingCount = 
static_cast(bindings.size()), + .pBindings = bindings.data() + }; + + descriptorSetLayout = device.createDescriptorSetLayout(layoutInfo); + } + + // Create graphics pipeline + void createGraphicsPipeline() { + // Load shader code from asset files + LOGI("Loading shaders from assets"); + + // Load shader files using cross-platform function +#if PLATFORM_ANDROID + std::optional optionalAssetManager = assetManager; +#else + std::optional optionalAssetManager = std::nullopt; +#endif + std::vector vertShaderCode = readFile("shaders/vert.spv", optionalAssetManager); + std::vector fragShaderCode = readFile("shaders/frag.spv", optionalAssetManager); + + LOGI("Shaders loaded successfully"); + + // Create shader modules + vk::ShaderModuleCreateInfo vertShaderModuleInfo{ + .codeSize = vertShaderCode.size(), + .pCode = reinterpret_cast(vertShaderCode.data()) + }; + vk::raii::ShaderModule vertShaderModule = device.createShaderModule(vertShaderModuleInfo); + + vk::ShaderModuleCreateInfo fragShaderModuleInfo{ + .codeSize = fragShaderCode.size(), + .pCode = reinterpret_cast(fragShaderCode.data()) + }; + vk::raii::ShaderModule fragShaderModule = device.createShaderModule(fragShaderModuleInfo); + + // Create shader stages + vk::PipelineShaderStageCreateInfo shaderStages[] = { + { + .stage = vk::ShaderStageFlagBits::eVertex, + .module = *vertShaderModule, + .pName = "main" + }, + { + .stage = vk::ShaderStageFlagBits::eFragment, + .module = *fragShaderModule, + .pName = "main" + } + }; + + // Vertex input + auto bindingDescription = Vertex::getBindingDescription(); + auto attributeDescriptions = Vertex::getAttributeDescriptions(); + + vk::PipelineVertexInputStateCreateInfo vertexInputInfo{ + .vertexBindingDescriptionCount = 1, + .pVertexBindingDescriptions = &bindingDescription, + .vertexAttributeDescriptionCount = static_cast(attributeDescriptions.size()), + .pVertexAttributeDescriptions = attributeDescriptions.data() + }; + + // Input assembly + 
vk::PipelineInputAssemblyStateCreateInfo inputAssembly{ + .topology = vk::PrimitiveTopology::eTriangleList, + .primitiveRestartEnable = VK_FALSE + }; + + // Viewport and scissor + vk::PipelineViewportStateCreateInfo viewportState{ + .viewportCount = 1, + .scissorCount = 1 + }; + + // Rasterization + vk::PipelineRasterizationStateCreateInfo rasterizer{ + .depthClampEnable = VK_FALSE, + .rasterizerDiscardEnable = VK_FALSE, + .polygonMode = vk::PolygonMode::eFill, + .cullMode = vk::CullModeFlagBits::eBack, + .frontFace = vk::FrontFace::eCounterClockwise, + .depthBiasEnable = VK_FALSE, + .lineWidth = 1.0f + }; + + // Multisampling + vk::PipelineMultisampleStateCreateInfo multisampling{ + .rasterizationSamples = vk::SampleCountFlagBits::e1, + .sampleShadingEnable = VK_FALSE + }; + + // Color blending + vk::PipelineColorBlendAttachmentState colorBlendAttachment{ + .blendEnable = VK_FALSE, + .colorWriteMask = vk::ColorComponentFlagBits::eR | vk::ColorComponentFlagBits::eG | vk::ColorComponentFlagBits::eB | vk::ColorComponentFlagBits::eA + }; + + vk::PipelineColorBlendStateCreateInfo colorBlending{ + .logicOpEnable = VK_FALSE, + .logicOp = vk::LogicOp::eCopy, + .attachmentCount = 1, + .pAttachments = &colorBlendAttachment + }; + + // Dynamic states + std::vector dynamicStates = { + vk::DynamicState::eViewport, + vk::DynamicState::eScissor + }; + + vk::PipelineDynamicStateCreateInfo dynamicState{ + .dynamicStateCount = static_cast(dynamicStates.size()), + .pDynamicStates = dynamicStates.data() + }; + + // Pipeline layout + vk::PipelineLayoutCreateInfo pipelineLayoutInfo{ + .setLayoutCount = 1, + .pSetLayouts = &*descriptorSetLayout + }; + + pipelineLayout = device.createPipelineLayout(pipelineLayoutInfo); + + // Create the graphics pipeline + vk::GraphicsPipelineCreateInfo pipelineInfo{ + .stageCount = 2, + .pStages = shaderStages, + .pVertexInputState = &vertexInputInfo, + .pInputAssemblyState = &inputAssembly, + .pViewportState = &viewportState, + .pRasterizationState = 
&rasterizer, + .pMultisampleState = &multisampling, + .pDepthStencilState = nullptr, + .pColorBlendState = &colorBlending, + .pDynamicState = &dynamicState, + .layout = *pipelineLayout, + .renderPass = *renderPass, + .subpass = 0 + }; + + // Create the pipeline + graphicsPipeline = device.createGraphicsPipeline(nullptr, pipelineInfo); + } + + // Create framebuffers + void createFramebuffers() { + swapChainFramebuffers.reserve(swapChainImageViews.size()); + + for (size_t i = 0; i < swapChainImageViews.size(); i++) { + vk::ImageView attachments[] = { + *swapChainImageViews[i] + }; + + vk::FramebufferCreateInfo framebufferInfo{ + .renderPass = *renderPass, + .attachmentCount = 1, + .pAttachments = attachments, + .width = swapChainExtent.width, + .height = swapChainExtent.height, + .layers = 1 + }; + + swapChainFramebuffers.push_back(device.createFramebuffer(framebufferInfo)); + } + } + + // Create command pool + void createCommandPool() { + QueueFamilyIndices queueFamilyIndices = findQueueFamilies(physicalDevice); + + vk::CommandPoolCreateInfo poolInfo{ + .flags = vk::CommandPoolCreateFlagBits::eResetCommandBuffer, + .queueFamilyIndex = queueFamilyIndices.graphicsFamily.value() + }; + + commandPool = device.createCommandPool(poolInfo); + } + + // Create texture image + void createTextureImage() { + // Load texture image + int texWidth, texHeight, texChannels; + stbi_uc* pixels = nullptr; + +#if PLATFORM_ANDROID + // Load image from Android assets + std::optional optionalAssetManager = assetManager; + std::vector imageData = readFile(TEXTURE_PATH, optionalAssetManager); + pixels = stbi_load_from_memory( + reinterpret_cast(imageData.data()), + static_cast(imageData.size()), + &texWidth, &texHeight, &texChannels, STBI_rgb_alpha + ); +#else + // Load image from filesystem + pixels = stbi_load(TEXTURE_PATH.c_str(), &texWidth, &texHeight, &texChannels, STBI_rgb_alpha); +#endif + + if (!pixels) { + throw std::runtime_error("Failed to load texture image: " + TEXTURE_PATH); + 
} + + LOG_INFO("Texture loaded successfully"); + + vk::DeviceSize imageSize = texWidth * texHeight * 4; + + // Create staging buffer + vk::raii::Buffer stagingBuffer = nullptr; + vk::raii::DeviceMemory stagingBufferMemory = nullptr; + + createBuffer(imageSize, vk::BufferUsageFlagBits::eTransferSrc, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent, stagingBuffer, stagingBufferMemory); + + // Copy pixel data to staging buffer + void* data; + data = stagingBufferMemory.mapMemory(0, imageSize); + memcpy(data, pixels, static_cast(imageSize)); + stagingBufferMemory.unmapMemory(); + + // Free the pixel data + if (pixels != nullptr) { + stbi_image_free(pixels); + } + + // Create image + vk::ImageCreateInfo imageInfo{ + .imageType = vk::ImageType::e2D, + .format = vk::Format::eR8G8B8A8Srgb, + .extent = { + .width = static_cast(texWidth), + .height = static_cast(texHeight), + .depth = 1 + }, + .mipLevels = 1, + .arrayLayers = 1, + .samples = vk::SampleCountFlagBits::e1, + .tiling = vk::ImageTiling::eOptimal, + .usage = vk::ImageUsageFlagBits::eTransferDst | vk::ImageUsageFlagBits::eSampled, + .sharingMode = vk::SharingMode::eExclusive, + .initialLayout = vk::ImageLayout::eUndefined + }; + + textureImage = device.createImage(imageInfo); + + // Allocate memory for the image + vk::MemoryRequirements memRequirements = textureImage.getMemoryRequirements(); + + vk::MemoryAllocateInfo allocInfo{ + .allocationSize = memRequirements.size, + .memoryTypeIndex = findMemoryType(memRequirements.memoryTypeBits, vk::MemoryPropertyFlagBits::eDeviceLocal) + }; + + textureImageMemory = device.allocateMemory(allocInfo); + textureImage.bindMemory(*textureImageMemory, 0); + + // Transition image layout and copy buffer to image + transitionImageLayout(textureImage, vk::Format::eR8G8B8A8Srgb, vk::ImageLayout::eUndefined, vk::ImageLayout::eTransferDstOptimal); + copyBufferToImage(stagingBuffer, textureImage, static_cast(texWidth), static_cast(texHeight)); + 
transitionImageLayout(textureImage, vk::Format::eR8G8B8A8Srgb, vk::ImageLayout::eTransferDstOptimal, vk::ImageLayout::eShaderReadOnlyOptimal); + } + + // Create texture image view + void createTextureImageView() { + textureImageView = createImageView(textureImage, vk::Format::eR8G8B8A8Srgb); + } + + // Create texture sampler + void createTextureSampler() { + vk::SamplerCreateInfo samplerInfo{ + .magFilter = vk::Filter::eLinear, + .minFilter = vk::Filter::eLinear, + .mipmapMode = vk::SamplerMipmapMode::eLinear, + .addressModeU = vk::SamplerAddressMode::eRepeat, + .addressModeV = vk::SamplerAddressMode::eRepeat, + .addressModeW = vk::SamplerAddressMode::eRepeat, + .anisotropyEnable = VK_TRUE, + .maxAnisotropy = 16.0f, + .compareEnable = VK_FALSE, + .compareOp = vk::CompareOp::eAlways, + .borderColor = vk::BorderColor::eIntOpaqueBlack, + .unnormalizedCoordinates = VK_FALSE + }; + + textureSampler = device.createSampler(samplerInfo); + } + + // Load model + void loadModel() { + tinyobj::attrib_t attrib; + std::vector shapes; + std::vector materials; + std::string warn, err; + +#if PLATFORM_ANDROID + // Load OBJ file from Android assets + std::optional optionalAssetManager = assetManager; + std::vector objData = readFile(MODEL_PATH, optionalAssetManager); + std::string objString(objData.begin(), objData.end()); + std::istringstream objStream(objString); + + if (!tinyobj::LoadObj(&attrib, &shapes, &materials, &warn, &err, &objStream)) { + throw std::runtime_error("Failed to load model: " + MODEL_PATH + " - " + warn + err); + } +#else + // Load OBJ file from filesystem + if (!tinyobj::LoadObj(&attrib, &shapes, &materials, &warn, &err, MODEL_PATH.c_str())) { + throw std::runtime_error("Failed to load model: " + MODEL_PATH + " - " + warn + err); + } +#endif + + std::unordered_map uniqueVertices{}; + + for (const auto& shape : shapes) { + for (const auto& index : shape.mesh.indices) { + Vertex vertex{}; + + vertex.pos = { + attrib.vertices[3 * index.vertex_index + 0], + 
attrib.vertices[3 * index.vertex_index + 1], + attrib.vertices[3 * index.vertex_index + 2] + }; + + vertex.texCoord = { + attrib.texcoords[2 * index.texcoord_index + 0], + 1.0f - attrib.texcoords[2 * index.texcoord_index + 1] + }; + + vertex.color = {1.0f, 1.0f, 1.0f}; + + if (uniqueVertices.count(vertex) == 0) { + uniqueVertices[vertex] = static_cast(vertices.size()); + vertices.push_back(vertex); + } + + indices.push_back(uniqueVertices[vertex]); + } + } + + LOG_INFO("Model loaded successfully"); + } + + // Create vertex buffer + void createVertexBuffer() { + vk::DeviceSize bufferSize = sizeof(vertices[0]) * vertices.size(); + + vk::raii::Buffer stagingBuffer = nullptr; + vk::raii::DeviceMemory stagingBufferMemory = nullptr; + createBuffer(bufferSize, vk::BufferUsageFlagBits::eTransferSrc, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent, stagingBuffer, stagingBufferMemory); + + void* data; + data = stagingBufferMemory.mapMemory(0, bufferSize); + memcpy(data, vertices.data(), (size_t) bufferSize); + stagingBufferMemory.unmapMemory(); + + createBuffer(bufferSize, vk::BufferUsageFlagBits::eTransferDst | vk::BufferUsageFlagBits::eVertexBuffer, vk::MemoryPropertyFlagBits::eDeviceLocal, vertexBuffer, vertexBufferMemory); + + copyBuffer(stagingBuffer, vertexBuffer, bufferSize); + } + + // Create index buffer + void createIndexBuffer() { + vk::DeviceSize bufferSize = sizeof(indices[0]) * indices.size(); + + vk::raii::Buffer stagingBuffer = nullptr; + vk::raii::DeviceMemory stagingBufferMemory = nullptr; + createBuffer(bufferSize, vk::BufferUsageFlagBits::eTransferSrc, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent, stagingBuffer, stagingBufferMemory); + + void* data; + data = stagingBufferMemory.mapMemory(0, bufferSize); + memcpy(data, indices.data(), (size_t) bufferSize); + stagingBufferMemory.unmapMemory(); + + createBuffer(bufferSize, vk::BufferUsageFlagBits::eTransferDst | 
vk::BufferUsageFlagBits::eIndexBuffer, vk::MemoryPropertyFlagBits::eDeviceLocal, indexBuffer, indexBufferMemory); + + copyBuffer(stagingBuffer, indexBuffer, bufferSize); + } + + // Create uniform buffers + void createUniformBuffers() { + vk::DeviceSize bufferSize = sizeof(UniformBufferObject); + + uniformBuffers.clear(); + uniformBuffersMemory.clear(); + + for (size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) { + uniformBuffers.push_back(nullptr); + uniformBuffersMemory.push_back(nullptr); + } + + for (size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) { + createBuffer(bufferSize, vk::BufferUsageFlagBits::eUniformBuffer, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent, uniformBuffers[i], uniformBuffersMemory[i]); + } + } + + // Create descriptor pool + void createDescriptorPool() { + std::array poolSizes = { + vk::DescriptorPoolSize{ + .type = vk::DescriptorType::eUniformBuffer, + .descriptorCount = static_cast(MAX_FRAMES_IN_FLIGHT) + }, + vk::DescriptorPoolSize{ + .type = vk::DescriptorType::eCombinedImageSampler, + .descriptorCount = static_cast(MAX_FRAMES_IN_FLIGHT) + } + }; + + vk::DescriptorPoolCreateInfo poolInfo{ + .maxSets = static_cast(MAX_FRAMES_IN_FLIGHT), + .poolSizeCount = static_cast(poolSizes.size()), + .pPoolSizes = poolSizes.data() + }; + + descriptorPool = device.createDescriptorPool(poolInfo); + } + + // Create descriptor sets + void createDescriptorSets() { + std::vector layouts(MAX_FRAMES_IN_FLIGHT, *descriptorSetLayout); + vk::DescriptorSetAllocateInfo allocInfo{ + .descriptorPool = *descriptorPool, + .descriptorSetCount = static_cast(MAX_FRAMES_IN_FLIGHT), + .pSetLayouts = layouts.data() + }; + + descriptorSets = device.allocateDescriptorSets(allocInfo); + + for (size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) { + vk::DescriptorBufferInfo bufferInfo{ + .buffer = *uniformBuffers[i], + .offset = 0, + .range = sizeof(UniformBufferObject) + }; + + vk::DescriptorImageInfo imageInfo{ + .sampler = *textureSampler, + 
.imageView = *textureImageView, + .imageLayout = vk::ImageLayout::eShaderReadOnlyOptimal + }; + + std::array descriptorWrites = { + vk::WriteDescriptorSet{ + .dstSet = *descriptorSets[i], + .dstBinding = 0, + .dstArrayElement = 0, + .descriptorCount = 1, + .descriptorType = vk::DescriptorType::eUniformBuffer, + .pBufferInfo = &bufferInfo + }, + vk::WriteDescriptorSet{ + .dstSet = *descriptorSets[i], + .dstBinding = 1, + .dstArrayElement = 0, + .descriptorCount = 1, + .descriptorType = vk::DescriptorType::eCombinedImageSampler, + .pImageInfo = &imageInfo + } + }; + + device.updateDescriptorSets(descriptorWrites, nullptr); + } + } + + // Create command buffers + void createCommandBuffers() { + commandBuffers.reserve(MAX_FRAMES_IN_FLIGHT); + + vk::CommandBufferAllocateInfo allocInfo{ + .commandPool = *commandPool, + .level = vk::CommandBufferLevel::ePrimary, + .commandBufferCount = static_cast(MAX_FRAMES_IN_FLIGHT) + }; + + commandBuffers = device.allocateCommandBuffers(allocInfo); + } + + // Create synchronization objects + void createSyncObjects() { + imageAvailableSemaphores.reserve(MAX_FRAMES_IN_FLIGHT); + renderFinishedSemaphores.reserve(MAX_FRAMES_IN_FLIGHT); + inFlightFences.reserve(MAX_FRAMES_IN_FLIGHT); + + vk::SemaphoreCreateInfo semaphoreInfo{}; + vk::FenceCreateInfo fenceInfo{ + .flags = vk::FenceCreateFlagBits::eSignaled + }; + + for (size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) { + imageAvailableSemaphores.push_back(device.createSemaphore(semaphoreInfo)); + renderFinishedSemaphores.push_back(device.createSemaphore(semaphoreInfo)); + inFlightFences.push_back(device.createFence(fenceInfo)); + } + } + + // Clean up swap chain + void cleanupSwapChain() { + for (auto& framebuffer : swapChainFramebuffers) { + framebuffer = nullptr; + } + + for (auto& imageView : swapChainImageViews) { + imageView = nullptr; + } + + swapChain = nullptr; + } + + // Record command buffer + void recordCommandBuffer(vk::raii::CommandBuffer& commandBuffer, uint32_t imageIndex) { + 
vk::CommandBufferBeginInfo beginInfo{}; + commandBuffer.begin(beginInfo); + + vk::RenderPassBeginInfo renderPassInfo{ + .renderPass = *renderPass, + .framebuffer = *swapChainFramebuffers[imageIndex], + .renderArea = { + .offset = {0, 0}, + .extent = swapChainExtent + } + }; + + vk::ClearValue clearColor; + clearColor.color.float32 = std::array{0.0f, 0.0f, 0.0f, 1.0f}; + renderPassInfo.clearValueCount = 1; + renderPassInfo.pClearValues = &clearColor; + + commandBuffer.beginRenderPass(renderPassInfo, vk::SubpassContents::eInline); + commandBuffer.bindPipeline(vk::PipelineBindPoint::eGraphics, *graphicsPipeline); + + vk::Viewport viewport{ + .x = 0.0f, + .y = 0.0f, + .width = static_cast(swapChainExtent.width), + .height = static_cast(swapChainExtent.height), + .minDepth = 0.0f, + .maxDepth = 1.0f + }; + commandBuffer.setViewport(0, viewport); + + vk::Rect2D scissor{ + .offset = {0, 0}, + .extent = swapChainExtent + }; + commandBuffer.setScissor(0, scissor); + + commandBuffer.bindVertexBuffers(0, {*vertexBuffer}, {0}); + commandBuffer.bindIndexBuffer(*indexBuffer, 0, vk::IndexType::eUint32); + commandBuffer.bindDescriptorSets(vk::PipelineBindPoint::eGraphics, *pipelineLayout, 0, {*descriptorSets[currentFrame]}, nullptr); + commandBuffer.drawIndexed(static_cast(indices.size()), 1, 0, 0, 0); + + commandBuffer.endRenderPass(); + commandBuffer.end(); + } + + // Draw frame + void drawFrame() { + static_cast(device.waitForFences({*inFlightFences[currentFrame]}, VK_TRUE, FenceTimeout)); + + uint32_t imageIndex; + try { + auto [result, idx] = swapChain.acquireNextImage(FenceTimeout, *imageAvailableSemaphores[currentFrame]); + imageIndex = idx; + } catch (vk::OutOfDateKHRError&) { + recreateSwapChain(); + return; + } + + // Update uniform buffer with current transformation + updateUniformBuffer(currentFrame); + + device.resetFences({*inFlightFences[currentFrame]}); + + commandBuffers[currentFrame].reset(); + recordCommandBuffer(commandBuffers[currentFrame], imageIndex); + + 
vk::PipelineStageFlags waitDestinationStageMask(vk::PipelineStageFlagBits::eColorAttachmentOutput); + const vk::SubmitInfo submitInfo{ + .waitSemaphoreCount = 1, + .pWaitSemaphores = &*imageAvailableSemaphores[currentFrame], + .pWaitDstStageMask = &waitDestinationStageMask, + .commandBufferCount = 1, + .pCommandBuffers = &*commandBuffers[currentFrame], + .signalSemaphoreCount = 1, + .pSignalSemaphores = &*renderFinishedSemaphores[currentFrame] + }; + graphicsQueue.submit(submitInfo, *inFlightFences[currentFrame]); + + const vk::PresentInfoKHR presentInfoKHR{ + .waitSemaphoreCount = 1, + .pWaitSemaphores = &*renderFinishedSemaphores[currentFrame], + .swapchainCount = 1, + .pSwapchains = &*swapChain, + .pImageIndices = &imageIndex + }; + + vk::Result result; + try { + result = presentQueue.presentKHR(presentInfoKHR); + } catch (vk::OutOfDateKHRError&) { + result = vk::Result::eErrorOutOfDateKHR; + } + + if (result == vk::Result::eErrorOutOfDateKHR || result == vk::Result::eSuboptimalKHR || framebufferResized) { + framebufferResized = false; + recreateSwapChain(); + } else if (result != vk::Result::eSuccess) { + throw std::runtime_error("Failed to present swap chain image"); + } + + currentFrame = (currentFrame + 1) % MAX_FRAMES_IN_FLIGHT; + } + + // Recreate swap chain + void recreateSwapChain() { + // Wait for device to finish operations + device.waitIdle(); + + // Clean up old swap chain + cleanupSwapChain(); + + // Create new swap chain + createSwapChain(); + createImageViews(); + createFramebuffers(); + } + + // Get required extensions + std::vector getRequiredExtensions() { +#if PLATFORM_ANDROID + // Android requires these extensions + std::vector extensions = { + VK_KHR_SURFACE_EXTENSION_NAME, + VK_KHR_ANDROID_SURFACE_EXTENSION_NAME + }; +#else + // Get the required extensions from GLFW + uint32_t glfwExtensionCount = 0; + auto glfwExtensions = glfwGetRequiredInstanceExtensions(&glfwExtensionCount); + std::vector extensions(glfwExtensions, glfwExtensions + 
glfwExtensionCount); +#endif + + // Check if the debug utils extension is available + std::vector props = context.enumerateInstanceExtensionProperties(); + bool debugUtilsAvailable = std::ranges::any_of(props, + [](vk::ExtensionProperties const & ep) { + return strcmp(ep.extensionName, vk::EXTDebugUtilsExtensionName) == 0; + }); + + // Always include the debug utils extension if available + if (debugUtilsAvailable) { + extensions.push_back(vk::EXTDebugUtilsExtensionName); +#if PLATFORM_DESKTOP + } else { + LOG_INFO("VK_EXT_debug_utils extension not available. Validation layers may not work."); +#endif + } + + return extensions; + } + + // Choose swap surface format + vk::SurfaceFormatKHR chooseSwapSurfaceFormat(const std::vector& availableFormats) { + // Prefer SRGB format + for (const auto& availableFormat : availableFormats) { + if (availableFormat.format == vk::Format::eB8G8R8A8Srgb && + availableFormat.colorSpace == vk::ColorSpaceKHR::eSrgbNonlinear) { + return availableFormat; + } + } + + // If not available, just use the first format + return availableFormats[0]; + } + + // Choose swap present mode + vk::PresentModeKHR chooseSwapPresentMode(const std::vector& availablePresentModes) { + // Prefer mailbox mode for triple buffering + for (const auto& availablePresentMode : availablePresentModes) { + if (availablePresentMode == vk::PresentModeKHR::eMailbox) { + return availablePresentMode; + } + } + + // Fallback to FIFO (guaranteed to be available) + return vk::PresentModeKHR::eFifo; + } + + // Choose swap extent + vk::Extent2D chooseSwapExtent(const vk::SurfaceCapabilitiesKHR& capabilities) { + if (capabilities.currentExtent.width != std::numeric_limits::max()) { + return capabilities.currentExtent; + } else { +#if PLATFORM_ANDROID + // Get the window size from Android + int32_t width = ANativeWindow_getWidth(androidApp->window); + int32_t height = ANativeWindow_getHeight(androidApp->window); +#else + // Get the window size from GLFW + int width, height; + 
glfwGetFramebufferSize(window, &width, &height); +#endif + + vk::Extent2D actualExtent = { + static_cast(width), + static_cast(height) + }; + + actualExtent.width = std::clamp(actualExtent.width, + capabilities.minImageExtent.width, + capabilities.maxImageExtent.width); + actualExtent.height = std::clamp(actualExtent.height, + capabilities.minImageExtent.height, + capabilities.maxImageExtent.height); + + return actualExtent; + } + } + + // Query swap chain support + SwapChainSupportDetails querySwapChainSupport(vk::raii::PhysicalDevice device) { + SwapChainSupportDetails details; + details.capabilities = device.getSurfaceCapabilitiesKHR(*surface); + details.formats = device.getSurfaceFormatsKHR(*surface); + details.presentModes = device.getSurfacePresentModesKHR(*surface); + return details; + } + + // Find queue families + QueueFamilyIndices findQueueFamilies(vk::raii::PhysicalDevice device) { + QueueFamilyIndices indices; + + std::vector queueFamilies = device.getQueueFamilyProperties(); + + uint32_t i = 0; + for (const auto& queueFamily : queueFamilies) { + if (queueFamily.queueFlags & vk::QueueFlagBits::eGraphics) { + indices.graphicsFamily = i; + } + + vk::Bool32 presentSupport = device.getSurfaceSupportKHR(i, *surface); + if (presentSupport) { + indices.presentFamily = i; + } + + if (indices.isComplete()) { + break; + } + + i++; + } + + return indices; + } + + // Create buffer + void createBuffer(vk::DeviceSize size, vk::BufferUsageFlags usage, vk::MemoryPropertyFlags properties, vk::raii::Buffer& buffer, vk::raii::DeviceMemory& bufferMemory) { + vk::BufferCreateInfo bufferInfo{ + .size = size, + .usage = usage, + .sharingMode = vk::SharingMode::eExclusive + }; + + buffer = device.createBuffer(bufferInfo); + + vk::MemoryRequirements memRequirements = buffer.getMemoryRequirements(); + + vk::MemoryAllocateInfo allocInfo{ + .allocationSize = memRequirements.size, + .memoryTypeIndex = findMemoryType(memRequirements.memoryTypeBits, properties) + }; + + bufferMemory 
= device.allocateMemory(allocInfo); + buffer.bindMemory(*bufferMemory, 0); + } + + // Copy buffer + void copyBuffer(vk::raii::Buffer& srcBuffer, vk::raii::Buffer& dstBuffer, vk::DeviceSize size) { + vk::CommandBufferAllocateInfo allocInfo{ + .commandPool = *commandPool, + .level = vk::CommandBufferLevel::ePrimary, + .commandBufferCount = 1 + }; + + vk::raii::CommandBuffer commandBuffer = std::move(device.allocateCommandBuffers(allocInfo)[0]); + + vk::CommandBufferBeginInfo beginInfo{ + .flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit + }; + commandBuffer.begin(beginInfo); + + vk::BufferCopy copyRegion{ + .srcOffset = 0, + .dstOffset = 0, + .size = size + }; + commandBuffer.copyBuffer(*srcBuffer, *dstBuffer, copyRegion); + + commandBuffer.end(); + + vk::SubmitInfo submitInfo{ + .commandBufferCount = 1, + .pCommandBuffers = &*commandBuffer + }; + + graphicsQueue.submit(submitInfo, nullptr); + graphicsQueue.waitIdle(); + } + + // Find memory type + uint32_t findMemoryType(uint32_t typeFilter, vk::MemoryPropertyFlags properties) { + vk::PhysicalDeviceMemoryProperties memProperties = physicalDevice.getMemoryProperties(); + + for (uint32_t i = 0; i < memProperties.memoryTypeCount; i++) { + if ((typeFilter & (1 << i)) && (memProperties.memoryTypes[i].propertyFlags & properties) == properties) { + return i; + } + } + + throw std::runtime_error("Failed to find suitable memory type"); + } + + // Create image view + vk::raii::ImageView createImageView(vk::raii::Image& image, vk::Format format) { + vk::ImageViewCreateInfo viewInfo{ + .image = *image, + .viewType = vk::ImageViewType::e2D, + .format = format, + .subresourceRange = { + .aspectMask = vk::ImageAspectFlagBits::eColor, + .baseMipLevel = 0, + .levelCount = 1, + .baseArrayLayer = 0, + .layerCount = 1 + } + }; + + return device.createImageView(viewInfo); + } + + // Transition image layout + void transitionImageLayout(vk::raii::Image& image, vk::Format format, vk::ImageLayout oldLayout, vk::ImageLayout newLayout) 
{ + vk::CommandBufferAllocateInfo allocInfo{ + .commandPool = *commandPool, + .level = vk::CommandBufferLevel::ePrimary, + .commandBufferCount = 1 + }; + + vk::raii::CommandBuffer commandBuffer = std::move(device.allocateCommandBuffers(allocInfo)[0]); + + vk::CommandBufferBeginInfo beginInfo{ + .flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit + }; + commandBuffer.begin(beginInfo); + + vk::ImageMemoryBarrier barrier{ + .oldLayout = oldLayout, + .newLayout = newLayout, + .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .image = *image, + .subresourceRange = { + .aspectMask = vk::ImageAspectFlagBits::eColor, + .baseMipLevel = 0, + .levelCount = 1, + .baseArrayLayer = 0, + .layerCount = 1 + } + }; + + vk::PipelineStageFlags sourceStage; + vk::PipelineStageFlags destinationStage; + + if (oldLayout == vk::ImageLayout::eUndefined && newLayout == vk::ImageLayout::eTransferDstOptimal) { + barrier.srcAccessMask = vk::AccessFlagBits::eNone; + barrier.dstAccessMask = vk::AccessFlagBits::eTransferWrite; + + sourceStage = vk::PipelineStageFlagBits::eTopOfPipe; + destinationStage = vk::PipelineStageFlagBits::eTransfer; + } else if (oldLayout == vk::ImageLayout::eTransferDstOptimal && newLayout == vk::ImageLayout::eShaderReadOnlyOptimal) { + barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite; + barrier.dstAccessMask = vk::AccessFlagBits::eShaderRead; + + sourceStage = vk::PipelineStageFlagBits::eTransfer; + destinationStage = vk::PipelineStageFlagBits::eFragmentShader; + } else { + throw std::invalid_argument("Unsupported layout transition"); + } + + commandBuffer.pipelineBarrier( + sourceStage, destinationStage, + vk::DependencyFlagBits::eByRegion, + nullptr, + nullptr, + barrier + ); + + commandBuffer.end(); + + vk::SubmitInfo submitInfo{ + .commandBufferCount = 1, + .pCommandBuffers = &*commandBuffer + }; + + graphicsQueue.submit(submitInfo, nullptr); + graphicsQueue.waitIdle(); + } + + // Copy buffer to image + 
void copyBufferToImage(vk::raii::Buffer& buffer, vk::raii::Image& image, uint32_t width, uint32_t height) { + vk::CommandBufferAllocateInfo allocInfo{ + .commandPool = *commandPool, + .level = vk::CommandBufferLevel::ePrimary, + .commandBufferCount = 1 + }; + + vk::raii::CommandBuffer commandBuffer = std::move(device.allocateCommandBuffers(allocInfo)[0]); + + vk::CommandBufferBeginInfo beginInfo{ + .flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit + }; + commandBuffer.begin(beginInfo); + + vk::BufferImageCopy region{ + .bufferOffset = 0, + .bufferRowLength = 0, + .bufferImageHeight = 0, + .imageSubresource = { + .aspectMask = vk::ImageAspectFlagBits::eColor, + .mipLevel = 0, + .baseArrayLayer = 0, + .layerCount = 1 + }, + .imageOffset = {0, 0, 0}, + .imageExtent = {width, height, 1} + }; + + commandBuffer.copyBufferToImage( + *buffer, + *image, + vk::ImageLayout::eTransferDstOptimal, + region + ); + + commandBuffer.end(); + + vk::SubmitInfo submitInfo{ + .commandBufferCount = 1, + .pCommandBuffers = &*commandBuffer + }; + + graphicsQueue.submit(submitInfo, nullptr); + graphicsQueue.waitIdle(); + } + + // Update uniform buffer + void updateUniformBuffer(uint32_t currentImage) { + static auto startTime = std::chrono::high_resolution_clock::now(); + + auto currentTime = std::chrono::high_resolution_clock::now(); + float time = std::chrono::duration(currentTime - startTime).count(); + + UniformBufferObject ubo{}; + ubo.model = glm::rotate(glm::mat4(1.0f), time * glm::radians(90.0f), glm::vec3(0.0f, 0.0f, 1.0f)); + ubo.view = glm::lookAt(glm::vec3(2.0f, 2.0f, 2.0f), glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 0.0f, 1.0f)); + ubo.proj = glm::perspective(glm::radians(45.0f), swapChainExtent.width / (float) swapChainExtent.height, 0.1f, 10.0f); + ubo.proj[1][1] *= -1; + + void* data; + data = uniformBuffersMemory[currentImage].mapMemory(0, sizeof(ubo)); + memcpy(data, &ubo, sizeof(ubo)); + uniformBuffersMemory[currentImage].unmapMemory(); + } + +#if PLATFORM_ANDROID 
+ // Handle app commands + static void handleAppCommand(android_app* app, int32_t cmd) { + auto* vulkanApp = static_cast(app->userData); + switch (cmd) { + case APP_CMD_INIT_WINDOW: + // Window created, initialize Vulkan + if (app->window != nullptr) { + vulkanApp->initVulkan(); + } + break; + case APP_CMD_TERM_WINDOW: + // Window destroyed, clean up Vulkan + vulkanApp->cleanup(); + break; + default: + break; + } + } + + // Handle input events + static int32_t handleInputEvent(android_app* app, AInputEvent* event) { + auto* vulkanApp = static_cast(app->userData); + if (AInputEvent_getType(event) == AINPUT_EVENT_TYPE_MOTION) { + // Handle touch events + float x = AMotionEvent_getX(event, 0); + float y = AMotionEvent_getY(event, 0); + + // Process touch coordinates + LOGI("Touch at: %f, %f", x, y); + + return 1; + } + return 0; + } +#endif +}; + +// Platform-specific entry point +#if PLATFORM_ANDROID +// Android main entry point +void android_main(android_app* app) { + // Make sure glue isn't stripped + app_dummy(); + + try { + // Create and run the Vulkan application + HelloTriangleApplication vulkanApp(app); + vulkanApp.run(); + } catch (const std::exception& e) { + LOGE("Exception caught: %s", e.what()); + } +} +#else +// Desktop main entry point +int main() { + try { + HelloTriangleApplication app; + app.run(); + } catch (const std::exception& e) { + std::cerr << e.what() << std::endl; + return EXIT_FAILURE; + } + + return EXIT_SUCCESS; +} +#endif diff --git a/attachments/35_gltf_ktx.cpp b/attachments/35_gltf_ktx.cpp new file mode 100644 index 00000000..20cc665e --- /dev/null +++ b/attachments/35_gltf_ktx.cpp @@ -0,0 +1,1472 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +import vulkan_hpp; +#include +#if defined(__ANDROID__) +#include +#include +#endif +#include + +#if defined(__ANDROID__) + #define PLATFORM_ANDROID 1 +#else + #define PLATFORM_DESKTOP 1 +#endif + +// Include tinygltf 
instead of tinyobjloader +// TINYGLTF_IMPLEMENTATION is already defined in the command line +#define STB_IMAGE_WRITE_IMPLEMENTATION +#include + +// Include KTX library for texture loading +#include + +#if PLATFORM_ANDROID + #include + #include + #include + #include + + // Declare and implement app_dummy function from native_app_glue + extern "C" void app_dummy() { + // This is a dummy function that does nothing + // It's used to prevent the linker from stripping out the native_app_glue code + } + + // Define AAssetManager type for Android + typedef AAssetManager AssetManagerType; + + #define LOGI(...) ((void)__android_log_print(ANDROID_LOG_INFO, "VulkanTutorial", __VA_ARGS__)) + #define LOGW(...) ((void)__android_log_print(ANDROID_LOG_WARN, "VulkanTutorial", __VA_ARGS__)) + #define LOGE(...) ((void)__android_log_print(ANDROID_LOG_ERROR, "VulkanTutorial", __VA_ARGS__)) +#else + // Define AAssetManager type for non-Android platforms + typedef void AssetManagerType; + // Desktop-specific includes + #define GLFW_INCLUDE_VULKAN + #include + + // Define logging macros for Desktop + #define LOGI(...) printf(__VA_ARGS__); printf("\n") + #define LOGW(...) printf(__VA_ARGS__); printf("\n") + #define LOGE(...) 
fprintf(stderr, __VA_ARGS__); fprintf(stderr, "\n") +#endif + +#define GLM_FORCE_RADIANS +#define GLM_FORCE_DEPTH_ZERO_TO_ONE +#define GLM_ENABLE_EXPERIMENTAL +#define GLM_FORCE_CXX11 +#include +#include +#include + +constexpr uint32_t WIDTH = 800; +constexpr uint32_t HEIGHT = 600; +constexpr uint64_t FenceTimeout = 100000000; +// Update paths to use glTF model and KTX2 texture +const std::string MODEL_PATH = "models/viking_room.glb"; +const std::string TEXTURE_PATH = "textures/viking_room.ktx2"; +constexpr int MAX_FRAMES_IN_FLIGHT = 2; + +// Define VpProfileProperties structure for Android only +#if PLATFORM_ANDROID +#ifndef VP_PROFILE_PROPERTIES_DEFINED +#define VP_PROFILE_PROPERTIES_DEFINED +struct VpProfileProperties { + char name[256]; + uint32_t specVersion; +}; +#endif +#endif + +// Define Vulkan Profile constants +#ifndef VP_KHR_ROADMAP_2022_NAME +#define VP_KHR_ROADMAP_2022_NAME "VP_KHR_roadmap_2022" +#endif + +#ifndef VP_KHR_ROADMAP_2022_SPEC_VERSION +#define VP_KHR_ROADMAP_2022_SPEC_VERSION 1 +#endif + +struct AppInfo { + bool profileSupported = false; + VpProfileProperties profile; +}; + +#if PLATFORM_ANDROID +void android_main(android_app* app); + +struct AndroidAppState { + ANativeWindow* nativeWindow = nullptr; + bool initialized = false; + android_app* app = nullptr; +}; +#endif + +#ifdef NDEBUG +constexpr bool enableValidationLayers = false; +#else +constexpr bool enableValidationLayers = true; +#endif + +struct Vertex { + glm::vec3 pos; + glm::vec3 color; + glm::vec2 texCoord; + + static vk::VertexInputBindingDescription getBindingDescription() { + return { 0, sizeof(Vertex), vk::VertexInputRate::eVertex }; + } + + static std::array getAttributeDescriptions() { + return { + vk::VertexInputAttributeDescription( 0, 0, vk::Format::eR32G32B32Sfloat, offsetof(Vertex, pos) ), + vk::VertexInputAttributeDescription( 1, 0, vk::Format::eR32G32B32Sfloat, offsetof(Vertex, color) ), + vk::VertexInputAttributeDescription( 2, 0, vk::Format::eR32G32Sfloat, 
offsetof(Vertex, texCoord) ) + }; + } + + bool operator==(const Vertex& other) const { + return pos == other.pos && color == other.color && texCoord == other.texCoord; + } +}; + +template<> struct std::hash { + size_t operator()(Vertex const& vertex) const noexcept { + return ((hash()(vertex.pos) ^ (hash()(vertex.color) << 1)) >> 1) ^ (hash()(vertex.texCoord) << 1); + } +}; + +struct UniformBufferObject { + alignas(16) glm::mat4 model; + alignas(16) glm::mat4 view; + alignas(16) glm::mat4 proj; +}; + +class VulkanApplication { +public: +#if PLATFORM_ANDROID + void run(android_app* app) { + androidAppState.nativeWindow = app->window; + androidAppState.app = app; + app->userData = &androidAppState; + app->onAppCmd = handleAppCommand; + // Note: onInputEvent is no longer a member of android_app in the current NDK version + // Input events are now handled differently + + int events; + android_poll_source* source; + + while (app->destroyRequested == 0) { + while (ALooper_pollOnce(androidAppState.initialized ? 0 : -1, nullptr, &events, (void**)&source) >= 0) { + if (source != nullptr) { + source->process(app, source); + } + } + + if (androidAppState.initialized && androidAppState.nativeWindow != nullptr) { + drawFrame(); + } + } + + if (androidAppState.initialized) { + device.waitIdle(); + } + } +#else + void run() { + initWindow(); + initVulkan(); + mainLoop(); + cleanup(); + } +#endif + +private: +#if PLATFORM_ANDROID + AndroidAppState androidAppState; + + static void handleAppCommand(android_app* app, int32_t cmd) { + auto* appState = static_cast(app->userData); + + switch (cmd) { + case APP_CMD_INIT_WINDOW: + if (app->window != nullptr) { + appState->nativeWindow = app->window; + // We can't cast AndroidAppState to VulkanApplication directly + // Instead, we need to access the VulkanApplication instance through a global variable + // or another mechanism. For now, we'll just set the initialized flag. 
+ appState->initialized = true; + } + break; + case APP_CMD_TERM_WINDOW: + appState->nativeWindow = nullptr; + break; + default: + break; + } + } + + static int32_t handleInputEvent(android_app* app, AInputEvent* event) { + if (AInputEvent_getType(event) == AINPUT_EVENT_TYPE_MOTION) { + float x = AMotionEvent_getX(event, 0); + float y = AMotionEvent_getY(event, 0); + + LOGI("Touch at: %f, %f", x, y); + + return 1; + } + return 0; + } +#else + GLFWwindow* window = nullptr; +#endif + + AppInfo appInfo; + vk::raii::Context context; + vk::raii::Instance instance = nullptr; + vk::raii::DebugUtilsMessengerEXT debugMessenger = nullptr; + vk::raii::SurfaceKHR surface = nullptr; + + vk::raii::PhysicalDevice physicalDevice = nullptr; + vk::raii::Device device = nullptr; + + vk::raii::Queue graphicsQueue = nullptr; + vk::raii::Queue presentQueue = nullptr; + + vk::raii::SwapchainKHR swapChain = nullptr; + std::vector swapChainImages; + vk::Format swapChainImageFormat = vk::Format::eUndefined; + vk::Extent2D swapChainExtent; + std::vector swapChainImageViews; + + vk::raii::DescriptorSetLayout descriptorSetLayout = nullptr; + vk::raii::PipelineLayout pipelineLayout = nullptr; + vk::raii::Pipeline graphicsPipeline = nullptr; + + vk::raii::Image depthImage = nullptr; + vk::raii::DeviceMemory depthImageMemory = nullptr; + vk::raii::ImageView depthImageView = nullptr; + + vk::raii::Image textureImage = nullptr; + vk::raii::DeviceMemory textureImageMemory = nullptr; + vk::raii::ImageView textureImageView = nullptr; + vk::raii::Sampler textureSampler = nullptr; + vk::Format textureImageFormat = vk::Format::eUndefined; + + std::vector vertices; + std::vector indices; + vk::raii::Buffer vertexBuffer = nullptr; + vk::raii::DeviceMemory vertexBufferMemory = nullptr; + vk::raii::Buffer indexBuffer = nullptr; + vk::raii::DeviceMemory indexBufferMemory = nullptr; + + std::vector uniformBuffers; + std::vector uniformBuffersMemory; + std::vector uniformBuffersMapped; + + 
vk::raii::DescriptorPool descriptorPool = nullptr; + std::vector descriptorSets; + + vk::raii::CommandPool commandPool = nullptr; + std::vector commandBuffers; + uint32_t graphicsIndex = 0; + + std::vector presentCompleteSemaphore; + std::vector renderFinishedSemaphore; + std::vector inFlightFences; + uint32_t currentFrame = 0; + + bool framebufferResized = false; + + std::vector requiredDeviceExtension = { + vk::KHRSwapchainExtensionName, + vk::KHRCreateRenderpass2ExtensionName + }; + +#if PLATFORM_DESKTOP + void initWindow() { + glfwInit(); + + glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API); + + window = glfwCreateWindow(WIDTH, HEIGHT, "Vulkan", nullptr, nullptr); + glfwSetWindowUserPointer(window, this); + glfwSetFramebufferSizeCallback(window, framebufferResizeCallback); + } + + static void framebufferResizeCallback(GLFWwindow* window, int width, int height) { + auto app = static_cast(glfwGetWindowUserPointer(window)); + app->framebufferResized = true; + } +#endif + +public: + void initVulkan() { + createInstance(); + setupDebugMessenger(); + createSurface(); + pickPhysicalDevice(); + createLogicalDevice(); + createSwapChain(); + createImageViews(); + createDescriptorSetLayout(); + createGraphicsPipeline(); + createCommandPool(); + createDepthResources(); + createTextureImage(); + createTextureImageView(); + createTextureSampler(); + loadModel(); + createVertexBuffer(); + createIndexBuffer(); + createUniformBuffers(); + createDescriptorPool(); + createDescriptorSets(); + createCommandBuffers(); + createSyncObjects(); + } + +private: + +#if PLATFORM_DESKTOP + void mainLoop() { + while (!glfwWindowShouldClose(window)) { + glfwPollEvents(); + drawFrame(); + } + + device.waitIdle(); + } +#endif + + void cleanupSwapChain() { + swapChainImageViews.clear(); + } + +#if PLATFORM_DESKTOP + void cleanup() const { + glfwDestroyWindow(window); + glfwTerminate(); + } +#endif + + void recreateSwapChain() { +#if PLATFORM_DESKTOP + int width = 0, height = 0; + 
glfwGetFramebufferSize(window, &width, &height); + while (width == 0 || height == 0) { + glfwGetFramebufferSize(window, &width, &height); + glfwWaitEvents(); + } +#endif + + device.waitIdle(); + + cleanupSwapChain(); + createSwapChain(); + createImageViews(); + createDepthResources(); + } + + void createInstance() { + constexpr vk::ApplicationInfo appInfo{ + .pApplicationName = "Hello Triangle", + .applicationVersion = VK_MAKE_VERSION(1, 0, 0), + .pEngineName = "No Engine", + .engineVersion = VK_MAKE_VERSION(1, 0, 0), + .apiVersion = VK_API_VERSION_1_3 + }; + + auto extensions = getRequiredExtensions(); + + vk::InstanceCreateInfo createInfo{ + .pApplicationInfo = &appInfo, + .enabledExtensionCount = static_cast(extensions.size()), + .ppEnabledExtensionNames = extensions.data() + }; + + instance = vk::raii::Instance(context, createInfo); + LOGI("Vulkan instance created"); + } + + void setupDebugMessenger() { + // Debug messenger setup is disabled for now to avoid compatibility issues + // This is a simplified approach to get the code compiling + if (!enableValidationLayers) return; + + LOGI("Debug messenger setup skipped for compatibility"); + } + + void createSurface() { +#if PLATFORM_DESKTOP + VkSurfaceKHR _surface; + if (glfwCreateWindowSurface(*instance, window, nullptr, &_surface) != VK_SUCCESS) { + throw std::runtime_error("failed to create window surface!"); + } + surface = vk::raii::SurfaceKHR(instance, _surface); +#else + VkSurfaceKHR _surface; + VkAndroidSurfaceCreateInfoKHR createInfo{ + .sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR, + .window = androidAppState.nativeWindow + }; + if (vkCreateAndroidSurfaceKHR(*instance, &createInfo, nullptr, &_surface) != VK_SUCCESS) { + throw std::runtime_error("failed to create Android surface!"); + } + surface = vk::raii::SurfaceKHR(instance, _surface); +#endif + } + + void pickPhysicalDevice() { + std::vector devices = instance.enumeratePhysicalDevices(); + const auto devIter = std::ranges::find_if( + 
devices, + [&](auto const& device) { + // Check if the device supports the Vulkan 1.3 API version + bool supportsVulkan1_3 = device.getProperties().apiVersion >= VK_API_VERSION_1_3; + + // Check if any of the queue families support graphics operations + auto queueFamilies = device.getQueueFamilyProperties(); + bool supportsGraphics = + std::ranges::any_of(queueFamilies, [](auto const& qfp) { return !!(qfp.queueFlags & vk::QueueFlagBits::eGraphics); }); + + // Check if all required device extensions are available + auto availableDeviceExtensions = device.enumerateDeviceExtensionProperties(); + bool supportsAllRequiredExtensions = + std::ranges::all_of(requiredDeviceExtension, + [&availableDeviceExtensions](auto const& requiredDeviceExtension) { + return std::ranges::any_of(availableDeviceExtensions, + [requiredDeviceExtension](auto const& availableDeviceExtension) { + return strcmp(availableDeviceExtension.extensionName, requiredDeviceExtension) == 0; + }); + }); + + auto features = device.template getFeatures2(); + bool supportsRequiredFeatures = features.template get().dynamicRendering && + features.template get().extendedDynamicState; + + return supportsVulkan1_3 && supportsGraphics && supportsAllRequiredExtensions && supportsRequiredFeatures; + }); + + if (devIter != devices.end()) { + physicalDevice = *devIter; + + // Check for Vulkan profile support + VpProfileProperties profileProperties; +#if PLATFORM_ANDROID + strcpy(profileProperties.name, VP_KHR_ROADMAP_2022_NAME); +#else + strcpy(profileProperties.profileName, VP_KHR_ROADMAP_2022_NAME); +#endif + profileProperties.specVersion = VP_KHR_ROADMAP_2022_SPEC_VERSION; + + VkBool32 supported = VK_FALSE; + bool result = false; + +#if PLATFORM_ANDROID + // Create a vp::ProfileDesc from our VpProfileProperties + vp::ProfileDesc profileDesc = { + profileProperties.name, + profileProperties.specVersion + }; + + // Use vp::GetProfileSupport for Android + result = vp::GetProfileSupport( + *physicalDevice, // Pass the 
physical device directly + &profileDesc, // Pass the profile description + &supported // Output parameter for support status + ); +#else + // Use vpGetPhysicalDeviceProfileSupport for Desktop + VkResult vk_result = vpGetPhysicalDeviceProfileSupport( + *instance, + *physicalDevice, + &profileProperties, + &supported + ); + result = vk_result == static_cast(vk::Result::eSuccess); +#endif + const char* name = nullptr; +#ifdef PLATFORM_ANDROID + name = profileProperties.name; +#else + name = profileProperties.profileName; +#endif + + if (result && supported == VK_TRUE) { + appInfo.profileSupported = true; + appInfo.profile = profileProperties; + LOGI("Device supports Vulkan profile: %s", name); + } else { + LOGI("Device does not support Vulkan profile: %s", name); + } + } else { + throw std::runtime_error("failed to find a suitable GPU!"); + } + } + + void createLogicalDevice() { + // find the index of the first queue family that supports graphics + std::vector queueFamilyProperties = physicalDevice.getQueueFamilyProperties(); + + // get the first index into queueFamilyProperties which supports graphics + auto graphicsQueueFamilyProperty = std::ranges::find_if( queueFamilyProperties, []( auto const & qfp ) + { return (qfp.queueFlags & vk::QueueFlagBits::eGraphics) != static_cast(0); } ); + + graphicsIndex = static_cast( std::distance( queueFamilyProperties.begin(), graphicsQueueFamilyProperty ) ); + + // determine a queueFamilyIndex that supports present + // first check if the graphicsIndex is good enough + auto presentIndex = physicalDevice.getSurfaceSupportKHR( graphicsIndex, *surface ) + ? 
graphicsIndex + : ~0; + if ( presentIndex == queueFamilyProperties.size() ) + { + // the graphicsIndex doesn't support present -> look for another family index that supports both + // graphics and present + for ( size_t i = 0; i < queueFamilyProperties.size(); i++ ) + { + if ( ( queueFamilyProperties[i].queueFlags & vk::QueueFlagBits::eGraphics ) && + physicalDevice.getSurfaceSupportKHR( static_cast( i ), *surface ) ) + { + graphicsIndex = static_cast( i ); + presentIndex = graphicsIndex; + break; + } + } + if ( presentIndex == queueFamilyProperties.size() ) + { + // there's nothing like a single family index that supports both graphics and present -> look for another + // family index that supports present + for ( size_t i = 0; i < queueFamilyProperties.size(); i++ ) + { + if ( physicalDevice.getSurfaceSupportKHR( static_cast( i ), *surface ) ) + { + presentIndex = static_cast( i ); + break; + } + } + } + } + if ( ( graphicsIndex == queueFamilyProperties.size() ) || ( presentIndex == queueFamilyProperties.size() ) ) + { + throw std::runtime_error( "Could not find a queue for graphics or present -> terminating" ); + } + + // query for Vulkan 1.3 features + auto features = physicalDevice.getFeatures2(); + vk::PhysicalDeviceVulkan13Features vulkan13Features; + vk::PhysicalDeviceExtendedDynamicStateFeaturesEXT extendedDynamicStateFeatures; + vulkan13Features.dynamicRendering = vk::True; + vulkan13Features.synchronization2 = vk::True; + extendedDynamicStateFeatures.extendedDynamicState = vk::True; + vulkan13Features.pNext = &extendedDynamicStateFeatures; + features.pNext = &vulkan13Features; + // create a Device + float queuePriority = 0.0f; + vk::DeviceQueueCreateInfo deviceQueueCreateInfo { .queueFamilyIndex = graphicsIndex, .queueCount = 1, .pQueuePriorities = &queuePriority }; + vk::DeviceCreateInfo deviceCreateInfo{ + .pNext = &features, + .queueCreateInfoCount = 1, + .pQueueCreateInfos = &deviceQueueCreateInfo, + .enabledExtensionCount = 
static_cast(requiredDeviceExtension.size()), + .ppEnabledExtensionNames = requiredDeviceExtension.data() + }; + + // Create the device with the appropriate features + device = vk::raii::Device(physicalDevice, deviceCreateInfo); + + graphicsQueue = vk::raii::Queue(device, graphicsIndex, 0); + presentQueue = vk::raii::Queue(device, presentIndex, 0); + } + + void createSwapChain() { + auto surfaceCapabilities = physicalDevice.getSurfaceCapabilitiesKHR(*surface); + swapChainImageFormat = chooseSwapSurfaceFormat(physicalDevice.getSurfaceFormatsKHR(*surface)); + swapChainExtent = chooseSwapExtent(surfaceCapabilities); + auto minImageCount = std::max(3u, surfaceCapabilities.minImageCount); + minImageCount = (surfaceCapabilities.maxImageCount > 0 && minImageCount > surfaceCapabilities.maxImageCount) ? surfaceCapabilities.maxImageCount : minImageCount; + vk::SwapchainCreateInfoKHR swapChainCreateInfo{ + .surface = *surface, .minImageCount = minImageCount, + .imageFormat = swapChainImageFormat, .imageColorSpace = vk::ColorSpaceKHR::eSrgbNonlinear, + .imageExtent = swapChainExtent, .imageArrayLayers =1, + .imageUsage = vk::ImageUsageFlagBits::eColorAttachment, .imageSharingMode = vk::SharingMode::eExclusive, + .preTransform = surfaceCapabilities.currentTransform, .compositeAlpha = vk::CompositeAlphaFlagBitsKHR::eOpaque, + .presentMode = chooseSwapPresentMode(physicalDevice.getSurfacePresentModesKHR(*surface)), + .clipped = true }; + + swapChain = vk::raii::SwapchainKHR(device, swapChainCreateInfo); + swapChainImages = swapChain.getImages(); + } + + void createImageViews() { + vk::ImageViewCreateInfo imageViewCreateInfo{ + .viewType = vk::ImageViewType::e2D, + .format = swapChainImageFormat, + .subresourceRange = { vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 } + }; + for ( auto image : swapChainImages ) + { + imageViewCreateInfo.image = image; + swapChainImageViews.emplace_back( device, imageViewCreateInfo ); + } + } + + void createDescriptorSetLayout() { + std::array bindings 
= { + vk::DescriptorSetLayoutBinding( 0, vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex, nullptr), + vk::DescriptorSetLayoutBinding( 1, vk::DescriptorType::eCombinedImageSampler, 1, vk::ShaderStageFlagBits::eFragment, nullptr) + }; + + vk::DescriptorSetLayoutCreateInfo layoutInfo{ .bindingCount = static_cast(bindings.size()), .pBindings = bindings.data() }; + descriptorSetLayout = vk::raii::DescriptorSetLayout(device, layoutInfo); + } + + void createGraphicsPipeline() { + vk::raii::ShaderModule shaderModule = createShaderModule(this->readFile("shaders/slang.spv")); + + vk::PipelineShaderStageCreateInfo vertShaderStageInfo{ .stage = vk::ShaderStageFlagBits::eVertex, .module = *shaderModule, .pName = "vertMain" }; + vk::PipelineShaderStageCreateInfo fragShaderStageInfo{ .stage = vk::ShaderStageFlagBits::eFragment, .module = *shaderModule, .pName = "fragMain" }; + vk::PipelineShaderStageCreateInfo shaderStages[] = {vertShaderStageInfo, fragShaderStageInfo}; + + auto bindingDescription = Vertex::getBindingDescription(); + auto attributeDescriptions = Vertex::getAttributeDescriptions(); + vk::PipelineVertexInputStateCreateInfo vertexInputInfo{ + .vertexBindingDescriptionCount = 1, + .pVertexBindingDescriptions = &bindingDescription, + .vertexAttributeDescriptionCount = static_cast(attributeDescriptions.size()), + .pVertexAttributeDescriptions = attributeDescriptions.data() + }; + vk::PipelineInputAssemblyStateCreateInfo inputAssembly{ + .topology = vk::PrimitiveTopology::eTriangleList, + .primitiveRestartEnable = vk::False + }; + vk::PipelineViewportStateCreateInfo viewportState{ + .viewportCount = 1, + .scissorCount = 1 + }; + vk::PipelineRasterizationStateCreateInfo rasterizer{ + .depthClampEnable = vk::False, + .rasterizerDiscardEnable = vk::False, + .polygonMode = vk::PolygonMode::eFill, + .cullMode = vk::CullModeFlagBits::eBack, // Re-enabled culling for better performance + .frontFace = vk::FrontFace::eClockwise, // Keeping Clockwise for 
glTF + .depthBiasEnable = vk::False + }; + rasterizer.lineWidth = 1.0f; + vk::PipelineMultisampleStateCreateInfo multisampling{ + .rasterizationSamples = vk::SampleCountFlagBits::e1, + .sampleShadingEnable = vk::False + }; + vk::PipelineDepthStencilStateCreateInfo depthStencil{ + .depthTestEnable = vk::True, + .depthWriteEnable = vk::True, + .depthCompareOp = vk::CompareOp::eLess, + .depthBoundsTestEnable = vk::False, + .stencilTestEnable = vk::False + }; + vk::PipelineColorBlendAttachmentState colorBlendAttachment; + colorBlendAttachment.colorWriteMask = vk::ColorComponentFlagBits::eR | vk::ColorComponentFlagBits::eG | vk::ColorComponentFlagBits::eB | vk::ColorComponentFlagBits::eA; + colorBlendAttachment.blendEnable = vk::False; + + vk::PipelineColorBlendStateCreateInfo colorBlending{ + .logicOpEnable = vk::False, + .logicOp = vk::LogicOp::eCopy, + .attachmentCount = 1, + .pAttachments = &colorBlendAttachment + }; + + std::vector dynamicStates = { + vk::DynamicState::eViewport, + vk::DynamicState::eScissor + }; + vk::PipelineDynamicStateCreateInfo dynamicState{ .dynamicStateCount = static_cast(dynamicStates.size()), .pDynamicStates = dynamicStates.data() }; + + vk::PipelineLayoutCreateInfo pipelineLayoutInfo{ .setLayoutCount = 1, .pSetLayouts = &*descriptorSetLayout, .pushConstantRangeCount = 0 }; + + pipelineLayout = vk::raii::PipelineLayout(device, pipelineLayoutInfo); + + vk::PipelineRenderingCreateInfo pipelineRenderingCreateInfo{ .colorAttachmentCount = 1, .pColorAttachmentFormats = &swapChainImageFormat }; + vk::GraphicsPipelineCreateInfo pipelineInfo{ .pNext = &pipelineRenderingCreateInfo, + .stageCount = 2, + .pStages = shaderStages, + .pVertexInputState = &vertexInputInfo, + .pInputAssemblyState = &inputAssembly, + .pViewportState = &viewportState, + .pRasterizationState = &rasterizer, + .pMultisampleState = &multisampling, + .pDepthStencilState = &depthStencil, + .pColorBlendState = &colorBlending, + .pDynamicState = &dynamicState, + .layout = 
*pipelineLayout, + .renderPass = nullptr + }; + + graphicsPipeline = vk::raii::Pipeline(device, nullptr, pipelineInfo); + } + + void createCommandPool() { + vk::CommandPoolCreateInfo poolInfo{ + .flags = vk::CommandPoolCreateFlagBits::eResetCommandBuffer, + .queueFamilyIndex = graphicsIndex + }; + commandPool = vk::raii::CommandPool(device, poolInfo); + } + + void createDepthResources() { + vk::Format depthFormat = findDepthFormat(); + + createImage(swapChainExtent.width, swapChainExtent.height, depthFormat, vk::ImageTiling::eOptimal, vk::ImageUsageFlagBits::eDepthStencilAttachment, vk::MemoryPropertyFlagBits::eDeviceLocal, depthImage, depthImageMemory); + depthImageView = createImageView(depthImage, depthFormat, vk::ImageAspectFlagBits::eDepth); + } + + vk::Format findSupportedFormat(const std::vector& candidates, vk::ImageTiling tiling, vk::FormatFeatureFlags features) const { + for (const auto format : candidates) { + vk::FormatProperties props = physicalDevice.getFormatProperties(format); + + if (tiling == vk::ImageTiling::eLinear && (props.linearTilingFeatures & features) == features) { + return format; + } + if (tiling == vk::ImageTiling::eOptimal && (props.optimalTilingFeatures & features) == features) { + return format; + } + } + + throw std::runtime_error("failed to find supported format!"); + } + + [[nodiscard]] vk::Format findDepthFormat() const { + return findSupportedFormat( + {vk::Format::eD32Sfloat, vk::Format::eD32SfloatS8Uint, vk::Format::eD24UnormS8Uint}, + vk::ImageTiling::eOptimal, + vk::FormatFeatureFlagBits::eDepthStencilAttachment + ); + } + + static bool hasStencilComponent(vk::Format format) { + return format == vk::Format::eD32SfloatS8Uint || format == vk::Format::eD24UnormS8Uint; + } + + void createTextureImage() { + // Load KTX2 texture instead of using stb_image + ktxTexture* kTexture; + KTX_error_code result = ktxTexture_CreateFromNamedFile( + TEXTURE_PATH.c_str(), + KTX_TEXTURE_CREATE_LOAD_IMAGE_DATA_BIT, + &kTexture); + + if (result 
!= KTX_SUCCESS) { + throw std::runtime_error("failed to load ktx texture image!"); + } + + // Get texture dimensions and data + uint32_t texWidth = kTexture->baseWidth; + uint32_t texHeight = kTexture->baseHeight; + ktx_size_t imageSize = ktxTexture_GetImageSize(kTexture, 0); + ktx_uint8_t* ktxTextureData = ktxTexture_GetData(kTexture); + + vk::raii::Buffer stagingBuffer({}); + vk::raii::DeviceMemory stagingBufferMemory({}); + createBuffer(imageSize, vk::BufferUsageFlagBits::eTransferSrc, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent, stagingBuffer, stagingBufferMemory); + + void* data = stagingBufferMemory.mapMemory(0, imageSize); + memcpy(data, ktxTextureData, imageSize); + stagingBufferMemory.unmapMemory(); + + // Determine the Vulkan format from KTX format + vk::Format textureFormat; + + // Check if the KTX texture has a format + if (kTexture->classId == ktxTexture2_c) { + // For KTX2 files, we can get the format directly + auto* ktx2 = reinterpret_cast(kTexture); + textureFormat = static_cast(ktx2->vkFormat); + if (textureFormat == vk::Format::eUndefined) { + // If the format is undefined, fall back to a reasonable default + textureFormat = vk::Format::eR8G8B8A8Unorm; + } + } else { + // For KTX1 files or if we can't determine the format, use a reasonable default + textureFormat = vk::Format::eR8G8B8A8Unorm; + } + + textureImageFormat = textureFormat; + + createImage(texWidth, texHeight, textureFormat, vk::ImageTiling::eOptimal, + vk::ImageUsageFlagBits::eTransferDst | vk::ImageUsageFlagBits::eSampled, + vk::MemoryPropertyFlagBits::eDeviceLocal, textureImage, textureImageMemory); + + transitionImageLayout(textureImage, vk::ImageLayout::eUndefined, vk::ImageLayout::eTransferDstOptimal); + copyBufferToImage(stagingBuffer, textureImage, texWidth, texHeight); + transitionImageLayout(textureImage, vk::ImageLayout::eTransferDstOptimal, vk::ImageLayout::eShaderReadOnlyOptimal); + + ktxTexture_Destroy(kTexture); + } + + void 
createTextureImageView() { + textureImageView = createImageView(textureImage, textureImageFormat, vk::ImageAspectFlagBits::eColor); + } + + void createTextureSampler() { + vk::PhysicalDeviceProperties properties = physicalDevice.getProperties(); + vk::SamplerCreateInfo samplerInfo{ + .magFilter = vk::Filter::eLinear, + .minFilter = vk::Filter::eLinear, + .mipmapMode = vk::SamplerMipmapMode::eLinear, + .addressModeU = vk::SamplerAddressMode::eRepeat, + .addressModeV = vk::SamplerAddressMode::eRepeat, + .addressModeW = vk::SamplerAddressMode::eRepeat, + .mipLodBias = 0.0f, + .anisotropyEnable = vk::True, + .maxAnisotropy = properties.limits.maxSamplerAnisotropy, + .compareEnable = vk::False, + .compareOp = vk::CompareOp::eAlways + }; + textureSampler = vk::raii::Sampler(device, samplerInfo); + } + + vk::raii::ImageView createImageView(vk::raii::Image& image, vk::Format format, vk::ImageAspectFlags aspectFlags) { + vk::ImageViewCreateInfo viewInfo{ + .image = *image, + .viewType = vk::ImageViewType::e2D, + .format = format, + .subresourceRange = { aspectFlags, 0, 1, 0, 1 } + }; + return vk::raii::ImageView(device, viewInfo); + } + + void createImage(uint32_t width, uint32_t height, vk::Format format, vk::ImageTiling tiling, vk::ImageUsageFlags usage, vk::MemoryPropertyFlags properties, vk::raii::Image& image, vk::raii::DeviceMemory& imageMemory) { + vk::ImageCreateInfo imageInfo{ + .imageType = vk::ImageType::e2D, + .format = format, + .extent = {width, height, 1}, + .mipLevels = 1, + .arrayLayers = 1, + .samples = vk::SampleCountFlagBits::e1, + .tiling = tiling, + .usage = usage, + .sharingMode = vk::SharingMode::eExclusive, + .initialLayout = vk::ImageLayout::eUndefined + }; + image = vk::raii::Image(device, imageInfo); + + vk::MemoryRequirements memRequirements = image.getMemoryRequirements(); + vk::MemoryAllocateInfo allocInfo{ + .allocationSize = memRequirements.size, + .memoryTypeIndex = findMemoryType(memRequirements.memoryTypeBits, properties) + }; + 
imageMemory = vk::raii::DeviceMemory(device, allocInfo); + image.bindMemory(*imageMemory, 0); + } + + void transitionImageLayout(const vk::raii::Image& image, vk::ImageLayout oldLayout, vk::ImageLayout newLayout) { + auto commandBuffer = beginSingleTimeCommands(); + + vk::ImageMemoryBarrier barrier{ + .oldLayout = oldLayout, + .newLayout = newLayout, + .image = *image, + .subresourceRange = { vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 } + }; + + vk::PipelineStageFlags sourceStage; + vk::PipelineStageFlags destinationStage; + + if (oldLayout == vk::ImageLayout::eUndefined && newLayout == vk::ImageLayout::eTransferDstOptimal) { + barrier.srcAccessMask = {}; + barrier.dstAccessMask = vk::AccessFlagBits::eTransferWrite; + + sourceStage = vk::PipelineStageFlagBits::eTopOfPipe; + destinationStage = vk::PipelineStageFlagBits::eTransfer; + } else if (oldLayout == vk::ImageLayout::eTransferDstOptimal && newLayout == vk::ImageLayout::eShaderReadOnlyOptimal) { + barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite; + barrier.dstAccessMask = vk::AccessFlagBits::eShaderRead; + + sourceStage = vk::PipelineStageFlagBits::eTransfer; + destinationStage = vk::PipelineStageFlagBits::eFragmentShader; + } else { + throw std::invalid_argument("unsupported layout transition!"); + } + commandBuffer->pipelineBarrier( sourceStage, destinationStage, {}, {}, nullptr, barrier ); + endSingleTimeCommands(*commandBuffer); + } + + void copyBufferToImage(const vk::raii::Buffer& buffer, vk::raii::Image& image, uint32_t width, uint32_t height) { + std::unique_ptr commandBuffer = beginSingleTimeCommands(); + vk::BufferImageCopy region{ + .bufferOffset = 0, + .bufferRowLength = 0, + .bufferImageHeight = 0, + .imageSubresource = { vk::ImageAspectFlagBits::eColor, 0, 0, 1 }, + .imageOffset = {0, 0, 0}, + .imageExtent = {width, height, 1} + }; + commandBuffer->copyBufferToImage(*buffer, *image, vk::ImageLayout::eTransferDstOptimal, {region}); + endSingleTimeCommands(*commandBuffer); + } + + void 
loadModel() { + // Use tinygltf to load the model instead of tinyobjloader + tinygltf::Model model; + tinygltf::TinyGLTF loader; + std::string err; + std::string warn; + + bool ret = loader.LoadBinaryFromFile(&model, &err, &warn, MODEL_PATH); + + if (!warn.empty()) { + std::cout << "glTF warning: " << warn << std::endl; + } + + if (!err.empty()) { + std::cout << "glTF error: " << err << std::endl; + } + + if (!ret) { + throw std::runtime_error("Failed to load glTF model"); + } + + vertices.clear(); + indices.clear(); + + // Process all meshes in the model + for (const auto& mesh : model.meshes) { + for (const auto& primitive : mesh.primitives) { + // Get indices + const tinygltf::Accessor& indexAccessor = model.accessors[primitive.indices]; + const tinygltf::BufferView& indexBufferView = model.bufferViews[indexAccessor.bufferView]; + const tinygltf::Buffer& indexBuffer = model.buffers[indexBufferView.buffer]; + + // Get vertex positions + const tinygltf::Accessor& posAccessor = model.accessors[primitive.attributes.at("POSITION")]; + const tinygltf::BufferView& posBufferView = model.bufferViews[posAccessor.bufferView]; + const tinygltf::Buffer& posBuffer = model.buffers[posBufferView.buffer]; + + // Get texture coordinates if available + bool hasTexCoords = primitive.attributes.find("TEXCOORD_0") != primitive.attributes.end(); + const tinygltf::Accessor* texCoordAccessor = nullptr; + const tinygltf::BufferView* texCoordBufferView = nullptr; + const tinygltf::Buffer* texCoordBuffer = nullptr; + + if (hasTexCoords) { + texCoordAccessor = &model.accessors[primitive.attributes.at("TEXCOORD_0")]; + texCoordBufferView = &model.bufferViews[texCoordAccessor->bufferView]; + texCoordBuffer = &model.buffers[texCoordBufferView->buffer]; + } + + uint32_t baseVertex = static_cast(vertices.size()); + + for (size_t i = 0; i < posAccessor.count; i++) { + Vertex vertex{}; + + const float* pos = reinterpret_cast(&posBuffer.data[posBufferView.byteOffset + posAccessor.byteOffset + i * 
12]); + // glTF uses a right-handed coordinate system with Y-up + // Vulkan uses a right-handed coordinate system with Y-down + // We need to flip the Y coordinate + vertex.pos = {pos[0], -pos[1], pos[2]}; + + if (hasTexCoords) { + const float* texCoord = reinterpret_cast(&texCoordBuffer->data[texCoordBufferView->byteOffset + texCoordAccessor->byteOffset + i * 8]); + vertex.texCoord = {texCoord[0], texCoord[1]}; + } else { + vertex.texCoord = {0.0f, 0.0f}; + } + + vertex.color = {1.0f, 1.0f, 1.0f}; + + vertices.push_back(vertex); + } + + const unsigned char* indexData = &indexBuffer.data[indexBufferView.byteOffset + indexAccessor.byteOffset]; + size_t indexCount = indexAccessor.count; + size_t indexStride = 0; + + // Determine index stride based on component type + if (indexAccessor.componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_SHORT) { + indexStride = sizeof(uint16_t); + } else if (indexAccessor.componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_INT) { + indexStride = sizeof(uint32_t); + } else if (indexAccessor.componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_BYTE) { + indexStride = sizeof(uint8_t); + } else { + throw std::runtime_error("Unsupported index component type"); + } + + indices.reserve(indices.size() + indexCount); + + for (size_t i = 0; i < indexCount; i++) { + uint32_t index = 0; + + if (indexAccessor.componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_SHORT) { + index = *reinterpret_cast(indexData + i * indexStride); + } else if (indexAccessor.componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_INT) { + index = *reinterpret_cast(indexData + i * indexStride); + } else if (indexAccessor.componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_BYTE) { + index = *reinterpret_cast(indexData + i * indexStride); + } + + indices.push_back(baseVertex + index); + } + } + } + } + + void createVertexBuffer() { + vk::DeviceSize bufferSize = sizeof(vertices[0]) * vertices.size(); + vk::raii::Buffer stagingBuffer({}); + vk::raii::DeviceMemory stagingBufferMemory({}); + 
createBuffer(bufferSize, vk::BufferUsageFlagBits::eTransferSrc, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent, stagingBuffer, stagingBufferMemory); + + void* dataStaging = stagingBufferMemory.mapMemory(0, bufferSize); + memcpy(dataStaging, vertices.data(), bufferSize); + stagingBufferMemory.unmapMemory(); + + createBuffer(bufferSize, vk::BufferUsageFlagBits::eTransferDst | vk::BufferUsageFlagBits::eVertexBuffer, vk::MemoryPropertyFlagBits::eDeviceLocal, vertexBuffer, vertexBufferMemory); + + copyBuffer(stagingBuffer, vertexBuffer, bufferSize); + } + + void createIndexBuffer() { + vk::DeviceSize bufferSize = sizeof(indices[0]) * indices.size(); + + vk::raii::Buffer stagingBuffer({}); + vk::raii::DeviceMemory stagingBufferMemory({}); + createBuffer(bufferSize, vk::BufferUsageFlagBits::eTransferSrc, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent, stagingBuffer, stagingBufferMemory); + + void* data = stagingBufferMemory.mapMemory(0, bufferSize); + memcpy(data, indices.data(), bufferSize); + stagingBufferMemory.unmapMemory(); + + createBuffer(bufferSize, vk::BufferUsageFlagBits::eTransferDst | vk::BufferUsageFlagBits::eIndexBuffer, vk::MemoryPropertyFlagBits::eDeviceLocal, indexBuffer, indexBufferMemory); + + copyBuffer(stagingBuffer, indexBuffer, bufferSize); + } + + void createUniformBuffers() { + uniformBuffers.clear(); + uniformBuffersMemory.clear(); + uniformBuffersMapped.clear(); + + for (size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) { + vk::DeviceSize bufferSize = sizeof(UniformBufferObject); + vk::raii::Buffer buffer({}); + vk::raii::DeviceMemory bufferMem({}); + createBuffer(bufferSize, vk::BufferUsageFlagBits::eUniformBuffer, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent, buffer, bufferMem); + uniformBuffers.emplace_back(std::move(buffer)); + uniformBuffersMemory.emplace_back(std::move(bufferMem)); + uniformBuffersMapped.emplace_back( 
uniformBuffersMemory[i].mapMemory(0, bufferSize)); + } + } + + void createDescriptorPool() { + std::array poolSize { + vk::DescriptorPoolSize( vk::DescriptorType::eUniformBuffer, MAX_FRAMES_IN_FLIGHT), + vk::DescriptorPoolSize( vk::DescriptorType::eCombinedImageSampler, MAX_FRAMES_IN_FLIGHT) + }; + vk::DescriptorPoolCreateInfo poolInfo{ + .flags = vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet, + .maxSets = MAX_FRAMES_IN_FLIGHT, + .poolSizeCount = static_cast(poolSize.size()), + .pPoolSizes = poolSize.data() + }; + descriptorPool = vk::raii::DescriptorPool(device, poolInfo); + } + + void createDescriptorSets() { + std::vector layouts(MAX_FRAMES_IN_FLIGHT, *descriptorSetLayout); + vk::DescriptorSetAllocateInfo allocInfo{ + .descriptorPool = *descriptorPool, + .descriptorSetCount = static_cast(layouts.size()), + .pSetLayouts = layouts.data() + }; + + descriptorSets.clear(); + descriptorSets = device.allocateDescriptorSets(allocInfo); + + for (size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) { + vk::DescriptorBufferInfo bufferInfo{ + .buffer = *uniformBuffers[i], + .offset = 0, + .range = sizeof(UniformBufferObject) + }; + vk::DescriptorImageInfo imageInfo{ + .sampler = *textureSampler, + .imageView = *textureImageView, + .imageLayout = vk::ImageLayout::eShaderReadOnlyOptimal + }; + std::array descriptorWrites{ + vk::WriteDescriptorSet{ + .dstSet = *descriptorSets[i], + .dstBinding = 0, + .dstArrayElement = 0, + .descriptorCount = 1, + .descriptorType = vk::DescriptorType::eUniformBuffer, + .pBufferInfo = &bufferInfo + }, + vk::WriteDescriptorSet{ + .dstSet = *descriptorSets[i], + .dstBinding = 1, + .dstArrayElement = 0, + .descriptorCount = 1, + .descriptorType = vk::DescriptorType::eCombinedImageSampler, + .pImageInfo = &imageInfo + } + }; + device.updateDescriptorSets(descriptorWrites, {}); + } + } + + void createBuffer(vk::DeviceSize size, vk::BufferUsageFlags usage, vk::MemoryPropertyFlags properties, vk::raii::Buffer& buffer, vk::raii::DeviceMemory& 
bufferMemory) { + vk::BufferCreateInfo bufferInfo{ + .size = size, + .usage = usage, + .sharingMode = vk::SharingMode::eExclusive + }; + buffer = vk::raii::Buffer(device, bufferInfo); + vk::MemoryRequirements memRequirements = buffer.getMemoryRequirements(); + vk::MemoryAllocateInfo allocInfo{ + .allocationSize = memRequirements.size, + .memoryTypeIndex = findMemoryType(memRequirements.memoryTypeBits, properties) + }; + bufferMemory = vk::raii::DeviceMemory(device, allocInfo); + buffer.bindMemory(*bufferMemory, 0); + } + + std::unique_ptr beginSingleTimeCommands() { + vk::CommandBufferAllocateInfo allocInfo{ + .commandPool = *commandPool, + .level = vk::CommandBufferLevel::ePrimary, + .commandBufferCount = 1 + }; + std::unique_ptr commandBuffer = std::make_unique(std::move(vk::raii::CommandBuffers(device, allocInfo).front())); + + vk::CommandBufferBeginInfo beginInfo{ + .flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit + }; + commandBuffer->begin(beginInfo); + + return commandBuffer; + } + + void endSingleTimeCommands(const vk::raii::CommandBuffer& commandBuffer) const { + commandBuffer.end(); + + vk::SubmitInfo submitInfo{ .commandBufferCount = 1, .pCommandBuffers = &*commandBuffer }; + graphicsQueue.submit(submitInfo, nullptr); + graphicsQueue.waitIdle(); + } + + void copyBuffer(vk::raii::Buffer & srcBuffer, vk::raii::Buffer & dstBuffer, vk::DeviceSize size) { + vk::CommandBufferAllocateInfo allocInfo{ .commandPool = *commandPool, .level = vk::CommandBufferLevel::ePrimary, .commandBufferCount = 1 }; + vk::raii::CommandBuffer commandCopyBuffer = std::move(device.allocateCommandBuffers(allocInfo).front()); + commandCopyBuffer.begin(vk::CommandBufferBeginInfo{ .flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit }); + commandCopyBuffer.copyBuffer(*srcBuffer, *dstBuffer, vk::BufferCopy{ .size = size }); + commandCopyBuffer.end(); + graphicsQueue.submit(vk::SubmitInfo{ .commandBufferCount = 1, .pCommandBuffers = &*commandCopyBuffer }, nullptr); + 
graphicsQueue.waitIdle(); + } + + uint32_t findMemoryType(uint32_t typeFilter, vk::MemoryPropertyFlags properties) { + vk::PhysicalDeviceMemoryProperties memProperties = physicalDevice.getMemoryProperties(); + + for (uint32_t i = 0; i < memProperties.memoryTypeCount; i++) { + if ((typeFilter & (1 << i)) && (memProperties.memoryTypes[i].propertyFlags & properties) == properties) { + return i; + } + } + + throw std::runtime_error("failed to find suitable memory type!"); + } + + void createCommandBuffers() { + commandBuffers.clear(); + vk::CommandBufferAllocateInfo allocInfo{ .commandPool = *commandPool, .level = vk::CommandBufferLevel::ePrimary, + .commandBufferCount = MAX_FRAMES_IN_FLIGHT }; + commandBuffers = vk::raii::CommandBuffers(device, allocInfo); + } + + void recordCommandBuffer(uint32_t imageIndex) { + commandBuffers[currentFrame].begin({}); + transition_image_layout( + imageIndex, + vk::ImageLayout::eUndefined, + vk::ImageLayout::eColorAttachmentOptimal, + {}, + vk::AccessFlagBits2::eColorAttachmentWrite, + vk::PipelineStageFlagBits2::eTopOfPipe, + vk::PipelineStageFlagBits2::eColorAttachmentOutput + ); + vk::ClearValue clearColor = vk::ClearColorValue(0.0f, 0.0f, 0.0f, 1.0f); + vk::RenderingAttachmentInfo attachmentInfo = { + .imageView = *swapChainImageViews[imageIndex], + .imageLayout = vk::ImageLayout::eColorAttachmentOptimal, + .loadOp = vk::AttachmentLoadOp::eClear, + .storeOp = vk::AttachmentStoreOp::eStore, + .clearValue = clearColor + }; + vk::RenderingInfo renderingInfo = { + .renderArea = { .offset = { 0, 0 }, .extent = swapChainExtent }, + .layerCount = 1, + .colorAttachmentCount = 1, + .pColorAttachments = &attachmentInfo + }; + commandBuffers[currentFrame].beginRendering(renderingInfo); + commandBuffers[currentFrame].bindPipeline(vk::PipelineBindPoint::eGraphics, *graphicsPipeline); + commandBuffers[currentFrame].setViewport(0, vk::Viewport(0.0f, 0.0f, static_cast(swapChainExtent.width), static_cast(swapChainExtent.height), 0.0f, 1.0f)); + 
commandBuffers[currentFrame].setScissor(0, vk::Rect2D(vk::Offset2D(0, 0), swapChainExtent)); + commandBuffers[currentFrame].bindVertexBuffers(0, *vertexBuffer, {0}); + commandBuffers[currentFrame].bindIndexBuffer( *indexBuffer, 0, vk::IndexType::eUint32 ); + commandBuffers[currentFrame].bindDescriptorSets(vk::PipelineBindPoint::eGraphics, *pipelineLayout, 0, *descriptorSets[currentFrame], nullptr); + commandBuffers[currentFrame].drawIndexed(indices.size(), 1, 0, 0, 0); + commandBuffers[currentFrame].endRendering(); + transition_image_layout( + imageIndex, + vk::ImageLayout::eColorAttachmentOptimal, + vk::ImageLayout::ePresentSrcKHR, + vk::AccessFlagBits2::eColorAttachmentWrite, + {}, + vk::PipelineStageFlagBits2::eColorAttachmentOutput, + vk::PipelineStageFlagBits2::eBottomOfPipe + ); + commandBuffers[currentFrame].end(); + } + + void transition_image_layout( + uint32_t imageIndex, + vk::ImageLayout old_layout, + vk::ImageLayout new_layout, + vk::AccessFlags2 src_access_mask, + vk::AccessFlags2 dst_access_mask, + vk::PipelineStageFlags2 src_stage_mask, + vk::PipelineStageFlags2 dst_stage_mask + ) { + vk::ImageMemoryBarrier2 barrier = { + .srcStageMask = src_stage_mask, + .srcAccessMask = src_access_mask, + .dstStageMask = dst_stage_mask, + .dstAccessMask = dst_access_mask, + .oldLayout = old_layout, + .newLayout = new_layout, + .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .image = swapChainImages[imageIndex], + .subresourceRange = { + .aspectMask = vk::ImageAspectFlagBits::eColor, + .baseMipLevel = 0, + .levelCount = 1, + .baseArrayLayer = 0, + .layerCount = 1 + } + }; + vk::DependencyInfo dependency_info = { + .dependencyFlags = {}, + .imageMemoryBarrierCount = 1, + .pImageMemoryBarriers = &barrier + }; + commandBuffers[currentFrame].pipelineBarrier2(dependency_info); + } + + void createSyncObjects() { + presentCompleteSemaphore.clear(); + renderFinishedSemaphore.clear(); + inFlightFences.clear(); + + for 
(size_t i = 0; i < swapChainImages.size(); i++) { + presentCompleteSemaphore.emplace_back(device, vk::SemaphoreCreateInfo()); + renderFinishedSemaphore.emplace_back(device, vk::SemaphoreCreateInfo()); + } + + + for (size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) { + inFlightFences.emplace_back(device, vk::FenceCreateInfo{ .flags = vk::FenceCreateFlagBits::eSignaled }); + } + } + + void updateUniformBuffer(uint32_t currentImage) const { + static auto startTime = std::chrono::high_resolution_clock::now(); + + auto currentTime = std::chrono::high_resolution_clock::now(); + float time = std::chrono::duration(currentTime - startTime).count(); + + UniformBufferObject ubo{}; + glm::mat4 initialRotation = glm::rotate(glm::mat4(1.0f), glm::radians(-90.0f), glm::vec3(1.0f, 0.0f, 0.0f)); + glm::mat4 continuousRotation = glm::rotate(glm::mat4(1.0f), time * glm::radians(90.0f), glm::vec3(0.0f, 0.0f, 1.0f)); + ubo.model = continuousRotation * initialRotation; + ubo.view = lookAt(glm::vec3(2.0f, 2.0f, 2.0f), glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 0.0f, 1.0f)); + ubo.proj = glm::perspective(glm::radians(45.0f), static_cast(swapChainExtent.width) / static_cast(swapChainExtent.height), 0.1f, 10.0f); + ubo.proj[1][1] *= -1; + + memcpy(uniformBuffersMapped[currentImage], &ubo, sizeof(ubo)); + } + + void drawFrame() { + while ( vk::Result::eTimeout == device.waitForFences( *inFlightFences[currentFrame], vk::True, UINT64_MAX ) ) + ; + auto [result, imageIndex] = swapChain.acquireNextImage( UINT64_MAX, *presentCompleteSemaphore[currentFrame], nullptr ); + + if (result == vk::Result::eErrorOutOfDateKHR) { + recreateSwapChain(); + return; + } + if (result != vk::Result::eSuccess && result != vk::Result::eSuboptimalKHR) { + throw std::runtime_error("failed to acquire swap chain image!"); + } + updateUniformBuffer(currentFrame); + + device.resetFences( *inFlightFences[currentFrame] ); + commandBuffers[currentFrame].reset(); + recordCommandBuffer(imageIndex); + + vk::PipelineStageFlags 
waitDestinationStageMask( vk::PipelineStageFlagBits::eColorAttachmentOutput ); + const vk::SubmitInfo submitInfo{ .waitSemaphoreCount = 1, .pWaitSemaphores = &*presentCompleteSemaphore[currentFrame], + .pWaitDstStageMask = &waitDestinationStageMask, .commandBufferCount = 1, .pCommandBuffers = &*commandBuffers[currentFrame], + .signalSemaphoreCount = 1, .pSignalSemaphores = &*renderFinishedSemaphore[imageIndex] }; + graphicsQueue.submit(submitInfo, *inFlightFences[currentFrame]); + + + const vk::PresentInfoKHR presentInfoKHR{ .waitSemaphoreCount = 1, .pWaitSemaphores = &*renderFinishedSemaphore[imageIndex], + .swapchainCount = 1, .pSwapchains = &*swapChain, .pImageIndices = &imageIndex }; + result = presentQueue.presentKHR(presentInfoKHR); + if (result == vk::Result::eErrorOutOfDateKHR || result == vk::Result::eSuboptimalKHR || framebufferResized) { + framebufferResized = false; + recreateSwapChain(); + } else if (result != vk::Result::eSuccess) { + throw std::runtime_error("failed to present swap chain image!"); + } + currentFrame = (currentFrame + 1) % MAX_FRAMES_IN_FLIGHT; + } + + [[nodiscard]] vk::raii::ShaderModule createShaderModule(const std::vector& code) const { + vk::ShaderModuleCreateInfo createInfo{ .codeSize = code.size(), .pCode = reinterpret_cast(code.data()) }; + vk::raii::ShaderModule shaderModule{ device, createInfo }; + + return shaderModule; + } + + static vk::Format chooseSwapSurfaceFormat(const std::vector& availableFormats) { + return (availableFormats[0].format == vk::Format::eUndefined) ? vk::Format::eB8G8R8A8Unorm : availableFormats[0].format; + } + + static vk::PresentModeKHR chooseSwapPresentMode(const std::vector& availablePresentModes) { + return std::ranges::any_of(availablePresentModes, + [](const vk::PresentModeKHR value) { return vk::PresentModeKHR::eMailbox == value; } ) ? 
vk::PresentModeKHR::eMailbox : vk::PresentModeKHR::eFifo; + } + + vk::Extent2D chooseSwapExtent(const vk::SurfaceCapabilitiesKHR& capabilities) { + if (capabilities.currentExtent.width != std::numeric_limits::max()) { + return capabilities.currentExtent; + } +#if PLATFORM_DESKTOP + int width, height; + glfwGetFramebufferSize(window, &width, &height); +#else + ANativeWindow* window = androidAppState.nativeWindow; + int width = ANativeWindow_getWidth(window); + int height = ANativeWindow_getHeight(window); +#endif + return { + std::clamp(width, capabilities.minImageExtent.width, capabilities.maxImageExtent.width), + std::clamp(height, capabilities.minImageExtent.height, capabilities.maxImageExtent.height) + }; + } + + [[nodiscard]] std::vector getRequiredExtensions() const { + std::vector extensions; + +#if PLATFORM_DESKTOP + // Get GLFW extensions + uint32_t glfwExtensionCount = 0; + auto glfwExtensions = glfwGetRequiredInstanceExtensions(&glfwExtensionCount); + extensions.assign(glfwExtensions, glfwExtensions + glfwExtensionCount); +#else + // Android extensions + extensions.push_back(VK_KHR_SURFACE_EXTENSION_NAME); + extensions.push_back(VK_KHR_ANDROID_SURFACE_EXTENSION_NAME); +#endif + + // Add debug extensions if validation layers are enabled + if (enableValidationLayers) { + extensions.push_back(VK_EXT_DEBUG_UTILS_EXTENSION_NAME); + } + + return extensions; + } + + [[nodiscard]] bool checkValidationLayerSupport() const { + return (std::ranges::any_of(context.enumerateInstanceLayerProperties(), + []( vk::LayerProperties const & lp ) { return ( strcmp( "VK_LAYER_KHRONOS_validation", lp.layerName ) == 0 ); } ) ); + } + + static VKAPI_ATTR vk::Bool32 VKAPI_CALL debugCallback(vk::DebugUtilsMessageSeverityFlagBitsEXT severity, vk::DebugUtilsMessageTypeFlagsEXT type, const vk::DebugUtilsMessengerCallbackDataEXT* pCallbackData, void*) { + if (severity == vk::DebugUtilsMessageSeverityFlagBitsEXT::eError || severity == vk::DebugUtilsMessageSeverityFlagBitsEXT::eWarning) 
{ + std::cerr << "validation layer: type " << to_string(type) << " msg: " << pCallbackData->pMessage << std::endl; + } + + return vk::False; + } + + std::vector readFile(const std::string& filename) { +#if PLATFORM_ANDROID + // Android asset loading + if (androidAppState.app == nullptr) { + LOGE("Android app not initialized"); + throw std::runtime_error("Android app not initialized"); + } + AAsset* asset = AAssetManager_open(androidAppState.app->activity->assetManager, filename.c_str(), AASSET_MODE_BUFFER); + if (!asset) { + throw std::runtime_error("failed to open file: " + filename); + } + + size_t size = AAsset_getLength(asset); + std::vector buffer(size); + AAsset_read(asset, buffer.data(), size); + AAsset_close(asset); +#else + // Desktop file loading + std::ifstream file(filename, std::ios::ate | std::ios::binary); + if (!file.is_open()) { + throw std::runtime_error("failed to open file: " + filename); + } + + size_t fileSize = static_cast(file.tellg()); + std::vector buffer(fileSize); + file.seekg(0); + file.read(buffer.data(), fileSize); + file.close(); +#endif + return buffer; + } +}; + +#if PLATFORM_ANDROID +void android_main(android_app* app) { + app_dummy(); + + VulkanApplication vulkanApp; + vulkanApp.run(app); +} +#else +int main() { + try { + VulkanApplication app; + app.run(); + } catch (const std::exception& e) { + LOGE("%s", e.what()); + return EXIT_FAILURE; + } + return EXIT_SUCCESS; +} +#endif diff --git a/attachments/36_multiple_objects.cpp b/attachments/36_multiple_objects.cpp new file mode 100644 index 00000000..2cf9b8bb --- /dev/null +++ b/attachments/36_multiple_objects.cpp @@ -0,0 +1,1605 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +import vulkan_hpp; +#include +#if defined(__ANDROID__) +#include +#include +#endif +#include + +#if defined(__ANDROID__) + #define PLATFORM_ANDROID 1 +#else + #define PLATFORM_DESKTOP 1 +#endif + +// Include tinygltf instead of 
tinyobjloader +// TINYGLTF_IMPLEMENTATION is already defined in the command line +#define STB_IMAGE_WRITE_IMPLEMENTATION +#include + +// Include KTX library for texture loading +#include + +#if PLATFORM_ANDROID + #include + #include + #include + #include + + // Declare and implement app_dummy function from native_app_glue + extern "C" void app_dummy() { + // This is a dummy function that does nothing + // It's used to prevent the linker from stripping out the native_app_glue code + } + + // Define AAssetManager type for Android + typedef AAssetManager AssetManagerType; + + #define LOGI(...) ((void)__android_log_print(ANDROID_LOG_INFO, "VulkanTutorial", __VA_ARGS__)) + #define LOGW(...) ((void)__android_log_print(ANDROID_LOG_WARN, "VulkanTutorial", __VA_ARGS__)) + #define LOGE(...) ((void)__android_log_print(ANDROID_LOG_ERROR, "VulkanTutorial", __VA_ARGS__)) +#else + // Define AAssetManager type for non-Android platforms + typedef void AssetManagerType; + // Desktop-specific includes + #define GLFW_INCLUDE_VULKAN + #include + + // Define logging macros for Desktop + #define LOGI(...) printf(__VA_ARGS__); printf("\n") + #define LOGW(...) printf(__VA_ARGS__); printf("\n") + #define LOGE(...) 
fprintf(stderr, __VA_ARGS__); fprintf(stderr, "\n") +#endif + +#define GLM_FORCE_RADIANS +#define GLM_FORCE_DEPTH_ZERO_TO_ONE +#define GLM_ENABLE_EXPERIMENTAL +#define GLM_FORCE_CXX11 +#include +#include +#include + +constexpr uint32_t WIDTH = 800; +constexpr uint32_t HEIGHT = 600; +constexpr uint64_t FenceTimeout = 100000000; +// Update paths to use glTF model and KTX2 texture +const std::string MODEL_PATH = "models/viking_room.glb"; +const std::string TEXTURE_PATH = "textures/viking_room.ktx2"; +constexpr int MAX_FRAMES_IN_FLIGHT = 2; +// Define the number of objects to render +constexpr int MAX_OBJECTS = 3; + +// Define VpProfileProperties structure for Android only +#if PLATFORM_ANDROID +#ifndef VP_PROFILE_PROPERTIES_DEFINED +#define VP_PROFILE_PROPERTIES_DEFINED +struct VpProfileProperties { + char name[256]; + uint32_t specVersion; +}; +#endif +#endif + +// Define Vulkan Profile constants +#ifndef VP_KHR_ROADMAP_2022_NAME +#define VP_KHR_ROADMAP_2022_NAME "VP_KHR_roadmap_2022" +#endif + +#ifndef VP_KHR_ROADMAP_2022_SPEC_VERSION +#define VP_KHR_ROADMAP_2022_SPEC_VERSION 1 +#endif + +struct AppInfo { + bool profileSupported = false; + VpProfileProperties profile; +}; + +#if PLATFORM_ANDROID +void android_main(android_app* app); + +struct AndroidAppState { + ANativeWindow* nativeWindow = nullptr; + bool initialized = false; + android_app* app = nullptr; +}; +#endif + +#ifdef NDEBUG +constexpr bool enableValidationLayers = false; +#else +constexpr bool enableValidationLayers = true; +#endif + +struct Vertex { + glm::vec3 pos; + glm::vec3 color; + glm::vec2 texCoord; + + static vk::VertexInputBindingDescription getBindingDescription() { + return { 0, sizeof(Vertex), vk::VertexInputRate::eVertex }; + } + + static std::array getAttributeDescriptions() { + return { + vk::VertexInputAttributeDescription( 0, 0, vk::Format::eR32G32B32Sfloat, offsetof(Vertex, pos) ), + vk::VertexInputAttributeDescription( 1, 0, vk::Format::eR32G32B32Sfloat, offsetof(Vertex, color) ), + 
vk::VertexInputAttributeDescription( 2, 0, vk::Format::eR32G32Sfloat, offsetof(Vertex, texCoord) ) + }; + } + + bool operator==(const Vertex& other) const { + return pos == other.pos && color == other.color && texCoord == other.texCoord; + } +}; + +template<> struct std::hash { + size_t operator()(Vertex const& vertex) const noexcept { + return ((hash()(vertex.pos) ^ (hash()(vertex.color) << 1)) >> 1) ^ (hash()(vertex.texCoord) << 1); + } +}; + +// Define a structure to hold per-object data +struct GameObject { + // Transform properties + glm::vec3 position = {0.0f, 0.0f, 0.0f}; + glm::vec3 rotation = {0.0f, 0.0f, 0.0f}; + glm::vec3 scale = {1.0f, 1.0f, 1.0f}; + + // Uniform buffer for this object (one per frame in flight) + std::vector uniformBuffers; + std::vector uniformBuffersMemory; + std::vector uniformBuffersMapped; + + // Descriptor sets for this object (one per frame in flight) + std::vector descriptorSets; + + // Calculate model matrix based on position, rotation, and scale + glm::mat4 getModelMatrix() const { + glm::mat4 model = glm::mat4(1.0f); + model = glm::translate(model, position); + model = glm::rotate(model, rotation.x, glm::vec3(1.0f, 0.0f, 0.0f)); + model = glm::rotate(model, rotation.y, glm::vec3(0.0f, 1.0f, 0.0f)); + model = glm::rotate(model, rotation.z, glm::vec3(0.0f, 0.0f, 1.0f)); + model = glm::scale(model, scale); + return model; + } +}; + +struct UniformBufferObject { + alignas(16) glm::mat4 model; + alignas(16) glm::mat4 view; + alignas(16) glm::mat4 proj; +}; + +class VulkanApplication { +public: +#if PLATFORM_ANDROID + void cleanupAndroid() { + // Clean up resources in each GameObject + for (auto& gameObject : gameObjects) { + // Unmap memory + for (size_t i = 0; i < gameObject.uniformBuffersMemory.size(); i++) { + if (gameObject.uniformBuffersMapped[i] != nullptr) { + gameObject.uniformBuffersMemory[i].unmapMemory(); + } + } + + // Clear vectors to release resources + gameObject.uniformBuffers.clear(); + 
gameObject.uniformBuffersMemory.clear(); + gameObject.uniformBuffersMapped.clear(); + gameObject.descriptorSets.clear(); + } + } + + void run(android_app* app) { + androidAppState.nativeWindow = app->window; + androidAppState.app = app; + app->userData = &androidAppState; + app->onAppCmd = handleAppCommand; + // Note: onInputEvent is no longer a member of android_app in the current NDK version + // Input events are now handled differently + + int events; + android_poll_source* source; + + while (app->destroyRequested == 0) { + while (ALooper_pollOnce(androidAppState.initialized ? 0 : -1, nullptr, &events, (void**)&source) >= 0) { + if (source != nullptr) { + source->process(app, source); + } + } + + if (androidAppState.initialized && androidAppState.nativeWindow != nullptr) { + drawFrame(); + } + } + + if (androidAppState.initialized) { + device.waitIdle(); + cleanupAndroid(); + } + } +#else + void run() { + initWindow(); + initVulkan(); + mainLoop(); + cleanup(); + } +#endif + +private: +#if PLATFORM_ANDROID + AndroidAppState androidAppState; + + static void handleAppCommand(android_app* app, int32_t cmd) { + auto* appState = static_cast(app->userData); + + switch (cmd) { + case APP_CMD_INIT_WINDOW: + if (app->window != nullptr) { + appState->nativeWindow = app->window; + // We can't cast AndroidAppState to VulkanApplication directly + // Instead, we need to access the VulkanApplication instance through a global variable + // or another mechanism. For now, we'll just set the initialized flag. 
+ appState->initialized = true; + } + break; + case APP_CMD_TERM_WINDOW: + appState->nativeWindow = nullptr; + break; + default: + break; + } + } + + static int32_t handleInputEvent(android_app* app, AInputEvent* event) { + if (AInputEvent_getType(event) == AINPUT_EVENT_TYPE_MOTION) { + float x = AMotionEvent_getX(event, 0); + float y = AMotionEvent_getY(event, 0); + + LOGI("Touch at: %f, %f", x, y); + + return 1; + } + return 0; + } +#else + GLFWwindow* window = nullptr; +#endif + + AppInfo appInfo = {}; + vk::raii::Context context; + vk::raii::Instance instance = nullptr; + vk::raii::DebugUtilsMessengerEXT debugMessenger = nullptr; + vk::raii::SurfaceKHR surface = nullptr; + + vk::raii::PhysicalDevice physicalDevice = nullptr; + vk::raii::Device device = nullptr; + + vk::raii::Queue graphicsQueue = nullptr; + vk::raii::Queue presentQueue = nullptr; + + vk::raii::SwapchainKHR swapChain = nullptr; + std::vector swapChainImages; + vk::Format swapChainImageFormat = vk::Format::eUndefined; + vk::Extent2D swapChainExtent; + std::vector swapChainImageViews; + + vk::raii::DescriptorSetLayout descriptorSetLayout = nullptr; + vk::raii::PipelineLayout pipelineLayout = nullptr; + vk::raii::Pipeline graphicsPipeline = nullptr; + + vk::raii::Image depthImage = nullptr; + vk::raii::DeviceMemory depthImageMemory = nullptr; + vk::raii::ImageView depthImageView = nullptr; + + vk::raii::Image textureImage = nullptr; + vk::raii::DeviceMemory textureImageMemory = nullptr; + vk::raii::ImageView textureImageView = nullptr; + vk::raii::Sampler textureSampler = nullptr; + vk::Format textureImageFormat = vk::Format::eUndefined; + + std::vector vertices; + std::vector indices; + vk::raii::Buffer vertexBuffer = nullptr; + vk::raii::DeviceMemory vertexBufferMemory = nullptr; + vk::raii::Buffer indexBuffer = nullptr; + vk::raii::DeviceMemory indexBufferMemory = nullptr; + + // Array of game objects to render + std::array gameObjects; + + vk::raii::DescriptorPool descriptorPool = nullptr; + + 
vk::raii::CommandPool commandPool = nullptr; + std::vector commandBuffers; + uint32_t graphicsIndex = 0; + + std::vector presentCompleteSemaphore; + std::vector renderFinishedSemaphore; + std::vector inFlightFences; + uint32_t currentFrame = 0; + + bool framebufferResized = false; + + std::vector requiredDeviceExtension = { + vk::KHRSwapchainExtensionName, + vk::KHRCreateRenderpass2ExtensionName + }; + +#if PLATFORM_DESKTOP + void initWindow() { + glfwInit(); + + glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API); + + window = glfwCreateWindow(WIDTH, HEIGHT, "Vulkan", nullptr, nullptr); + glfwSetWindowUserPointer(window, this); + glfwSetFramebufferSizeCallback(window, framebufferResizeCallback); + } + + static void framebufferResizeCallback(GLFWwindow* window, int width, int height) { + auto app = static_cast(glfwGetWindowUserPointer(window)); + app->framebufferResized = true; + } +#endif + +public: + void initVulkan() { + createInstance(); + setupDebugMessenger(); + createSurface(); + pickPhysicalDevice(); + createLogicalDevice(); + createSwapChain(); + createImageViews(); + createDescriptorSetLayout(); + createGraphicsPipeline(); + createCommandPool(); + createDepthResources(); + createTextureImage(); + createTextureImageView(); + createTextureSampler(); + loadModel(); + createVertexBuffer(); + createIndexBuffer(); + setupGameObjects(); + createUniformBuffers(); + createDescriptorPool(); + createDescriptorSets(); + createCommandBuffers(); + createSyncObjects(); + } + +private: + +#if PLATFORM_DESKTOP + void mainLoop() { + while (!glfwWindowShouldClose(window)) { + glfwPollEvents(); + drawFrame(); + } + + device.waitIdle(); + } +#endif + + void cleanupSwapChain() { + swapChainImageViews.clear(); + } + +#if PLATFORM_DESKTOP + void cleanup() { + // Clean up resources in each GameObject + for (auto& gameObject : gameObjects) { + // Unmap memory + for (size_t i = 0; i < gameObject.uniformBuffersMemory.size(); i++) { + if (gameObject.uniformBuffersMapped[i] != nullptr) { + 
gameObject.uniformBuffersMemory[i].unmapMemory(); + } + } + + // Clear vectors to release resources + gameObject.uniformBuffers.clear(); + gameObject.uniformBuffersMemory.clear(); + gameObject.uniformBuffersMapped.clear(); + gameObject.descriptorSets.clear(); + } + + // Clean up GLFW resources + glfwDestroyWindow(window); + glfwTerminate(); + } +#endif + + void recreateSwapChain() { +#if PLATFORM_DESKTOP + int width = 0, height = 0; + glfwGetFramebufferSize(window, &width, &height); + while (width == 0 || height == 0) { + glfwGetFramebufferSize(window, &width, &height); + glfwWaitEvents(); + } +#endif + + device.waitIdle(); + + cleanupSwapChain(); + createSwapChain(); + createImageViews(); + createDepthResources(); + } + + void createInstance() { + constexpr vk::ApplicationInfo appInfo{ + .pApplicationName = "Hello Triangle", + .applicationVersion = VK_MAKE_VERSION(1, 0, 0), + .pEngineName = "No Engine", + .engineVersion = VK_MAKE_VERSION(1, 0, 0), + .apiVersion = VK_API_VERSION_1_3 + }; + + auto extensions = getRequiredExtensions(); + + vk::InstanceCreateInfo createInfo{ + .pApplicationInfo = &appInfo, + .enabledExtensionCount = static_cast(extensions.size()), + .ppEnabledExtensionNames = extensions.data() + }; + + instance = vk::raii::Instance(context, createInfo); + LOGI("Vulkan instance created"); + } + + void setupDebugMessenger() { + // Debug messenger setup is disabled for now to avoid compatibility issues + // This is a simplified approach to get the code compiling + if (!enableValidationLayers) return; + + LOGI("Debug messenger setup skipped for compatibility"); + } + + void createSurface() { +#if PLATFORM_DESKTOP + VkSurfaceKHR _surface; + if (glfwCreateWindowSurface(*instance, window, nullptr, &_surface) != VK_SUCCESS) { + throw std::runtime_error("failed to create window surface!"); + } + surface = vk::raii::SurfaceKHR(instance, _surface); +#else + VkSurfaceKHR _surface; + VkAndroidSurfaceCreateInfoKHR createInfo{ + .sType = 
VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR, + .window = androidAppState.nativeWindow + }; + if (vkCreateAndroidSurfaceKHR(*instance, &createInfo, nullptr, &_surface) != VK_SUCCESS) { + throw std::runtime_error("failed to create Android surface!"); + } + surface = vk::raii::SurfaceKHR(instance, _surface); +#endif + } + + void pickPhysicalDevice() { + std::vector devices = instance.enumeratePhysicalDevices(); + const auto devIter = std::ranges::find_if( + devices, + [&](auto const& device) { + // Check if the device supports the Vulkan 1.3 API version + bool supportsVulkan1_3 = device.getProperties().apiVersion >= VK_API_VERSION_1_3; + + // Check if any of the queue families support graphics operations + auto queueFamilies = device.getQueueFamilyProperties(); + bool supportsGraphics = + std::ranges::any_of(queueFamilies, [](auto const& qfp) { return !!(qfp.queueFlags & vk::QueueFlagBits::eGraphics); }); + + // Check if all required device extensions are available + auto availableDeviceExtensions = device.enumerateDeviceExtensionProperties(); + bool supportsAllRequiredExtensions = + std::ranges::all_of(requiredDeviceExtension, + [&availableDeviceExtensions](auto const& requiredDeviceExtension) { + return std::ranges::any_of(availableDeviceExtensions, + [requiredDeviceExtension](auto const& availableDeviceExtension) { + return strcmp(availableDeviceExtension.extensionName, requiredDeviceExtension) == 0; + }); + }); + + auto features = device.template getFeatures2(); + bool supportsRequiredFeatures = features.template get().dynamicRendering && + features.template get().extendedDynamicState; + + return supportsVulkan1_3 && supportsGraphics && supportsAllRequiredExtensions && supportsRequiredFeatures; + }); + + if (devIter != devices.end()) { + physicalDevice = *devIter; + + // Check for Vulkan profile support + VpProfileProperties profileProperties; +#if PLATFORM_ANDROID + strcpy(profileProperties.name, VP_KHR_ROADMAP_2022_NAME); +#else + 
strcpy(profileProperties.profileName, VP_KHR_ROADMAP_2022_NAME); +#endif + profileProperties.specVersion = VP_KHR_ROADMAP_2022_SPEC_VERSION; + + VkBool32 supported = VK_FALSE; + bool result = false; + +#if PLATFORM_ANDROID + // Create a vp::ProfileDesc from our VpProfileProperties + vp::ProfileDesc profileDesc = { + profileProperties.name, + profileProperties.specVersion + }; + + // Use vp::GetProfileSupport for Android + result = vp::GetProfileSupport( + *physicalDevice, // Pass the physical device directly + &profileDesc, // Pass the profile description + &supported // Output parameter for support status + ); +#else + // Use vpGetPhysicalDeviceProfileSupport for Desktop + VkResult vk_result = vpGetPhysicalDeviceProfileSupport( + *instance, + *physicalDevice, + &profileProperties, + &supported + ); + result = vk_result == static_cast(vk::Result::eSuccess); +#endif + const char* name = nullptr; +#ifdef PLATFORM_ANDROID + name = profileProperties.name; +#else + name = profileProperties.profileName; +#endif + + if (result && supported == VK_TRUE) { + appInfo.profileSupported = true; + appInfo.profile = profileProperties; + LOGI("Device supports Vulkan profile: %s", name); + } else { + LOGI("Device does not support Vulkan profile: %s", name); + } + } else { + throw std::runtime_error("failed to find a suitable GPU!"); + } + } + + void createLogicalDevice() { + // find the index of the first queue family that supports graphics + std::vector queueFamilyProperties = physicalDevice.getQueueFamilyProperties(); + + // get the first index into queueFamilyProperties which supports graphics + auto graphicsQueueFamilyProperty = std::ranges::find_if( queueFamilyProperties, []( auto const & qfp ) + { return (qfp.queueFlags & vk::QueueFlagBits::eGraphics) != static_cast(0); } ); + + graphicsIndex = static_cast( std::distance( queueFamilyProperties.begin(), graphicsQueueFamilyProperty ) ); + + // determine a queueFamilyIndex that supports present + // first check if the 
graphicsIndex is good enough + auto presentIndex = physicalDevice.getSurfaceSupportKHR( graphicsIndex, *surface ) + ? graphicsIndex + : ~0; + if ( presentIndex == queueFamilyProperties.size() ) + { + // the graphicsIndex doesn't support present -> look for another family index that supports both + // graphics and present + for ( size_t i = 0; i < queueFamilyProperties.size(); i++ ) + { + if ( ( queueFamilyProperties[i].queueFlags & vk::QueueFlagBits::eGraphics ) && + physicalDevice.getSurfaceSupportKHR( static_cast( i ), *surface ) ) + { + graphicsIndex = static_cast( i ); + presentIndex = graphicsIndex; + break; + } + } + if ( presentIndex == queueFamilyProperties.size() ) + { + // there's nothing like a single family index that supports both graphics and present -> look for another + // family index that supports present + for ( size_t i = 0; i < queueFamilyProperties.size(); i++ ) + { + if ( physicalDevice.getSurfaceSupportKHR( static_cast( i ), *surface ) ) + { + presentIndex = static_cast( i ); + break; + } + } + } + } + if ( ( graphicsIndex == queueFamilyProperties.size() ) || ( presentIndex == queueFamilyProperties.size() ) ) + { + throw std::runtime_error( "Could not find a queue for graphics or present -> terminating" ); + } + + // query for Vulkan 1.3 features + auto features = physicalDevice.getFeatures2(); + vk::PhysicalDeviceVulkan13Features vulkan13Features; + vk::PhysicalDeviceExtendedDynamicStateFeaturesEXT extendedDynamicStateFeatures; + vulkan13Features.dynamicRendering = vk::True; + vulkan13Features.synchronization2 = vk::True; + extendedDynamicStateFeatures.extendedDynamicState = vk::True; + vulkan13Features.pNext = &extendedDynamicStateFeatures; + features.pNext = &vulkan13Features; + // create a Device + float queuePriority = 0.0f; + vk::DeviceQueueCreateInfo deviceQueueCreateInfo { .queueFamilyIndex = graphicsIndex, .queueCount = 1, .pQueuePriorities = &queuePriority }; + vk::DeviceCreateInfo deviceCreateInfo{ + .pNext = &features, + 
.queueCreateInfoCount = 1, + .pQueueCreateInfos = &deviceQueueCreateInfo, + .enabledExtensionCount = static_cast(requiredDeviceExtension.size()), + .ppEnabledExtensionNames = requiredDeviceExtension.data() + }; + + // Create the device with the appropriate features + device = vk::raii::Device(physicalDevice, deviceCreateInfo); + + graphicsQueue = vk::raii::Queue(device, graphicsIndex, 0); + presentQueue = vk::raii::Queue(device, presentIndex, 0); + } + + void createSwapChain() { + auto surfaceCapabilities = physicalDevice.getSurfaceCapabilitiesKHR(*surface); + swapChainImageFormat = chooseSwapSurfaceFormat(physicalDevice.getSurfaceFormatsKHR(*surface)); + swapChainExtent = chooseSwapExtent(surfaceCapabilities); + auto minImageCount = std::max(3u, surfaceCapabilities.minImageCount); + minImageCount = (surfaceCapabilities.maxImageCount > 0 && minImageCount > surfaceCapabilities.maxImageCount) ? surfaceCapabilities.maxImageCount : minImageCount; + vk::SwapchainCreateInfoKHR swapChainCreateInfo{ + .surface = *surface, .minImageCount = minImageCount, + .imageFormat = swapChainImageFormat, .imageColorSpace = vk::ColorSpaceKHR::eSrgbNonlinear, + .imageExtent = swapChainExtent, .imageArrayLayers =1, + .imageUsage = vk::ImageUsageFlagBits::eColorAttachment, .imageSharingMode = vk::SharingMode::eExclusive, + .preTransform = surfaceCapabilities.currentTransform, .compositeAlpha = vk::CompositeAlphaFlagBitsKHR::eOpaque, + .presentMode = chooseSwapPresentMode(physicalDevice.getSurfacePresentModesKHR(*surface)), + .clipped = true }; + + swapChain = vk::raii::SwapchainKHR(device, swapChainCreateInfo); + swapChainImages = swapChain.getImages(); + } + + void createImageViews() { + vk::ImageViewCreateInfo imageViewCreateInfo{ + .viewType = vk::ImageViewType::e2D, + .format = swapChainImageFormat, + .subresourceRange = { vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 } + }; + for ( auto image : swapChainImages ) + { + imageViewCreateInfo.image = image; + 
swapChainImageViews.emplace_back( device, imageViewCreateInfo ); + } + } + + void createDescriptorSetLayout() { + std::array bindings = { + vk::DescriptorSetLayoutBinding( 0, vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eVertex, nullptr), + vk::DescriptorSetLayoutBinding( 1, vk::DescriptorType::eCombinedImageSampler, 1, vk::ShaderStageFlagBits::eFragment, nullptr) + }; + + vk::DescriptorSetLayoutCreateInfo layoutInfo{ .bindingCount = static_cast(bindings.size()), .pBindings = bindings.data() }; + descriptorSetLayout = vk::raii::DescriptorSetLayout(device, layoutInfo); + } + + void createGraphicsPipeline() { + vk::raii::ShaderModule shaderModule = createShaderModule(this->readFile("shaders/slang.spv")); + + vk::PipelineShaderStageCreateInfo vertShaderStageInfo{ .stage = vk::ShaderStageFlagBits::eVertex, .module = *shaderModule, .pName = "vertMain" }; + vk::PipelineShaderStageCreateInfo fragShaderStageInfo{ .stage = vk::ShaderStageFlagBits::eFragment, .module = *shaderModule, .pName = "fragMain" }; + vk::PipelineShaderStageCreateInfo shaderStages[] = {vertShaderStageInfo, fragShaderStageInfo}; + + auto bindingDescription = Vertex::getBindingDescription(); + auto attributeDescriptions = Vertex::getAttributeDescriptions(); + vk::PipelineVertexInputStateCreateInfo vertexInputInfo{ + .vertexBindingDescriptionCount = 1, + .pVertexBindingDescriptions = &bindingDescription, + .vertexAttributeDescriptionCount = static_cast(attributeDescriptions.size()), + .pVertexAttributeDescriptions = attributeDescriptions.data() + }; + vk::PipelineInputAssemblyStateCreateInfo inputAssembly{ + .topology = vk::PrimitiveTopology::eTriangleList, + .primitiveRestartEnable = vk::False + }; + vk::PipelineViewportStateCreateInfo viewportState{ + .viewportCount = 1, + .scissorCount = 1 + }; + vk::PipelineRasterizationStateCreateInfo rasterizer{ + .depthClampEnable = vk::False, + .rasterizerDiscardEnable = vk::False, + .polygonMode = vk::PolygonMode::eFill, + .cullMode = 
vk::CullModeFlagBits::eBack, // Re-enabled culling for better performance + .frontFace = vk::FrontFace::eClockwise, // Keeping Clockwise for glTF + .depthBiasEnable = vk::False + }; + rasterizer.lineWidth = 1.0f; + vk::PipelineMultisampleStateCreateInfo multisampling{ + .rasterizationSamples = vk::SampleCountFlagBits::e1, + .sampleShadingEnable = vk::False + }; + vk::PipelineDepthStencilStateCreateInfo depthStencil{ + .depthTestEnable = vk::True, + .depthWriteEnable = vk::True, + .depthCompareOp = vk::CompareOp::eLess, + .depthBoundsTestEnable = vk::False, + .stencilTestEnable = vk::False + }; + vk::PipelineColorBlendAttachmentState colorBlendAttachment; + colorBlendAttachment.colorWriteMask = vk::ColorComponentFlagBits::eR | vk::ColorComponentFlagBits::eG | vk::ColorComponentFlagBits::eB | vk::ColorComponentFlagBits::eA; + colorBlendAttachment.blendEnable = vk::False; + + vk::PipelineColorBlendStateCreateInfo colorBlending{ + .logicOpEnable = vk::False, + .logicOp = vk::LogicOp::eCopy, + .attachmentCount = 1, + .pAttachments = &colorBlendAttachment + }; + + std::vector dynamicStates = { + vk::DynamicState::eViewport, + vk::DynamicState::eScissor + }; + vk::PipelineDynamicStateCreateInfo dynamicState{ .dynamicStateCount = static_cast(dynamicStates.size()), .pDynamicStates = dynamicStates.data() }; + + vk::PipelineLayoutCreateInfo pipelineLayoutInfo{ .setLayoutCount = 1, .pSetLayouts = &*descriptorSetLayout, .pushConstantRangeCount = 0 }; + + pipelineLayout = vk::raii::PipelineLayout(device, pipelineLayoutInfo); + + vk::PipelineRenderingCreateInfo pipelineRenderingCreateInfo{ .colorAttachmentCount = 1, .pColorAttachmentFormats = &swapChainImageFormat }; + vk::GraphicsPipelineCreateInfo pipelineInfo{ .pNext = &pipelineRenderingCreateInfo, + .stageCount = 2, + .pStages = shaderStages, + .pVertexInputState = &vertexInputInfo, + .pInputAssemblyState = &inputAssembly, + .pViewportState = &viewportState, + .pRasterizationState = &rasterizer, + .pMultisampleState = 
&multisampling, + .pDepthStencilState = &depthStencil, + .pColorBlendState = &colorBlending, + .pDynamicState = &dynamicState, + .layout = *pipelineLayout, + .renderPass = nullptr + }; + + graphicsPipeline = vk::raii::Pipeline(device, nullptr, pipelineInfo); + } + + void createCommandPool() { + vk::CommandPoolCreateInfo poolInfo{ + .flags = vk::CommandPoolCreateFlagBits::eResetCommandBuffer, + .queueFamilyIndex = graphicsIndex + }; + commandPool = vk::raii::CommandPool(device, poolInfo); + } + + void createDepthResources() { + vk::Format depthFormat = findDepthFormat(); + + createImage(swapChainExtent.width, swapChainExtent.height, depthFormat, vk::ImageTiling::eOptimal, vk::ImageUsageFlagBits::eDepthStencilAttachment, vk::MemoryPropertyFlagBits::eDeviceLocal, depthImage, depthImageMemory); + depthImageView = createImageView(depthImage, depthFormat, vk::ImageAspectFlagBits::eDepth); + } + + vk::Format findSupportedFormat(const std::vector& candidates, vk::ImageTiling tiling, vk::FormatFeatureFlags features) const { + for (const auto format : candidates) { + vk::FormatProperties props = physicalDevice.getFormatProperties(format); + + if (tiling == vk::ImageTiling::eLinear && (props.linearTilingFeatures & features) == features) { + return format; + } + if (tiling == vk::ImageTiling::eOptimal && (props.optimalTilingFeatures & features) == features) { + return format; + } + } + + throw std::runtime_error("failed to find supported format!"); + } + + [[nodiscard]] vk::Format findDepthFormat() const { + return findSupportedFormat( + {vk::Format::eD32Sfloat, vk::Format::eD32SfloatS8Uint, vk::Format::eD24UnormS8Uint}, + vk::ImageTiling::eOptimal, + vk::FormatFeatureFlagBits::eDepthStencilAttachment + ); + } + + static bool hasStencilComponent(vk::Format format) { + return format == vk::Format::eD32SfloatS8Uint || format == vk::Format::eD24UnormS8Uint; + } + + void createTextureImage() { + // Load KTX2 texture instead of using stb_image + ktxTexture* kTexture; + 
KTX_error_code result = ktxTexture_CreateFromNamedFile( + TEXTURE_PATH.c_str(), + KTX_TEXTURE_CREATE_LOAD_IMAGE_DATA_BIT, + &kTexture); + + if (result != KTX_SUCCESS) { + throw std::runtime_error("failed to load ktx texture image!"); + } + + // Get texture dimensions and data + uint32_t texWidth = kTexture->baseWidth; + uint32_t texHeight = kTexture->baseHeight; + ktx_size_t imageSize = ktxTexture_GetImageSize(kTexture, 0); + ktx_uint8_t* ktxTextureData = ktxTexture_GetData(kTexture); + + vk::raii::Buffer stagingBuffer({}); + vk::raii::DeviceMemory stagingBufferMemory({}); + createBuffer(imageSize, vk::BufferUsageFlagBits::eTransferSrc, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent, stagingBuffer, stagingBufferMemory); + + void* data = stagingBufferMemory.mapMemory(0, imageSize); + memcpy(data, ktxTextureData, imageSize); + stagingBufferMemory.unmapMemory(); + + // Determine the Vulkan format from KTX format + vk::Format textureFormat; + + // Check if the KTX texture has a format + if (kTexture->classId == ktxTexture2_c) { + // For KTX2 files, we can get the format directly + auto* ktx2 = reinterpret_cast(kTexture); + textureFormat = static_cast(ktx2->vkFormat); + if (textureFormat == vk::Format::eUndefined) { + // If the format is undefined, fall back to a reasonable default + textureFormat = vk::Format::eR8G8B8A8Unorm; + } + } else { + // For KTX1 files or if we can't determine the format, use a reasonable default + textureFormat = vk::Format::eR8G8B8A8Unorm; + } + + textureImageFormat = textureFormat; + + createImage(texWidth, texHeight, textureFormat, vk::ImageTiling::eOptimal, + vk::ImageUsageFlagBits::eTransferDst | vk::ImageUsageFlagBits::eSampled, + vk::MemoryPropertyFlagBits::eDeviceLocal, textureImage, textureImageMemory); + + transitionImageLayout(textureImage, vk::ImageLayout::eUndefined, vk::ImageLayout::eTransferDstOptimal); + copyBufferToImage(stagingBuffer, textureImage, texWidth, texHeight); + 
transitionImageLayout(textureImage, vk::ImageLayout::eTransferDstOptimal, vk::ImageLayout::eShaderReadOnlyOptimal); + + ktxTexture_Destroy(kTexture); + } + + void createTextureImageView() { + textureImageView = createImageView(textureImage, textureImageFormat, vk::ImageAspectFlagBits::eColor); + } + + void createTextureSampler() { + vk::PhysicalDeviceProperties properties = physicalDevice.getProperties(); + vk::SamplerCreateInfo samplerInfo{ + .magFilter = vk::Filter::eLinear, + .minFilter = vk::Filter::eLinear, + .mipmapMode = vk::SamplerMipmapMode::eLinear, + .addressModeU = vk::SamplerAddressMode::eRepeat, + .addressModeV = vk::SamplerAddressMode::eRepeat, + .addressModeW = vk::SamplerAddressMode::eRepeat, + .mipLodBias = 0.0f, + .anisotropyEnable = vk::True, + .maxAnisotropy = properties.limits.maxSamplerAnisotropy, + .compareEnable = vk::False, + .compareOp = vk::CompareOp::eAlways + }; + textureSampler = vk::raii::Sampler(device, samplerInfo); + } + + vk::raii::ImageView createImageView(vk::raii::Image& image, vk::Format format, vk::ImageAspectFlags aspectFlags) { + vk::ImageViewCreateInfo viewInfo{ + .image = *image, + .viewType = vk::ImageViewType::e2D, + .format = format, + .subresourceRange = { aspectFlags, 0, 1, 0, 1 } + }; + return vk::raii::ImageView(device, viewInfo); + } + + void createImage(uint32_t width, uint32_t height, vk::Format format, vk::ImageTiling tiling, vk::ImageUsageFlags usage, vk::MemoryPropertyFlags properties, vk::raii::Image& image, vk::raii::DeviceMemory& imageMemory) { + vk::ImageCreateInfo imageInfo{ + .imageType = vk::ImageType::e2D, + .format = format, + .extent = {width, height, 1}, + .mipLevels = 1, + .arrayLayers = 1, + .samples = vk::SampleCountFlagBits::e1, + .tiling = tiling, + .usage = usage, + .sharingMode = vk::SharingMode::eExclusive, + .initialLayout = vk::ImageLayout::eUndefined + }; + image = vk::raii::Image(device, imageInfo); + + vk::MemoryRequirements memRequirements = image.getMemoryRequirements(); + 
vk::MemoryAllocateInfo allocInfo{ + .allocationSize = memRequirements.size, + .memoryTypeIndex = findMemoryType(memRequirements.memoryTypeBits, properties) + }; + imageMemory = vk::raii::DeviceMemory(device, allocInfo); + image.bindMemory(*imageMemory, 0); + } + + void transitionImageLayout(const vk::raii::Image& image, vk::ImageLayout oldLayout, vk::ImageLayout newLayout) { + auto commandBuffer = beginSingleTimeCommands(); + + vk::ImageMemoryBarrier barrier{ + .oldLayout = oldLayout, + .newLayout = newLayout, + .image = *image, + .subresourceRange = { vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 } + }; + + vk::PipelineStageFlags sourceStage; + vk::PipelineStageFlags destinationStage; + + if (oldLayout == vk::ImageLayout::eUndefined && newLayout == vk::ImageLayout::eTransferDstOptimal) { + barrier.srcAccessMask = {}; + barrier.dstAccessMask = vk::AccessFlagBits::eTransferWrite; + + sourceStage = vk::PipelineStageFlagBits::eTopOfPipe; + destinationStage = vk::PipelineStageFlagBits::eTransfer; + } else if (oldLayout == vk::ImageLayout::eTransferDstOptimal && newLayout == vk::ImageLayout::eShaderReadOnlyOptimal) { + barrier.srcAccessMask = vk::AccessFlagBits::eTransferWrite; + barrier.dstAccessMask = vk::AccessFlagBits::eShaderRead; + + sourceStage = vk::PipelineStageFlagBits::eTransfer; + destinationStage = vk::PipelineStageFlagBits::eFragmentShader; + } else { + throw std::invalid_argument("unsupported layout transition!"); + } + commandBuffer->pipelineBarrier( sourceStage, destinationStage, {}, {}, nullptr, barrier ); + endSingleTimeCommands(*commandBuffer); + } + + void copyBufferToImage(const vk::raii::Buffer& buffer, vk::raii::Image& image, uint32_t width, uint32_t height) { + std::unique_ptr commandBuffer = beginSingleTimeCommands(); + vk::BufferImageCopy region{ + .bufferOffset = 0, + .bufferRowLength = 0, + .bufferImageHeight = 0, + .imageSubresource = { vk::ImageAspectFlagBits::eColor, 0, 0, 1 }, + .imageOffset = {0, 0, 0}, + .imageExtent = {width, height, 1} 
+ }; + commandBuffer->copyBufferToImage(*buffer, *image, vk::ImageLayout::eTransferDstOptimal, {region}); + endSingleTimeCommands(*commandBuffer); + } + + void loadModel() { + // Use tinygltf to load the model instead of tinyobjloader + tinygltf::Model model; + tinygltf::TinyGLTF loader; + std::string err; + std::string warn; + + bool ret = loader.LoadBinaryFromFile(&model, &err, &warn, MODEL_PATH); + + if (!warn.empty()) { + std::cout << "glTF warning: " << warn << std::endl; + } + + if (!err.empty()) { + std::cout << "glTF error: " << err << std::endl; + } + + if (!ret) { + throw std::runtime_error("Failed to load glTF model"); + } + + vertices.clear(); + indices.clear(); + + // Process all meshes in the model + for (const auto& mesh : model.meshes) { + for (const auto& primitive : mesh.primitives) { + // Get indices + const tinygltf::Accessor& indexAccessor = model.accessors[primitive.indices]; + const tinygltf::BufferView& indexBufferView = model.bufferViews[indexAccessor.bufferView]; + const tinygltf::Buffer& indexBuffer = model.buffers[indexBufferView.buffer]; + + // Get vertex positions + const tinygltf::Accessor& posAccessor = model.accessors[primitive.attributes.at("POSITION")]; + const tinygltf::BufferView& posBufferView = model.bufferViews[posAccessor.bufferView]; + const tinygltf::Buffer& posBuffer = model.buffers[posBufferView.buffer]; + + // Get texture coordinates if available + bool hasTexCoords = primitive.attributes.find("TEXCOORD_0") != primitive.attributes.end(); + const tinygltf::Accessor* texCoordAccessor = nullptr; + const tinygltf::BufferView* texCoordBufferView = nullptr; + const tinygltf::Buffer* texCoordBuffer = nullptr; + + if (hasTexCoords) { + texCoordAccessor = &model.accessors[primitive.attributes.at("TEXCOORD_0")]; + texCoordBufferView = &model.bufferViews[texCoordAccessor->bufferView]; + texCoordBuffer = &model.buffers[texCoordBufferView->buffer]; + } + + uint32_t baseVertex = static_cast(vertices.size()); + + for (size_t i = 0; i 
< posAccessor.count; i++) { + Vertex vertex{}; + + const float* pos = reinterpret_cast(&posBuffer.data[posBufferView.byteOffset + posAccessor.byteOffset + i * 12]); + // glTF uses a right-handed coordinate system with Y-up + // Vulkan uses a right-handed coordinate system with Y-down + // We need to flip the Y coordinate + vertex.pos = {pos[0], -pos[1], pos[2]}; + + if (hasTexCoords) { + const float* texCoord = reinterpret_cast(&texCoordBuffer->data[texCoordBufferView->byteOffset + texCoordAccessor->byteOffset + i * 8]); + vertex.texCoord = {texCoord[0], texCoord[1]}; + } else { + vertex.texCoord = {0.0f, 0.0f}; + } + + vertex.color = {1.0f, 1.0f, 1.0f}; + + vertices.push_back(vertex); + } + + const unsigned char* indexData = &indexBuffer.data[indexBufferView.byteOffset + indexAccessor.byteOffset]; + size_t indexCount = indexAccessor.count; + size_t indexStride = 0; + + // Determine index stride based on component type + if (indexAccessor.componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_SHORT) { + indexStride = sizeof(uint16_t); + } else if (indexAccessor.componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_INT) { + indexStride = sizeof(uint32_t); + } else if (indexAccessor.componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_BYTE) { + indexStride = sizeof(uint8_t); + } else { + throw std::runtime_error("Unsupported index component type"); + } + + indices.reserve(indices.size() + indexCount); + + for (size_t i = 0; i < indexCount; i++) { + uint32_t index = 0; + + if (indexAccessor.componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_SHORT) { + index = *reinterpret_cast(indexData + i * indexStride); + } else if (indexAccessor.componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_INT) { + index = *reinterpret_cast(indexData + i * indexStride); + } else if (indexAccessor.componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_BYTE) { + index = *reinterpret_cast(indexData + i * indexStride); + } + + indices.push_back(baseVertex + index); + } + } + } + } + + void createVertexBuffer() { + 
vk::DeviceSize bufferSize = sizeof(vertices[0]) * vertices.size(); + vk::raii::Buffer stagingBuffer({}); + vk::raii::DeviceMemory stagingBufferMemory({}); + createBuffer(bufferSize, vk::BufferUsageFlagBits::eTransferSrc, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent, stagingBuffer, stagingBufferMemory); + + void* dataStaging = stagingBufferMemory.mapMemory(0, bufferSize); + memcpy(dataStaging, vertices.data(), bufferSize); + stagingBufferMemory.unmapMemory(); + + createBuffer(bufferSize, vk::BufferUsageFlagBits::eTransferDst | vk::BufferUsageFlagBits::eVertexBuffer, vk::MemoryPropertyFlagBits::eDeviceLocal, vertexBuffer, vertexBufferMemory); + + copyBuffer(stagingBuffer, vertexBuffer, bufferSize); + } + + void createIndexBuffer() { + vk::DeviceSize bufferSize = sizeof(indices[0]) * indices.size(); + + vk::raii::Buffer stagingBuffer({}); + vk::raii::DeviceMemory stagingBufferMemory({}); + createBuffer(bufferSize, vk::BufferUsageFlagBits::eTransferSrc, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent, stagingBuffer, stagingBufferMemory); + + void* data = stagingBufferMemory.mapMemory(0, bufferSize); + memcpy(data, indices.data(), bufferSize); + stagingBufferMemory.unmapMemory(); + + createBuffer(bufferSize, vk::BufferUsageFlagBits::eTransferDst | vk::BufferUsageFlagBits::eIndexBuffer, vk::MemoryPropertyFlagBits::eDeviceLocal, indexBuffer, indexBufferMemory); + + copyBuffer(stagingBuffer, indexBuffer, bufferSize); + } + + // Initialize the game objects with different positions, rotations, and scales + void setupGameObjects() { + // Object 1 - Center + gameObjects[0].position = {0.0f, 0.0f, 0.0f}; + gameObjects[0].rotation = {0.0f, 0.0f, 0.0f}; + gameObjects[0].scale = {1.0f, 1.0f, 1.0f}; + + // Object 2 - Left + gameObjects[1].position = {-2.0f, 0.0f, -1.0f}; + gameObjects[1].rotation = {0.0f, glm::radians(45.0f), 0.0f}; + gameObjects[1].scale = {0.75f, 0.75f, 0.75f}; + + // Object 3 - Right + 
gameObjects[2].position = {2.0f, 0.0f, -1.0f}; + gameObjects[2].rotation = {0.0f, glm::radians(-45.0f), 0.0f}; + gameObjects[2].scale = {0.75f, 0.75f, 0.75f}; + } + + // Create uniform buffers for each object + void createUniformBuffers() { + // For each game object + for (auto& gameObject : gameObjects) { + gameObject.uniformBuffers.clear(); + gameObject.uniformBuffersMemory.clear(); + gameObject.uniformBuffersMapped.clear(); + + // Create uniform buffers for each frame in flight + for (size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) { + vk::DeviceSize bufferSize = sizeof(UniformBufferObject); + vk::raii::Buffer buffer({}); + vk::raii::DeviceMemory bufferMem({}); + createBuffer(bufferSize, vk::BufferUsageFlagBits::eUniformBuffer, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent, buffer, bufferMem); + gameObject.uniformBuffers.emplace_back(std::move(buffer)); + gameObject.uniformBuffersMemory.emplace_back(std::move(bufferMem)); + gameObject.uniformBuffersMapped.emplace_back(gameObject.uniformBuffersMemory[i].mapMemory(0, bufferSize)); + } + } + } + + void createDescriptorPool() { + // We need MAX_OBJECTS * MAX_FRAMES_IN_FLIGHT descriptor sets + std::array poolSize { + vk::DescriptorPoolSize(vk::DescriptorType::eUniformBuffer, MAX_OBJECTS * MAX_FRAMES_IN_FLIGHT), + vk::DescriptorPoolSize(vk::DescriptorType::eCombinedImageSampler, MAX_OBJECTS * MAX_FRAMES_IN_FLIGHT) + }; + vk::DescriptorPoolCreateInfo poolInfo{ + .flags = vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet, + .maxSets = MAX_OBJECTS * MAX_FRAMES_IN_FLIGHT, + .poolSizeCount = static_cast(poolSize.size()), + .pPoolSizes = poolSize.data() + }; + descriptorPool = vk::raii::DescriptorPool(device, poolInfo); + } + + void createDescriptorSets() { + // For each game object + for (auto& gameObject : gameObjects) { + // Create descriptor sets for each frame in flight + std::vector layouts(MAX_FRAMES_IN_FLIGHT, *descriptorSetLayout); + vk::DescriptorSetAllocateInfo allocInfo{ + 
.descriptorPool = *descriptorPool, + .descriptorSetCount = static_cast(layouts.size()), + .pSetLayouts = layouts.data() + }; + + gameObject.descriptorSets.clear(); + gameObject.descriptorSets = device.allocateDescriptorSets(allocInfo); + + for (size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) { + vk::DescriptorBufferInfo bufferInfo{ + .buffer = *gameObject.uniformBuffers[i], + .offset = 0, + .range = sizeof(UniformBufferObject) + }; + vk::DescriptorImageInfo imageInfo{ + .sampler = *textureSampler, + .imageView = *textureImageView, + .imageLayout = vk::ImageLayout::eShaderReadOnlyOptimal + }; + std::array descriptorWrites{ + vk::WriteDescriptorSet{ + .dstSet = *gameObject.descriptorSets[i], + .dstBinding = 0, + .dstArrayElement = 0, + .descriptorCount = 1, + .descriptorType = vk::DescriptorType::eUniformBuffer, + .pBufferInfo = &bufferInfo + }, + vk::WriteDescriptorSet{ + .dstSet = *gameObject.descriptorSets[i], + .dstBinding = 1, + .dstArrayElement = 0, + .descriptorCount = 1, + .descriptorType = vk::DescriptorType::eCombinedImageSampler, + .pImageInfo = &imageInfo + } + }; + device.updateDescriptorSets(descriptorWrites, {}); + } + } + } + + void createBuffer(vk::DeviceSize size, vk::BufferUsageFlags usage, vk::MemoryPropertyFlags properties, vk::raii::Buffer& buffer, vk::raii::DeviceMemory& bufferMemory) { + vk::BufferCreateInfo bufferInfo{ + .size = size, + .usage = usage, + .sharingMode = vk::SharingMode::eExclusive + }; + buffer = vk::raii::Buffer(device, bufferInfo); + vk::MemoryRequirements memRequirements = buffer.getMemoryRequirements(); + vk::MemoryAllocateInfo allocInfo{ + .allocationSize = memRequirements.size, + .memoryTypeIndex = findMemoryType(memRequirements.memoryTypeBits, properties) + }; + bufferMemory = vk::raii::DeviceMemory(device, allocInfo); + buffer.bindMemory(*bufferMemory, 0); + } + + std::unique_ptr beginSingleTimeCommands() { + vk::CommandBufferAllocateInfo allocInfo{ + .commandPool = *commandPool, + .level = 
vk::CommandBufferLevel::ePrimary, + .commandBufferCount = 1 + }; + std::unique_ptr commandBuffer = std::make_unique(std::move(vk::raii::CommandBuffers(device, allocInfo).front())); + + vk::CommandBufferBeginInfo beginInfo{ + .flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit + }; + commandBuffer->begin(beginInfo); + + return commandBuffer; + } + + void endSingleTimeCommands(const vk::raii::CommandBuffer& commandBuffer) const { + commandBuffer.end(); + + vk::SubmitInfo submitInfo{ .commandBufferCount = 1, .pCommandBuffers = &*commandBuffer }; + graphicsQueue.submit(submitInfo, nullptr); + graphicsQueue.waitIdle(); + } + + void copyBuffer(vk::raii::Buffer & srcBuffer, vk::raii::Buffer & dstBuffer, vk::DeviceSize size) { + vk::CommandBufferAllocateInfo allocInfo{ .commandPool = *commandPool, .level = vk::CommandBufferLevel::ePrimary, .commandBufferCount = 1 }; + vk::raii::CommandBuffer commandCopyBuffer = std::move(device.allocateCommandBuffers(allocInfo).front()); + commandCopyBuffer.begin(vk::CommandBufferBeginInfo{ .flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit }); + commandCopyBuffer.copyBuffer(*srcBuffer, *dstBuffer, vk::BufferCopy{ .size = size }); + commandCopyBuffer.end(); + graphicsQueue.submit(vk::SubmitInfo{ .commandBufferCount = 1, .pCommandBuffers = &*commandCopyBuffer }, nullptr); + graphicsQueue.waitIdle(); + } + + uint32_t findMemoryType(uint32_t typeFilter, vk::MemoryPropertyFlags properties) { + vk::PhysicalDeviceMemoryProperties memProperties = physicalDevice.getMemoryProperties(); + + for (uint32_t i = 0; i < memProperties.memoryTypeCount; i++) { + if ((typeFilter & (1 << i)) && (memProperties.memoryTypes[i].propertyFlags & properties) == properties) { + return i; + } + } + + throw std::runtime_error("failed to find suitable memory type!"); + } + + void createCommandBuffers() { + commandBuffers.clear(); + vk::CommandBufferAllocateInfo allocInfo{ .commandPool = *commandPool, .level = vk::CommandBufferLevel::ePrimary, + 
.commandBufferCount = MAX_FRAMES_IN_FLIGHT }; + commandBuffers = vk::raii::CommandBuffers(device, allocInfo); + } + + void recordCommandBuffer(uint32_t imageIndex) { + commandBuffers[currentFrame].begin({}); + transition_image_layout( + imageIndex, + vk::ImageLayout::eUndefined, + vk::ImageLayout::eColorAttachmentOptimal, + {}, + vk::AccessFlagBits2::eColorAttachmentWrite, + vk::PipelineStageFlagBits2::eTopOfPipe, + vk::PipelineStageFlagBits2::eColorAttachmentOutput + ); + vk::ClearValue clearColor = vk::ClearColorValue(0.0f, 0.0f, 0.0f, 1.0f); + vk::RenderingAttachmentInfo attachmentInfo = { + .imageView = *swapChainImageViews[imageIndex], + .imageLayout = vk::ImageLayout::eColorAttachmentOptimal, + .loadOp = vk::AttachmentLoadOp::eClear, + .storeOp = vk::AttachmentStoreOp::eStore, + .clearValue = clearColor + }; + vk::RenderingInfo renderingInfo = { + .renderArea = { .offset = { 0, 0 }, .extent = swapChainExtent }, + .layerCount = 1, + .colorAttachmentCount = 1, + .pColorAttachments = &attachmentInfo + }; + commandBuffers[currentFrame].beginRendering(renderingInfo); + commandBuffers[currentFrame].bindPipeline(vk::PipelineBindPoint::eGraphics, *graphicsPipeline); + commandBuffers[currentFrame].setViewport(0, vk::Viewport(0.0f, 0.0f, static_cast(swapChainExtent.width), static_cast(swapChainExtent.height), 0.0f, 1.0f)); + commandBuffers[currentFrame].setScissor(0, vk::Rect2D(vk::Offset2D(0, 0), swapChainExtent)); + + // Bind vertex and index buffers (shared by all objects) + commandBuffers[currentFrame].bindVertexBuffers(0, *vertexBuffer, {0}); + commandBuffers[currentFrame].bindIndexBuffer(*indexBuffer, 0, vk::IndexType::eUint32); + + // Draw each object with its own descriptor set + for (const auto& gameObject : gameObjects) { + // Bind the descriptor set for this object + commandBuffers[currentFrame].bindDescriptorSets( + vk::PipelineBindPoint::eGraphics, + *pipelineLayout, + 0, + *gameObject.descriptorSets[currentFrame], + nullptr + ); + + // Draw the object + 
commandBuffers[currentFrame].drawIndexed(indices.size(), 1, 0, 0, 0); + } + + commandBuffers[currentFrame].endRendering(); + transition_image_layout( + imageIndex, + vk::ImageLayout::eColorAttachmentOptimal, + vk::ImageLayout::ePresentSrcKHR, + vk::AccessFlagBits2::eColorAttachmentWrite, + {}, + vk::PipelineStageFlagBits2::eColorAttachmentOutput, + vk::PipelineStageFlagBits2::eBottomOfPipe + ); + commandBuffers[currentFrame].end(); + } + + void transition_image_layout( + uint32_t imageIndex, + vk::ImageLayout old_layout, + vk::ImageLayout new_layout, + vk::AccessFlags2 src_access_mask, + vk::AccessFlags2 dst_access_mask, + vk::PipelineStageFlags2 src_stage_mask, + vk::PipelineStageFlags2 dst_stage_mask + ) { + vk::ImageMemoryBarrier2 barrier = { + .srcStageMask = src_stage_mask, + .srcAccessMask = src_access_mask, + .dstStageMask = dst_stage_mask, + .dstAccessMask = dst_access_mask, + .oldLayout = old_layout, + .newLayout = new_layout, + .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .image = swapChainImages[imageIndex], + .subresourceRange = { + .aspectMask = vk::ImageAspectFlagBits::eColor, + .baseMipLevel = 0, + .levelCount = 1, + .baseArrayLayer = 0, + .layerCount = 1 + } + }; + vk::DependencyInfo dependency_info = { + .dependencyFlags = {}, + .imageMemoryBarrierCount = 1, + .pImageMemoryBarriers = &barrier + }; + commandBuffers[currentFrame].pipelineBarrier2(dependency_info); + } + + void createSyncObjects() { + presentCompleteSemaphore.clear(); + renderFinishedSemaphore.clear(); + inFlightFences.clear(); + + for (size_t i = 0; i < swapChainImages.size(); i++) { + presentCompleteSemaphore.emplace_back(device, vk::SemaphoreCreateInfo()); + renderFinishedSemaphore.emplace_back(device, vk::SemaphoreCreateInfo()); + } + + for (size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) { + inFlightFences.emplace_back(device, vk::FenceCreateInfo{ .flags = vk::FenceCreateFlagBits::eSignaled }); + } + } + + void 
updateUniformBuffers() { + static auto startTime = std::chrono::high_resolution_clock::now(); + static auto lastFrameTime = startTime; + auto currentTime = std::chrono::high_resolution_clock::now(); + float time = std::chrono::duration(currentTime - startTime).count(); + float deltaTime = std::chrono::duration(currentTime - lastFrameTime).count(); + lastFrameTime = currentTime; + + // Camera and projection matrices (shared by all objects) + glm::mat4 view = glm::lookAt(glm::vec3(2.0f, 2.0f, 6.0f), glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 1.0f, 0.0f)); + glm::mat4 proj = glm::perspective(glm::radians(45.0f), static_cast(swapChainExtent.width) / static_cast(swapChainExtent.height), 0.1f, 20.0f); + // Update uniform buffers for each object + for (auto& gameObject : gameObjects) { + // Apply continuous rotation to the object based on frame time + const float rotationSpeed = 0.5f; // Rotation speed in radians per second + gameObject.rotation.y += rotationSpeed * deltaTime; // Slow rotation around Y axis scaled by frame time + + // Get the model matrix for this object + glm::mat4 model = gameObject.getModelMatrix(); + + // Create and update the UBO + UniformBufferObject ubo{ + .model = model, + .view = view, + .proj = proj + }; + + // Copy the UBO data to the mapped memory + memcpy(gameObject.uniformBuffersMapped[currentFrame], &ubo, sizeof(ubo)); + } + } + + void drawFrame() { + while (vk::Result::eTimeout == device.waitForFences(*inFlightFences[currentFrame], vk::True, UINT64_MAX)); + auto [result, imageIndex] = swapChain.acquireNextImage(UINT64_MAX, *presentCompleteSemaphore[currentFrame], nullptr); + + if (result == vk::Result::eErrorOutOfDateKHR) { + recreateSwapChain(); + return; + } + if (result != vk::Result::eSuccess && result != vk::Result::eSuboptimalKHR) { + throw std::runtime_error("failed to acquire swap chain image!"); + } + + // Update uniform buffers for all objects + updateUniformBuffers(); + + device.resetFences(*inFlightFences[currentFrame]); + 
commandBuffers[currentFrame].reset(); + recordCommandBuffer(imageIndex); + + vk::PipelineStageFlags waitDestinationStageMask(vk::PipelineStageFlagBits::eColorAttachmentOutput); + const vk::SubmitInfo submitInfo{ + .waitSemaphoreCount = 1, + .pWaitSemaphores = &*presentCompleteSemaphore[currentFrame], + .pWaitDstStageMask = &waitDestinationStageMask, + .commandBufferCount = 1, + .pCommandBuffers = &*commandBuffers[currentFrame], + .signalSemaphoreCount = 1, + .pSignalSemaphores = &*renderFinishedSemaphore[imageIndex] + }; + graphicsQueue.submit(submitInfo, *inFlightFences[currentFrame]); + + const vk::PresentInfoKHR presentInfoKHR{ + .waitSemaphoreCount = 1, + .pWaitSemaphores = &*renderFinishedSemaphore[imageIndex], + .swapchainCount = 1, + .pSwapchains = &*swapChain, + .pImageIndices = &imageIndex + }; + result = presentQueue.presentKHR(presentInfoKHR); + if (result == vk::Result::eErrorOutOfDateKHR || result == vk::Result::eSuboptimalKHR || framebufferResized) { + framebufferResized = false; + recreateSwapChain(); + } else if (result != vk::Result::eSuccess) { + throw std::runtime_error("failed to present swap chain image!"); + } + currentFrame = (currentFrame + 1) % MAX_FRAMES_IN_FLIGHT; + } + + [[nodiscard]] vk::raii::ShaderModule createShaderModule(const std::vector& code) const { + vk::ShaderModuleCreateInfo createInfo{ .codeSize = code.size(), .pCode = reinterpret_cast(code.data()) }; + vk::raii::ShaderModule shaderModule{ device, createInfo }; + + return shaderModule; + } + + static vk::Format chooseSwapSurfaceFormat(const std::vector& availableFormats) { + return (availableFormats[0].format == vk::Format::eUndefined) ? vk::Format::eB8G8R8A8Unorm : availableFormats[0].format; + } + + static vk::PresentModeKHR chooseSwapPresentMode(const std::vector& availablePresentModes) { + return std::ranges::any_of(availablePresentModes, + [](const vk::PresentModeKHR value) { return vk::PresentModeKHR::eMailbox == value; } ) ? 
vk::PresentModeKHR::eMailbox : vk::PresentModeKHR::eFifo; + } + + vk::Extent2D chooseSwapExtent(const vk::SurfaceCapabilitiesKHR& capabilities) { + if (capabilities.currentExtent.width != std::numeric_limits::max()) { + return capabilities.currentExtent; + } +#if PLATFORM_DESKTOP + int width, height; + glfwGetFramebufferSize(window, &width, &height); +#else + ANativeWindow* window = androidAppState.nativeWindow; + int width = ANativeWindow_getWidth(window); + int height = ANativeWindow_getHeight(window); +#endif + return { + std::clamp(width, capabilities.minImageExtent.width, capabilities.maxImageExtent.width), + std::clamp(height, capabilities.minImageExtent.height, capabilities.maxImageExtent.height) + }; + } + + [[nodiscard]] std::vector getRequiredExtensions() const { + std::vector extensions; + +#if PLATFORM_DESKTOP + // Get GLFW extensions + uint32_t glfwExtensionCount = 0; + auto glfwExtensions = glfwGetRequiredInstanceExtensions(&glfwExtensionCount); + extensions.assign(glfwExtensions, glfwExtensions + glfwExtensionCount); +#else + // Android extensions + extensions.push_back(VK_KHR_SURFACE_EXTENSION_NAME); + extensions.push_back(VK_KHR_ANDROID_SURFACE_EXTENSION_NAME); +#endif + + // Add debug extensions if validation layers are enabled + if (enableValidationLayers) { + extensions.push_back(VK_EXT_DEBUG_UTILS_EXTENSION_NAME); + } + + return extensions; + } + + [[nodiscard]] bool checkValidationLayerSupport() const { + return (std::ranges::any_of(context.enumerateInstanceLayerProperties(), + []( vk::LayerProperties const & lp ) { return ( strcmp( "VK_LAYER_KHRONOS_validation", lp.layerName ) == 0 ); } ) ); + } + + static VKAPI_ATTR vk::Bool32 VKAPI_CALL debugCallback(vk::DebugUtilsMessageSeverityFlagBitsEXT severity, vk::DebugUtilsMessageTypeFlagsEXT type, const vk::DebugUtilsMessengerCallbackDataEXT* pCallbackData, void*) { + if (severity == vk::DebugUtilsMessageSeverityFlagBitsEXT::eError || severity == vk::DebugUtilsMessageSeverityFlagBitsEXT::eWarning) 
{ + std::cerr << "validation layer: type " << to_string(type) << " msg: " << pCallbackData->pMessage << std::endl; + } + + return vk::False; + } + + std::vector readFile(const std::string& filename) { +#if PLATFORM_ANDROID + // Android asset loading + if (androidAppState.app == nullptr) { + LOGE("Android app not initialized"); + throw std::runtime_error("Android app not initialized"); + } + AAsset* asset = AAssetManager_open(androidAppState.app->activity->assetManager, filename.c_str(), AASSET_MODE_BUFFER); + if (!asset) { + throw std::runtime_error("failed to open file: " + filename); + } + + size_t size = AAsset_getLength(asset); + std::vector buffer(size); + AAsset_read(asset, buffer.data(), size); + AAsset_close(asset); +#else + // Desktop file loading + std::ifstream file(filename, std::ios::ate | std::ios::binary); + if (!file.is_open()) { + throw std::runtime_error("failed to open file: " + filename); + } + + size_t fileSize = static_cast(file.tellg()); + std::vector buffer(fileSize); + file.seekg(0); + file.read(buffer.data(), fileSize); + file.close(); +#endif + return buffer; + } +}; + +#if PLATFORM_ANDROID +void android_main(android_app* app) { + app_dummy(); + + VulkanApplication vulkanApp; + vulkanApp.run(app); +} +#else +int main() { + try { + VulkanApplication app; + app.run(); + } catch (const std::exception& e) { + LOGE("%s", e.what()); + return EXIT_FAILURE; + } + return EXIT_SUCCESS; +} +#endif diff --git a/attachments/37_multithreading.cpp b/attachments/37_multithreading.cpp new file mode 100644 index 00000000..e658961d --- /dev/null +++ b/attachments/37_multithreading.cpp @@ -0,0 +1,1234 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +import vulkan_hpp; +#include + +#define GLFW_INCLUDE_VULKAN // REQUIRED only for GLFW CreateWindowSurface. 
+#include + +#define GLM_FORCE_RADIANS +#include +#include + +constexpr uint32_t WIDTH = 800; +constexpr uint32_t HEIGHT = 600; +constexpr uint64_t FenceTimeout = 100000000; +constexpr uint32_t PARTICLE_COUNT = 8192; + +constexpr int MAX_FRAMES_IN_FLIGHT = 2; + + +struct UniformBufferObject { + float deltaTime = 1.0f; +}; + +struct Particle { + glm::vec2 position; + glm::vec2 velocity; + glm::vec4 color; + + static vk::VertexInputBindingDescription getBindingDescription() { + return { 0, sizeof(Particle), vk::VertexInputRate::eVertex }; + } + + static std::array getAttributeDescriptions() { + return { + vk::VertexInputAttributeDescription( 0, 0, vk::Format::eR32G32Sfloat, offsetof(Particle, position) ), + vk::VertexInputAttributeDescription( 1, 0, vk::Format::eR32G32B32A32Sfloat, offsetof(Particle, color) ), + }; + } +}; + +// Simple logging function +template +void log(Args&&... args) { + // Only log in debug builds +#ifdef _DEBUG + (std::cout << ... << std::forward(args)) << std::endl; +#endif +} + +class ThreadSafeResourceManager { +private: + std::mutex resourceMutex; + std::vector commandPools; + std::vector commandBuffers; + +public: + void createThreadCommandPools(vk::raii::Device& device, uint32_t queueFamilyIndex, uint32_t threadCount) { + std::lock_guard lock(resourceMutex); + + commandBuffers.clear(); + commandPools.clear(); + + for (uint32_t i = 0; i < threadCount; i++) { + vk::CommandPoolCreateInfo poolInfo{ + .flags = vk::CommandPoolCreateFlagBits::eResetCommandBuffer, + .queueFamilyIndex = queueFamilyIndex + }; + try { + commandPools.emplace_back(device, poolInfo); + } catch (const std::exception&) { + throw; // Re-throw the exception to be caught by the caller + } + } + } + + vk::raii::CommandPool& getCommandPool(uint32_t threadIndex) { + std::lock_guard lock(resourceMutex); + return commandPools[threadIndex]; + } + + void allocateCommandBuffers(vk::raii::Device& device, uint32_t threadCount, uint32_t buffersPerThread) { + std::lock_guard 
lock(resourceMutex); + + commandBuffers.clear(); + + if (commandPools.size() < threadCount) { + throw std::runtime_error("Not enough command pools for thread count"); + } + + for (uint32_t i = 0; i < threadCount; i++) { + vk::CommandBufferAllocateInfo allocInfo{ + .commandPool = *commandPools[i], + .level = vk::CommandBufferLevel::ePrimary, + .commandBufferCount = buffersPerThread + }; + try { + auto threadBuffers = device.allocateCommandBuffers(allocInfo); + for (auto& buffer : threadBuffers) { + commandBuffers.emplace_back(std::move(buffer)); + } + } catch (const std::exception&) { + throw; // Re-throw the exception to be caught by the caller + } + } + } + + vk::raii::CommandBuffer& getCommandBuffer(uint32_t index) { + // No need for mutex here as each thread accesses its own command buffer + if (index >= commandBuffers.size()) { + throw std::runtime_error("Command buffer index out of range: " + std::to_string(index) + + " (available: " + std::to_string(commandBuffers.size()) + ")"); + } + return commandBuffers[index]; + } +}; + +class MultithreadedApplication { +public: + void run() { + initWindow(); + initVulkan(); + initThreads(); + mainLoop(); + cleanup(); + } + +private: + GLFWwindow* window = nullptr; + + vk::raii::Context context; + vk::raii::Instance instance = nullptr; + vk::raii::SurfaceKHR surface = nullptr; + + vk::raii::PhysicalDevice physicalDevice = nullptr; + vk::raii::Device device = nullptr; + + vk::raii::Queue graphicsQueue = nullptr; + vk::raii::Queue computeQueue = nullptr; + vk::raii::Queue presentQueue = nullptr; + + vk::raii::SwapchainKHR swapChain = nullptr; + std::vector swapChainImages; + vk::SurfaceFormatKHR swapChainImageFormat; + vk::Extent2D swapChainExtent; + std::vector swapChainImageViews; + + vk::raii::PipelineLayout pipelineLayout = nullptr; + vk::raii::Pipeline graphicsPipeline = nullptr; + + vk::raii::DescriptorSetLayout computeDescriptorSetLayout = nullptr; + vk::raii::PipelineLayout computePipelineLayout = nullptr; + 
vk::raii::Pipeline computePipeline = nullptr; + + std::vector shaderStorageBuffers; + std::vector shaderStorageBuffersMemory; + + std::vector uniformBuffers; + std::vector uniformBuffersMemory; + std::vector uniformBuffersMapped; + + vk::raii::DescriptorPool descriptorPool = nullptr; + std::vector computeDescriptorSets; + + vk::raii::CommandPool commandPool = nullptr; + std::vector graphicsCommandBuffers; + uint32_t graphicsQueueFamilyIndex = 0; + + vk::raii::Semaphore timelineSemaphore = nullptr; + uint64_t timelineValue = 0; + std::vector imageAvailableSemaphores; + std::vector inFlightFences; + uint32_t currentFrame = 0; + + double lastFrameTime = 0.0; + + // Removed resize-related variables and FSM state management as per simplification request + + double lastTime = 0.0f; + + uint32_t threadCount = 0; + std::vector workerThreads; + std::atomic shouldExit{false}; + std::vector> threadWorkReady; + std::vector> threadWorkDone; + + std::mutex queueSubmitMutex; + std::mutex workCompleteMutex; + std::condition_variable workCompleteCv; + + ThreadSafeResourceManager resourceManager; + struct ParticleGroup { + uint32_t startIndex; + uint32_t count; + }; + std::vector particleGroups; + + std::vector requiredDeviceExtension = { + vk::KHRSwapchainExtensionName, + vk::KHRSpirv14ExtensionName, + vk::KHRSynchronization2ExtensionName, + vk::KHRCreateRenderpass2ExtensionName + }; + + // Helper functions + [[nodiscard]] static std::vector getRequiredExtensions() { + uint32_t glfwExtensionCount = 0; + auto glfwExtensions = glfwGetRequiredInstanceExtensions(&glfwExtensionCount); + std::vector extensions(glfwExtensions, glfwExtensions + glfwExtensionCount); + return extensions; + } + static vk::SurfaceFormatKHR chooseSwapSurfaceFormat(const std::vector& availableFormats) { + for (const auto& availableFormat : availableFormats) { + if (availableFormat.format == vk::Format::eB8G8R8A8Srgb && availableFormat.colorSpace == vk::ColorSpaceKHR::eSrgbNonlinear) { + return availableFormat; + 
} + } + + return availableFormats[0]; + } + static vk::PresentModeKHR chooseSwapPresentMode(const std::vector& availablePresentModes) { + for (const auto& availablePresentMode : availablePresentModes) { + if (availablePresentMode == vk::PresentModeKHR::eMailbox) { + return availablePresentMode; + } + } + return vk::PresentModeKHR::eFifo; + } + [[nodiscard]] vk::Extent2D chooseSwapExtent(const vk::SurfaceCapabilitiesKHR& capabilities) const { + if (capabilities.currentExtent.width != std::numeric_limits::max()) { + return capabilities.currentExtent; + } + int width, height; + glfwGetFramebufferSize(window, &width, &height); + + return { + std::clamp(width, capabilities.minImageExtent.width, capabilities.maxImageExtent.width), + std::clamp(height, capabilities.minImageExtent.height, capabilities.maxImageExtent.height) + }; + } + [[nodiscard]] vk::raii::ShaderModule createShaderModule(const std::vector& code) const { + vk::ShaderModuleCreateInfo createInfo{ .codeSize = code.size(), .pCode = reinterpret_cast(code.data()) }; + vk::raii::ShaderModule shaderModule{ device, createInfo }; + + return shaderModule; + } + static std::vector readFile(const std::string& filename) { + std::ifstream file(filename, std::ios::ate | std::ios::binary); + + if (!file.is_open()) { + throw std::runtime_error("failed to open file!"); + } + std::vector buffer(file.tellg()); + file.seekg(0, std::ios::beg); + file.read(buffer.data(), static_cast(buffer.size())); + file.close(); + + return buffer; + } + + void initWindow() { + glfwInit(); + + glfwWindowHint(GLFW_CLIENT_API, GLFW_NO_API); + glfwWindowHint(GLFW_RESIZABLE, GLFW_FALSE); + + window = glfwCreateWindow(WIDTH, HEIGHT, "Vulkan Multithreading", nullptr, nullptr); + glfwSetWindowUserPointer(window, this); + + lastTime = glfwGetTime(); + } + + void initVulkan() { + createInstance(); + createSurface(); + pickPhysicalDevice(); + createLogicalDevice(); + createSwapChain(); + createImageViews(); + createComputeDescriptorSetLayout(); + 
createGraphicsPipeline(); + createComputePipeline(); + createCommandPool(); + createShaderStorageBuffers(); + createUniformBuffers(); + createDescriptorPool(); + createComputeDescriptorSets(); + createGraphicsCommandBuffers(); + createSyncObjects(); + } + + void initThreads() { + // Increase thread count for better parallelism + threadCount = 8u; + log("Initializing ", threadCount, " threads for sequential execution"); + + threadWorkReady = std::vector>(threadCount); + threadWorkDone = std::vector>(threadCount); + + for (uint32_t i = 0; i < threadCount; i++) { + threadWorkReady[i] = false; + threadWorkDone[i] = true; + } + + initThreadResources(); + + const uint32_t particlesPerThread = PARTICLE_COUNT / threadCount; + particleGroups.resize(threadCount); + + for (uint32_t i = 0; i < threadCount; i++) { + particleGroups[i].startIndex = i * particlesPerThread; + particleGroups[i].count = (i == threadCount - 1) ? + (PARTICLE_COUNT - i * particlesPerThread) : particlesPerThread; + log("Thread ", i, " will process particles ", + particleGroups[i].startIndex, " to ", + (particleGroups[i].startIndex + particleGroups[i].count - 1), + " (count: ", particleGroups[i].count, ")"); + } + + for (uint32_t i = 0; i < threadCount; i++) { + workerThreads.emplace_back(&MultithreadedApplication::workerThreadFunc, this, i); + log("Started worker thread ", i); + } + } + + void workerThreadFunc(uint32_t threadIndex) { + while (!shouldExit) { + // Wait for work using condition variable + { + std::unique_lock lock(workCompleteMutex); + workCompleteCv.wait(lock, [this, threadIndex]() { + return shouldExit || threadWorkReady[threadIndex].load(std::memory_order_acquire); + }); + + if (shouldExit) { + break; + } + + if (!threadWorkReady[threadIndex].load(std::memory_order_acquire)) { + continue; + } + } + + const ParticleGroup& group = particleGroups[threadIndex]; + bool workCompleted = false; + + try { + // Get command buffer and record commands + vk::raii::CommandBuffer* cmdBuffer = 
&resourceManager.getCommandBuffer(threadIndex); + recordComputeCommandBuffer(*cmdBuffer, group.startIndex, group.count); + workCompleted = true; + } catch (const std::exception&) { + workCompleted = false; + } + + // Mark work as done + threadWorkDone[threadIndex].store(true, std::memory_order_release); + threadWorkReady[threadIndex].store(false, std::memory_order_release); + + // If this is not the last thread, signal the next thread to start + if (threadIndex < threadCount - 1) { + threadWorkReady[threadIndex + 1].store(true, std::memory_order_release); + } + + // Notify main thread and other threads + { + std::lock_guard lock(workCompleteMutex); + workCompleteCv.notify_all(); + } + } + } + + void mainLoop() { + const double targetFrameTime = 1.0 / 60.0; + + while (!glfwWindowShouldClose(window)) { + double frameStartTime = glfwGetTime(); + + glfwPollEvents(); + drawFrame(); + + double currentTime = glfwGetTime(); + lastFrameTime = (currentTime - lastTime) * 1000.0; + lastTime = currentTime; + + double frameTime = currentTime - frameStartTime; + + if (frameTime < targetFrameTime) { + double sleepTime = targetFrameTime - frameTime; + std::this_thread::sleep_for(std::chrono::duration(sleepTime)); + } + } + + device.waitIdle(); + } + + void cleanupSwapChain() { + swapChainImageViews.clear(); + graphicsPipeline = nullptr; + pipelineLayout = nullptr; + computePipeline = nullptr; + computePipelineLayout = nullptr; + computeDescriptorSets.clear(); + computeDescriptorSetLayout = nullptr; + descriptorPool = nullptr; + + // Unmap and clean up uniform buffers + for (size_t i = 0; i < uniformBuffersMapped.size(); i++) { + uniformBuffersMemory[i].unmapMemory(); + } + uniformBuffers.clear(); + uniformBuffersMemory.clear(); + uniformBuffersMapped.clear(); + + // Clean up shader storage buffers + shaderStorageBuffers.clear(); + shaderStorageBuffersMemory.clear(); + } + + void stopThreads() { + shouldExit.store(true, std::memory_order_release); + + for (uint32_t i = 0; i < 
threadCount; i++) { + threadWorkDone[i].store(true, std::memory_order_release); + threadWorkReady[i].store(false, std::memory_order_release); + } + + // Notify all threads in case they're waiting on the condition variable + { + std::lock_guard lock(workCompleteMutex); + workCompleteCv.notify_all(); + } + + for (auto& thread : workerThreads) { + if (thread.joinable()) { + thread.join(); + } + } + + workerThreads.clear(); + } + + void initThreadResources() { + resourceManager.createThreadCommandPools(device, graphicsQueueFamilyIndex, threadCount); + resourceManager.allocateCommandBuffers(device, threadCount, 1); + } + + void cleanup() { + stopThreads(); + + glfwDestroyWindow(window); + glfwTerminate(); + } + + + void createInstance() { + constexpr vk::ApplicationInfo appInfo{ .pApplicationName = "Vulkan Multithreading", + .applicationVersion = VK_MAKE_VERSION( 1, 0, 0 ), + .pEngineName = "No Engine", + .engineVersion = VK_MAKE_VERSION( 1, 0, 0 ), + .apiVersion = vk::ApiVersion14 }; + auto extensions = getRequiredExtensions(); + vk::InstanceCreateInfo createInfo{ + .pApplicationInfo = &appInfo, + .enabledLayerCount = 0, + .ppEnabledLayerNames = nullptr, + .enabledExtensionCount = static_cast(extensions.size()), + .ppEnabledExtensionNames = extensions.data() }; + instance = vk::raii::Instance(context, createInfo); + } + + + void createSurface() { + VkSurfaceKHR _surface; + if (glfwCreateWindowSurface(*instance, window, nullptr, &_surface) != 0) { + throw std::runtime_error("failed to create window surface!"); + } + surface = vk::raii::SurfaceKHR(instance, _surface); + } + + void pickPhysicalDevice() { + std::vector devices = instance.enumeratePhysicalDevices(); + const auto devIter = std::ranges::find_if( + devices, + [&](auto const & device) + { + bool supportsVulkan1_3 = device.getProperties().apiVersion >= VK_API_VERSION_1_3; + + auto queueFamilies = device.getQueueFamilyProperties(); + bool supportsGraphics = + std::ranges::any_of(queueFamilies, [](auto const & 
qfp) { return !!(qfp.queueFlags & vk::QueueFlagBits::eGraphics); }); + + auto availableDeviceExtensions = device.enumerateDeviceExtensionProperties(); + bool supportsAllRequiredExtensions = + std::ranges::all_of(requiredDeviceExtension, + [&availableDeviceExtensions](auto const & requiredDeviceExtension) + { + return std::ranges::any_of(availableDeviceExtensions, + [requiredDeviceExtension](auto const & availableDeviceExtension) + { return strcmp(availableDeviceExtension.extensionName, requiredDeviceExtension) == 0; }); + }); + + auto features = device.template getFeatures2(); + bool supportsRequiredFeatures = features.template get().dynamicRendering && + features.template get().extendedDynamicState; + + return supportsVulkan1_3 && supportsGraphics && supportsAllRequiredExtensions && supportsRequiredFeatures; + }); + if (devIter != devices.end()) + { + physicalDevice = *devIter; + } + else + { + throw std::runtime_error("failed to find a suitable GPU!"); + } + } + + void createLogicalDevice() { + std::vector queueFamilyProperties = physicalDevice.getQueueFamilyProperties(); + + auto graphicsAndComputeQueueFamilyProperty = std::ranges::find_if(queueFamilyProperties, [](auto const & qfp) + { return (qfp.queueFlags & vk::QueueFlagBits::eGraphics && qfp.queueFlags & vk::QueueFlagBits::eCompute); }); + + graphicsQueueFamilyIndex = static_cast(std::distance(queueFamilyProperties.begin(), graphicsAndComputeQueueFamilyProperty)); + + auto presentIndex = physicalDevice.getSurfaceSupportKHR(graphicsQueueFamilyIndex, surface) + ? 
graphicsQueueFamilyIndex + : ~0; + if (presentIndex == queueFamilyProperties.size()) + { + for (size_t i = 0; i < queueFamilyProperties.size(); i++) + { + if (((queueFamilyProperties[i].queueFlags & vk::QueueFlagBits::eGraphics && queueFamilyProperties[i].queueFlags & vk::QueueFlagBits::eCompute)) && + physicalDevice.getSurfaceSupportKHR(static_cast(i), surface)) + { + graphicsQueueFamilyIndex = static_cast(i); + presentIndex = graphicsQueueFamilyIndex; + break; + } + } + if (presentIndex == queueFamilyProperties.size()) + { + for (size_t i = 0; i < queueFamilyProperties.size(); i++) + { + if (physicalDevice.getSurfaceSupportKHR(static_cast(i), surface)) + { + presentIndex = static_cast(i); + break; + } + } + } + } + if ((graphicsQueueFamilyIndex == queueFamilyProperties.size()) || (presentIndex == queueFamilyProperties.size())) + { + throw std::runtime_error("Could not find a queue for graphics or present -> terminating"); + } + + auto features = physicalDevice.getFeatures2(); + features.features.samplerAnisotropy = vk::True; + vk::PhysicalDeviceVulkan13Features vulkan13Features; + vk::PhysicalDeviceExtendedDynamicStateFeaturesEXT extendedDynamicStateFeatures; + vk::PhysicalDeviceTimelineSemaphoreFeaturesKHR timelineSemaphoreFeatures; + timelineSemaphoreFeatures.timelineSemaphore = vk::True; + vulkan13Features.dynamicRendering = vk::True; + vulkan13Features.synchronization2 = vk::True; + extendedDynamicStateFeatures.extendedDynamicState = vk::True; + extendedDynamicStateFeatures.pNext = &timelineSemaphoreFeatures; + vulkan13Features.pNext = &extendedDynamicStateFeatures; + features.pNext = &vulkan13Features; + + float queuePriority = 0.0f; + vk::DeviceQueueCreateInfo deviceQueueCreateInfo{.queueFamilyIndex = graphicsQueueFamilyIndex, .queueCount = 1, .pQueuePriorities = &queuePriority}; + vk::DeviceCreateInfo deviceCreateInfo{ + .pNext = &features, + .queueCreateInfoCount = 1, + .pQueueCreateInfos = &deviceQueueCreateInfo, + .enabledExtensionCount = 
static_cast(requiredDeviceExtension.size()), + .ppEnabledExtensionNames = requiredDeviceExtension.data() + }; + + device = vk::raii::Device(physicalDevice, deviceCreateInfo); + graphicsQueue = vk::raii::Queue(device, graphicsQueueFamilyIndex, 0); + computeQueue = vk::raii::Queue(device, graphicsQueueFamilyIndex, 0); + presentQueue = vk::raii::Queue(device, presentIndex, 0); + } + + void createSwapChain() { + auto surfaceCapabilities = physicalDevice.getSurfaceCapabilitiesKHR(surface); + swapChainImageFormat = chooseSwapSurfaceFormat(physicalDevice.getSurfaceFormatsKHR(surface)); + swapChainExtent = chooseSwapExtent(surfaceCapabilities); + auto minImageCount = std::max(3u, surfaceCapabilities.minImageCount); + minImageCount = (surfaceCapabilities.maxImageCount > 0 && minImageCount > surfaceCapabilities.maxImageCount) ? surfaceCapabilities.maxImageCount : minImageCount; + + vk::raii::SwapchainKHR oldSwapChain = std::move(swapChain); + + vk::SwapchainCreateInfoKHR swapChainCreateInfo{ + .flags = vk::SwapchainCreateFlagsKHR(), + .surface = surface, .minImageCount = minImageCount, + .imageFormat = swapChainImageFormat.format, .imageColorSpace = swapChainImageFormat.colorSpace, + .imageExtent = swapChainExtent, .imageArrayLayers =1, + .imageUsage = vk::ImageUsageFlagBits::eColorAttachment, .imageSharingMode = vk::SharingMode::eExclusive, + .preTransform = surfaceCapabilities.currentTransform, .compositeAlpha = vk::CompositeAlphaFlagBitsKHR::eOpaque, + .presentMode = chooseSwapPresentMode(physicalDevice.getSurfacePresentModesKHR(surface)), + .clipped = true, + .oldSwapchain = *oldSwapChain ? 
*oldSwapChain : nullptr }; + + swapChain = vk::raii::SwapchainKHR(device, swapChainCreateInfo); + oldSwapChain = nullptr; + swapChainImages = swapChain.getImages(); + } + + void createImageViews() { + vk::ImageViewCreateInfo imageViewCreateInfo{ + .viewType = vk::ImageViewType::e2D, + .format = swapChainImageFormat.format, + .components = {vk::ComponentSwizzle::eIdentity, vk::ComponentSwizzle::eIdentity, vk::ComponentSwizzle::eIdentity, vk::ComponentSwizzle::eIdentity}, + .subresourceRange = { vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1 } + }; + for ( auto image : swapChainImages ) + { + imageViewCreateInfo.image = image; + swapChainImageViews.emplace_back( device, imageViewCreateInfo ); + } + } + + void createComputeDescriptorSetLayout() { + std::array layoutBindings{ + vk::DescriptorSetLayoutBinding(0, vk::DescriptorType::eUniformBuffer, 1, vk::ShaderStageFlagBits::eCompute, nullptr), + vk::DescriptorSetLayoutBinding(1, vk::DescriptorType::eStorageBuffer, 1, vk::ShaderStageFlagBits::eCompute, nullptr), + vk::DescriptorSetLayoutBinding(2, vk::DescriptorType::eStorageBuffer, 1, vk::ShaderStageFlagBits::eCompute, nullptr) + }; + + vk::DescriptorSetLayoutCreateInfo layoutInfo{ .bindingCount = static_cast(layoutBindings.size()), .pBindings = layoutBindings.data() }; + computeDescriptorSetLayout = vk::raii::DescriptorSetLayout( device, layoutInfo ); + } + + void createGraphicsPipeline() { + vk::raii::ShaderModule shaderModule = createShaderModule(readFile("shaders/slang.spv")); + + vk::PipelineShaderStageCreateInfo vertShaderStageInfo{ .stage = vk::ShaderStageFlagBits::eVertex, .module = shaderModule, .pName = "vertMain" }; + vk::PipelineShaderStageCreateInfo fragShaderStageInfo{ .stage = vk::ShaderStageFlagBits::eFragment, .module = shaderModule, .pName = "fragMain" }; + vk::PipelineShaderStageCreateInfo shaderStages[] = {vertShaderStageInfo, fragShaderStageInfo}; + + auto bindingDescription = Particle::getBindingDescription(); + auto attributeDescriptions = 
Particle::getAttributeDescriptions(); + + vk::PipelineVertexInputStateCreateInfo vertexInputInfo{ .vertexBindingDescriptionCount = 1, .pVertexBindingDescriptions = &bindingDescription, .vertexAttributeDescriptionCount = static_cast(attributeDescriptions.size()), .pVertexAttributeDescriptions = attributeDescriptions.data() }; + vk::PipelineInputAssemblyStateCreateInfo inputAssembly{ .topology = vk::PrimitiveTopology::ePointList, .primitiveRestartEnable = vk::False }; + vk::PipelineViewportStateCreateInfo viewportState{ .viewportCount = 1, .scissorCount = 1 }; + vk::PipelineRasterizationStateCreateInfo rasterizer{ + .depthClampEnable = vk::False, + .rasterizerDiscardEnable = vk::False, + .polygonMode = vk::PolygonMode::eFill, + .cullMode = vk::CullModeFlagBits::eBack, + .frontFace = vk::FrontFace::eCounterClockwise, + .depthBiasEnable = vk::False, + .lineWidth = 1.0f + }; + vk::PipelineMultisampleStateCreateInfo multisampling{ .rasterizationSamples = vk::SampleCountFlagBits::e1, .sampleShadingEnable = vk::False }; + + vk::PipelineColorBlendAttachmentState colorBlendAttachment{ + .blendEnable = vk::True, + .srcColorBlendFactor = vk::BlendFactor::eSrcAlpha, + .dstColorBlendFactor = vk::BlendFactor::eOneMinusSrcAlpha, + .colorBlendOp = vk::BlendOp::eAdd, + .srcAlphaBlendFactor = vk::BlendFactor::eOneMinusSrcAlpha, + .dstAlphaBlendFactor = vk::BlendFactor::eZero, + .alphaBlendOp = vk::BlendOp::eAdd, + .colorWriteMask = vk::ColorComponentFlagBits::eR | vk::ColorComponentFlagBits::eG | vk::ColorComponentFlagBits::eB | vk::ColorComponentFlagBits::eA, + }; + + vk::PipelineColorBlendStateCreateInfo colorBlending{ .logicOpEnable = vk::False, .logicOp = vk::LogicOp::eCopy, .attachmentCount = 1, .pAttachments = &colorBlendAttachment }; + + std::vector dynamicStates = { + vk::DynamicState::eViewport, + vk::DynamicState::eScissor + }; + vk::PipelineDynamicStateCreateInfo dynamicState{ .dynamicStateCount = static_cast(dynamicStates.size()), .pDynamicStates = dynamicStates.data() }; 
+ + vk::PipelineLayoutCreateInfo pipelineLayoutInfo; + pipelineLayout = vk::raii::PipelineLayout( device, pipelineLayoutInfo ); + + vk::PipelineRenderingCreateInfo pipelineRenderingCreateInfo{ .colorAttachmentCount = 1, .pColorAttachmentFormats = &swapChainImageFormat.format }; + vk::GraphicsPipelineCreateInfo pipelineInfo{ .pNext = &pipelineRenderingCreateInfo, + .stageCount = 2, + .pStages = shaderStages, + .pVertexInputState = &vertexInputInfo, + .pInputAssemblyState = &inputAssembly, + .pViewportState = &viewportState, + .pRasterizationState = &rasterizer, + .pMultisampleState = &multisampling, + .pColorBlendState = &colorBlending, + .pDynamicState = &dynamicState, + .layout = *pipelineLayout, + .subpass = 0 + }; + + graphicsPipeline = vk::raii::Pipeline(device, nullptr, pipelineInfo); + } + + void createComputePipeline() { + vk::raii::ShaderModule shaderModule = createShaderModule(readFile("shaders/slang.spv")); + + // Create push constant range for particle group information + vk::PushConstantRange pushConstantRange{ + .stageFlags = vk::ShaderStageFlagBits::eCompute, + .offset = 0, + .size = sizeof(uint32_t) * 2 // startIndex and count + }; + + vk::PipelineShaderStageCreateInfo computeShaderStageInfo{ .stage = vk::ShaderStageFlagBits::eCompute, .module = shaderModule, .pName = "compMain" }; + vk::PipelineLayoutCreateInfo pipelineLayoutInfo{ + .setLayoutCount = 1, + .pSetLayouts = &*computeDescriptorSetLayout, + .pushConstantRangeCount = 1, + .pPushConstantRanges = &pushConstantRange + }; + computePipelineLayout = vk::raii::PipelineLayout( device, pipelineLayoutInfo ); + vk::ComputePipelineCreateInfo pipelineInfo{ .stage = computeShaderStageInfo, .layout = *computePipelineLayout }; + computePipeline = vk::raii::Pipeline(device, nullptr, pipelineInfo); + } + + void createCommandPool() { + vk::CommandPoolCreateInfo poolInfo{}; + poolInfo.flags = vk::CommandPoolCreateFlagBits::eResetCommandBuffer; + poolInfo.queueFamilyIndex = graphicsQueueFamilyIndex; + 
commandPool = vk::raii::CommandPool(device, poolInfo); + } + + void createShaderStorageBuffers() { + std::default_random_engine rndEngine(static_cast(time(nullptr))); + std::uniform_real_distribution rndDist(0.0f, 1.0f); + + std::vector particles(PARTICLE_COUNT); + for (auto& particle : particles) { + // Generate a random position for the particle + float theta = rndDist(rndEngine) * 2.0f * 3.14159265358979323846f; + + // Use square root of random value to ensure uniform distribution across the area + // This prevents clustering near the center (which causes the donut effect) + float r = sqrtf(rndDist(rndEngine)) * 0.25f; + + float x = r * cosf(theta) * HEIGHT / WIDTH; + float y = r * sinf(theta); + particle.position = glm::vec2(x, y); + + // Ensure a minimum velocity and scale based on distance from center + float minVelocity = 0.001f; + float velocityScale = 0.003f; + float velocityMagnitude = std::max(minVelocity, r * velocityScale); + particle.velocity = normalize(glm::vec2(x,y)) * velocityMagnitude; + particle.color = glm::vec4(rndDist(rndEngine), rndDist(rndEngine), rndDist(rndEngine), 1.0f); + } + + vk::DeviceSize bufferSize = sizeof(Particle) * PARTICLE_COUNT; + + vk::raii::Buffer stagingBuffer({}); + vk::raii::DeviceMemory stagingBufferMemory({}); + createBuffer(bufferSize, vk::BufferUsageFlagBits::eTransferSrc, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent, stagingBuffer, stagingBufferMemory); + + void* dataStaging = stagingBufferMemory.mapMemory(0, bufferSize); + memcpy(dataStaging, particles.data(), (size_t)bufferSize); + stagingBufferMemory.unmapMemory(); + + shaderStorageBuffers.clear(); + shaderStorageBuffersMemory.clear(); + + for (size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) { + vk::raii::Buffer shaderStorageBufferTemp({}); + vk::raii::DeviceMemory shaderStorageBufferTempMemory({}); + createBuffer(bufferSize, vk::BufferUsageFlagBits::eStorageBuffer | vk::BufferUsageFlagBits::eVertexBuffer | 
vk::BufferUsageFlagBits::eTransferDst, vk::MemoryPropertyFlagBits::eDeviceLocal, shaderStorageBufferTemp, shaderStorageBufferTempMemory); + copyBuffer(stagingBuffer, shaderStorageBufferTemp, bufferSize); + shaderStorageBuffers.emplace_back(std::move(shaderStorageBufferTemp)); + shaderStorageBuffersMemory.emplace_back(std::move(shaderStorageBufferTempMemory)); + } + } + + void createUniformBuffers() { + uniformBuffers.clear(); + uniformBuffersMemory.clear(); + uniformBuffersMapped.clear(); + + for (size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) { + vk::DeviceSize bufferSize = sizeof(UniformBufferObject); + vk::raii::Buffer buffer({}); + vk::raii::DeviceMemory bufferMem({}); + createBuffer(bufferSize, vk::BufferUsageFlagBits::eUniformBuffer, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent, buffer, bufferMem); + uniformBuffers.emplace_back(std::move(buffer)); + uniformBuffersMemory.emplace_back(std::move(bufferMem)); + uniformBuffersMapped.emplace_back( uniformBuffersMemory[i].mapMemory(0, bufferSize)); + } + } + + void createDescriptorPool() { + std::array poolSize { + vk::DescriptorPoolSize( vk::DescriptorType::eUniformBuffer, MAX_FRAMES_IN_FLIGHT), + vk::DescriptorPoolSize( vk::DescriptorType::eStorageBuffer, MAX_FRAMES_IN_FLIGHT * 2) + }; + vk::DescriptorPoolCreateInfo poolInfo{}; + poolInfo.flags = vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet; + poolInfo.maxSets = MAX_FRAMES_IN_FLIGHT; + poolInfo.poolSizeCount = poolSize.size(); + poolInfo.pPoolSizes = poolSize.data(); + descriptorPool = vk::raii::DescriptorPool(device, poolInfo); + } + + void createComputeDescriptorSets() { + std::vector layouts(MAX_FRAMES_IN_FLIGHT, computeDescriptorSetLayout); + vk::DescriptorSetAllocateInfo allocInfo{}; + allocInfo.descriptorPool = *descriptorPool; + allocInfo.descriptorSetCount = MAX_FRAMES_IN_FLIGHT; + allocInfo.pSetLayouts = layouts.data(); + computeDescriptorSets.clear(); + computeDescriptorSets = 
device.allocateDescriptorSets(allocInfo); + + for (size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) { + vk::DescriptorBufferInfo bufferInfo(uniformBuffers[i], 0, sizeof(UniformBufferObject)); + + vk::DescriptorBufferInfo storageBufferInfoLastFrame(shaderStorageBuffers[(i + MAX_FRAMES_IN_FLIGHT - 1) % MAX_FRAMES_IN_FLIGHT], 0, sizeof(Particle) * PARTICLE_COUNT); + vk::DescriptorBufferInfo storageBufferInfoCurrentFrame(shaderStorageBuffers[i], 0, sizeof(Particle) * PARTICLE_COUNT); + std::array descriptorWrites{ + vk::WriteDescriptorSet{ .dstSet = *computeDescriptorSets[i], .dstBinding = 0, .dstArrayElement = 0, .descriptorCount = 1, .descriptorType = vk::DescriptorType::eUniformBuffer, .pImageInfo = nullptr, .pBufferInfo = &bufferInfo, .pTexelBufferView = nullptr }, + vk::WriteDescriptorSet{ .dstSet = *computeDescriptorSets[i], .dstBinding = 1, .dstArrayElement = 0, .descriptorCount = 1, .descriptorType = vk::DescriptorType::eStorageBuffer, .pImageInfo = nullptr, .pBufferInfo = &storageBufferInfoLastFrame, .pTexelBufferView = nullptr }, + vk::WriteDescriptorSet{ .dstSet = *computeDescriptorSets[i], .dstBinding = 2, .dstArrayElement = 0, .descriptorCount = 1, .descriptorType = vk::DescriptorType::eStorageBuffer, .pImageInfo = nullptr, .pBufferInfo = &storageBufferInfoCurrentFrame, .pTexelBufferView = nullptr }, + }; + device.updateDescriptorSets(descriptorWrites, {}); + } + } + + void createBuffer(vk::DeviceSize size, vk::BufferUsageFlags usage, vk::MemoryPropertyFlags properties, vk::raii::Buffer& buffer, vk::raii::DeviceMemory& bufferMemory) const { + vk::BufferCreateInfo bufferInfo{}; + bufferInfo.size = size; + bufferInfo.usage = usage; + bufferInfo.sharingMode = vk::SharingMode::eExclusive; + buffer = vk::raii::Buffer(device, bufferInfo); + vk::MemoryRequirements memRequirements = buffer.getMemoryRequirements(); + vk::MemoryAllocateInfo allocInfo{}; + allocInfo.allocationSize = memRequirements.size; + allocInfo.memoryTypeIndex = 
findMemoryType(memRequirements.memoryTypeBits, properties); + bufferMemory = vk::raii::DeviceMemory(device, allocInfo); + buffer.bindMemory(bufferMemory, 0); + } + + [[nodiscard]] vk::raii::CommandBuffer beginSingleTimeCommands() const { + vk::CommandBufferAllocateInfo allocInfo{}; + allocInfo.commandPool = *commandPool; + allocInfo.level = vk::CommandBufferLevel::ePrimary; + allocInfo.commandBufferCount = 1; + vk::raii::CommandBuffer commandBuffer = std::move(vk::raii::CommandBuffers( device, allocInfo ).front()); + + vk::CommandBufferBeginInfo beginInfo{ .flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit }; + commandBuffer.begin(beginInfo); + + return commandBuffer; + } + + void endSingleTimeCommands(const vk::raii::CommandBuffer& commandBuffer) const { + commandBuffer.end(); + + vk::SubmitInfo submitInfo{}; + submitInfo.commandBufferCount = 1; + submitInfo.pCommandBuffers = &*commandBuffer; + graphicsQueue.submit(submitInfo, nullptr); + graphicsQueue.waitIdle(); + } + + void copyBuffer(const vk::raii::Buffer & srcBuffer, const vk::raii::Buffer & dstBuffer, vk::DeviceSize size) const { + vk::raii::CommandBuffer commandCopyBuffer = beginSingleTimeCommands(); + commandCopyBuffer.copyBuffer(srcBuffer, dstBuffer, vk::BufferCopy(0, 0, size)); + endSingleTimeCommands(commandCopyBuffer); + } + + [[nodiscard]] uint32_t findMemoryType(uint32_t typeFilter, vk::MemoryPropertyFlags properties) const { + vk::PhysicalDeviceMemoryProperties memProperties = physicalDevice.getMemoryProperties(); + + for (uint32_t i = 0; i < memProperties.memoryTypeCount; i++) { + if ((typeFilter & (1 << i)) && (memProperties.memoryTypes[i].propertyFlags & properties) == properties) { + return i; + } + } + + throw std::runtime_error("failed to find suitable memory type!"); + } + + void createGraphicsCommandBuffers() { + graphicsCommandBuffers.clear(); + vk::CommandBufferAllocateInfo allocInfo{}; + allocInfo.commandPool = *commandPool; + allocInfo.level = vk::CommandBufferLevel::ePrimary; + 
allocInfo.commandBufferCount = MAX_FRAMES_IN_FLIGHT; + graphicsCommandBuffers = vk::raii::CommandBuffers(device, allocInfo); + } + + void recordComputeCommandBuffer(vk::raii::CommandBuffer& cmdBuffer, uint32_t startIndex, uint32_t count) { + cmdBuffer.reset(); + + vk::CommandBufferBeginInfo beginInfo{ + .flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit + }; + cmdBuffer.begin(beginInfo); + + cmdBuffer.bindPipeline(vk::PipelineBindPoint::eCompute, *computePipeline); + cmdBuffer.bindDescriptorSets(vk::PipelineBindPoint::eCompute, *computePipelineLayout, 0, {*computeDescriptorSets[currentFrame]}, {}); + + struct PushConstants { + uint32_t startIndex; + uint32_t count; + } pushConstants{startIndex, count}; + + cmdBuffer.pushConstants(*computePipelineLayout, vk::ShaderStageFlagBits::eCompute, 0, pushConstants); + + uint32_t groupCount = (count + 255) / 256; + cmdBuffer.dispatch(groupCount, 1, 1); + + cmdBuffer.end(); + } + + void recordGraphicsCommandBuffer(uint32_t imageIndex) { + graphicsCommandBuffers[currentFrame].reset(); + + vk::CommandBufferBeginInfo beginInfo{ + .flags = vk::CommandBufferUsageFlagBits::eOneTimeSubmit + }; + graphicsCommandBuffers[currentFrame].begin(beginInfo); + + transition_image_layout( + imageIndex, + vk::ImageLayout::eUndefined, + vk::ImageLayout::eColorAttachmentOptimal, + {}, + vk::AccessFlagBits2::eColorAttachmentWrite, + vk::PipelineStageFlagBits2::eTopOfPipe, + vk::PipelineStageFlagBits2::eColorAttachmentOutput + ); + + vk::ClearValue clearColor = vk::ClearColorValue(0.0f, 0.0f, 0.0f, 1.0f); + vk::RenderingAttachmentInfo attachmentInfo = { + .imageView = swapChainImageViews[imageIndex], + .imageLayout = vk::ImageLayout::eColorAttachmentOptimal, + .loadOp = vk::AttachmentLoadOp::eClear, + .storeOp = vk::AttachmentStoreOp::eStore, + .clearValue = clearColor + }; + vk::RenderingInfo renderingInfo = { + .renderArea = { .offset = { 0, 0 }, .extent = swapChainExtent }, + .layerCount = 1, + .colorAttachmentCount = 1, + .pColorAttachments 
= &attachmentInfo + }; + + graphicsCommandBuffers[currentFrame].beginRendering(renderingInfo); + + graphicsCommandBuffers[currentFrame].bindPipeline(vk::PipelineBindPoint::eGraphics, *graphicsPipeline); + graphicsCommandBuffers[currentFrame].setViewport(0, vk::Viewport(0.0f, 0.0f, static_cast(swapChainExtent.width), static_cast(swapChainExtent.height), 0.0f, 1.0f)); + graphicsCommandBuffers[currentFrame].setScissor( 0, vk::Rect2D( vk::Offset2D( 0, 0 ), swapChainExtent ) ); + graphicsCommandBuffers[currentFrame].bindVertexBuffers(0, { shaderStorageBuffers[currentFrame] }, {0}); + graphicsCommandBuffers[currentFrame].draw( PARTICLE_COUNT, 1, 0, 0 ); + graphicsCommandBuffers[currentFrame].endRendering(); + + transition_image_layout( + imageIndex, + vk::ImageLayout::eColorAttachmentOptimal, + vk::ImageLayout::ePresentSrcKHR, + vk::AccessFlagBits2::eColorAttachmentWrite, + {}, + vk::PipelineStageFlagBits2::eColorAttachmentOutput, + vk::PipelineStageFlagBits2::eBottomOfPipe + ); + + graphicsCommandBuffers[currentFrame].end(); + } + + void transition_image_layout( + uint32_t imageIndex, + vk::ImageLayout old_layout, + vk::ImageLayout new_layout, + vk::AccessFlags2 src_access_mask, + vk::AccessFlags2 dst_access_mask, + vk::PipelineStageFlags2 src_stage_mask, + vk::PipelineStageFlags2 dst_stage_mask + ) { + vk::ImageMemoryBarrier2 barrier = { + .srcStageMask = src_stage_mask, + .srcAccessMask = src_access_mask, + .dstStageMask = dst_stage_mask, + .dstAccessMask = dst_access_mask, + .oldLayout = old_layout, + .newLayout = new_layout, + .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .image = swapChainImages[imageIndex], + .subresourceRange = { + .aspectMask = vk::ImageAspectFlagBits::eColor, + .baseMipLevel = 0, + .levelCount = 1, + .baseArrayLayer = 0, + .layerCount = 1 + } + }; + vk::DependencyInfo dependency_info = { + .dependencyFlags = {}, + .imageMemoryBarrierCount = 1, + .pImageMemoryBarriers = &barrier + }; + 
graphicsCommandBuffers[currentFrame].pipelineBarrier2(dependency_info); + } + + void signalThreadsToWork() { + // Mark all threads as not done + for (uint32_t i = 0; i < threadCount; i++) { + threadWorkDone[i].store(false, std::memory_order_release); + } + + // Memory barrier to ensure all threads see the updated threadWorkDone values + std::atomic_thread_fence(std::memory_order_seq_cst); + + // Only signal the first thread to start work + threadWorkReady[0].store(true, std::memory_order_release); + + // Notify all threads in case they're waiting on the condition variable + { + std::lock_guard lock(workCompleteMutex); + workCompleteCv.notify_all(); + } + } + + void waitForThreadsToComplete() { + std::unique_lock lock(workCompleteMutex); + + // Wait for the last thread to complete with a timeout + auto waitResult = workCompleteCv.wait_for(lock, std::chrono::milliseconds(3000), [this]() { + return threadWorkDone[threadCount - 1].load(std::memory_order_acquire); + }); + + // If we timed out, force completion + if (!waitResult) { + // Force all threads to complete + for (uint32_t i = 0; i < threadCount; i++) { + threadWorkDone[i].store(true, std::memory_order_release); + threadWorkReady[i].store(false, std::memory_order_release); + } + + // Notify all threads + workCompleteCv.notify_all(); + lock.unlock(); + + // Give threads a chance to respond to the forced completion + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + } + } + + void createSyncObjects() { + imageAvailableSemaphores.clear(); + inFlightFences.clear(); + + vk::SemaphoreTypeCreateInfo semaphoreType{ .semaphoreType = vk::SemaphoreType::eTimeline, .initialValue = 0 }; + timelineSemaphore = vk::raii::Semaphore(device, {.pNext = &semaphoreType}); + timelineValue = 0; + + for (size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) { + imageAvailableSemaphores.emplace_back(device, vk::SemaphoreCreateInfo()); + + vk::FenceCreateInfo fenceInfo; + fenceInfo.flags = vk::FenceCreateFlagBits::eSignaled; + 
inFlightFences.emplace_back(device, fenceInfo); + } + } + + void updateUniformBuffer(uint32_t currentImage) { + UniformBufferObject ubo{}; + ubo.deltaTime = static_cast(lastFrameTime) * 2.0f; + + memcpy(uniformBuffersMapped[currentImage], &ubo, sizeof(ubo)); + } + + void drawFrame() { + // Wait for the previous frame to finish + while (vk::Result::eTimeout == device.waitForFences(*inFlightFences[currentFrame], vk::True, UINT64_MAX)) + ; + device.resetFences(*inFlightFences[currentFrame]); + + // Acquire the next image + auto [result, imageIndex] = swapChain.acquireNextImage(UINT64_MAX, *imageAvailableSemaphores[currentFrame], nullptr); + + // Update timeline values for synchronization + uint64_t computeWaitValue = timelineValue; + uint64_t computeSignalValue = ++timelineValue; + uint64_t graphicsWaitValue = computeSignalValue; + uint64_t graphicsSignalValue = ++timelineValue; + + // Update uniform buffer with the latest delta time + updateUniformBuffer(currentFrame); + + // Signal worker threads to start processing particles + signalThreadsToWork(); + + // Record graphics command buffer while worker threads are busy + recordGraphicsCommandBuffer(imageIndex); + + // Wait for all worker threads to complete + waitForThreadsToComplete(); + + // Collect command buffers from all threads + std::vector computeCmdBuffers; + computeCmdBuffers.reserve(threadCount); + for (uint32_t i = 0; i < threadCount; i++) { + try { + computeCmdBuffers.push_back(*resourceManager.getCommandBuffer(i)); + } catch (const std::exception&) { + // Skip this thread's command buffer if there was an error + } + } + + // Ensure we have at least one command buffer + if (computeCmdBuffers.empty()) { + return; + } + + // Set up compute submission + vk::TimelineSemaphoreSubmitInfo computeTimelineInfo{ + .waitSemaphoreValueCount = 1, + .pWaitSemaphoreValues = &computeWaitValue, + .signalSemaphoreValueCount = 1, + .pSignalSemaphoreValues = &computeSignalValue + }; + + vk::PipelineStageFlags waitStages[] = 
{vk::PipelineStageFlagBits::eComputeShader}; + + vk::SubmitInfo computeSubmitInfo{ + .pNext = &computeTimelineInfo, + .waitSemaphoreCount = 1, + .pWaitSemaphores = &*timelineSemaphore, + .pWaitDstStageMask = waitStages, + .commandBufferCount = static_cast(computeCmdBuffers.size()), + .pCommandBuffers = computeCmdBuffers.data(), + .signalSemaphoreCount = 1, + .pSignalSemaphores = &*timelineSemaphore + }; + + // Submit compute work + { + std::lock_guard lock(queueSubmitMutex); + computeQueue.submit(computeSubmitInfo, nullptr); + } + + // Set up graphics submission + vk::PipelineStageFlags graphicsWaitStages[] = {vk::PipelineStageFlagBits::eVertexInput, vk::PipelineStageFlagBits::eColorAttachmentOutput}; + + std::array waitSemaphores = {*timelineSemaphore, *imageAvailableSemaphores[currentFrame]}; + std::array waitSemaphoreValues = {graphicsWaitValue, 0}; + + vk::TimelineSemaphoreSubmitInfo graphicsTimelineInfo{ + .waitSemaphoreValueCount = static_cast(waitSemaphoreValues.size()), + .pWaitSemaphoreValues = waitSemaphoreValues.data(), + .signalSemaphoreValueCount = 1, + .pSignalSemaphoreValues = &graphicsSignalValue + }; + + vk::SubmitInfo graphicsSubmitInfo{ + .pNext = &graphicsTimelineInfo, + .waitSemaphoreCount = static_cast(waitSemaphores.size()), + .pWaitSemaphores = waitSemaphores.data(), + .pWaitDstStageMask = graphicsWaitStages, + .commandBufferCount = 1, + .pCommandBuffers = &*graphicsCommandBuffers[currentFrame], + .signalSemaphoreCount = 1, + .pSignalSemaphores = &*timelineSemaphore + }; + + // Submit graphics work + { + std::lock_guard lock(queueSubmitMutex); + graphicsQueue.submit(graphicsSubmitInfo, *inFlightFences[currentFrame]); + } + + // Wait for graphics to complete before presenting + vk::SemaphoreWaitInfo waitInfo{ + .semaphoreCount = 1, + .pSemaphores = &*timelineSemaphore, + .pValues = &graphicsSignalValue + }; + + auto waitResult = device.waitSemaphores(waitInfo, 5000000000); + if (waitResult == vk::Result::eTimeout) { + device.waitIdle(); + 
return; + } + + // Present the image + vk::PresentInfoKHR presentInfo{ + .waitSemaphoreCount = 0, + .pWaitSemaphores = nullptr, + .swapchainCount = 1, + .pSwapchains = &*swapChain, + .pImageIndices = &imageIndex + }; + + result = presentQueue.presentKHR(presentInfo); + + // Move to the next frame + currentFrame = (currentFrame + 1) % MAX_FRAMES_IN_FLIGHT; + } + +}; + + +int main() { + try { + MultithreadedApplication app; + app.run(); + } catch (const std::exception& e) { + std::cerr << e.what() << std::endl; + return EXIT_FAILURE; + } + + return EXIT_SUCCESS; +} diff --git a/attachments/37_shader_compute.comp b/attachments/37_shader_compute.comp new file mode 100644 index 00000000..966276a5 --- /dev/null +++ b/attachments/37_shader_compute.comp @@ -0,0 +1,49 @@ +#version 450 + +struct Particle { + vec2 position; + vec2 velocity; + vec4 color; +}; + +layout (binding = 0) uniform ParameterUBO { + float deltaTime; +} ubo; + +layout(std140, binding = 1) readonly buffer ParticleSSBOIn { + Particle particlesIn[ ]; +}; + +layout(std140, binding = 2) buffer ParticleSSBOOut { + Particle particlesOut[ ]; +}; + +// Push constants for particle group information +layout(push_constant) uniform PushConstants { + uint startIndex; + uint count; +} pushConstants; + +layout (local_size_x = 256, local_size_y = 1, local_size_z = 1) in; + +void main() +{ + // Calculate the global particle index by adding the thread ID to the start index + uint localIndex = gl_GlobalInvocationID.x; + + // Only process particles within the assigned range + if (localIndex < pushConstants.count) { + uint globalIndex = pushConstants.startIndex + localIndex; + + particlesOut[globalIndex].position = particlesIn[globalIndex].position + particlesIn[globalIndex].velocity.xy * ubo.deltaTime; + particlesOut[globalIndex].velocity = particlesIn[globalIndex].velocity; + + // Flip movement at window border + if ((particlesOut[globalIndex].position.x <= -1.0) || (particlesOut[globalIndex].position.x >= 1.0)) { + 
particlesOut[globalIndex].velocity.x = -particlesOut[globalIndex].velocity.x; + } + if ((particlesOut[globalIndex].position.y <= -1.0) || (particlesOut[globalIndex].position.y >= 1.0)) { + particlesOut[globalIndex].velocity.y = -particlesOut[globalIndex].velocity.y; + } + } +} diff --git a/attachments/37_shader_compute.frag b/attachments/37_shader_compute.frag new file mode 100644 index 00000000..94517ecd --- /dev/null +++ b/attachments/37_shader_compute.frag @@ -0,0 +1,11 @@ +#version 450 + +layout(location = 0) in vec3 fragColor; + +layout(location = 0) out vec4 outColor; + +void main() { + + vec2 coord = gl_PointCoord - vec2(0.5); + outColor = vec4(fragColor, 0.5 - length(coord)); +} diff --git a/attachments/37_shader_compute.slang b/attachments/37_shader_compute.slang new file mode 100644 index 00000000..d8355706 --- /dev/null +++ b/attachments/37_shader_compute.slang @@ -0,0 +1,81 @@ +struct VSInput { + float2 inPosition; + float4 inColor; +}; + +struct VSOutput +{ + float4 pos : SV_Position; + float pointSize : SV_PointSize; + float3 fragColor : COLOR0; +}; + +struct PSInput +{ + float4 pos : SV_POSITION; + float3 fragColor : COLOR0; + float2 pointCoord : SV_PointCoord; +}; + +[shader("vertex")] +VSOutput vertMain(VSInput input) { + VSOutput output; + output.pointSize = 14.0; + output.pos = float4(input.inPosition, 1.0, 1.0); + output.fragColor = input.inColor.rgb; + return output; +} + +[shader("fragment")] +float4 fragMain(PSInput input) : SV_TARGET { + float2 coord = input.pointCoord - float2(0.5); + return float4(input.fragColor, 0.5 - length(coord)); +} + +struct Particle { + float2 position; + float2 velocity; + float4 color; +}; + +struct UniformBuffer { + float deltaTime; +}; +ConstantBuffer ubo; + +// Push constants for particle group information +struct PushConstants { + uint startIndex; + uint count; +}; +[[vk::push_constant]] PushConstants pushConstants; + +struct ParticleSSBO { + Particle particles; +}; +StructuredBuffer particlesIn; 
+RWStructuredBuffer particlesOut; + +[shader("compute")] +[numthreads(256,1,1)] +void compMain(uint3 threadId : SV_DispatchThreadID) +{ + // Calculate the global particle index by adding the thread ID to the start index + uint localIndex = threadId.x; + + // Only process particles within the assigned range + if (localIndex < pushConstants.count) { + uint globalIndex = pushConstants.startIndex + localIndex; + + particlesOut[globalIndex].particles.position = particlesIn[globalIndex].particles.position + particlesIn[globalIndex].particles.velocity.xy * ubo.deltaTime; + particlesOut[globalIndex].particles.velocity = particlesIn[globalIndex].particles.velocity; + + // Flip movement at window border + if ((particlesOut[globalIndex].particles.position.x <= -1.0) || (particlesOut[globalIndex].particles.position.x >= 1.0)) { + particlesOut[globalIndex].particles.velocity.x = -particlesOut[globalIndex].particles.velocity.x; + } + if ((particlesOut[globalIndex].particles.position.y <= -1.0) || (particlesOut[globalIndex].particles.position.y >= 1.0)) { + particlesOut[globalIndex].particles.velocity.y = -particlesOut[globalIndex].particles.velocity.y; + } + } +} diff --git a/attachments/37_shader_compute.vert b/attachments/37_shader_compute.vert new file mode 100644 index 00000000..9730d27d --- /dev/null +++ b/attachments/37_shader_compute.vert @@ -0,0 +1,13 @@ +#version 450 + +layout(location = 0) in vec2 inPosition; +layout(location = 1) in vec4 inColor; + +layout(location = 0) out vec3 fragColor; + +void main() { + + gl_PointSize = 14.0; + gl_Position = vec4(inPosition.xy, 1.0, 1.0); + fragColor = inColor.rgb; +} diff --git a/attachments/CMake/FindKTX.cmake b/attachments/CMake/FindKTX.cmake new file mode 100644 index 00000000..ac6971a2 --- /dev/null +++ b/attachments/CMake/FindKTX.cmake @@ -0,0 +1,106 @@ +# FindKTX.cmake +# +# Finds the KTX library +# +# This will define the following variables +# +# KTX_FOUND +# KTX_INCLUDE_DIRS +# KTX_LIBRARIES +# +# and the following 
imported targets +# +# KTX::ktx +# + +# Check if we're on Linux - if so, we'll skip the search and directly use FetchContent +if(UNIX AND NOT APPLE) + # On Linux, we assume KTX is not installed and proceed directly to fetching it + set(KTX_FOUND FALSE) +else() + # On non-Linux platforms, try to find KTX using pkg-config first + find_package(PkgConfig QUIET) + if(PKG_CONFIG_FOUND) + pkg_check_modules(PC_KTX QUIET ktx libktx ktx2 libktx2) + endif() + + # Try to find KTX using standard find_package + find_path(KTX_INCLUDE_DIR + NAMES ktx.h + PATH_SUFFIXES include ktx KTX ktx2 KTX2 + HINTS + ${PC_KTX_INCLUDEDIR} + /usr/include + /usr/local/include + $ENV{KTX_DIR}/include + $ENV{VULKAN_SDK}/include + ${CMAKE_SOURCE_DIR}/external/ktx/include + ) + + find_library(KTX_LIBRARY + NAMES ktx ktx2 libktx libktx2 + PATH_SUFFIXES lib lib64 + HINTS + ${PC_KTX_LIBDIR} + /usr/lib + /usr/lib64 + /usr/local/lib + /usr/local/lib64 + $ENV{KTX_DIR}/lib + $ENV{VULKAN_SDK}/lib + ${CMAKE_SOURCE_DIR}/external/ktx/lib + ) + + include(FindPackageHandleStandardArgs) + find_package_handle_standard_args(KTX + REQUIRED_VARS KTX_INCLUDE_DIR KTX_LIBRARY + FAIL_MESSAGE "" # Suppress the error message to allow our fallback + ) + + # Debug output if KTX is not found (only on non-Linux platforms) + if(NOT KTX_FOUND) + message(STATUS "KTX include directory search paths: ${PC_KTX_INCLUDEDIR}, /usr/include, /usr/local/include, $ENV{KTX_DIR}/include, $ENV{VULKAN_SDK}/include, ${CMAKE_SOURCE_DIR}/external/ktx/include") + message(STATUS "KTX library search paths: ${PC_KTX_LIBDIR}, /usr/lib, /usr/lib64, /usr/local/lib, /usr/local/lib64, $ENV{KTX_DIR}/lib, $ENV{VULKAN_SDK}/lib, ${CMAKE_SOURCE_DIR}/external/ktx/lib") + endif() +endif() + +if(KTX_FOUND) + set(KTX_INCLUDE_DIRS ${KTX_INCLUDE_DIR}) + set(KTX_LIBRARIES ${KTX_LIBRARY}) + + if(NOT TARGET KTX::ktx) + add_library(KTX::ktx UNKNOWN IMPORTED) + set_target_properties(KTX::ktx PROPERTIES + IMPORTED_LOCATION "${KTX_LIBRARIES}" + INTERFACE_INCLUDE_DIRECTORIES 
"${KTX_INCLUDE_DIRS}" + ) + endif() +else() + # If not found, use FetchContent to download and build + include(FetchContent) + + # Only show the message on non-Linux platforms + if(NOT (UNIX AND NOT APPLE)) + message(STATUS "KTX not found, fetching from GitHub...") + endif() + + FetchContent_Declare( + ktx + GIT_REPOSITORY https://github.com/KhronosGroup/KTX-Software.git + GIT_TAG v4.3.1 # Use a specific tag for stability + ) + + # Set options to minimize build time and dependencies + set(KTX_FEATURE_TOOLS OFF CACHE BOOL "Build KTX tools" FORCE) + set(KTX_FEATURE_DOC OFF CACHE BOOL "Build KTX documentation" FORCE) + set(KTX_FEATURE_TESTS OFF CACHE BOOL "Build KTX tests" FORCE) + + FetchContent_MakeAvailable(ktx) + + # Create an alias to match the expected target name + if(NOT TARGET KTX::ktx) + add_library(KTX::ktx ALIAS ktx) + endif() + + set(KTX_FOUND TRUE) +endif() diff --git a/attachments/CMake/FindTinyGLTF.cmake b/attachments/CMake/FindTinyGLTF.cmake new file mode 100644 index 00000000..f01469ae --- /dev/null +++ b/attachments/CMake/FindTinyGLTF.cmake @@ -0,0 +1,107 @@ +# FindTinyGLTF.cmake +# +# Finds the TinyGLTF library +# +# This will define the following variables +# +# TinyGLTF_FOUND +# TinyGLTF_INCLUDE_DIRS +# +# and the following imported targets +# +# tinygltf::tinygltf +# + +# First, try to find nlohmann_json +find_package(nlohmann_json QUIET) +if(NOT nlohmann_json_FOUND) + include(FetchContent) + message(STATUS "nlohmann_json not found, fetching from GitHub...") + FetchContent_Declare( + nlohmann_json + GIT_REPOSITORY https://github.com/nlohmann/json.git + GIT_TAG v3.11.2 # Use a specific tag for stability + ) + FetchContent_MakeAvailable(nlohmann_json) +endif() + +# Try to find TinyGLTF using standard find_package +find_path(TinyGLTF_INCLUDE_DIR + NAMES tiny_gltf.h + PATH_SUFFIXES include tinygltf +) + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(TinyGLTF + REQUIRED_VARS TinyGLTF_INCLUDE_DIR + FAIL_MESSAGE "" # 
Suppress the error message to allow our fallback +) + +if(TinyGLTF_FOUND) + set(TinyGLTF_INCLUDE_DIRS ${TinyGLTF_INCLUDE_DIR}) + + if(NOT TARGET tinygltf::tinygltf) + add_library(tinygltf::tinygltf INTERFACE IMPORTED) + set_target_properties(tinygltf::tinygltf PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${TinyGLTF_INCLUDE_DIRS}" + INTERFACE_COMPILE_DEFINITIONS "TINYGLTF_IMPLEMENTATION;TINYGLTF_NO_EXTERNAL_IMAGE;TINYGLTF_NO_STB_IMAGE;TINYGLTF_NO_STB_IMAGE_WRITE" + ) + if(TARGET nlohmann_json::nlohmann_json) + target_link_libraries(tinygltf::tinygltf INTERFACE nlohmann_json::nlohmann_json) + endif() + endif() +else() + # If not found, use FetchContent to download and build + include(FetchContent) + + message(STATUS "TinyGLTF not found, fetching from GitHub...") + FetchContent_Declare( + tinygltf + GIT_REPOSITORY https://github.com/syoyo/tinygltf.git + GIT_TAG v2.8.18 # Use a specific tag for stability + ) + + # Configure tinygltf before making it available + FetchContent_GetProperties(tinygltf) + if(NOT tinygltf_POPULATED) + FetchContent_Populate(tinygltf) + + # Update the minimum required CMake version to avoid deprecation warning + file(READ "${tinygltf_SOURCE_DIR}/CMakeLists.txt" TINYGLTF_CMAKE_CONTENT) + string(REPLACE "cmake_minimum_required(VERSION 3.6)" + "cmake_minimum_required(VERSION 3.10)" + TINYGLTF_CMAKE_CONTENT "${TINYGLTF_CMAKE_CONTENT}") + file(WRITE "${tinygltf_SOURCE_DIR}/CMakeLists.txt" "${TINYGLTF_CMAKE_CONTENT}") + + # Create a symbolic link to make nlohmann/json.hpp available + if(EXISTS "${tinygltf_SOURCE_DIR}/json.hpp") + file(MAKE_DIRECTORY "${tinygltf_SOURCE_DIR}/nlohmann") + file(CREATE_LINK "${tinygltf_SOURCE_DIR}/json.hpp" "${tinygltf_SOURCE_DIR}/nlohmann/json.hpp" SYMBOLIC) + endif() + + # Set tinygltf to header-only mode + set(TINYGLTF_HEADER_ONLY ON CACHE BOOL "Use header only version" FORCE) + set(TINYGLTF_INSTALL OFF CACHE BOOL "Do not install tinygltf" FORCE) + + # Add the subdirectory after modifying the CMakeLists.txt + 
add_subdirectory(${tinygltf_SOURCE_DIR} ${tinygltf_BINARY_DIR}) + endif() + + # Create an alias for the tinygltf target + if(NOT TARGET tinygltf::tinygltf) + add_library(tinygltf_wrapper INTERFACE) + target_link_libraries(tinygltf_wrapper INTERFACE tinygltf) + target_compile_definitions(tinygltf_wrapper INTERFACE + TINYGLTF_IMPLEMENTATION + TINYGLTF_NO_EXTERNAL_IMAGE + TINYGLTF_NO_STB_IMAGE + TINYGLTF_NO_STB_IMAGE_WRITE + ) + if(TARGET nlohmann_json::nlohmann_json) + target_link_libraries(tinygltf_wrapper INTERFACE nlohmann_json::nlohmann_json) + endif() + add_library(tinygltf::tinygltf ALIAS tinygltf_wrapper) + endif() + + set(TinyGLTF_FOUND TRUE) +endif() diff --git a/attachments/CMake/FindVulkanHpp.cmake b/attachments/CMake/FindVulkanHpp.cmake new file mode 100644 index 00000000..16b13e07 --- /dev/null +++ b/attachments/CMake/FindVulkanHpp.cmake @@ -0,0 +1,426 @@ +# FindVulkanHpp.cmake +# +# Finds or downloads the Vulkan-Hpp headers and Vulkan Profiles headers +# +# This will define the following variables +# +# VulkanHpp_FOUND +# VulkanHpp_INCLUDE_DIRS +# +# and the following imported targets +# +# VulkanHpp::VulkanHpp +# + +# Try to find the package using standard find_path +find_path(VulkanHpp_INCLUDE_DIR + NAMES vulkan/vulkan.hpp + PATHS + ${Vulkan_INCLUDE_DIR} + /usr/include + /usr/local/include + $ENV{VULKAN_SDK}/include + ${ANDROID_NDK}/sources/third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../external + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../attachments/external + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../attachments/third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../attachments/include + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../../external + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../../third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../../include +) + +# Also try to find vulkan.cppm +find_path(VulkanHpp_CPPM_DIR + NAMES vulkan/vulkan.cppm + PATHS 
+ ${Vulkan_INCLUDE_DIR} + /usr/include + /usr/local/include + $ENV{VULKAN_SDK}/include + ${ANDROID_NDK}/sources/third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../external + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../attachments/external + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../attachments/third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../attachments/include + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../../external + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../../third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../../include +) + +# Try to find vulkan_profiles.hpp +find_path(VulkanProfiles_INCLUDE_DIR + NAMES vulkan/vulkan_profiles.hpp + PATHS + ${Vulkan_INCLUDE_DIR} + /usr/include + /usr/local/include + $ENV{VULKAN_SDK}/include + ${ANDROID_NDK}/sources/third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../external + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../attachments/external + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../attachments/third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../attachments/include + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../../external + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../../third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../../include +) + +# Function to extract Vulkan version from vulkan_core.h +function(extract_vulkan_version VULKAN_CORE_H_PATH OUTPUT_VERSION_TAG) + # Extract the version information from vulkan_core.h + file(STRINGS ${VULKAN_CORE_H_PATH} VULKAN_VERSION_MAJOR_LINE REGEX "^#define VK_VERSION_MAJOR") + file(STRINGS ${VULKAN_CORE_H_PATH} VULKAN_VERSION_MINOR_LINE REGEX "^#define VK_VERSION_MINOR") + file(STRINGS ${VULKAN_CORE_H_PATH} VULKAN_HEADER_VERSION_LINE REGEX "^#define VK_HEADER_VERSION") + + set(VERSION_TAG "v1.3.275") # Default fallback + + if(VULKAN_VERSION_MAJOR_LINE AND VULKAN_VERSION_MINOR_LINE AND 
VULKAN_HEADER_VERSION_LINE) + string(REGEX REPLACE "^#define VK_VERSION_MAJOR[ \t]+([0-9]+).*$" "\\1" VULKAN_VERSION_MAJOR "${VULKAN_VERSION_MAJOR_LINE}") + string(REGEX REPLACE "^#define VK_VERSION_MINOR[ \t]+([0-9]+).*$" "\\1" VULKAN_VERSION_MINOR "${VULKAN_VERSION_MINOR_LINE}") + string(REGEX REPLACE "^#define VK_HEADER_VERSION[ \t]+([0-9]+).*$" "\\1" VULKAN_HEADER_VERSION "${VULKAN_HEADER_VERSION_LINE}") + + # Construct the version tag + set(VERSION_TAG "v${VULKAN_VERSION_MAJOR}.${VULKAN_VERSION_MINOR}.${VULKAN_HEADER_VERSION}") + else() + # Alternative approach: look for VK_HEADER_VERSION_COMPLETE + file(STRINGS ${VULKAN_CORE_H_PATH} VULKAN_HEADER_VERSION_COMPLETE_LINE REGEX "^#define VK_HEADER_VERSION_COMPLETE") + file(STRINGS ${VULKAN_CORE_H_PATH} VULKAN_HEADER_VERSION_LINE REGEX "^#define VK_HEADER_VERSION") + + if(VULKAN_HEADER_VERSION_COMPLETE_LINE AND VULKAN_HEADER_VERSION_LINE) + # Extract the header version + string(REGEX REPLACE "^#define VK_HEADER_VERSION[ \t]+([0-9]+).*$" "\\1" VULKAN_HEADER_VERSION "${VULKAN_HEADER_VERSION_LINE}") + + # Check if the complete version line contains the major and minor versions + if(VULKAN_HEADER_VERSION_COMPLETE_LINE MATCHES "VK_MAKE_API_VERSION\\(.*,[ \t]*([0-9]+),[ \t]*([0-9]+),[ \t]*VK_HEADER_VERSION\\)") + set(VULKAN_VERSION_MAJOR "${CMAKE_MATCH_1}") + set(VULKAN_VERSION_MINOR "${CMAKE_MATCH_2}") + set(VERSION_TAG "v${VULKAN_VERSION_MAJOR}.${VULKAN_VERSION_MINOR}.${VULKAN_HEADER_VERSION}") + endif() + endif() + endif() + + # Return the version tag + set(${OUTPUT_VERSION_TAG} ${VERSION_TAG} PARENT_SCOPE) +endfunction() + +# Determine the Vulkan version to use for Vulkan-Hpp and Vulkan-Profiles +set(VULKAN_VERSION_TAG "v1.3.275") # Default version + +# Try to detect the Vulkan version +set(VULKAN_CORE_H "") + +# If we're building for Android, try to detect the NDK's Vulkan version +if(DEFINED ANDROID_NDK) + # Find the vulkan_core.h file in the NDK + find_file(VULKAN_CORE_H vulkan_core.h + PATHS + 
${ANDROID_NDK}/toolchains/llvm/prebuilt/linux-x86_64/sysroot/usr/include/vulkan + ${ANDROID_NDK}/toolchains/llvm/prebuilt/darwin-x86_64/sysroot/usr/include/vulkan + ${ANDROID_NDK}/toolchains/llvm/prebuilt/windows-x86_64/sysroot/usr/include/vulkan + ${ANDROID_NDK}/toolchains/llvm/prebuilt/windows/sysroot/usr/include/vulkan + NO_DEFAULT_PATH + ) + + if(VULKAN_CORE_H) + extract_vulkan_version(${VULKAN_CORE_H} VULKAN_VERSION_TAG) + message(STATUS "Detected NDK Vulkan version: ${VULKAN_VERSION_TAG}") + else() + message(STATUS "Could not find vulkan_core.h in NDK, using default version: ${VULKAN_VERSION_TAG}") + endif() +# For desktop builds, try to detect the Vulkan SDK version +elseif(DEFINED ENV{VULKAN_SDK}) + # Find the vulkan_core.h file in the Vulkan SDK + find_file(VULKAN_CORE_H vulkan_core.h + PATHS + $ENV{VULKAN_SDK}/include/vulkan + NO_DEFAULT_PATH + ) + + if(VULKAN_CORE_H) + extract_vulkan_version(${VULKAN_CORE_H} VULKAN_VERSION_TAG) + message(STATUS "Detected Vulkan SDK version: ${VULKAN_VERSION_TAG}") + else() + message(STATUS "Could not find vulkan_core.h in Vulkan SDK, using default version: ${VULKAN_VERSION_TAG}") + endif() +# If Vulkan package was already found, try to use its include directory +elseif(DEFINED Vulkan_INCLUDE_DIR) + # Find the vulkan_core.h file in the Vulkan include directory + find_file(VULKAN_CORE_H vulkan_core.h + PATHS + ${Vulkan_INCLUDE_DIR}/vulkan + NO_DEFAULT_PATH + ) + + if(VULKAN_CORE_H) + extract_vulkan_version(${VULKAN_CORE_H} VULKAN_VERSION_TAG) + message(STATUS "Detected Vulkan version from include directory: ${VULKAN_VERSION_TAG}") + else() + message(STATUS "Could not find vulkan_core.h in Vulkan include directory, using default version: ${VULKAN_VERSION_TAG}") + endif() +else() + # Try to find vulkan_core.h in system paths + find_file(VULKAN_CORE_H vulkan_core.h + PATHS + /usr/include/vulkan + /usr/local/include/vulkan + ) + + if(VULKAN_CORE_H) + extract_vulkan_version(${VULKAN_CORE_H} VULKAN_VERSION_TAG) + message(STATUS 
"Detected system Vulkan version: ${VULKAN_VERSION_TAG}") + else() + message(STATUS "Could not find vulkan_core.h in system paths, using default version: ${VULKAN_VERSION_TAG}") + endif() +endif() + +# If the include directory wasn't found, use FetchContent to download and build +if(NOT VulkanHpp_INCLUDE_DIR OR NOT VulkanHpp_CPPM_DIR) + # If not found, use FetchContent to download + include(FetchContent) + + message(STATUS "Vulkan-Hpp not found, fetching from GitHub with version ${VULKAN_VERSION_TAG}...") + FetchContent_Declare( + VulkanHpp + GIT_REPOSITORY https://github.com/KhronosGroup/Vulkan-Hpp.git + GIT_TAG ${VULKAN_VERSION_TAG} # Use the detected or default version + ) + + # Set policy to suppress the deprecation warning + if(POLICY CMP0169) + cmake_policy(SET CMP0169 OLD) + endif() + + # Make sure FetchContent is available + include(FetchContent) + + # Populate the content + FetchContent_GetProperties(VulkanHpp SOURCE_DIR VulkanHpp_SOURCE_DIR) + if(NOT VulkanHpp_POPULATED) + FetchContent_Populate(VulkanHpp) + # Get the source directory after populating + FetchContent_GetProperties(VulkanHpp SOURCE_DIR VulkanHpp_SOURCE_DIR) + endif() + + # Set the include directory to the source directory + set(VulkanHpp_INCLUDE_DIR ${VulkanHpp_SOURCE_DIR}) + message(STATUS "VulkanHpp_SOURCE_DIR: ${VulkanHpp_SOURCE_DIR}") + message(STATUS "VulkanHpp_INCLUDE_DIR: ${VulkanHpp_INCLUDE_DIR}") + + # Check if vulkan.cppm exists in the downloaded repository + if(EXISTS "${VulkanHpp_SOURCE_DIR}/vulkan/vulkan.cppm") + set(VulkanHpp_CPPM_DIR ${VulkanHpp_SOURCE_DIR}) + else() + # If vulkan.cppm doesn't exist, we need to create it + set(VulkanHpp_CPPM_DIR ${CMAKE_CURRENT_BINARY_DIR}/VulkanHpp) + file(MAKE_DIRECTORY ${VulkanHpp_CPPM_DIR}/vulkan) + + # Create vulkan.cppm file + file(WRITE "${VulkanHpp_CPPM_DIR}/vulkan/vulkan.cppm" +"// Auto-generated vulkan.cppm file +module; +#include <vulkan/vulkan.hpp> +export module vulkan; +export namespace vk { + using namespace VULKAN_HPP_NAMESPACE; +} +") + endif() 
+endif() + +# If the Vulkan Profiles include directory wasn't found, use FetchContent to download +if(NOT VulkanProfiles_INCLUDE_DIR) + # If not found, use FetchContent to download + include(FetchContent) + + message(STATUS "Vulkan-Profiles not found, fetching from GitHub main branch...") + FetchContent_Declare( + VulkanProfiles + GIT_REPOSITORY https://github.com/KhronosGroup/Vulkan-Profiles.git + GIT_TAG main # Use main branch instead of a specific tag + ) + + # Set policy to suppress the deprecation warning + if(POLICY CMP0169) + cmake_policy(SET CMP0169 OLD) + endif() + + # Populate the content + FetchContent_GetProperties(VulkanProfiles SOURCE_DIR VulkanProfiles_SOURCE_DIR) + if(NOT VulkanProfiles_POPULATED) + FetchContent_Populate(VulkanProfiles) + # Get the source directory after populating + FetchContent_GetProperties(VulkanProfiles SOURCE_DIR VulkanProfiles_SOURCE_DIR) + endif() + + # Create the include directory structure if it doesn't exist + set(VulkanProfiles_INCLUDE_DIR ${CMAKE_CURRENT_BINARY_DIR}/VulkanProfiles/include) + file(MAKE_DIRECTORY ${VulkanProfiles_INCLUDE_DIR}/vulkan) + + # Create a stub vulkan_profiles.hpp file if it doesn't exist + if(NOT EXISTS "${VulkanProfiles_INCLUDE_DIR}/vulkan/vulkan_profiles.hpp") + file(WRITE "${VulkanProfiles_INCLUDE_DIR}/vulkan/vulkan_profiles.hpp" +"// Auto-generated vulkan_profiles.hpp stub file +#pragma once +#include <vulkan/vulkan.h> + +namespace vp { + // Stub implementation for Vulkan Profiles + struct ProfileDesc { + const char* name; + uint32_t specVersion; + }; + + inline bool GetProfileSupport(VkPhysicalDevice physicalDevice, const ProfileDesc* pProfile, VkBool32* pSupported) { + *pSupported = VK_TRUE; + return true; + } +} +") + endif() + + message(STATUS "VulkanProfiles_SOURCE_DIR: ${VulkanProfiles_SOURCE_DIR}") + message(STATUS "VulkanProfiles_INCLUDE_DIR: ${VulkanProfiles_INCLUDE_DIR}") +endif() + +# Set the variables +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(VulkanHpp + 
REQUIRED_VARS VulkanHpp_INCLUDE_DIR + FAIL_MESSAGE "Could NOT find VulkanHpp. Install it or set VulkanHpp_INCLUDE_DIR to the directory containing vulkan/vulkan.hpp" +) + +# Debug output +message(STATUS "VulkanHpp_FOUND: ${VulkanHpp_FOUND}") +message(STATUS "VULKANHPP_FOUND: ${VULKANHPP_FOUND}") + +if(VulkanHpp_FOUND) + set(VulkanHpp_INCLUDE_DIRS ${VulkanHpp_INCLUDE_DIR}) + + # Make sure VulkanHpp_CPPM_DIR is set + if(NOT DEFINED VulkanHpp_CPPM_DIR) + # Check if vulkan.cppm exists in the include directory + if(EXISTS "${VulkanHpp_INCLUDE_DIR}/vulkan/vulkan.cppm") + set(VulkanHpp_CPPM_DIR ${VulkanHpp_INCLUDE_DIR}) + message(STATUS "Found vulkan.cppm in VulkanHpp_INCLUDE_DIR: ${VulkanHpp_CPPM_DIR}") + elseif(DEFINED VulkanHpp_SOURCE_DIR AND EXISTS "${VulkanHpp_SOURCE_DIR}/vulkan/vulkan.cppm") + set(VulkanHpp_CPPM_DIR ${VulkanHpp_SOURCE_DIR}) + message(STATUS "Found vulkan.cppm in VulkanHpp_SOURCE_DIR: ${VulkanHpp_CPPM_DIR}") + elseif(DEFINED vulkanhpp_SOURCE_DIR AND EXISTS "${vulkanhpp_SOURCE_DIR}/vulkan/vulkan.cppm") + set(VulkanHpp_CPPM_DIR ${vulkanhpp_SOURCE_DIR}) + message(STATUS "Found vulkan.cppm in vulkanhpp_SOURCE_DIR: ${VulkanHpp_CPPM_DIR}") + else() + # If vulkan.cppm doesn't exist, we need to create it + set(VulkanHpp_CPPM_DIR ${CMAKE_CURRENT_BINARY_DIR}/VulkanHpp) + file(MAKE_DIRECTORY ${VulkanHpp_CPPM_DIR}/vulkan) + message(STATUS "Creating vulkan.cppm in ${VulkanHpp_CPPM_DIR}") + + # Create vulkan.cppm file + file(WRITE "${VulkanHpp_CPPM_DIR}/vulkan/vulkan.cppm" +"// Auto-generated vulkan.cppm file +module; +#include <vulkan/vulkan.hpp> +export module vulkan; +export namespace vk { + using namespace VULKAN_HPP_NAMESPACE; +} +") + endif() + endif() + + message(STATUS "Final VulkanHpp_CPPM_DIR: ${VulkanHpp_CPPM_DIR}") + + # Add Vulkan Profiles include directory if found + if(VulkanProfiles_INCLUDE_DIR AND EXISTS "${VulkanProfiles_INCLUDE_DIR}/vulkan/vulkan_profiles.hpp") + list(APPEND VulkanHpp_INCLUDE_DIRS ${VulkanProfiles_INCLUDE_DIR}) + message(STATUS "Added Vulkan 
Profiles include directory: ${VulkanProfiles_INCLUDE_DIR}") + endif() + + # Create an imported target + if(NOT TARGET VulkanHpp::VulkanHpp) + add_library(VulkanHpp::VulkanHpp INTERFACE IMPORTED) + set_target_properties(VulkanHpp::VulkanHpp PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${VulkanHpp_INCLUDE_DIRS}" + ) + endif() +elseif(DEFINED VulkanHpp_SOURCE_DIR OR DEFINED vulkanhpp_SOURCE_DIR) + # If find_package_handle_standard_args failed but we have a VulkanHpp source directory from FetchContent + # Create an imported target + if(NOT TARGET VulkanHpp::VulkanHpp) + add_library(VulkanHpp::VulkanHpp INTERFACE IMPORTED) + + # Determine the source directory + if(DEFINED VulkanHpp_SOURCE_DIR) + set(_vulkanhpp_source_dir ${VulkanHpp_SOURCE_DIR}) + elseif(DEFINED vulkanhpp_SOURCE_DIR) + set(_vulkanhpp_source_dir ${vulkanhpp_SOURCE_DIR}) + endif() + + message(STATUS "Using fallback VulkanHpp source directory: ${_vulkanhpp_source_dir}") + + set_target_properties(VulkanHpp::VulkanHpp PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${_vulkanhpp_source_dir}" + ) + endif() + + # Set variables to indicate that VulkanHpp was found + set(VulkanHpp_FOUND TRUE) + set(VULKANHPP_FOUND TRUE) + + # Set include directories + if(DEFINED _vulkanhpp_source_dir) + set(VulkanHpp_INCLUDE_DIR ${_vulkanhpp_source_dir}) + elseif(DEFINED VulkanHpp_SOURCE_DIR) + set(VulkanHpp_INCLUDE_DIR ${VulkanHpp_SOURCE_DIR}) + elseif(DEFINED vulkanhpp_SOURCE_DIR) + set(VulkanHpp_INCLUDE_DIR ${vulkanhpp_SOURCE_DIR}) + endif() + set(VulkanHpp_INCLUDE_DIRS ${VulkanHpp_INCLUDE_DIR}) + + # Add Vulkan Profiles include directory if found + if(VulkanProfiles_INCLUDE_DIR AND EXISTS "${VulkanProfiles_INCLUDE_DIR}/vulkan/vulkan_profiles.hpp") + list(APPEND VulkanHpp_INCLUDE_DIRS ${VulkanProfiles_INCLUDE_DIR}) + message(STATUS "Added Vulkan Profiles include directory to fallback: ${VulkanProfiles_INCLUDE_DIR}") + endif() + + # Make sure VulkanHpp_CPPM_DIR is set + if(NOT DEFINED VulkanHpp_CPPM_DIR) + # Check if vulkan.cppm 
exists in the downloaded repository + if(DEFINED VulkanHpp_INCLUDE_DIR AND EXISTS "${VulkanHpp_INCLUDE_DIR}/vulkan/vulkan.cppm") + set(VulkanHpp_CPPM_DIR ${VulkanHpp_INCLUDE_DIR}) + message(STATUS "Found vulkan.cppm in VulkanHpp_INCLUDE_DIR: ${VulkanHpp_CPPM_DIR}") + elseif(DEFINED _vulkanhpp_source_dir AND EXISTS "${_vulkanhpp_source_dir}/vulkan/vulkan.cppm") + set(VulkanHpp_CPPM_DIR ${_vulkanhpp_source_dir}) + message(STATUS "Found vulkan.cppm in _vulkanhpp_source_dir: ${VulkanHpp_CPPM_DIR}") + elseif(DEFINED VulkanHpp_SOURCE_DIR AND EXISTS "${VulkanHpp_SOURCE_DIR}/vulkan/vulkan.cppm") + set(VulkanHpp_CPPM_DIR ${VulkanHpp_SOURCE_DIR}) + message(STATUS "Found vulkan.cppm in VulkanHpp_SOURCE_DIR: ${VulkanHpp_CPPM_DIR}") + elseif(DEFINED vulkanhpp_SOURCE_DIR AND EXISTS "${vulkanhpp_SOURCE_DIR}/vulkan/vulkan.cppm") + set(VulkanHpp_CPPM_DIR ${vulkanhpp_SOURCE_DIR}) + message(STATUS "Found vulkan.cppm in vulkanhpp_SOURCE_DIR: ${VulkanHpp_CPPM_DIR}") + else() + # If vulkan.cppm doesn't exist, we need to create it + set(VulkanHpp_CPPM_DIR ${CMAKE_CURRENT_BINARY_DIR}/VulkanHpp) + file(MAKE_DIRECTORY ${VulkanHpp_CPPM_DIR}/vulkan) + message(STATUS "Creating vulkan.cppm in ${VulkanHpp_CPPM_DIR}") + + # Create vulkan.cppm file + file(WRITE "${VulkanHpp_CPPM_DIR}/vulkan/vulkan.cppm" +"// Auto-generated vulkan.cppm file +module; +#include <vulkan/vulkan.hpp> +export module vulkan; +export namespace vk { + using namespace VULKAN_HPP_NAMESPACE; +} +") + endif() + endif() + + message(STATUS "Final VulkanHpp_CPPM_DIR: ${VulkanHpp_CPPM_DIR}") +endif() + +mark_as_advanced(VulkanHpp_INCLUDE_DIR VulkanHpp_CPPM_DIR) diff --git a/attachments/CMake/Findglm.cmake b/attachments/CMake/Findglm.cmake new file mode 100644 index 00000000..fdf113cf --- /dev/null +++ b/attachments/CMake/Findglm.cmake @@ -0,0 +1,133 @@ +# Findglm.cmake +# +# Finds the GLM library +# +# This will define the following variables +# +# glm_FOUND +# glm_INCLUDE_DIRS +# +# and the following imported targets +# +# glm::glm +# + +# Try to 
find the package using pkg-config first +find_package(PkgConfig QUIET) +if(PKG_CONFIG_FOUND) + pkg_check_modules(PC_glm QUIET glm) +endif() + +# Find the include directory +find_path(glm_INCLUDE_DIR + NAMES glm/glm.hpp + PATHS + ${PC_glm_INCLUDE_DIRS} + /usr/include + /usr/local/include + $ENV{VULKAN_SDK}/include + ${ANDROID_NDK}/sources/third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../external + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../attachments/external + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../attachments/third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../attachments/include + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../../external + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../../third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../../include + PATH_SUFFIXES glm +) + +# If the include directory wasn't found, use FetchContent to download and build +if(NOT glm_INCLUDE_DIR) + # If not found, use FetchContent to download and build + include(FetchContent) + + message(STATUS "GLM not found, fetching from GitHub...") + FetchContent_Declare( + glm + GIT_REPOSITORY https://github.com/g-truc/glm.git + GIT_TAG 0.9.9.8 # Use a specific tag for stability + ) + + # Define a function to update the CMake minimum required version + function(update_glm_cmake_version) + # Get the source directory + FetchContent_GetProperties(glm SOURCE_DIR glm_SOURCE_DIR) + + # Update the minimum required CMake version + file(READ "${glm_SOURCE_DIR}/CMakeLists.txt" GLM_CMAKE_CONTENT) + string(REPLACE "cmake_minimum_required(VERSION 3.2" + "cmake_minimum_required(VERSION 3.5" + GLM_CMAKE_CONTENT "${GLM_CMAKE_CONTENT}") + file(WRITE "${glm_SOURCE_DIR}/CMakeLists.txt" "${GLM_CMAKE_CONTENT}") + endfunction() + + # Set policy to suppress the deprecation warning + if(POLICY CMP0169) + cmake_policy(SET CMP0169 OLD) + endif() + + # First, declare and populate the content + 
FetchContent_GetProperties(glm) + if(NOT glm_POPULATED) + FetchContent_Populate(glm) + # Update the CMake version before making it available + update_glm_cmake_version() + endif() + + # Now make it available (this will process the CMakeLists.txt) + FetchContent_MakeAvailable(glm) + + # Get the include directory from the target + if(TARGET glm) + get_target_property(glm_INCLUDE_DIR glm INTERFACE_INCLUDE_DIRECTORIES) + if(NOT glm_INCLUDE_DIR) + # If we can't get the include directory from the target, use the source directory + set(glm_INCLUDE_DIR ${glm_SOURCE_DIR}) + endif() + else() + # GLM might not create a target, so use the source directory + set(glm_INCLUDE_DIR ${glm_SOURCE_DIR}) + endif() +endif() + +# Set the variables +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(glm + REQUIRED_VARS glm_INCLUDE_DIR +) + +if(glm_FOUND) + set(glm_INCLUDE_DIRS ${glm_INCLUDE_DIR}) + + # Create an imported target + if(NOT TARGET glm::glm) + add_library(glm::glm INTERFACE IMPORTED) + set_target_properties(glm::glm PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${glm_INCLUDE_DIRS}" + ) + endif() +elseif(TARGET glm) + # If find_package_handle_standard_args failed but we have a glm target from FetchContent + # Create an alias for the glm target + if(NOT TARGET glm::glm) + add_library(glm::glm ALIAS glm) + endif() + + # Set variables to indicate that glm was found + set(glm_FOUND TRUE) + set(GLM_FOUND TRUE) + + # Set include directories + get_target_property(glm_INCLUDE_DIR glm INTERFACE_INCLUDE_DIRECTORIES) + if(glm_INCLUDE_DIR) + set(glm_INCLUDE_DIRS ${glm_INCLUDE_DIR}) + else() + # If we can't get the include directory from the target, use the source directory + set(glm_INCLUDE_DIR ${glm_SOURCE_DIR}) + set(glm_INCLUDE_DIRS ${glm_INCLUDE_DIR}) + endif() +endif() + +mark_as_advanced(glm_INCLUDE_DIR) diff --git a/attachments/CMake/Findnlohmann_json.cmake b/attachments/CMake/Findnlohmann_json.cmake new file mode 100644 index 00000000..287c7e83 --- /dev/null 
+++ b/attachments/CMake/Findnlohmann_json.cmake @@ -0,0 +1,154 @@ +# Findnlohmann_json.cmake +# +# Finds the nlohmann_json library +# +# This will define the following variables +# +# nlohmann_json_FOUND +# nlohmann_json_INCLUDE_DIRS +# +# and the following imported targets +# +# nlohmann_json::nlohmann_json +# + +# Try to find the package using pkg-config first +find_package(PkgConfig QUIET) +if(PKG_CONFIG_FOUND) + pkg_check_modules(PC_nlohmann_json QUIET nlohmann_json) +endif() + +# Find the include directory +find_path(nlohmann_json_INCLUDE_DIR + NAMES nlohmann/json.hpp json.hpp + PATHS + ${PC_nlohmann_json_INCLUDE_DIRS} + /usr/include + /usr/local/include + $ENV{VULKAN_SDK}/include + ${ANDROID_NDK}/sources/third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../external + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../attachments/external + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../attachments/third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../attachments/include + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../../external + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../../third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../../include + PATH_SUFFIXES nlohmann json +) + +# If the include directory wasn't found, use FetchContent to download and build +if(NOT nlohmann_json_INCLUDE_DIR) + # If not found, use FetchContent to download and build + include(FetchContent) + + message(STATUS "nlohmann_json not found, fetching from GitHub...") + FetchContent_Declare( + nlohmann_json + GIT_REPOSITORY https://github.com/nlohmann/json.git + GIT_TAG v3.11.2 # Use a specific tag for stability + ) + + # Set policy to suppress the deprecation warning + if(POLICY CMP0169) + cmake_policy(SET CMP0169 OLD) + endif() + + # Populate the content but don't configure it yet + FetchContent_GetProperties(nlohmann_json) + if(NOT nlohmann_json_POPULATED) + FetchContent_Populate(nlohmann_json) + + if(ANDROID) + 
# Update the minimum required CMake version before including the CMakeLists.txt + file(READ "${nlohmann_json_SOURCE_DIR}/CMakeLists.txt" NLOHMANN_JSON_CMAKE_CONTENT) + string(REPLACE "cmake_minimum_required(VERSION 3.1" + "cmake_minimum_required(VERSION 3.10" + NLOHMANN_JSON_CMAKE_CONTENT "${NLOHMANN_JSON_CMAKE_CONTENT}") + string(REPLACE "cmake_minimum_required(VERSION 3.2" + "cmake_minimum_required(VERSION 3.10" + NLOHMANN_JSON_CMAKE_CONTENT "${NLOHMANN_JSON_CMAKE_CONTENT}") + string(REPLACE "cmake_minimum_required(VERSION 3.3" + "cmake_minimum_required(VERSION 3.10" + NLOHMANN_JSON_CMAKE_CONTENT "${NLOHMANN_JSON_CMAKE_CONTENT}") + string(REPLACE "cmake_minimum_required(VERSION 3.4" + "cmake_minimum_required(VERSION 3.10" + NLOHMANN_JSON_CMAKE_CONTENT "${NLOHMANN_JSON_CMAKE_CONTENT}") + string(REPLACE "cmake_minimum_required(VERSION 3.5" + "cmake_minimum_required(VERSION 3.10" + NLOHMANN_JSON_CMAKE_CONTENT "${NLOHMANN_JSON_CMAKE_CONTENT}") + string(REPLACE "cmake_minimum_required(VERSION 3.6" + "cmake_minimum_required(VERSION 3.10" + NLOHMANN_JSON_CMAKE_CONTENT "${NLOHMANN_JSON_CMAKE_CONTENT}") + string(REPLACE "cmake_minimum_required(VERSION 3.7" + "cmake_minimum_required(VERSION 3.10" + NLOHMANN_JSON_CMAKE_CONTENT "${NLOHMANN_JSON_CMAKE_CONTENT}") + string(REPLACE "cmake_minimum_required(VERSION 3.8" + "cmake_minimum_required(VERSION 3.10" + NLOHMANN_JSON_CMAKE_CONTENT "${NLOHMANN_JSON_CMAKE_CONTENT}") + string(REPLACE "cmake_minimum_required(VERSION 3.9" + "cmake_minimum_required(VERSION 3.10" + NLOHMANN_JSON_CMAKE_CONTENT "${NLOHMANN_JSON_CMAKE_CONTENT}") + file(WRITE "${nlohmann_json_SOURCE_DIR}/CMakeLists.txt" "${NLOHMANN_JSON_CMAKE_CONTENT}") + endif() + + # Now add the subdirectory manually + add_subdirectory(${nlohmann_json_SOURCE_DIR} ${nlohmann_json_BINARY_DIR}) + else() + # If already populated, just make it available + FetchContent_MakeAvailable(nlohmann_json) + endif() + + # Get the include directory from the target + if(TARGET nlohmann_json) + 
get_target_property(nlohmann_json_INCLUDE_DIR nlohmann_json INTERFACE_INCLUDE_DIRECTORIES) + if(NOT nlohmann_json_INCLUDE_DIR) + # If we can't get the include directory from the target, use the source directory + set(nlohmann_json_INCLUDE_DIR ${nlohmann_json_SOURCE_DIR}/include) + endif() + else() + # nlohmann_json might not create a target, so use the source directory + set(nlohmann_json_INCLUDE_DIR ${nlohmann_json_SOURCE_DIR}/include) + endif() +endif() + +# Set the variables +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(nlohmann_json + REQUIRED_VARS nlohmann_json_INCLUDE_DIR +) + +if(nlohmann_json_FOUND) + set(nlohmann_json_INCLUDE_DIRS ${nlohmann_json_INCLUDE_DIR}) + + # Create an imported target + if(NOT TARGET nlohmann_json::nlohmann_json) + add_library(nlohmann_json::nlohmann_json INTERFACE IMPORTED) + set_target_properties(nlohmann_json::nlohmann_json PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${nlohmann_json_INCLUDE_DIRS}" + ) + endif() +elseif(TARGET nlohmann_json) + # If find_package_handle_standard_args failed but we have a nlohmann_json target from FetchContent + # Create an alias for the nlohmann_json target + if(NOT TARGET nlohmann_json::nlohmann_json) + add_library(nlohmann_json::nlohmann_json ALIAS nlohmann_json) + endif() + + # Set variables to indicate that nlohmann_json was found + set(nlohmann_json_FOUND TRUE) + set(NLOHMANN_JSON_FOUND TRUE) + + # Set include directories + get_target_property(nlohmann_json_INCLUDE_DIR nlohmann_json INTERFACE_INCLUDE_DIRECTORIES) + if(nlohmann_json_INCLUDE_DIR) + set(nlohmann_json_INCLUDE_DIRS ${nlohmann_json_INCLUDE_DIR}) + else() + # If we can't get the include directory from the target, use the source directory + set(nlohmann_json_INCLUDE_DIR ${nlohmann_json_SOURCE_DIR}/include) + set(nlohmann_json_INCLUDE_DIRS ${nlohmann_json_INCLUDE_DIR}) + endif() +endif() + +mark_as_advanced(nlohmann_json_INCLUDE_DIR) diff --git a/attachments/CMake/Findstb.cmake 
b/attachments/CMake/Findstb.cmake new file mode 100644 index 00000000..6ccf72f5 --- /dev/null +++ b/attachments/CMake/Findstb.cmake @@ -0,0 +1,86 @@ +# Findstb.cmake +# +# Finds the stb library (specifically stb_image.h) +# +# This will define the following variables +# +# stb_FOUND +# stb_INCLUDE_DIRS +# +# and the following imported targets +# +# stb::stb +# + +# Try to find the package using pkg-config first +find_package(PkgConfig QUIET) +if(PKG_CONFIG_FOUND) + pkg_check_modules(PC_stb QUIET stb) +endif() + +# Find the include directory +find_path(stb_INCLUDE_DIR + NAMES stb_image.h + PATHS + ${PC_stb_INCLUDE_DIRS} + /usr/include + /usr/local/include + $ENV{VULKAN_SDK}/include + ${ANDROID_NDK}/sources/third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../external + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../attachments/external + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../attachments/third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../attachments/include + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../../external + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../../third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../../include + PATH_SUFFIXES stb +) + +# If the include directory wasn't found, use FetchContent to download and build +if(NOT stb_INCLUDE_DIR) + # If not found, use FetchContent to download and build + include(FetchContent) + + message(STATUS "stb_image.h not found, fetching from GitHub...") + FetchContent_Declare( + stb + GIT_REPOSITORY https://github.com/nothings/stb.git + GIT_TAG master # stb doesn't use version tags, so we use master + ) + + # Set policy to suppress the deprecation warning + if(POLICY CMP0169) + cmake_policy(SET CMP0169 OLD) + endif() + + # Populate the content + FetchContent_GetProperties(stb) + if(NOT stb_POPULATED) + FetchContent_Populate(stb) + endif() + + # stb is a header-only library with no CMakeLists.txt, so we just need to set the 
include directory + set(stb_INCLUDE_DIR ${stb_SOURCE_DIR}) +endif() + +# Set the variables +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(stb + REQUIRED_VARS stb_INCLUDE_DIR +) + +if(stb_FOUND) + set(stb_INCLUDE_DIRS ${stb_INCLUDE_DIR}) + + # Create an imported target + if(NOT TARGET stb::stb) + add_library(stb::stb INTERFACE IMPORTED) + set_target_properties(stb::stb PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${stb_INCLUDE_DIRS}" + ) + endif() +endif() + +mark_as_advanced(stb_INCLUDE_DIR) \ No newline at end of file diff --git a/attachments/CMake/Findtinygltf.cmake b/attachments/CMake/Findtinygltf.cmake new file mode 100644 index 00000000..6bbaafa3 --- /dev/null +++ b/attachments/CMake/Findtinygltf.cmake @@ -0,0 +1,162 @@ +# Findtinygltf.cmake +# +# Finds the tinygltf library +# +# This will define the following variables +# +# tinygltf_FOUND +# tinygltf_INCLUDE_DIRS +# +# and the following imported targets +# +# tinygltf::tinygltf +# + +# First, try to find nlohmann_json +find_package(nlohmann_json QUIET) +if(NOT nlohmann_json_FOUND) + include(FetchContent) + message(STATUS "nlohmann_json not found, fetching from GitHub...") + FetchContent_Declare( + nlohmann_json + GIT_REPOSITORY https://github.com/nlohmann/json.git + GIT_TAG v3.11.2 # Use a specific tag for stability + ) + FetchContent_MakeAvailable(nlohmann_json) +endif() + +# Try to find tinygltf using standard find_package +find_path(tinygltf_INCLUDE_DIR + NAMES tiny_gltf.h + PATHS + ${PC_tinygltf_INCLUDE_DIRS} + /usr/include + /usr/local/include + $ENV{VULKAN_SDK}/include + ${ANDROID_NDK}/sources/third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../external + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../attachments/external + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../attachments/third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../attachments/include + 
${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../../external + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../../third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../../include + PATH_SUFFIXES tinygltf include +) + +# If not found, use FetchContent to download and build +if(NOT tinygltf_INCLUDE_DIR) + # If not found, use FetchContent to download and build + include(FetchContent) + + message(STATUS "tinygltf not found, fetching from GitHub...") + FetchContent_Declare( + tinygltf + GIT_REPOSITORY https://github.com/syoyo/tinygltf.git + GIT_TAG v2.8.18 # Use a specific tag for stability + ) + + # Set policy to suppress the deprecation warning + if(POLICY CMP0169) + cmake_policy(SET CMP0169 OLD) + endif() + + # Populate the content but don't configure it yet + FetchContent_GetProperties(tinygltf) + if(NOT tinygltf_POPULATED) + FetchContent_Populate(tinygltf) + + # Update the minimum required CMake version to avoid deprecation warning + file(READ "${tinygltf_SOURCE_DIR}/CMakeLists.txt" TINYGLTF_CMAKE_CONTENT) + string(REPLACE "cmake_minimum_required(VERSION 3.6)" + "cmake_minimum_required(VERSION 3.10)" + TINYGLTF_CMAKE_CONTENT "${TINYGLTF_CMAKE_CONTENT}") + file(WRITE "${tinygltf_SOURCE_DIR}/CMakeLists.txt" "${TINYGLTF_CMAKE_CONTENT}") + + # Create a symbolic link to make nlohmann/json.hpp available + if(EXISTS "${tinygltf_SOURCE_DIR}/json.hpp") + file(MAKE_DIRECTORY "${tinygltf_SOURCE_DIR}/nlohmann") + file(CREATE_LINK "${tinygltf_SOURCE_DIR}/json.hpp" "${tinygltf_SOURCE_DIR}/nlohmann/json.hpp" SYMBOLIC) + endif() + + # Set tinygltf to header-only mode + set(TINYGLTF_HEADER_ONLY ON CACHE BOOL "Use header only version" FORCE) + set(TINYGLTF_INSTALL OFF CACHE BOOL "Do not install tinygltf" FORCE) + + # Add the subdirectory after modifying the CMakeLists.txt + add_subdirectory(${tinygltf_SOURCE_DIR} ${tinygltf_BINARY_DIR}) + else() + # If already populated, just make it available + FetchContent_MakeAvailable(tinygltf) + endif() + + # Get the include directory from the 
target + get_target_property(tinygltf_INCLUDE_DIR tinygltf INTERFACE_INCLUDE_DIRECTORIES) + if(NOT tinygltf_INCLUDE_DIR) + # If we can't get the include directory from the target, use the source directory + FetchContent_GetProperties(tinygltf SOURCE_DIR tinygltf_SOURCE_DIR) + set(tinygltf_INCLUDE_DIR ${tinygltf_SOURCE_DIR}) + endif() +endif() + +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(tinygltf + REQUIRED_VARS tinygltf_INCLUDE_DIR +) + +if(tinygltf_FOUND) + set(tinygltf_INCLUDE_DIRS ${tinygltf_INCLUDE_DIR}) + + if(NOT TARGET tinygltf::tinygltf) + add_library(tinygltf::tinygltf INTERFACE IMPORTED) + set_target_properties(tinygltf::tinygltf PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${tinygltf_INCLUDE_DIRS}" + INTERFACE_COMPILE_DEFINITIONS "TINYGLTF_IMPLEMENTATION;TINYGLTF_NO_EXTERNAL_IMAGE;TINYGLTF_NO_STB_IMAGE;TINYGLTF_NO_STB_IMAGE_WRITE" + ) + if(TARGET nlohmann_json::nlohmann_json) + target_link_libraries(tinygltf::tinygltf INTERFACE nlohmann_json::nlohmann_json) + endif() + endif() +elseif(TARGET tinygltf) + # If find_package_handle_standard_args failed but we have a tinygltf target from FetchContent + # Create an alias for the tinygltf target + if(NOT TARGET tinygltf::tinygltf) + add_library(tinygltf_wrapper INTERFACE) + target_link_libraries(tinygltf_wrapper INTERFACE tinygltf) + target_compile_definitions(tinygltf_wrapper INTERFACE + TINYGLTF_IMPLEMENTATION + TINYGLTF_NO_EXTERNAL_IMAGE + TINYGLTF_NO_STB_IMAGE + TINYGLTF_NO_STB_IMAGE_WRITE + ) + if(TARGET nlohmann_json::nlohmann_json) + target_link_libraries(tinygltf_wrapper INTERFACE nlohmann_json::nlohmann_json) + endif() + add_library(tinygltf::tinygltf ALIAS tinygltf_wrapper) + endif() + + # Set variables to indicate that tinygltf was found + set(tinygltf_FOUND TRUE) + set(TINYGLTF_FOUND TRUE) + + # Set include directories + get_target_property(tinygltf_INCLUDE_DIR tinygltf INTERFACE_INCLUDE_DIRECTORIES) + if(tinygltf_INCLUDE_DIR) + set(tinygltf_INCLUDE_DIRS 
${tinygltf_INCLUDE_DIR}) + else() + # If we can't get the include directory from the target, use the source directory + FetchContent_GetProperties(tinygltf SOURCE_DIR tinygltf_SOURCE_DIR) + set(tinygltf_INCLUDE_DIR ${tinygltf_SOURCE_DIR}) + set(tinygltf_INCLUDE_DIRS ${tinygltf_INCLUDE_DIR}) + + # Explicitly set the include directory on the target + if(TARGET tinygltf) + set_target_properties(tinygltf PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${tinygltf_INCLUDE_DIR}" + ) + endif() + endif() +endif() + +mark_as_advanced(tinygltf_INCLUDE_DIR) diff --git a/attachments/CMake/Findtinyobjloader.cmake b/attachments/CMake/Findtinyobjloader.cmake new file mode 100644 index 00000000..4b1fb44c --- /dev/null +++ b/attachments/CMake/Findtinyobjloader.cmake @@ -0,0 +1,160 @@ +# Findtinyobjloader.cmake +# Find the tinyobjloader library +# +# This module defines the following variables: +# tinyobjloader_FOUND - True if tinyobjloader was found +# tinyobjloader_INCLUDE_DIRS - Include directories for tinyobjloader +# tinyobjloader_LIBRARIES - Libraries to link against tinyobjloader +# +# It also defines the following targets: +# tinyobjloader::tinyobjloader + +# Try to find the package using pkg-config first +find_package(PkgConfig QUIET) +if(PKG_CONFIG_FOUND) + pkg_check_modules(PC_tinyobjloader QUIET tinyobjloader) +endif() + +# Find the include directory +find_path(tinyobjloader_INCLUDE_DIR + NAMES tiny_obj_loader.h + PATHS + ${PC_tinyobjloader_INCLUDE_DIRS} + /usr/include + /usr/local/include + $ENV{VULKAN_SDK}/include + ${ANDROID_NDK}/sources/third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../external + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../attachments/external + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../attachments/third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../attachments/include + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../../external + 
${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../../third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../../include + PATH_SUFFIXES tinyobjloader tiny_obj_loader +) + +# Find the library +find_library(tinyobjloader_LIBRARY + NAMES tinyobjloader + PATHS + ${PC_tinyobjloader_LIBRARY_DIRS} + /usr/lib + /usr/local/lib + $ENV{VULKAN_SDK}/lib + ${ANDROID_NDK}/sources/third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../external + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../attachments/external + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../attachments/third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../attachments/lib + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../../external + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../../third_party + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../../lib + PATH_SUFFIXES lib +) + +# If the include directory wasn't found, use FetchContent to download and build +if(NOT tinyobjloader_INCLUDE_DIR) + # If not found, use FetchContent to download and build + include(FetchContent) + + message(STATUS "tinyobjloader not found, fetching from GitHub...") + FetchContent_Declare( + tinyobjloader + GIT_REPOSITORY https://github.com/tinyobjloader/tinyobjloader.git + GIT_TAG v2.0.0rc10 # Use a specific tag for stability + ) + + # Set options before making tinyobjloader available + set(TINYOBJLOADER_BUILD_TEST_LOADER OFF CACHE BOOL "Do not build test loader" FORCE) + set(TINYOBJLOADER_BUILD_OBJ_STICHER OFF CACHE BOOL "Do not build obj sticher" FORCE) + set(TINYOBJLOADER_INSTALL OFF CACHE BOOL "Do not install tinyobjloader" FORCE) + + # Update CMake policy to suppress the deprecation warning + if(POLICY CMP0169) + cmake_policy(SET CMP0169 OLD) + endif() + + # Populate the content but don't configure it yet + FetchContent_GetProperties(tinyobjloader) + if(NOT tinyobjloader_POPULATED) + FetchContent_Populate(tinyobjloader) + + # Update the minimum required CMake version 
before including the CMakeLists.txt + file(READ "${tinyobjloader_SOURCE_DIR}/CMakeLists.txt" TINYOBJLOADER_CMAKE_CONTENT) + string(REPLACE "cmake_minimum_required(VERSION 3.2)" + "cmake_minimum_required(VERSION 3.10)" + TINYOBJLOADER_CMAKE_CONTENT "${TINYOBJLOADER_CMAKE_CONTENT}") + string(REPLACE "cmake_minimum_required(VERSION 3.5)" + "cmake_minimum_required(VERSION 3.10)" + TINYOBJLOADER_CMAKE_CONTENT "${TINYOBJLOADER_CMAKE_CONTENT}") + file(WRITE "${tinyobjloader_SOURCE_DIR}/CMakeLists.txt" "${TINYOBJLOADER_CMAKE_CONTENT}") + + # Now add the subdirectory manually + add_subdirectory(${tinyobjloader_SOURCE_DIR} ${tinyobjloader_BINARY_DIR}) + else() + # If already populated, just make it available + FetchContent_MakeAvailable(tinyobjloader) + endif() + + # Get the include directory from the target + get_target_property(tinyobjloader_INCLUDE_DIR tinyobjloader INTERFACE_INCLUDE_DIRECTORIES) + if(NOT tinyobjloader_INCLUDE_DIR) + # If we can't get the include directory from the target, use the source directory + set(tinyobjloader_INCLUDE_DIR ${tinyobjloader_SOURCE_DIR}) + endif() +endif() + +# Set the variables +include(FindPackageHandleStandardArgs) +find_package_handle_standard_args(tinyobjloader + REQUIRED_VARS tinyobjloader_INCLUDE_DIR +) + +if(tinyobjloader_FOUND) + set(tinyobjloader_INCLUDE_DIRS ${tinyobjloader_INCLUDE_DIR}) + + if(tinyobjloader_LIBRARY) + set(tinyobjloader_LIBRARIES ${tinyobjloader_LIBRARY}) + else() + # tinyobjloader is a header-only library, so no library is needed + set(tinyobjloader_LIBRARIES "") + endif() + + # Create an imported target + if(NOT TARGET tinyobjloader::tinyobjloader) + add_library(tinyobjloader::tinyobjloader INTERFACE IMPORTED) + set_target_properties(tinyobjloader::tinyobjloader PROPERTIES + INTERFACE_INCLUDE_DIRECTORIES "${tinyobjloader_INCLUDE_DIRS}" + ) + if(tinyobjloader_LIBRARIES) + set_target_properties(tinyobjloader::tinyobjloader PROPERTIES + INTERFACE_LINK_LIBRARIES "${tinyobjloader_LIBRARIES}" + ) + endif() + 
endif() +elseif(TARGET tinyobjloader) + # If find_package_handle_standard_args failed but we have a tinyobjloader target from FetchContent + # Create an alias for the tinyobjloader target + if(NOT TARGET tinyobjloader::tinyobjloader) + add_library(tinyobjloader::tinyobjloader ALIAS tinyobjloader) + endif() + + # Set variables to indicate that tinyobjloader was found + set(tinyobjloader_FOUND TRUE) + set(TINYOBJLOADER_FOUND TRUE) + + # Set include directories + get_target_property(tinyobjloader_INCLUDE_DIR tinyobjloader INTERFACE_INCLUDE_DIRECTORIES) + if(tinyobjloader_INCLUDE_DIR) + set(tinyobjloader_INCLUDE_DIRS ${tinyobjloader_INCLUDE_DIR}) + else() + # If we can't get the include directory from the target, use the source directory + set(tinyobjloader_INCLUDE_DIR ${tinyobjloader_SOURCE_DIR}) + set(tinyobjloader_INCLUDE_DIRS ${tinyobjloader_INCLUDE_DIR}) + endif() +endif() + +mark_as_advanced(tinyobjloader_INCLUDE_DIR tinyobjloader_LIBRARY) diff --git a/attachments/CMakeLists.txt b/attachments/CMakeLists.txt index 935839d4..fd1630a2 100644 --- a/attachments/CMakeLists.txt +++ b/attachments/CMakeLists.txt @@ -10,6 +10,9 @@ list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/CMake") find_package (glfw3 REQUIRED) find_package (glm REQUIRED) find_package (Vulkan REQUIRED) +find_package (tinyobjloader REQUIRED) +find_package (tinygltf REQUIRED) +find_package (KTX REQUIRED) # set up Vulkan C++ module add_library(VulkanCppModule) @@ -38,23 +41,15 @@ target_sources(VulkanCppModule "${Vulkan_INCLUDE_DIR}/vulkan/vulkan.cppm" ) + # Add the vulkan.cppm file directly as a source file target_sources(VulkanCppModule PRIVATE "${Vulkan_INCLUDE_DIR}/vulkan/vulkan.cppm" ) -find_package (tinyobjloader REQUIRED) - -find_package (PkgConfig) -pkg_get_variable (STB_INCLUDEDIR stb includedir) -if (NOT STB_INCLUDEDIR) - unset (STB_INCLUDEDIR) - find_path (STB_INCLUDEDIR stb_image.h PATH_SUFFIXES stb) -endif () -if (NOT STB_INCLUDEDIR) - message (FATAL_ERROR "stb_image.h not found")
-endif () +find_package(stb REQUIRED) +set(STB_INCLUDEDIR ${stb_INCLUDE_DIRS}) add_executable (glslang::validator IMPORTED) find_program (GLSLANG_VALIDATOR "glslangValidator" HINTS $ENV{VULKAN_SDK}/bin REQUIRED) @@ -116,7 +111,7 @@ function (add_chapter CHAPTER_NAME) if(${CMAKE_GENERATOR} MATCHES "Visual Studio.*") set_target_properties(${CHAPTER_NAME} PROPERTIES VS_DEBUGGER_WORKING_DIRECTORY "${CMAKE_BINARY_DIR}/${CHAPTER_NAME}") endif() - endif() + endif() if (DEFINED CHAPTER_SHADER) set (CHAPTER_SHADER_TARGET ${CHAPTER_NAME}_shader) @@ -135,10 +130,10 @@ function (add_chapter CHAPTER_NAME) target_link_libraries (${CHAPTER_NAME} ${CHAPTER_LIBS}) endif () if (DEFINED CHAPTER_MODELS) - file (COPY ${CHAPTER_MODELS} DESTINATION ${CMAKE_BINARY_DIR}/${CHAPTER_NAME}/models) + file (COPY assets/${CHAPTER_MODELS} DESTINATION ${CMAKE_BINARY_DIR}/${CHAPTER_NAME}/models) endif () if (DEFINED CHAPTER_TEXTURES) - file (COPY ${CHAPTER_TEXTURES} DESTINATION ${CMAKE_BINARY_DIR}/${CHAPTER_NAME}/textures) + file (COPY assets/${CHAPTER_TEXTURES} DESTINATION ${CMAKE_BINARY_DIR}/${CHAPTER_NAME}/textures) endif () endfunction () @@ -207,22 +202,22 @@ add_chapter (23_descriptor_sets add_chapter (24_texture_image SHADER 22_shader_ubo - TEXTURES ../images/texture.jpg + TEXTURES ../../images/texture.jpg LIBS glm::glm) add_chapter (25_sampler SHADER 22_shader_ubo - TEXTURES ../images/texture.jpg + TEXTURES ../../images/texture.jpg LIBS glm::glm) add_chapter (26_texture_mapping SHADER 26_shader_textures - TEXTURES ../images/texture.jpg + TEXTURES ../../images/texture.jpg LIBS glm::glm) add_chapter (27_depth_buffering SHADER 27_shader_depth - TEXTURES ../images/texture.jpg + TEXTURES ../../images/texture.jpg LIBS glm::glm) add_chapter (28_model_loading @@ -246,3 +241,37 @@ add_chapter (30_multisampling add_chapter (31_compute_shader SHADER 31_shader_compute LIBS glm::glm) + +add_chapter (32_ecosystem_utilities + SHADER 27_shader_depth + MODELS viking_room.obj + TEXTURES viking_room.png + LIBS 
glm::glm tinyobjloader::tinyobjloader) + +add_chapter (33_vulkan_profiles + SHADER 27_shader_depth + MODELS viking_room.obj + TEXTURES viking_room.png + LIBS glm::glm tinyobjloader::tinyobjloader) + +add_chapter (34_android + SHADER 27_shader_depth + MODELS viking_room.obj + TEXTURES viking_room.png + LIBS glm::glm tinyobjloader::tinyobjloader) + +add_chapter (35_gltf_ktx + SHADER 27_shader_depth + MODELS viking_room.glb + TEXTURES viking_room.ktx2 + LIBS glm::glm tinygltf::tinygltf KTX::ktx) + +add_chapter (36_multiple_objects + SHADER 27_shader_depth + MODELS viking_room.glb + TEXTURES viking_room.ktx2 + LIBS glm::glm tinygltf::tinygltf KTX::ktx) + +add_chapter (37_multithreading + SHADER 37_shader_compute + LIBS glm::glm) diff --git a/attachments/android/README.adoc b/attachments/android/README.adoc new file mode 100644 index 00000000..a451068f --- /dev/null +++ b/attachments/android/README.adoc @@ -0,0 +1,67 @@ += Android Project for Vulkan Tutorial + +This Android project allows you to run different chapters of the Vulkan Tutorial on Android devices. + +== Selecting a Chapter + +By default, the project builds and runs the `34_android` chapter. You can select a different chapter by setting the `chapter` property in your Gradle build. + +=== Available Chapters + +* `34_android`: The Android chapter that uses tinyobjloader to load OBJ models +* `35_gltf_ktx`: The glTF and KTX chapter that uses tinygltf to load glTF models and KTX to load KTX2 textures + +=== How to Select a Chapter + +==== From the Command Line + +[source,bash] +---- +./gradlew assembleDebug -Pchapter=35_gltf_ktx +---- + +==== From Android Studio + +1. Edit the `gradle.properties` file in the project root directory +2. Add the following line: ++ +[source] +---- +chapter=35_gltf_ktx +---- +3. Sync the project and build + +== Adding New Chapters + +To add support for a new chapter: + +1. Add the chapter name to the `SUPPORTED_CHAPTERS` list in `app/src/main/cpp/CMakeLists.txt` +2. 
Add any chapter-specific libraries and compile definitions in the same file +3. Make sure the chapter's source file exists in the `attachments` directory + +For example, to add support for a hypothetical `36_new_feature` chapter: + +[source,cmake] +---- +# Define the list of supported chapters +set(SUPPORTED_CHAPTERS + "34_android" + "35_gltf_ktx" + "36_new_feature" +) + +# Add chapter-specific libraries and definitions +if(CHAPTER STREQUAL "34_android") + # ... +elseif(CHAPTER STREQUAL "35_gltf_ktx") + # ... +elseif(CHAPTER STREQUAL "36_new_feature") + target_link_libraries(vulkan_tutorial_android + # Add any required libraries here + ) + + target_compile_definitions(vulkan_tutorial_android PRIVATE + # Add any required compile definitions here + ) +endif() +---- diff --git a/attachments/android/app/build.gradle b/attachments/android/app/build.gradle new file mode 100644 index 00000000..7fad41cd --- /dev/null +++ b/attachments/android/app/build.gradle @@ -0,0 +1,68 @@ +plugins { + id 'com.android.application' +} + +android { + namespace "com.vulkan.tutorial" + compileSdk 36 + defaultConfig { + applicationId "com.vulkan.tutorial" + minSdk 24 + targetSdk 36 + versionCode 1 + versionName "1.0" + + // Define which chapter to build (default to 34_android) + buildConfigField "String", "CHAPTER", "\"${project.findProperty('chapter') ?: '34_android'}\"" + + externalNativeBuild { + cmake { + arguments "-DCHAPTER=${project.findProperty('chapter') ?: '34_android'}" + abiFilters = project.findProperty('abiFilters')?.split(',') ?: ['armeabi-v7a', 'arm64-v8a', 'x86', 'x86_64'] + } + } + } + + buildTypes { + release { + minifyEnabled false + proguardFiles getDefaultProguardFile('proguard-android-optimize.txt'), 'proguard-rules.pro' + } + } + + compileOptions { + sourceCompatibility JavaVersion.VERSION_11 + targetCompatibility JavaVersion.VERSION_11 + } + + externalNativeBuild { + cmake { + path "src/main/cpp/CMakeLists.txt" + version "4.0.2" + } + } + + ndkVersion "28.1.13356709" 
+ + // Use assets from the dedicated assets directory and locally compiled shaders + sourceSets { + main { + assets { + srcDirs = [ + // Point to the dedicated assets directory + '../../assets/' + ] + } + } + } + buildFeatures { + prefab true + buildConfig true + } +} + +dependencies { + implementation 'androidx.appcompat:appcompat:1.7.1' + implementation 'com.google.android.material:material:1.12.0' + implementation 'androidx.games:games-activity:4.0.0' +} diff --git a/attachments/android/app/src/main/AndroidManifest.xml b/attachments/android/app/src/main/AndroidManifest.xml new file mode 100644 index 00000000..02fe7b64 --- /dev/null +++ b/attachments/android/app/src/main/AndroidManifest.xml @@ -0,0 +1,31 @@ + + + + + + + + + + + + + + + + + diff --git a/attachments/android/app/src/main/cpp/CMakeLists.txt b/attachments/android/app/src/main/cpp/CMakeLists.txt new file mode 100644 index 00000000..7b9cd8ff --- /dev/null +++ b/attachments/android/app/src/main/cpp/CMakeLists.txt @@ -0,0 +1,194 @@ +cmake_minimum_required(VERSION 3.22.1) + +# Enable C++ module dependency scanning +set(CMAKE_CXX_SCAN_FOR_MODULES ON) + +project(vulkan_tutorial_android) + +# Set the path to the main CMakeLists.txt relative to this file +set(MAIN_CMAKE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../CMakeLists.txt") + +# Add the parent project's cmake folder to the module path +list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../attachments/CMake") + +# Find the Vulkan package +find_package(Vulkan REQUIRED) + +# Find the Vulkan-Hpp package +find_package(VulkanHpp REQUIRED) + +# Find the tinyobjloader package +find_package(tinyobjloader REQUIRED) + +# Find the glm package +find_package(glm REQUIRED) + +# Find the tinygltf package (needed for 35_gltf_ktx) +find_package(tinygltf REQUIRED) + +# Find the KTX package (needed for 35_gltf_ktx) +find_package(KTX REQUIRED) + +# Find the stb library (for stb_image.h) +find_package(stb REQUIRED) +set(STB_INCLUDEDIR 
${stb_INCLUDE_DIRS}) + +# Set up shader compilation tools +add_executable(glslang::validator IMPORTED) +find_program(GLSLANG_VALIDATOR "glslangValidator" HINTS $ENV{VULKAN_SDK}/bin REQUIRED) +set_property(TARGET glslang::validator PROPERTY IMPORTED_LOCATION "${GLSLANG_VALIDATOR}") + +# Define shader building function +function(add_shaders_target TARGET) + cmake_parse_arguments("SHADER" "" "CHAPTER_NAME" "SOURCES" ${ARGN}) + set(SHADERS_DIR ${SHADER_CHAPTER_NAME}/shaders) + add_custom_command( + OUTPUT ${SHADERS_DIR} + COMMAND ${CMAKE_COMMAND} -E make_directory ${SHADERS_DIR} + ) + add_custom_command( + OUTPUT ${SHADERS_DIR}/frag.spv ${SHADERS_DIR}/vert.spv + COMMAND glslang::validator + ARGS --target-env vulkan1.0 ${SHADER_SOURCES} --quiet + WORKING_DIRECTORY ${SHADERS_DIR} + DEPENDS ${SHADERS_DIR} ${SHADER_SOURCES} + COMMENT "Compiling Shaders" + VERBATIM + ) + add_custom_target(${TARGET} DEPENDS ${SHADERS_DIR}/frag.spv ${SHADERS_DIR}/vert.spv) +endfunction() + +# Include the game-activity library +find_package(game-activity REQUIRED CONFIG) +include_directories(${ANDROID_NDK}/sources/android/game-activity/include) + +# Set C++ standard to match the main project +set(CMAKE_CXX_STANDARD 20) +set(CMAKE_CXX_STANDARD_REQUIRED ON) + +# Add the Vulkan C++ module +# Create a simple source file for VulkanCppModule +file(WRITE "${CMAKE_CURRENT_BINARY_DIR}/vulkan_cpp_module_stub.cpp" "// Auto-generated stub file for VulkanCppModule\n") + +add_library(VulkanCppModule SHARED "${CMAKE_CURRENT_BINARY_DIR}/vulkan_cpp_module_stub.cpp") +target_compile_definitions(VulkanCppModule + PUBLIC VULKAN_HPP_DISPATCH_LOADER_DYNAMIC=1 VULKAN_HPP_NO_STRUCT_CONSTRUCTORS=1 +) +target_include_directories(VulkanCppModule + PRIVATE + "${Vulkan_INCLUDE_DIR}" + "${VulkanHpp_INCLUDE_DIRS}" +) +target_link_libraries(VulkanCppModule + PUBLIC + ${Vulkan_LIBRARIES} + VulkanHpp::VulkanHpp +) +set_target_properties(VulkanCppModule PROPERTIES CXX_STANDARD 20) + +# Set up the C++ module file set 
+target_sources(VulkanCppModule + PUBLIC + FILE_SET cxx_modules TYPE CXX_MODULES + BASE_DIRS + "${VulkanHpp_CPPM_DIR}" + FILES + "${VulkanHpp_CPPM_DIR}/vulkan/vulkan.cppm" +) + +# Add the vulkan.cppm file directly as a source file +target_sources(VulkanCppModule + PRIVATE + "${VulkanHpp_CPPM_DIR}/vulkan/vulkan.cppm" +) + +# Set up shader compilation for all chapters +set(SHADER_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../attachments") +set(SHADER_OUTPUT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../assets") +file(MAKE_DIRECTORY ${SHADER_OUTPUT_DIR}) + +# Copy shader source files to the assets directory +configure_file( + "${SHADER_SOURCE_DIR}/27_shader_depth.frag" + "${SHADER_OUTPUT_DIR}/27_shader_depth.frag" + COPYONLY +) +configure_file( + "${SHADER_SOURCE_DIR}/27_shader_depth.vert" + "${SHADER_OUTPUT_DIR}/27_shader_depth.vert" + COPYONLY +) + +# Compile shaders +set(SHADER_SOURCES "${SHADER_OUTPUT_DIR}/27_shader_depth.frag" "${SHADER_OUTPUT_DIR}/27_shader_depth.vert") +add_shaders_target(vulkan_tutorial_shaders CHAPTER_NAME "${SHADER_OUTPUT_DIR}" SOURCES ${SHADER_SOURCES}) + +# Set default chapter if not provided +if(NOT DEFINED CHAPTER) + set(CHAPTER "34_android") +endif() + +# Define the list of supported chapters +set(SUPPORTED_CHAPTERS + "34_android" + "35_gltf_ktx" + "36_multiple_objects" +) + +# Validate the chapter +list(FIND SUPPORTED_CHAPTERS ${CHAPTER} CHAPTER_INDEX) +if(CHAPTER_INDEX EQUAL -1) + message(FATAL_ERROR "Invalid chapter: ${CHAPTER}. 
Supported chapters are: ${SUPPORTED_CHAPTERS}") +endif() + +message(STATUS "Building chapter: ${CHAPTER}") + +# Add the main native library +add_library(vulkan_tutorial_android SHARED + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../attachments/${CHAPTER}.cpp + game_activity_bridge.cpp +) + +# Add dependency on shader compilation +add_dependencies(vulkan_tutorial_android vulkan_tutorial_shaders) + +# Set include directories +target_include_directories(vulkan_tutorial_android PRIVATE + ${CMAKE_CURRENT_SOURCE_DIR} + ${Vulkan_INCLUDE_DIR} + ${ANDROID_NDK}/sources/android/game-activity/include + ${STB_INCLUDEDIR} +) + +# Link against libraries +target_link_libraries(vulkan_tutorial_android + VulkanCppModule + game-activity::game-activity + android + log + ${Vulkan_LIBRARIES} + glm::glm +) + +# Add chapter-specific libraries and definitions +if(CHAPTER STREQUAL "34_android") + target_link_libraries(vulkan_tutorial_android + tinyobjloader::tinyobjloader + ) +elseif(CHAPTER STREQUAL "35_gltf_ktx" OR CHAPTER STREQUAL "36_multiple_objects") + target_link_libraries(vulkan_tutorial_android + tinygltf::tinygltf + KTX::ktx + ) + + # Add necessary definitions for tinygltf and KTX + target_compile_definitions(vulkan_tutorial_android PRIVATE + TINYGLTF_IMPLEMENTATION + TINYGLTF_NO_EXTERNAL_IMAGE + TINYGLTF_NO_STB_IMAGE + TINYGLTF_NO_STB_IMAGE_WRITE + KTX_FEATURE_KTX1 + KTX_FEATURE_KTX2 + KTX_FEATURE_WRITE + ) +endif() diff --git a/attachments/android/app/src/main/cpp/game_activity_bridge.cpp b/attachments/android/app/src/main/cpp/game_activity_bridge.cpp new file mode 100644 index 00000000..862e4fbc --- /dev/null +++ b/attachments/android/app/src/main/cpp/game_activity_bridge.cpp @@ -0,0 +1,41 @@ +#include +#include +#include +#include + +// Define logging macros +#define LOGI(...) ((void)__android_log_print(ANDROID_LOG_INFO, "VulkanTutorial", __VA_ARGS__)) +#define LOGW(...) ((void)__android_log_print(ANDROID_LOG_WARN, "VulkanTutorial", __VA_ARGS__)) +#define LOGE(...) 
((void)__android_log_print(ANDROID_LOG_ERROR, "VulkanTutorial", __VA_ARGS__)) + +// Forward declaration of the main entry point +extern "C" void android_main(android_app* app); + +// GameActivity entry point +extern "C" { +// This is the function the GameActivity library will call. +// Ensure its signature matches what GameActivity expects: +// void GameActivity_onCreate(GameActivity* activity, void* savedState, size_t savedStateSize) +void GameActivity_onCreate(GameActivity* activity, void* savedState, size_t savedStateSize) { + LOGI("GameActivity_onCreate"); + + // Create an android_app structure + android_app* app = new android_app(); // Consider using std::unique_ptr for better memory management + memset(app, 0, sizeof(android_app)); + + // Set up the android_app structure + app->activity = activity; + app->window = nullptr; // Window will be provided later through onNativeWindowCreated callback + + // Call the original android_main function + android_main(app); + + // Clean up + // IMPORTANT: The lifetime of 'app' needs to be managed carefully. + // If android_main runs asynchronously or expects 'app' to live longer, + // deleting it here might be premature. + // Consider the lifecycle of your native_app_glue integration. + // For example, 'app' might need to be freed when the activity is destroyed. 
+ delete app; +} +} \ No newline at end of file diff --git a/attachments/android/app/src/main/java/com/vulkan/tutorial/VulkanActivity.java b/attachments/android/app/src/main/java/com/vulkan/tutorial/VulkanActivity.java new file mode 100644 index 00000000..bdb370f6 --- /dev/null +++ b/attachments/android/app/src/main/java/com/vulkan/tutorial/VulkanActivity.java @@ -0,0 +1,20 @@ +package com.vulkan.tutorial; + +import android.os.Bundle; +import android.view.WindowManager; +import com.google.androidgamesdk.GameActivity; + +public class VulkanActivity extends GameActivity { + @Override + protected void onCreate(Bundle savedInstanceState) { + super.onCreate(savedInstanceState); + + // Keep the screen on while the app is running + getWindow().addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON); + } + + // Load the native library + static { + System.loadLibrary("vulkan_tutorial_android"); + } +} diff --git a/attachments/android/app/src/main/res/drawable/ic_launcher_background.xml b/attachments/android/app/src/main/res/drawable/ic_launcher_background.xml new file mode 100644 index 00000000..00a14c7a --- /dev/null +++ b/attachments/android/app/src/main/res/drawable/ic_launcher_background.xml @@ -0,0 +1,5 @@ + + + + \ No newline at end of file diff --git a/attachments/android/app/src/main/res/drawable/ic_launcher_foreground.xml b/attachments/android/app/src/main/res/drawable/ic_launcher_foreground.xml new file mode 100644 index 00000000..32847562 --- /dev/null +++ b/attachments/android/app/src/main/res/drawable/ic_launcher_foreground.xml @@ -0,0 +1,13 @@ + + + + + \ No newline at end of file diff --git a/attachments/android/app/src/main/res/mipmap-anydpi-v26/ic_launcher.xml b/attachments/android/app/src/main/res/mipmap-anydpi-v26/ic_launcher.xml new file mode 100644 index 00000000..eca70cfe --- /dev/null +++ b/attachments/android/app/src/main/res/mipmap-anydpi-v26/ic_launcher.xml @@ -0,0 +1,5 @@ + + + + + \ No newline at end of file diff --git 
a/attachments/android/app/src/main/res/mipmap-anydpi-v26/ic_launcher_round.xml b/attachments/android/app/src/main/res/mipmap-anydpi-v26/ic_launcher_round.xml new file mode 100644 index 00000000..eca70cfe --- /dev/null +++ b/attachments/android/app/src/main/res/mipmap-anydpi-v26/ic_launcher_round.xml @@ -0,0 +1,5 @@ + + + + + \ No newline at end of file diff --git a/attachments/android/app/src/main/res/values/strings.xml b/attachments/android/app/src/main/res/values/strings.xml new file mode 100644 index 00000000..ed278c96 --- /dev/null +++ b/attachments/android/app/src/main/res/values/strings.xml @@ -0,0 +1,3 @@ + + Vulkan Tutorial + diff --git a/attachments/android/app/src/main/res/values/styles.xml b/attachments/android/app/src/main/res/values/styles.xml new file mode 100644 index 00000000..c63a3a91 --- /dev/null +++ b/attachments/android/app/src/main/res/values/styles.xml @@ -0,0 +1,6 @@ + + + + diff --git a/attachments/android/app/src/main/res/xml/backup_rules.xml b/attachments/android/app/src/main/res/xml/backup_rules.xml new file mode 100644 index 00000000..04184967 --- /dev/null +++ b/attachments/android/app/src/main/res/xml/backup_rules.xml @@ -0,0 +1,9 @@ + + + + + + + + + \ No newline at end of file diff --git a/attachments/android/app/src/main/res/xml/data_extraction_rules.xml b/attachments/android/app/src/main/res/xml/data_extraction_rules.xml new file mode 100644 index 00000000..dbe53401 --- /dev/null +++ b/attachments/android/app/src/main/res/xml/data_extraction_rules.xml @@ -0,0 +1,19 @@ + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/attachments/android/build.gradle b/attachments/android/build.gradle new file mode 100644 index 00000000..d946933b --- /dev/null +++ b/attachments/android/build.gradle @@ -0,0 +1,18 @@ +// Top-level build file where you can add configuration options common to all sub-projects/modules. 
+buildscript { + repositories { + google() + mavenCentral() + } + dependencies { + classpath 'com.android.tools.build:gradle:8.11.0' + + // NOTE: Do not place your application dependencies here; they belong + // in the individual module build.gradle files + } +} + +// For Gradle 9.0+, use the Delete interface instead of type +tasks.register('clean', Delete) { + delete rootProject.layout.buildDirectory +} diff --git a/attachments/android/gradle.properties b/attachments/android/gradle.properties new file mode 100644 index 00000000..6c8ca9a3 --- /dev/null +++ b/attachments/android/gradle.properties @@ -0,0 +1,10 @@ +android.useAndroidX=true +android.enableJetifier=false + +# Gradle 9.0+ compatibility settings +org.gradle.jvmargs=-Xmx2048m -Dfile.encoding=UTF-8 +org.gradle.parallel=true +org.gradle.caching=true +android.nonTransitiveRClass=true +android.nonFinalResIds=true +org.gradle.configuration-cache=true diff --git a/attachments/android/gradle/wrapper/gradle-wrapper.properties b/attachments/android/gradle/wrapper/gradle-wrapper.properties new file mode 100644 index 00000000..e67a8624 --- /dev/null +++ b/attachments/android/gradle/wrapper/gradle-wrapper.properties @@ -0,0 +1,6 @@ +#Thu Jul 03 10:37:40 PDT 2023 +distributionBase=GRADLE_USER_HOME +distributionPath=wrapper/dists +distributionUrl=https\://services.gradle.org/distributions/gradle-9.0-milestone-1-bin.zip +zipStoreBase=GRADLE_USER_HOME +zipStorePath=wrapper/dists diff --git a/attachments/android/settings.gradle b/attachments/android/settings.gradle new file mode 100644 index 00000000..b95ea2a7 --- /dev/null +++ b/attachments/android/settings.gradle @@ -0,0 +1,20 @@ +// For Gradle 9.0+, plugin repositories should be configured here +pluginManagement { + repositories { + google() + mavenCentral() + gradlePluginPortal() + } +} + +// For Gradle 9.0+, dependency repositories should be configured here +dependencyResolutionManagement { + repositoriesMode.set(RepositoriesMode.FAIL_ON_PROJECT_REPOS) + repositories { + google()
+ mavenCentral() + } +} + +include ':app' +rootProject.name = "VulkanTutorial" diff --git a/attachments/assets/viking_room.glb b/attachments/assets/viking_room.glb new file mode 100644 index 00000000..0f07ce7a Binary files /dev/null and b/attachments/assets/viking_room.glb differ diff --git a/attachments/assets/viking_room.ktx2 b/attachments/assets/viking_room.ktx2 new file mode 100644 index 00000000..ef778c41 Binary files /dev/null and b/attachments/assets/viking_room.ktx2 differ diff --git a/attachments/viking_room.obj b/attachments/assets/viking_room.obj similarity index 100% rename from attachments/viking_room.obj rename to attachments/assets/viking_room.obj diff --git a/attachments/viking_room.png b/attachments/assets/viking_room.png similarity index 100% rename from attachments/viking_room.png rename to attachments/assets/viking_room.png diff --git a/en/12_Ecosystem_Utilities_and_Compatibility.adoc b/en/12_Ecosystem_Utilities_and_Compatibility.adoc new file mode 100644 index 00000000..a73fa3cb --- /dev/null +++ b/en/12_Ecosystem_Utilities_and_Compatibility.adoc @@ -0,0 +1,546 @@ +:pp: {plus}{plus} + += Ecosystem Utilities and GPU Compatibility + +== Introduction + +In this chapter, we'll explore important ecosystem utilities for Vulkan development and learn how to adapt our code to support a wider range of GPUs. As Vulkan continues to evolve with new versions and features, it's important to understand how to: + +1. Discover what features are supported by different GPUs +2. Modify your code to maintain compatibility with older hardware +3. Conditionally use advanced features when available + +This knowledge is essential for developing Vulkan applications that can run on a diverse range of hardware, from the latest high-end GPUs to older or more limited devices. 
+ +== Vulkan Hardware Database (GPUInfo.org) + +=== Introduction to GPUInfo.org + +The link:https://vulkan.gpuinfo.org/[Vulkan Hardware Database] (GPUInfo.org) is an invaluable resource for Vulkan developers. This community-driven database collects and presents information about Vulkan support across a wide range of GPUs and devices. + +GPUInfo.org provides detailed information about: + +* Supported Vulkan versions +* Available extensions +* Feature support +* Implementation limits +* Format properties +* Queue family properties + +This information is crowdsourced from users who run the Vulkan Hardware Capability Viewer tool, which reports their GPU's capabilities to the database. + +=== Using GPUInfo.org for Development + +When developing a Vulkan application, GPUInfo.org can help you: + +1. *Determine minimum requirements*: Understand what Vulkan version and extensions you need to target to support your desired range of hardware. + +2. *Check feature availability*: Verify if specific features like dynamic rendering, timeline semaphores, or ray tracing are widely supported. + +3. *Identify implementation limits*: Discover the practical limits of various Vulkan features across different hardware. + +4. *Compare vendors and devices*: Understand the differences in Vulkan support between NVIDIA, AMD, Intel, and mobile GPU vendors. + +Let's look at some practical examples of using GPUInfo.org: + +==== Example: Checking Vulkan Version Support + +To determine how widely supported Vulkan 1.3 (which introduced dynamic rendering) is: + +1. Visit link:https://vulkan.gpuinfo.org/[GPUInfo.org] +2. Navigate to "Core Version Support" +3. Check the percentage of devices supporting Vulkan 1.3 + +You'll find that while newer GPUs support Vulkan 1.3+, there are still many devices limited to Vulkan 1.0, 1.1, or 1.2. + +==== Example: Checking Extension Support + +If you're considering using a specific extension: + +1. Visit the "Extensions" section +2. 
Search for your extension of interest +3. Check its support percentage across different vendors + +This helps you decide whether to require the extension or provide a fallback path. + +==== Example: Using the Vulkan Configurator Tool + +The Vulkan Configurator tool (executable name `vkconfig` on all platforms) is included in the Vulkan SDK and provides a convenient way to configure Vulkan settings on your system. Here's how to use it: + +1. *Launch the Vulkan Configurator*: + - On Windows: It's recommended to start "Vulkan Configurator" from the Start menu, as running `vkconfig.exe` from the command line only shows limited options + - On other platforms: Open a terminal and run `vkconfig` + + Note that the executable is called `vkconfig` on all platforms. + +2. *Configure Validation Layers*: + - Navigate to the "Layers" tab + - Enable or disable specific validation layers based on your debugging needs + - For example, enable `VK_LAYER_KHRONOS_validation` during development to catch API usage errors + +3. *Manage Environment Variables*: + - Go to the "Settings" tab + - Set environment variables like `VK_LAYER_PATH` or `VK_ICD_FILENAMES` + - These settings can be applied system-wide or for the current session + +4. *Configure Driver-specific Options*: + - Some GPU vendors provide additional configuration options + - These can be accessed through the vendor-specific tabs + +5. 
*Export Configuration*: + - Save your configuration for later use or to share with team members + - This ensures consistent Vulkan environments across development machines + +Using the Vulkan Configurator is particularly helpful when: +- Debugging Vulkan applications with different validation layer configurations +- Testing your application with different Vulkan settings without modifying code +- Setting up a development environment with specific Vulkan requirements + +==== Using Vulkan Configurator for Validation Layers Instead of Code + +In many Vulkan applications, validation layers are enabled programmatically during instance creation, typically only in debug builds. Here's how this is commonly done: + +[,c++] +---- +// Define validation layers +const std::vector<const char*> validationLayers = { + "VK_LAYER_KHRONOS_validation" +}; + +// Enable only in debug builds +#ifdef NDEBUG +constexpr bool enableValidationLayers = false; +#else +constexpr bool enableValidationLayers = true; +#endif + +void createInstance() { + // Check if validation layers are available + if (enableValidationLayers && !checkValidationLayerSupport()) { + throw std::runtime_error("validation layers requested, but not available!"); + } + + // Application info... + + // Enable validation layers if in debug mode + std::vector<const char*> enabledLayers; + if (enableValidationLayers) { + enabledLayers.assign(validationLayers.begin(), validationLayers.end()); + } + + // Create instance with validation layers + vk::InstanceCreateInfo createInfo{ + .pApplicationInfo = &appInfo, + .enabledLayerCount = static_cast<uint32_t>(enabledLayers.size()), + .ppEnabledLayerNames = enabledLayers.data(), + // ... other parameters + }; + + instance = vk::raii::Instance(context, createInfo); +} +---- + +While this approach works, it has several drawbacks: + +1. It requires modifying and recompiling code to enable/disable validation +2. It's harder to experiment with different validation layer configurations +3. 
It adds complexity to your codebase + +A better approach is to use the Vulkan Configurator to manage validation layers externally. Here's how to modify your code to take advantage of this: + +[,c++] +---- +void createInstance() { + // Application info... + + // Create instance without explicitly enabling validation layers + vk::InstanceCreateInfo createInfo{ + .pApplicationInfo = &appInfo, + // ... other parameters + }; + + instance = vk::raii::Instance(context, createInfo); +} +---- + +With this approach: + +1. You remove all validation layer-specific code from your application +2. You use the Vulkan Configurator to enable validation layers when needed +3. You can switch validation configurations without recompiling + +To enable validation layers with the Vulkan Configurator: + +1. Launch the Vulkan Configurator (from the Start menu on Windows, or run `vkconfig` from the terminal - the executable is called `vkconfig` on all platforms) +2. Go to the "Layers" tab +3. Enable the `VK_LAYER_KHRONOS_validation` layer +4. Apply the settings + +This configuration will apply to all Vulkan applications run in that environment, making it easy to toggle validation on and off without code changes. 
+ +The benefits of this approach include: + +* *Cleaner code*: Your application code doesn't need to handle validation layers +* *Flexibility*: Change validation settings without recompiling +* *Consistency*: Apply the same validation settings across multiple applications +* *Experimentation*: Easily try different validation configurations + +=== Other Useful Ecosystem Tools + +Besides GPUInfo.org, several other tools can help you develop and debug Vulkan applications: + +* *Vulkan SDK Tools*: +** `vulkaninfo`: Displays Vulkan capabilities of your local system +** `vkconfig` (Vulkan Configurator): A configuration tool for managing Vulkan settings (see <<_example_using_the_vulkan_configurator_tool,Example: Using the Vulkan Configurator Tool>> for details) +** Validation layers: Help identify API usage errors +** RenderDoc: Graphics debugging tool + +* *Vendor-specific Tools*: +** NVIDIA Nsight Graphics +** AMD Radeon GPU Profiler +** Intel Graphics Performance Analyzers + +== Supporting Older GPUs + +Now that we understand how to discover GPU capabilities, let's explore how to modify our code to support older GPUs that don't have Vulkan 1.3/1.4 features like dynamic rendering. + +=== Detecting Available Features + +The first step is to detect what features are available on the user's GPU. 
This is done during device creation: + +[,c++] +---- +// Check if dynamic rendering is supported +bool dynamicRenderingSupported = false; + +// Check for Vulkan 1.3 support +if (deviceProperties.apiVersion >= VK_VERSION_1_3) { + dynamicRenderingSupported = true; +} else { + // Check for the extension on older Vulkan versions + for (const auto& extension : availableExtensions) { + if (strcmp(extension.extensionName, VK_KHR_DYNAMIC_RENDERING_EXTENSION_NAME) == 0) { + dynamicRenderingSupported = true; + break; + } + } +} + +// Store this information for later use +appInfo.dynamicRenderingSupported = dynamicRenderingSupported; +---- + +=== Alternative to Dynamic Rendering: Traditional Render Passes + +If dynamic rendering isn't available, we need to use traditional render passes and framebuffers. Here's how to implement this alternative approach: + +==== Creating a Render Pass + +[,c++] +---- +void createRenderPass() { + if (appInfo.dynamicRenderingSupported) { + // No render pass needed with dynamic rendering + return; + } + + // Color attachment description + vk::AttachmentDescription colorAttachment{ + .format = swapChainImageFormat, + .samples = vk::SampleCountFlagBits::e1, + .loadOp = vk::AttachmentLoadOp::eClear, + .storeOp = vk::AttachmentStoreOp::eStore, + .stencilLoadOp = vk::AttachmentLoadOp::eDontCare, + .stencilStoreOp = vk::AttachmentStoreOp::eDontCare, + .initialLayout = vk::ImageLayout::eUndefined, + .finalLayout = vk::ImageLayout::ePresentSrcKHR + }; + + // Subpass reference to the color attachment + vk::AttachmentReference colorAttachmentRef{ + .attachment = 0, + .layout = vk::ImageLayout::eColorAttachmentOptimal + }; + + // Subpass description + vk::SubpassDescription subpass{ + .pipelineBindPoint = vk::PipelineBindPoint::eGraphics, + .colorAttachmentCount = 1, + .pColorAttachments = &colorAttachmentRef + }; + + // Dependency to ensure proper image layout transitions + vk::SubpassDependency dependency{ + .srcSubpass = VK_SUBPASS_EXTERNAL, + 
.dstSubpass = 0, + .srcStageMask = vk::PipelineStageFlagBits::eColorAttachmentOutput, + .dstStageMask = vk::PipelineStageFlagBits::eColorAttachmentOutput, + .srcAccessMask = vk::AccessFlagBits::eNone, + .dstAccessMask = vk::AccessFlagBits::eColorAttachmentWrite + }; + + // Create the render pass + vk::RenderPassCreateInfo renderPassInfo{ + .attachmentCount = 1, + .pAttachments = &colorAttachment, + .subpassCount = 1, + .pSubpasses = &subpass, + .dependencyCount = 1, + .pDependencies = &dependency + }; + + renderPass = device.createRenderPass(renderPassInfo); +} +---- + +==== Creating Framebuffers + +[,c++] +---- +void createFramebuffers() { + if (appInfo.dynamicRenderingSupported) { + // No framebuffers needed with dynamic rendering + return; + } + + swapChainFramebuffers.resize(swapChainImageViews.size()); + + for (size_t i = 0; i < swapChainImageViews.size(); i++) { + vk::ImageView attachments[] = { + swapChainImageViews[i] + }; + + vk::FramebufferCreateInfo framebufferInfo{ + .renderPass = renderPass, + .attachmentCount = 1, + .pAttachments = attachments, + .width = swapChainExtent.width, + .height = swapChainExtent.height, + .layers = 1 + }; + + swapChainFramebuffers[i] = device.createFramebuffer(framebufferInfo); + } +} +---- + +==== Modifying Pipeline Creation + +When creating the graphics pipeline, we need to specify the render pass if dynamic rendering isn't available: + +[,c++] +---- +void createGraphicsPipeline() { + // ... existing shader stage and fixed function setup ... 
+ + vk::GraphicsPipelineCreateInfo pipelineInfo{}; + + if (appInfo.dynamicRenderingSupported) { + // Use dynamic rendering + vk::PipelineRenderingCreateInfo pipelineRenderingCreateInfo{ + .colorAttachmentCount = 1, + .pColorAttachmentFormats = &swapChainImageFormat + }; + + pipelineInfo.pNext = &pipelineRenderingCreateInfo; + pipelineInfo.renderPass = nullptr; + } else { + // Use traditional render pass + pipelineInfo.pNext = nullptr; + pipelineInfo.renderPass = renderPass; + pipelineInfo.subpass = 0; + } + + // ... rest of pipeline creation ... +} +---- + +==== Adapting Command Buffer Recording + +Finally, we need to modify how we record command buffers: + +[,c++] +---- +void recordCommandBuffer(vk::CommandBuffer commandBuffer, uint32_t imageIndex) { + // ... begin command buffer ... + + if (appInfo.dynamicRenderingSupported) { + // Begin dynamic rendering + vk::RenderingAttachmentInfo colorAttachment{ + .imageView = swapChainImageViews[imageIndex], + .imageLayout = vk::ImageLayout::eAttachmentOptimal, + .loadOp = vk::AttachmentLoadOp::eClear, + .storeOp = vk::AttachmentStoreOp::eStore, + .clearValue = clearColor + }; + + vk::RenderingInfo renderingInfo{ + .renderArea = {{0, 0}, swapChainExtent}, + .layerCount = 1, + .colorAttachmentCount = 1, + .pColorAttachments = &colorAttachment + }; + + commandBuffer.beginRendering(renderingInfo); + } else { + // Begin traditional render pass + vk::RenderPassBeginInfo renderPassInfo{ + .renderPass = renderPass, + .framebuffer = swapChainFramebuffers[imageIndex], + .renderArea = {{0, 0}, swapChainExtent}, + .clearValueCount = 1, + .pClearValues = &clearColor + }; + + commandBuffer.beginRenderPass(renderPassInfo, vk::SubpassContents::eInline); + } + + // ... bind pipeline and draw ... + + if (appInfo.dynamicRenderingSupported) { + commandBuffer.endRendering(); + } else { + commandBuffer.endRenderPass(); + } + + // ... end command buffer ... 
+} +---- + +=== Handling Other Vulkan 1.3/1.4 Features + +Dynamic rendering is just one example of a feature that might not be available on older GPUs. Here are some other Vulkan 1.3/1.4 features you might need to provide alternatives for: + +==== Timeline Semaphores + +Timeline semaphores (introduced in Vulkan 1.2) provide a more flexible synchronization mechanism than binary semaphores. If they're not available, you'll need to use binary semaphores and fences: + +[,c++] +---- +bool timelineSemaphoresSupported = false; + +// Check for Vulkan 1.2 support or extension +if (deviceProperties.apiVersion >= VK_VERSION_1_2) { + timelineSemaphoresSupported = true; +} else { + // Check for extension + for (const auto& extension : availableExtensions) { + if (strcmp(extension.extensionName, VK_KHR_TIMELINE_SEMAPHORE_EXTENSION_NAME) == 0) { + timelineSemaphoresSupported = true; + break; + } + } +} + +// Create appropriate synchronization primitives +if (timelineSemaphoresSupported) { + // Create timeline semaphore + vk::SemaphoreTypeCreateInfo timelineCreateInfo{ + .semaphoreType = vk::SemaphoreType::eTimeline, + .initialValue = 0 + }; + + vk::SemaphoreCreateInfo semaphoreInfo{ + .pNext = &timelineCreateInfo + }; + + timelineSemaphore = device.createSemaphore(semaphoreInfo); +} else { + // Create binary semaphores and fences + vk::SemaphoreCreateInfo semaphoreInfo{}; + vk::FenceCreateInfo fenceInfo{.flags = vk::FenceCreateFlagBits::eSignaled}; + + for (size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) { + imageAvailableSemaphores[i] = device.createSemaphore(semaphoreInfo); + renderFinishedSemaphores[i] = device.createSemaphore(semaphoreInfo); + inFlightFences[i] = device.createFence(fenceInfo); + } +} +---- + +==== Synchronization2 + +The Synchronization2 feature (Vulkan 1.3) simplifies pipeline barriers and memory dependencies. 
If it's not available, use the original synchronization commands: + +[,c++] +---- +bool synchronization2Supported = false; + +// Check for Vulkan 1.3 support or extension +if (deviceProperties.apiVersion >= VK_VERSION_1_3) { + synchronization2Supported = true; +} else { + // Check for extension + for (const auto& extension : availableExtensions) { + if (strcmp(extension.extensionName, VK_KHR_SYNCHRONIZATION_2_EXTENSION_NAME) == 0) { + synchronization2Supported = true; + break; + } + } +} + +// Use appropriate barrier commands +if (synchronization2Supported) { + // Use Synchronization2 API + vk::ImageMemoryBarrier2 barrier{ + .srcStageMask = vk::PipelineStageFlagBits2::eTopOfPipe, + .srcAccessMask = vk::AccessFlagBits2::eNone, + .dstStageMask = vk::PipelineStageFlagBits2::eColorAttachmentOutput, + .dstAccessMask = vk::AccessFlagBits2::eColorAttachmentWrite, + .oldLayout = vk::ImageLayout::eUndefined, + .newLayout = vk::ImageLayout::eAttachmentOptimal, + .image = swapChainImages[i], + .subresourceRange = {vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1} + }; + + vk::DependencyInfo dependencyInfo{ + .imageMemoryBarrierCount = 1, + .pImageMemoryBarriers = &barrier + }; + + commandBuffer.pipelineBarrier2(dependencyInfo); +} else { + // Use original synchronization API + vk::ImageMemoryBarrier barrier{ + .srcAccessMask = vk::AccessFlagBits::eNone, + .dstAccessMask = vk::AccessFlagBits::eColorAttachmentWrite, + .oldLayout = vk::ImageLayout::eUndefined, + .newLayout = vk::ImageLayout::eColorAttachmentOptimal, + .srcQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .dstQueueFamilyIndex = VK_QUEUE_FAMILY_IGNORED, + .image = swapChainImages[i], + .subresourceRange = {vk::ImageAspectFlagBits::eColor, 0, 1, 0, 1} + }; + + commandBuffer.pipelineBarrier( + vk::PipelineStageFlagBits::eTopOfPipe, + vk::PipelineStageFlagBits::eColorAttachmentOutput, + vk::DependencyFlagBits::eByRegion, + {}, + {}, + { barrier } + ); +} +---- + +== Best Practices for Cross-GPU Compatibility + +Based on what 
we've learned, here are some best practices for developing Vulkan applications that work across a wide range of GPUs: + +1. *Check feature availability at runtime*: Don't assume features are available based on the Vulkan version alone. Always check for specific features and extensions. + +2. *Provide fallback paths*: Implement alternative code paths for when modern features aren't available. + +3. *Use feature structures*: When creating a logical device, use the appropriate feature structures to enable only the features you need and that are available. + +4. *Test on various hardware*: Use GPUInfo.org to identify common hardware configurations and test your application on a representative sample. + +5. *Graceful degradation*: Design your application to gracefully reduce visual quality or functionality when running on less capable hardware. + +6. *Document requirements*: Clearly document the minimum and recommended Vulkan version and extension requirements for your application. + +== Conclusion + +Understanding Vulkan ecosystem utilities and knowing how to adapt your code for different GPU capabilities are essential skills for Vulkan developers. By following the approaches outlined in this chapter, you can create applications that run on a wide range of hardware while still taking advantage of the latest features when available. + +link:/attachments/32_ecosystem_utilities.cpp[C{pp} code] diff --git a/en/13_Vulkan_Profiles.adoc b/en/13_Vulkan_Profiles.adoc new file mode 100644 index 00000000..a45efb41 --- /dev/null +++ b/en/13_Vulkan_Profiles.adoc @@ -0,0 +1,321 @@ +:pp: {plus}{plus} + += Vulkan Profiles: Simplifying Feature Detection + +== Introduction + +In this chapter, we'll explore Vulkan profiles, a powerful feature that builds upon the ecosystem utilities we discussed in the previous chapter. Vulkan profiles provide a standardized way to: + +1. Define a set of features, extensions, and limits that your application requires +2. 
Automatically check for compatibility with the user's hardware +3. Eliminate the need for manual feature detection and fallback paths +4. Significantly reduce boilerplate code + +Vulkan profiles are particularly valuable for developers who want to ensure their applications work consistently across a wide range of hardware without the complexity of manually checking for feature support. + +== Understanding Vulkan Profiles + +=== What Are Vulkan Profiles? + +Vulkan profiles are predefined collections of features, extensions, limits, and formats that represent a specific target environment or set of best practices. They provide a higher-level abstraction over the low-level Vulkan API, making it easier to: + +* Target specific hardware capabilities +* Ensure compatibility across different GPUs +* Implement best practices consistently +* Reduce boilerplate code for feature detection + +Instead of manually checking for each feature and extension and implementing fallback paths, you can simply specify a profile that your application requires. The Vulkan profiles library will handle the compatibility checks and provide appropriate error messages if the user's hardware doesn't meet the requirements. + +=== Types of Vulkan Profiles + +Several types of profiles are available: + +1. *API Profiles*: Represent specific Vulkan API versions (e.g., Vulkan 1.1, 1.2, 1.3) +2. *Vendor Profiles*: Target specific hardware vendors (e.g., NVIDIA, AMD, Intel) +3. *Platform Profiles*: Target specific platforms (e.g., Windows, Linux, Android) +4. *Best Practices Profile*: Implements recommended practices for Vulkan development + +In this chapter, we'll use the Best Practices profile as an example, +additionally, we will demonstrate how profiles can simplify your code by +eliminating the need for manual feature detection. 
+ +== How Profiles Simplify Your Code + +=== Eliminating Manual Feature Detection + +Up until now, we had to manually check for feature support and implement +fallback paths: + +1. Check if the device supports Vulkan 1.3 +2. If not, check if it supports the dynamic rendering extension +3. If neither is supported, fall back to traditional render passes +4. Repeat this process for every feature (timeline semaphores, synchronization2, etc.) +5. Maintain separate code paths for each feature + +This approach leads to complex, hard-to-maintain code with multiple conditional branches. + +With profiles, this entire process is simplified to: + +1. Check if the profile is supported +2. If supported, use all features guaranteed by the profile +3. If not, optionally fall back to a more basic approach + +=== Benefits of Using Profiles + +Using profiles offers several advantages: + +1. *Drastically reduced code complexity*: No need for multiple feature checks and conditional branches +2. *Improved maintainability*: Fewer code paths to test and debug +3. *Future-proofing*: As new Vulkan versions are released, profiles can be updated without changing your code +4. *Clearer requirements*: Profiles provide a clear specification of what your application needs +5. *Simplified error handling*: One check instead of many + +== Implementing Profiles in Your Application + +Let's see how to implement profiles in your Vulkan application. We'll use the Best Practices profile as an example to demonstrate how profiles can replace the manual feature detection we had to do in the previous chapter. + +=== Adding the Vulkan Profiles Library + +First, you need to include the Vulkan profiles header: + +[,c++] +---- +#include <vulkan/vulkan_profiles.hpp> +---- + +This header provides the necessary functions and structures to work with Vulkan profiles. + +The Vulkan Profiles header is NOT part of the standard Vulkan headers. +It is only available if you use the Vulkan SDK. 
Make sure you have the Vulkan SDK installed and properly configured in your development environment. + +=== Defining the Profile Requirements + +Instead of manually checking for features and extensions, you can define your profile requirements: + +[,c++] +---- +// Define the Best Practices profile +const VpProfileProperties bestPracticesProfile = { + VP_BEST_PRACTICES_PROFILE_NAME, + VP_BEST_PRACTICES_PROFILE_SPEC_VERSION +}; + +// Check if the profile is supported +VkBool32 supported = false; +vpGetPhysicalDeviceProfileSupport(instance, physicalDevice, &bestPracticesProfile, &supported); + +if (!supported) { + throw std::runtime_error("Best Practices profile is not supported on this device"); +} +---- + +=== Creating a Device with the Profile + +When creating a logical device, you can use the profile to automatically enable the required features and extensions: + +[,c++] +---- +// Create device with Best Practices profile +VkDeviceCreateInfo deviceCreateInfo = {}; +deviceCreateInfo.sType = VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO; + +// Set up queue create infos +// ... + +// Apply the Best Practices profile to the device creation +vpCreateDevice(physicalDevice, &deviceCreateInfo, &bestPracticesProfile, nullptr, &device); +---- + +This automatically enables all the features and extensions required by the Best Practices profile, without having to manually specify them. 
+ +=== Using Profile-Specific Features + +The Best Practices profile may enable specific features that you can use in your application: + +[,c++] +---- +// The profile guarantees these features are available +// No need to check for support or provide fallback paths + +// Example: Using dynamic rendering (guaranteed by the profile) +vk::RenderingAttachmentInfo colorAttachment{ + .imageView = swapChainImageViews[imageIndex], + .imageLayout = vk::ImageLayout::eAttachmentOptimal, + .loadOp = vk::AttachmentLoadOp::eClear, + .storeOp = vk::AttachmentStoreOp::eStore, + .clearValue = clearColor +}; + +vk::RenderingInfo renderingInfo{ + .renderArea = {{0, 0}, swapChainExtent}, + .layerCount = 1, + .colorAttachmentCount = 1, + .pColorAttachments = &colorAttachment +}; + +commandBuffer.beginRendering(renderingInfo); +// ... draw commands ... +commandBuffer.endRendering(); +---- + +=== Error Handling with Profiles + +When using profiles, error handling becomes more straightforward: + +[,c++] +---- +try { + // Try to create a device with the Best Practices profile + vpCreateDevice(physicalDevice, &deviceCreateInfo, &bestPracticesProfile, nullptr, &device); +} catch (const std::exception& e) { + // Profile is not supported, provide user-friendly error message + std::cerr << "Your GPU does not support the required Vulkan features for optimal performance." << std::endl; + std::cerr << "Error: " << e.what() << std::endl; + + // Optionally, try with a more basic profile or exit gracefully + // ... +} +---- + +== Comparing Manual Feature Detection vs. 
Profiles + +Let's compare the two approaches to understand just how much code and complexity profiles can eliminate: + +=== Manual Feature Detection (Previous Chapter) + +In the previous chapter, we had to write code like this for *each feature* we wanted to use: + +[,c++] +---- +// Check if dynamic rendering is supported +bool dynamicRenderingSupported = false; + +// Check for Vulkan 1.3 support +if (deviceProperties.apiVersion >= VK_VERSION_1_3) { + dynamicRenderingSupported = true; +} else { + // Check for the extension on older Vulkan versions + for (const auto& extension : availableExtensions) { + if (strcmp(extension.extensionName, VK_KHR_DYNAMIC_RENDERING_EXTENSION_NAME) == 0) { + dynamicRenderingSupported = true; + break; + } + } +} + +// Store this information for later use +appInfo.dynamicRenderingSupported = dynamicRenderingSupported; +---- + +And then we had to create conditional code paths throughout our application: + +[,c++] +---- +// When creating the pipeline +if (appInfo.dynamicRenderingSupported) { + // Use dynamic rendering + vk::PipelineRenderingCreateInfo renderingInfo{ + .colorAttachmentCount = 1, + .pColorAttachmentFormats = &swapChainImageFormat + }; + pipelineInfo.pNext = &renderingInfo; + pipelineInfo.renderPass = nullptr; +} else { + // Use traditional render pass + pipelineInfo.pNext = nullptr; + pipelineInfo.renderPass = renderPass; + pipelineInfo.subpass = 0; +} + +// When recording command buffers +if (appInfo.dynamicRenderingSupported) { + // Begin dynamic rendering + vk::RenderingAttachmentInfo colorAttachment{/*...*/}; + vk::RenderingInfo renderingInfo{/*...*/}; + commandBuffer.beginRendering(renderingInfo); +} else { + // Begin traditional render pass + vk::RenderPassBeginInfo renderPassInfo{/*...*/}; + commandBuffer.beginRenderPass(renderPassInfo, vk::SubpassContents::eInline); +} + +// And again at the end of the command buffer +if (appInfo.dynamicRenderingSupported) { + commandBuffer.endRendering(); +} else { + 
commandBuffer.endRenderPass(); +} +---- + +We had to repeat this pattern for *every feature* we wanted to use conditionally (timeline semaphores, synchronization2, etc.), resulting in complex, branching code that's challenging to maintain. + +=== Using Profiles (This Chapter) + +With profiles, all of that complexity is reduced to: + +[,c++] +---- +// Define the profile +const VpProfileProperties bestPracticesProfile = { + VP_BEST_PRACTICES_PROFILE_NAME, + VP_BEST_PRACTICES_PROFILE_SPEC_VERSION +}; + +// Check if the profile is supported +VkBool32 supported = false; +vpGetPhysicalDeviceProfileSupport(instance, physicalDevice, &bestPracticesProfile, &supported); + +if (supported) { + // Create device with the profile - all features enabled automatically + vpCreateDevice(physicalDevice, &deviceCreateInfo, &bestPracticesProfile, nullptr, &device); + + // Now we can use any feature guaranteed by the profile without checks + // For example, dynamic rendering is always available: + vk::RenderingAttachmentInfo colorAttachment{/*...*/}; + vk::RenderingInfo renderingInfo{/*...*/}; + commandBuffer.beginRendering(renderingInfo); + // ... draw commands ... + commandBuffer.endRendering(); +} +---- + +The profile approach eliminates: + +1. Multiple feature detection checks +2. Conditional code paths throughout your application +3. The need to track feature support in your application state +4. The complexity of maintaining and testing multiple code paths + +This results in code that is: + +1. Significantly shorter +2. Easier to read and understand +3. Less prone to errors +4. Easier to maintain and update + +== Best Practices for Using Profiles + +When using Vulkan profiles, consider these best practices: + +1. *Choose the right profile*: Select a profile that matches your application's requirements without being overly restrictive. + +2. *Provide fallback options*: If the Best Practices profile isn't supported, consider falling back to a more basic profile. + +3. 
*Communicate requirements clearly*: Inform users about the hardware requirements based on the profiles you support. + +4. *Test on various hardware*: Even with profiles, it's important to test your application on different GPUs. + +5. *Stay updated*: Profiles evolve with new Vulkan versions, so keep your implementation up to date. + +== Conclusion + +Vulkan profiles provide a powerful way to simplify your Vulkan code by eliminating the need for manual feature detection and conditional code paths. As we've seen in this chapter, profiles can dramatically reduce the amount of code you need to write and maintain, making your application: + +1. More concise and readable +2. Easier to maintain and update +3. Less prone to errors +4. More consistent across different hardware + +The example we've explored in this chapter demonstrates how profiles can replace the complex feature detection and fallback paths we had to implement in the previous chapter. By using profiles, you can focus more on your application's core functionality and less on the intricacies of hardware compatibility. + +link:/attachments/33_vulkan_profiles.cpp[C{pp} code] diff --git a/en/14_Android.adoc b/en/14_Android.adoc new file mode 100644 index 00000000..d753f579 --- /dev/null +++ b/en/14_Android.adoc @@ -0,0 +1,835 @@ +:pp: {plus}{plus} + += Android: Taking Your Vulkan App Mobile + +== Introduction + +In the previous chapter, we explored how Vulkan profiles can simplify feature detection and make your code more maintainable. Now, let's take our Vulkan knowledge a step further by bringing our application to the mobile world with Android. + +While Vulkan was designed to be cross-platform from the ground up, deploying to Android introduces some new challenges and opportunities. The core Vulkan API remains the same, but the surrounding ecosystem - from window management to build systems - requires a different approach. 
+ +This chapter will guide you through adapting your Vulkan application for Android, reusing as much code as possible while addressing platform-specific requirements. You'll see that with the right setup, you can maintain a single codebase that works across desktop and mobile platforms. + +== Android-specific Considerations + +Before diving into implementation details, let's understand the key differences when developing Vulkan applications for Android compared to desktop: + +1. *Window System Integration*: Instead of GLFW, we use Android's native window system and activity lifecycle. +2. *Application Lifecycle*: Android apps can be paused, resumed, or terminated by the system at any time, requiring careful resource management. +3. *Asset Loading*: Resources are packaged in APK files and accessed through Android's asset manager. +4. *Build System*: We use Gradle and CMake together to build Android applications. +5. *Input Handling*: Touch input replaces mouse and keyboard, requiring different event handling. + +These differences might seem daunting at first, but with the right approach, we can address them while maintaining a clean, maintainable codebase. + +== Project Setup + +Now that we understand the key differences, let's set up our Android project. Our goal is to reuse as much code as possible from our desktop implementation while addressing Android-specific requirements. 
+ +=== Prerequisites + +Before we begin, make sure you have the following tools installed: + +* *https://developer.android.com/studio[Android Studio]*: The official IDE for Android development +* *Android NDK (Native Development Kit)*: Enables native C++ development on Android +* *Android SDK*: With a recent API level (24+, which corresponds to Android 7.0 or higher) for Vulkan support +* *CMake and Ninja build tools*: For building native code (these can be installed through Android Studio) +* *Vulkan SDK*: For shader compilation tools and validation layers + +[IMPORTANT] +==== +Unlike the desktop environment, Vulkan HPP (the C++ bindings for Vulkan) is NOT included by default in the Android NDK. You'll need to download it separately from the https://github.com/KhronosGroup/Vulkan-Hpp[Vulkan-Hpp GitHub repository] or use the version included in the Vulkan SDK. +==== + +=== Project Structure + +Let's start by understanding the structure of our Android project. We'll follow the standard Android application structure, but with some modifications to efficiently reuse code from our main project: + +[source] +---- +android/ +├── app/ +│ ├── build.gradle // App-level build configuration +│ ├── src/ +│ │ ├── main/ +│ │ │ ├── AndroidManifest.xml // App manifest +│ │ │ ├── cpp/ // Native code +│ │ │ │ ├── CMakeLists.txt // CMake build script +│ │ │ │ └── game_activity_bridge.cpp // Bridge between GameActivity and our Vulkan code +│ │ │ ├── java/ // Java code +│ │ │ │ └── com/vulkan/tutorial/ +│ │ │ │ └── VulkanActivity.java // Main activity (extends GameActivity) +│ │ │ └── res/ // Resources +│ │ │ └── values/ +│ │ │ ├── strings.xml // String resources +│ │ │ └── styles.xml // Style resources +├── build.gradle // Project-level build configuration +├── gradle/ // Gradle wrapper +├── settings.gradle // Project settings +---- + +== Setting Up the Android Project + +With our project structure in place, let's dive into the key components of our 
Android Vulkan application. We'll start with the essential configuration files and then move on to the native code implementation. + +=== The Manifest File + +Every Android application requires a manifest file that declares important information about the app. For our Vulkan application, the AndroidManifest.xml file is particularly important as it specifies the Vulkan version requirements: + +[source,xml] +---- + + + + + + + + + + + + + + + + + + + + +---- + +Key points: +* We specify a minimum SDK version of 24 (Android 7.0), which is required for Vulkan support. +* We declare that our app uses Vulkan with specific version requirements. +* We set up our main activity (VulkanActivity) as the entry point for our application. + +=== Java Activity + +After configuring the manifest, we need to create the Java side of our application. While most of our Vulkan code will run in native C++, we still need a Java activity to serve as the entry point for our application. + +For our Vulkan application, we'll use the GameActivity from the Android Game SDK instead of the traditional NativeActivity. This modern approach offers better performance and features specifically designed for games and graphics-intensive applications: + +[source,java] +---- +package com.vulkan.tutorial; + +import android.os.Bundle; +import android.view.WindowManager; +import com.google.androidgamesdk.GameActivity; + +public class VulkanActivity extends GameActivity { + @Override + protected void onCreate(Bundle savedInstanceState) { + super.onCreate(savedInstanceState); + + // Keep the screen on while the app is running + getWindow().addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON); + } + + // Load the native library + static { + System.loadLibrary("vulkan_tutorial_android"); + } +} +---- + +Key points: +* We extend GameActivity from the Android Game SDK, which provides a more optimized bridge between Java and native code. 
+* GameActivity offers better performance for games and graphics-intensive applications compared to NativeActivity. +* We load our native library ("vulkan_tutorial_android") which contains our Vulkan implementation. + +=== Build Configuration + +With our Java activity in place, we need to configure the build process. Android uses Gradle as its build system, which we'll configure to work with our native Vulkan code and assets. + +The build configuration is split across multiple files, with different responsibilities: + +Project-level build.gradle: +[source,groovy] +---- +buildscript { + repositories { + google() + mavenCentral() + } + dependencies { + classpath 'com.android.tools.build:gradle:7.2.2' + } +} + +allprojects { + repositories { + google() + mavenCentral() + } +} + +task clean(type: Delete) { + delete rootProject.buildDir +} +---- + +App-level build.gradle: +[source,groovy] +---- +plugins { + id 'com.android.application' +} + +android { + compileSdkVersion 33 + defaultConfig { + applicationId "com.vulkan.tutorial" + minSdkVersion 24 + targetSdkVersion 33 + versionCode 1 + versionName "1.0" + } + + buildTypes { + release { + minifyEnabled false + proguardFiles getDefaultProguardFile('proguard-android-optimize.txt'), 'proguard-rules.pro' + } + } + + compileOptions { + sourceCompatibility JavaVersion.VERSION_1_8 + targetCompatibility JavaVersion.VERSION_1_8 + } + + externalNativeBuild { + cmake { + path "src/main/cpp/CMakeLists.txt" + version "3.22.1" + } + } + + ndkVersion "25.2.9519653" + + // Use assets from the main project and locally compiled shaders + sourceSets { + main { + assets { + srcDirs = [ + // Point to the main project's assets + '../../../../', // For models and textures in the attachments directory + // Use locally compiled shaders from the build directory for all ABIs + // These paths are relative to the app directory + '.externalNativeBuild/cmake/debug/arm64-v8a/shaders', + '.externalNativeBuild/cmake/debug/armeabi-v7a/shaders', + 
'.externalNativeBuild/cmake/debug/x86/shaders', + '.externalNativeBuild/cmake/debug/x86_64/shaders', + // Also include release build paths + '.externalNativeBuild/cmake/release/arm64-v8a/shaders', + '.externalNativeBuild/cmake/release/armeabi-v7a/shaders', + '.externalNativeBuild/cmake/release/x86/shaders', + '.externalNativeBuild/cmake/release/x86_64/shaders' + ] + } + } + } +} + +dependencies { + implementation 'androidx.appcompat:appcompat:1.6.1' + implementation 'com.google.android.material:material:1.9.0' + implementation 'com.google.androidgamesdk:game-activity:1.2.0' +} +---- + +Key points: +* We specify the minimum SDK version as 24 (Android 7.0) for Vulkan support. +* We configure CMake to build our native code. +* We include the game-activity dependency for better performance. +* We set up asset directories to reference the main project's assets and locally compiled shaders. +* This approach avoids duplicating assets and ensures we're using the latest versions. + +=== CMake Configuration + +While Gradle handles the overall Android build process, we use CMake to build our native C++ code. This is where we'll set up our Vulkan environment, compile shaders, and link against the necessary libraries. 
+ +Let's examine our CMakeLists.txt file, which is the heart of our native code configuration: + +[source,cmake] +---- +cmake_minimum_required(VERSION 3.22.1) + +project(vulkan_tutorial_android) + +# Set the path to the main CMakeLists.txt relative to this file +set(MAIN_CMAKE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../CMakeLists.txt") + +# Find the Vulkan package +find_package(Vulkan REQUIRED) + +# Set up shader compilation tools +add_executable(glslang::validator IMPORTED) +find_program(GLSLANG_VALIDATOR "glslangValidator" HINTS $ENV{VULKAN_SDK}/bin REQUIRED) +set_property(TARGET glslang::validator PROPERTY IMPORTED_LOCATION "${GLSLANG_VALIDATOR}") + +# Define shader building function +function(add_shaders_target TARGET) + cmake_parse_arguments("SHADER" "" "CHAPTER_NAME" "SOURCES" ${ARGN}) + set(SHADERS_DIR ${SHADER_CHAPTER_NAME}/shaders) + add_custom_command( + OUTPUT ${SHADERS_DIR} + COMMAND ${CMAKE_COMMAND} -E make_directory ${SHADERS_DIR} + ) + add_custom_command( + OUTPUT ${SHADERS_DIR}/frag.spv ${SHADERS_DIR}/vert.spv + COMMAND glslang::validator + ARGS --target-env vulkan1.0 ${SHADER_SOURCES} --quiet + WORKING_DIRECTORY ${SHADERS_DIR} + DEPENDS ${SHADERS_DIR} ${SHADER_SOURCES} + COMMENT "Compiling Shaders" + VERBATIM + ) + add_custom_target(${TARGET} DEPENDS ${SHADERS_DIR}/frag.spv ${SHADERS_DIR}/vert.spv) +endfunction() + +# Include the game-activity library +find_package(game-activity REQUIRED CONFIG) +include_directories(${ANDROID_NDK}/sources/android/game-activity/include) + +# Set C++ standard to match the main project +set(CMAKE_CXX_STANDARD 20) +set(CMAKE_CXX_STANDARD_REQUIRED ON) + +# Add the Vulkan C++ module +add_library(VulkanCppModule SHARED) +target_compile_definitions(VulkanCppModule + PUBLIC VULKAN_HPP_DISPATCH_LOADER_DYNAMIC=1 VULKAN_HPP_NO_STRUCT_CONSTRUCTORS=1 +) +target_include_directories(VulkanCppModule + PRIVATE + "${Vulkan_INCLUDE_DIR}" +) +target_link_libraries(VulkanCppModule + PUBLIC + ${Vulkan_LIBRARIES} +) 
+set_target_properties(VulkanCppModule PROPERTIES CXX_STANDARD 20) + +# Set up the C++ module file set +target_sources(VulkanCppModule + PUBLIC + FILE_SET cxx_modules TYPE CXX_MODULES + BASE_DIRS + "${Vulkan_INCLUDE_DIR}" + FILES + "${Vulkan_INCLUDE_DIR}/vulkan/vulkan.cppm" +) + +# Set up shader compilation for 34_android +set(SHADER_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../attachments") +set(SHADER_OUTPUT_DIR "${CMAKE_CURRENT_BINARY_DIR}/shaders") +file(MAKE_DIRECTORY ${SHADER_OUTPUT_DIR}) + +# Copy shader source files to the build directory +configure_file( + "${SHADER_SOURCE_DIR}/27_shader_depth.frag" + "${SHADER_OUTPUT_DIR}/27_shader_depth.frag" + COPYONLY +) +configure_file( + "${SHADER_SOURCE_DIR}/27_shader_depth.vert" + "${SHADER_OUTPUT_DIR}/27_shader_depth.vert" + COPYONLY +) + +# Compile shaders +set(SHADER_SOURCES "${SHADER_OUTPUT_DIR}/27_shader_depth.frag" "${SHADER_OUTPUT_DIR}/27_shader_depth.vert") +add_shaders_target(android_shaders CHAPTER_NAME "${SHADER_OUTPUT_DIR}" SOURCES ${SHADER_SOURCES}) + +# Add the main native library +add_library(vulkan_tutorial_android SHARED + ${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../attachments/34_android.cpp + game_activity_bridge.cpp +) + +# Add dependency on shader compilation +add_dependencies(vulkan_tutorial_android android_shaders) + +# Set include directories +target_include_directories(vulkan_tutorial_android PRIVATE + ${CMAKE_CURRENT_SOURCE_DIR} + ${Vulkan_INCLUDE_DIR} + ${ANDROID_NDK}/sources/android/game-activity/include +) + +# Link against libraries +target_link_libraries(vulkan_tutorial_android + VulkanCppModule + game-activity::game-activity + android + log + ${Vulkan_LIBRARIES} +) +---- + +Key points: +* We find the Vulkan package and include the game-activity library instead of native_app_glue. +* We set up shader compilation tools and define a function to compile shaders. +* We set the C++ standard to C++20 and create a Vulkan C++ module. 
+* We set up shader compilation for the 34_android chapter, copying shader source files from the main project. +* We add the main native library, which uses the 34_android.cpp file from the main project and a bridge file to connect with GameActivity. +* We link against the necessary libraries, including game-activity. + +== Native Implementation + +Now that we've set up our build configuration, let's dive into the native C++ code that powers our Vulkan application on Android. This is where the real magic happens - we'll see how to adapt our existing Vulkan code to work on Android while minimizing platform-specific changes. + +One of the key advantages of our approach is code reuse. Instead of maintaining separate codebases for desktop and Android, we've structured our project to share as much code as possible: + +1. *34_android.cpp*: This is the same file used in our main project, containing the core Vulkan implementation. By reusing this file, we ensure that our rendering code is identical across platforms. + +2. *game_activity_bridge.cpp*: This small bridge file connects the Android GameActivity to our core Vulkan code. It handles the platform-specific initialization and event processing. + +This separation of concerns allows us to focus on the Vulkan implementation without getting bogged down in platform-specific details. When we make improvements to our rendering code, both desktop and Android versions benefit automatically. + +=== GameActivity Bridge + +Let's take a closer look at our bridge code, which is the key to connecting our Java GameActivity with our native Vulkan implementation. This small but crucial file handles the translation between Android's Java-based activity lifecycle and our C++ code: + +[source,cpp] +---- +#include +#include +#include + +// Define logging macros +#define LOGI(...) ((void)__android_log_print(ANDROID_LOG_INFO, "VulkanTutorial", __VA_ARGS__)) +#define LOGW(...) 
((void)__android_log_print(ANDROID_LOG_WARN, "VulkanTutorial", __VA_ARGS__)) +#define LOGE(...) ((void)__android_log_print(ANDROID_LOG_ERROR, "VulkanTutorial", __VA_ARGS__)) + +// Forward declaration of the main entry point +extern "C" void android_main(android_app* app); + +// GameActivity entry point +extern "C" { + void GameActivity_onCreate(GameActivity* activity) { + LOGI("GameActivity_onCreate"); + + // Create an android_app structure + android_app* app = new android_app(); + memset(app, 0, sizeof(android_app)); + + // Set up the android_app structure + app->activity = activity; + app->window = activity->window; + + // Call the original android_main function + android_main(app); + + // Clean up + delete app; + } +} +---- + +This bridge code: +1. Creates an android_app structure compatible with our Vulkan code +2. Sets up the necessary connections between GameActivity and our code +3. Calls the android_main function in our 34_android.cpp file + +=== Android Entry Point + +Once our bridge code has created the android_app structure, it calls the android_main function, which serves as the entry point for our native code. This function is defined in our 34_android.cpp file and is analogous to the main() function in desktop applications: + +Let's look at how we initialize our Vulkan application from this entry point: + +[source,cpp] +---- +void android_main(android_app* app) { + try { + // Create and run the Vulkan application + HelloTriangleApplication application(app); + application.run(); + } catch (const std::exception& e) { + LOGE("Exception caught: %s", e.what()); + } +} +---- + +=== Creating the Vulkan Surface + +One of the key platform-specific differences in our Vulkan implementation is how we create the surface. On desktop, we used GLFW to create a window and surface. On Android, we need to use the VK_KHR_android_surface extension to create a surface from the native Android window. 
+ +Here's how we create a Vulkan surface on Android: + +[source,cpp] +---- +void createSurface() { + VkSurfaceKHR _surface; + VkResult result = VK_SUCCESS; + + // Create Android surface + result = vkCreateAndroidSurfaceKHR( + *instance, + &(VkAndroidSurfaceCreateInfoKHR{ + .sType = VK_STRUCTURE_TYPE_ANDROID_SURFACE_CREATE_INFO_KHR, + .pNext = nullptr, + .flags = 0, + .window = androidApp->window + }), + nullptr, + &_surface + ); + + if (result != VK_SUCCESS) { + throw std::runtime_error("Failed to create Android surface"); + } + + surface = vk::raii::SurfaceKHR(instance, _surface); +} +---- + +=== Handling Android Events + +Another important platform-specific aspect is event handling. Android applications have a different lifecycle compared to desktop applications - they can be paused, resumed, or terminated by the system at any time. We need to handle these events properly to ensure our Vulkan resources are managed correctly. + +Here's how we handle Android-specific events in our application: + +[source,cpp] +---- +static void handleAppCommand(android_app* app, int32_t cmd) { + auto* vulkanApp = static_cast<HelloTriangleApplication*>(app->userData); + switch (cmd) { + case APP_CMD_INIT_WINDOW: + // Window created, initialize Vulkan + if (app->window != nullptr) { + vulkanApp->initVulkan(); + } + break; + case APP_CMD_TERM_WINDOW: + // Window destroyed, clean up Vulkan + vulkanApp->cleanup(); + break; + default: + break; + } +} + +static int32_t handleInputEvent(android_app* app, AInputEvent* event) { + auto* vulkanApp = static_cast<HelloTriangleApplication*>(app->userData); + if (AInputEvent_getType(event) == AINPUT_EVENT_TYPE_MOTION) { + // Handle touch events + float x = AMotionEvent_getX(event, 0); + float y = AMotionEvent_getY(event, 0); + + // Process touch coordinates + // ... + + return 1; + } + return 0; +} +---- + +== Cross-Platform Implementation + +While we've focused on Android-specific code so far, our approach allows us to maintain a single codebase that works on both desktop and Android platforms. 
This is achieved through careful use of preprocessor directives and platform-specific abstractions. + +=== Platform Detection + +The first step in our cross-platform approach is to detect which platform we're building for. We use preprocessor directives to check for platform-specific predefined macros: + +[source,cpp] +---- +// Platform detection +#if defined(__ANDROID__) + #define PLATFORM_ANDROID 1 +#else + #define PLATFORM_DESKTOP 1 +#endif +---- + +This approach leverages the standard predefined macro `__ANDROID__` which is automatically defined by the compiler when building for Android platforms. These platform macros are then used throughout the code to conditionally compile platform-specific code. + +=== Consistent Class Structure + +To maintain a clean and consistent codebase, we use the same class name (`HelloTriangleApplication`) for both platforms. This makes it easier to understand the code and reduces the need for platform-specific branches: + +[source,cpp] +---- +// Cross-platform application class +class HelloTriangleApplication { +public: +#if PLATFORM_DESKTOP + // Desktop constructor + HelloTriangleApplication() { + // No Android-specific initialization needed + } +#else + // Android constructor + HelloTriangleApplication(android_app* app) : androidApp(app) { + // Android-specific initialization + } +#endif + // ... rest of the class ... +}; +---- + +=== Platform-Specific Includes + +Different platforms require different header files. We use preprocessor directives to include the appropriate headers: + +[source,cpp] +---- +// Platform-specific includes +#if PLATFORM_ANDROID + // Android-specific includes + #include + #include + #include + #include +#else + // Desktop-specific includes + #define GLFW_INCLUDE_VULKAN + #include + #include + #include +#endif +---- + +=== Cross-Platform File Loading + +File loading is one of the key differences between desktop and Android platforms. 
On desktop, we load files from the filesystem, while on Android, we load them from the APK's assets. We've created a cross-platform file loading function that works on both platforms: + +[source,cpp] +---- +// Cross-platform file reading function +std::vector<char> readFile(const std::string& filename, std::optional<AAssetManager*> assetManager = std::nullopt) { +#if PLATFORM_ANDROID + // On Android, use asset manager if provided + if (assetManager.has_value() && *assetManager != nullptr) { + // Open the asset + AAsset* asset = AAssetManager_open(*assetManager, filename.c_str(), AASSET_MODE_BUFFER); + // ... read file from asset ... + return buffer; + } +#endif + + // Desktop version or Android fallback to filesystem + std::ifstream file(filename, std::ios::ate | std::ios::binary); + // ... read file from filesystem ... + return buffer; +} +---- + +=== Platform-Specific Entry Points + +Each platform has its own entry point. On desktop, we use the standard `main()` function, while on Android, we use the `android_main()` function: + +[source,cpp] +---- +// Platform-specific entry point +#if PLATFORM_ANDROID +// Android main entry point +void android_main(android_app* app) { + // Android-specific initialization + try { + HelloTriangleApplication vulkanApp(app); + vulkanApp.run(); + } catch (const std::exception& e) { + LOGE("Exception caught: %s", e.what()); + } +} +#else +// Desktop main entry point +int main() { + try { + HelloTriangleApplication app; + app.run(); + } catch (const std::exception& e) { + std::cerr << e.what() << std::endl; + return EXIT_FAILURE; + } + return EXIT_SUCCESS; +} +#endif +---- + +=== Build System Integration + +Our cross-platform approach leverages the compiler's built-in platform detection capabilities. Since the `__ANDROID__` macro is automatically defined by the compiler when building for Android, we don't need to explicitly define platform macros in our build system. + +This approach has several advantages: +1. 
*Simplicity*: We don't need to maintain platform-specific compile definitions in our CMake files. +2. *Reliability*: We rely on standard compiler behavior rather than custom definitions. +3. *Maintainability*: Less build system configuration means fewer potential points of failure. + +By using the compiler's predefined macros, we can maintain a single codebase that works on both desktop and Android platforms, with minimal platform-specific code. When we make improvements to our rendering code, both desktop and Android versions benefit automatically. + +== Shader Handling on Android + +Now that we've covered the core native implementation, let's address another important aspect of Vulkan development on Android: shader handling. Shaders are a critical part of any Vulkan application, and we need to ensure they're properly compiled and loaded on Android. + +In our approach, we compile shaders locally during the build process, similar to how it's done in the main project. This strategy offers several significant advantages: + +1. *Consistency*: We use the same shader source files for both desktop and Android builds, ensuring identical visual results across platforms. +2. *Maintainability*: When we need to update a shader, we only need to change it in one place, and both desktop and Android versions benefit. +3. *Build-time validation*: Shader compilation errors are caught during the build process, not at runtime, making debugging much easier. + +=== Local Shader Compilation + +We've set up our CMake configuration to compile shaders locally during the build process: + +1. 
*Define a shader building function*: ++ +[source,cmake] +---- +function(add_shaders_target TARGET) + cmake_parse_arguments("SHADER" "" "CHAPTER_NAME" "SOURCES" ${ARGN}) + set(SHADERS_DIR ${SHADER_CHAPTER_NAME}/shaders) + add_custom_command( + OUTPUT ${SHADERS_DIR} + COMMAND ${CMAKE_COMMAND} -E make_directory ${SHADERS_DIR} + ) + add_custom_command( + OUTPUT ${SHADERS_DIR}/frag.spv ${SHADERS_DIR}/vert.spv + COMMAND glslang::validator + ARGS --target-env vulkan1.0 ${SHADER_SOURCES} --quiet + WORKING_DIRECTORY ${SHADERS_DIR} + DEPENDS ${SHADERS_DIR} ${SHADER_SOURCES} + COMMENT "Compiling Shaders" + VERBATIM + ) + add_custom_target(${TARGET} DEPENDS ${SHADERS_DIR}/frag.spv ${SHADERS_DIR}/vert.spv) +endfunction() +---- + +2. *Copy shader source files from the main project*: ++ +[source,cmake] +---- +# Set up shader compilation for 34_android +set(SHADER_SOURCE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../attachments") +set(SHADER_OUTPUT_DIR "${CMAKE_CURRENT_BINARY_DIR}/shaders") +file(MAKE_DIRECTORY ${SHADER_OUTPUT_DIR}) + +# Copy shader source files to the build directory +configure_file( + "${SHADER_SOURCE_DIR}/27_shader_depth.frag" + "${SHADER_OUTPUT_DIR}/27_shader_depth.frag" + COPYONLY +) +configure_file( + "${SHADER_SOURCE_DIR}/27_shader_depth.vert" + "${SHADER_OUTPUT_DIR}/27_shader_depth.vert" + COPYONLY +) +---- + +3. *Compile the shaders*: ++ +[source,cmake] +---- +# Compile shaders +set(SHADER_SOURCES "${SHADER_OUTPUT_DIR}/27_shader_depth.frag" "${SHADER_OUTPUT_DIR}/27_shader_depth.vert") +add_shaders_target(android_shaders CHAPTER_NAME "${SHADER_OUTPUT_DIR}" SOURCES ${SHADER_SOURCES}) + +# Add dependency on shader compilation +add_dependencies(vulkan_tutorial_android android_shaders) +---- + +4. 
*Reference the compiled shaders in the Gradle build*: ++ +[source,groovy] +---- +sourceSets { + main { + assets { + srcDirs = [ + // Point to the main project's assets + '../../../../', // For models and textures in the attachments directory + // Use locally compiled shaders from the build directory for all ABIs + '.externalNativeBuild/cmake/debug/arm64-v8a/shaders', + '.externalNativeBuild/cmake/debug/armeabi-v7a/shaders', + // ... other ABIs ... + ] + } + } +} +---- + +=== Loading Assets in a Cross-Platform Way + +Our unified readFile function makes it easy to load assets in a cross-platform way. Here's how we use it to load shader files: + +[source,cpp] +---- +// Load shader files using cross-platform function +#if PLATFORM_ANDROID +std::optional<AAssetManager*> optionalAssetManager = assetManager; +#else +std::optional<AAssetManager*> optionalAssetManager = std::nullopt; +#endif +std::vector<char> vertShaderCode = readFile("shaders/vert.spv", optionalAssetManager); +std::vector<char> fragShaderCode = readFile("shaders/frag.spv", optionalAssetManager); +---- + +We use the same approach to load texture images and model files: + +[source,cpp] +---- +// Load texture image +#if PLATFORM_ANDROID +std::optional<AAssetManager*> optionalAssetManager = assetManager; +std::vector<char> imageData = readFile(TEXTURE_PATH, optionalAssetManager); +// Process the image data... +#else +// Load directly from filesystem +// ... +#endif +---- + +This unified approach gives us the best of both worlds: we use the same code structure for both platforms, with the platform-specific differences handled by the readFile function itself. This makes our code more maintainable and easier to understand. + +== Building and Running + +Now that we've set up our Android project with all the necessary components, let's put everything together and run our Vulkan application on an Android device. + +The process is straightforward: + +1. Open the project in Android Studio. +2. Connect an Android device or start an emulator (make sure it supports Vulkan). +3. 
Click the "Run" button in Android Studio. + +Android Studio will handle the rest - it will build the application, compile the shaders, package everything into an APK, install it on the device/emulator, and launch it. If everything is set up correctly, you should see your Vulkan application running on Android, rendering the same scene as on desktop. + +== Conclusion + +In this chapter, we've explored how to take our Vulkan application from desktop to mobile by adapting it for Android. We've seen that while the core Vulkan API remains the same across platforms, the surrounding ecosystem requires platform-specific adaptations. + +Our approach demonstrates several key principles that you can apply to your own Vulkan projects: + +1. *Code Reuse*: By structuring our project properly, we can use the same core rendering code (34_android.cpp) for both desktop and Android platforms, minimizing duplication and maintenance overhead. + +2. *Modern Android Integration*: We leverage the GameActivity from the Android Game SDK for better performance and more streamlined integration compared to the older NativeActivity approach. + +3. *Efficient Asset Management*: Instead of duplicating assets, we reference them from the main project, ensuring consistency and reducing APK size. + +4. *Local Shader Compilation*: By compiling shaders during the build process, we catch errors early and ensure compatibility across platforms. + +5. *Minimal Platform-Specific Code*: We isolate platform-specific code in a small bridge file, keeping our core Vulkan implementation clean and portable. + +This approach not only makes it easier to maintain and update our application but also provides a solid foundation for expanding to other platforms in the future. When you make improvements to your core rendering code, both desktop and Android versions benefit automatically. + +The complete Android example can be found in the attachments/android directory. 
Feel free to use it as a template for your own Vulkan projects on Android. + +Remember that Vulkan HPP is not included by default in the Android NDK, so you'll need to download it separately from the https://github.com/KhronosGroup/Vulkan-Hpp[Vulkan-Hpp GitHub repository] or use the version included in the Vulkan SDK. diff --git a/en/15_GLTF_KTX2_Migration.adoc b/en/15_GLTF_KTX2_Migration.adoc new file mode 100644 index 00000000..cf1c086a --- /dev/null +++ b/en/15_GLTF_KTX2_Migration.adoc @@ -0,0 +1,674 @@ +:pp: {plus}{plus} + += Migrating to Modern Asset Formats: glTF and KTX2 + +== Introduction + +In previous chapters, we've been using tinyobjloader to load 3D models in the Wavefront OBJ format and stb_image to load textures in common image formats like PNG and JPEG. While these libraries and formats are simple and widely supported, modern graphics applications often benefit from more advanced asset formats. + +In this chapter, we'll explore how to migrate from: + +1. Wavefront OBJ (loaded with tinyobjloader) to glTF (loaded with tinygltf) +2. Common image formats like PNG (loaded with stb_image) to KTX2 (loaded with the KTX library) + +This migration offers several advantages: + +* **More comprehensive model data**: glTF supports animations, skeletal rigs, PBR materials, and more +* **GPU-optimized textures**: KTX2 supports compressed texture formats, mipmaps, and other GPU-friendly features +* **Industry standard**: Both glTF and KTX2 are Khronos standards designed specifically for modern graphics APIs + +Let's dive into the migration process and see how to adapt our Vulkan application to use these modern formats. + +== Understanding glTF + +=== What is glTF? + +https://www.khronos.org/gltf/[glTF] (GL Transmission Format) is a royalty-free specification for the efficient transmission and loading of 3D scenes and models. Developed by the Khronos Group, glTF is designed to be a "JPEG for 3D" - a common publishing format for 3D content. 
+ +Key features of glTF include: + +* **Compact file size**: Binary data is stored efficiently +* **Fast loading**: Minimizes processing needed at load time +* **Complete 3D scene representation**: Includes meshes, materials, textures, animations, and more +* **Runtime-ready**: Data is stored in formats that can be directly used by the GPU +* **Extensible**: The format can be extended with new capabilities + +=== Comparing OBJ and glTF + +Let's compare the OBJ format with glTF: + +[cols="1,1,1"] +|=== +|Feature |OBJ |glTF + +|File format +|Text-based +|JSON + binary data (GLB option for single file) + +|Supported data +|Geometry, basic materials, texture coordinates +|Geometry, PBR materials, animations, skeletons, scenes, cameras, etc. + +|Material system +|Basic (MTL files) +|Physically-Based Rendering (PBR) + +|Animation support +|None +|Keyframe and skeletal animations + +|Coordinate system +|Right-handed +|Right-handed, Y-up + +|Industry adoption +|Legacy standard +|Modern standard for real-time 3D +|=== + +== Understanding KTX2 + +=== What is KTX2? + +https://www.khronos.org/ktx/[KTX2] (Khronos Texture 2.0) is a container file format for storing texture data optimized for GPU usage. It's designed to work efficiently with modern graphics APIs like Vulkan, OpenGL, and DirectX. 
+ +Key features of KTX2 include: + +* **GPU-ready formats**: Supports all GPU texture formats including compressed formats +* **Mipmap storage**: Efficiently stores complete mipmap chains +* **Metadata**: Includes information about the texture's properties +* **Supercompression**: Supports additional compression like Basis Universal +* **Direct uploads**: Data can often be uploaded directly to the GPU without processing + +=== Comparing PNG/JPEG and KTX2 + +Let's compare traditional image formats with KTX2: + +[cols="1,1,1"] +|=== +|Feature |PNG/JPEG |KTX2 + +|File format +|General-purpose image format +|GPU-optimized texture container + +|Compression +|General-purpose (PNG) or lossy (JPEG) +|GPU texture compression (BC, ETC, ASTC) + supercompression + +|Mipmaps +|Not supported +|Built-in mipmap chain support + +|GPU upload +|Requires conversion +|Can be directly uploaded to GPU + +|Metadata +|Limited +|Comprehensive texture metadata + +|Supported features +|Basic 2D images +|All GPU texture types (2D, 3D, cubemaps, arrays) +|=== + +== Migrating from tinyobjloader to tinygltf + +=== Setting Up tinygltf + +First, we need to include the tinygltf library instead of tinyobjloader: + +[,c{pp}] +---- +// Replace this: +#define TINYOBJLOADER_IMPLEMENTATION +#include <tiny_obj_loader.h> + +// With this: +#define TINYGLTF_IMPLEMENTATION +#define STB_IMAGE_WRITE_IMPLEMENTATION +#include <tiny_gltf.h> +---- + +Note that tinygltf uses stb_image internally for image loading, but we'll be replacing the texture loading code with KTX2 later.
+ +=== Loading a glTF Model + +Now, let's modify our `loadModel()` function to use tinygltf instead of tinyobjloader: + +[,c{pp}] +---- +void loadModel() { + // Use tinygltf to load the model instead of tinyobjloader + tinygltf::Model model; + tinygltf::TinyGLTF loader; + std::string err; + std::string warn; + + bool ret = loader.LoadASCIIFromFile(&model, &err, &warn, MODEL_PATH); + + if (!warn.empty()) { + std::cout << "glTF warning: " << warn << std::endl; + } + + if (!err.empty()) { + std::cout << "glTF error: " << err << std::endl; + } + + if (!ret) { + throw std::runtime_error("Failed to load glTF model"); + } + + // Process all meshes in the model + std::unordered_map uniqueVertices{}; + + for (const auto& mesh : model.meshes) { + for (const auto& primitive : mesh.primitives) { + // Get indices + const tinygltf::Accessor& indexAccessor = model.accessors[primitive.indices]; + const tinygltf::BufferView& indexBufferView = model.bufferViews[indexAccessor.bufferView]; + const tinygltf::Buffer& indexBuffer = model.buffers[indexBufferView.buffer]; + + // Get vertex positions + const tinygltf::Accessor& posAccessor = model.accessors[primitive.attributes.at("POSITION")]; + const tinygltf::BufferView& posBufferView = model.bufferViews[posAccessor.bufferView]; + const tinygltf::Buffer& posBuffer = model.buffers[posBufferView.buffer]; + + // Get texture coordinates if available + bool hasTexCoords = primitive.attributes.find("TEXCOORD_0") != primitive.attributes.end(); + const tinygltf::Accessor* texCoordAccessor = nullptr; + const tinygltf::BufferView* texCoordBufferView = nullptr; + const tinygltf::Buffer* texCoordBuffer = nullptr; + + if (hasTexCoords) { + texCoordAccessor = &model.accessors[primitive.attributes.at("TEXCOORD_0")]; + texCoordBufferView = &model.bufferViews[texCoordAccessor->bufferView]; + texCoordBuffer = &model.buffers[texCoordBufferView->buffer]; + } + + // Process vertices + for (size_t i = 0; i < posAccessor.count; i++) { + Vertex vertex{}; + + // 
Get position + const float* pos = reinterpret_cast<const float*>(&posBuffer.data[posBufferView.byteOffset + posAccessor.byteOffset + i * 12]); + vertex.pos = {pos[0], pos[1], pos[2]}; + + // Get texture coordinates if available + if (hasTexCoords) { + const float* texCoord = reinterpret_cast<const float*>(&texCoordBuffer->data[texCoordBufferView->byteOffset + texCoordAccessor->byteOffset + i * 8]); + vertex.texCoord = {texCoord[0], 1.0f - texCoord[1]}; + } else { + vertex.texCoord = {0.0f, 0.0f}; + } + + // Set default color + vertex.color = {1.0f, 1.0f, 1.0f}; + + // Add vertex if unique + if (!uniqueVertices.contains(vertex)) { + uniqueVertices[vertex] = static_cast<uint32_t>(vertices.size()); + vertices.push_back(vertex); + } + } + + // Process indices + const unsigned char* indexData = &indexBuffer.data[indexBufferView.byteOffset + indexAccessor.byteOffset]; + + // Handle different index component types + if (indexAccessor.componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_SHORT) { + const uint16_t* indices16 = reinterpret_cast<const uint16_t*>(indexData); + for (size_t i = 0; i < indexAccessor.count; i++) { + Vertex vertex = vertices[indices16[i]]; + indices.push_back(uniqueVertices[vertex]); + } + } else if (indexAccessor.componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_INT) { + const uint32_t* indices32 = reinterpret_cast<const uint32_t*>(indexData); + for (size_t i = 0; i < indexAccessor.count; i++) { + Vertex vertex = vertices[indices32[i]]; + indices.push_back(uniqueVertices[vertex]); + } + } else if (indexAccessor.componentType == TINYGLTF_COMPONENT_TYPE_UNSIGNED_BYTE) { + const uint8_t* indices8 = reinterpret_cast<const uint8_t*>(indexData); + for (size_t i = 0; i < indexAccessor.count; i++) { + Vertex vertex = vertices[indices8[i]]; + indices.push_back(uniqueVertices[vertex]); + } + } + } + } +} +---- + +The key differences in this implementation compared to the tinyobjloader version are: + +1. **Data structure**: glTF uses a more complex data structure with accessors, buffer views, and buffers +2.
**Attribute access**: We need to navigate through these structures to access vertex data +3. **Multiple meshes and primitives**: glTF models can contain multiple meshes, each with multiple primitives +4. **Component types**: We need to handle different index component types (8-bit, 16-bit, 32-bit) + +=== Advanced glTF Features + +While our basic implementation only extracts geometry and texture coordinates, glTF supports many more features that you might want to use: + +* **Materials**: Access PBR material properties through `primitive.material` +* **Animations**: Process animation data in `model.animations` +* **Skeletons**: Handle skeletal data in `model.skins` +* **Scenes and nodes**: Process scene hierarchy through `model.scenes` and `model.nodes` + +For a complete application, you would typically process these additional features to take full advantage of glTF. + +== Migrating from stb_image to KTX + +=== Setting Up KTX + +First, we need to include the KTX library: + +[,c{pp}] +---- +// Replace this: +#define STB_IMAGE_IMPLEMENTATION +#include + +// With this: +#include +---- + +=== Loading a KTX2 Texture + +Now, let's modify our `createTextureImage()` function to use KTX instead of stb_image: + +[,c{pp}] +---- +void createTextureImage() { + // Load KTX2 texture instead of using stb_image + ktxTexture* kTexture; + KTX_error_code result = ktxTexture_CreateFromNamedFile( + TEXTURE_PATH.c_str(), + KTX_TEXTURE_CREATE_LOAD_IMAGE_DATA_BIT, + &kTexture); + + if (result != KTX_SUCCESS) { + throw std::runtime_error("failed to load ktx texture image!"); + } + + // Get texture dimensions and data + uint32_t texWidth = kTexture->baseWidth; + uint32_t texHeight = kTexture->baseHeight; + ktx_size_t imageSize = ktxTexture_GetImageSize(kTexture, 0); + ktx_uint8_t* ktxTextureData = ktxTexture_GetData(kTexture); + + // Create staging buffer + vk::raii::Buffer stagingBuffer({}); + vk::raii::DeviceMemory stagingBufferMemory({}); + createBuffer(imageSize, 
vk::BufferUsageFlagBits::eTransferSrc, vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent, stagingBuffer, stagingBufferMemory); + + // Copy texture data to staging buffer + void* data = stagingBufferMemory.mapMemory(0, imageSize); + memcpy(data, ktxTextureData, imageSize); + stagingBufferMemory.unmapMemory(); + + // Determine the Vulkan format from KTX format + vk::Format textureFormat = vk::Format::eR8G8B8A8Srgb; // Default format, should be determined from KTX metadata + + // Create the texture image + createImage(texWidth, texHeight, textureFormat, vk::ImageTiling::eOptimal, + vk::ImageUsageFlagBits::eTransferDst | vk::ImageUsageFlagBits::eSampled, + vk::MemoryPropertyFlagBits::eDeviceLocal, textureImage, textureImageMemory); + + // Copy data from staging buffer to texture image + transitionImageLayout(textureImage, vk::ImageLayout::eUndefined, vk::ImageLayout::eTransferDstOptimal); + copyBufferToImage(stagingBuffer, textureImage, texWidth, texHeight); + transitionImageLayout(textureImage, vk::ImageLayout::eTransferDstOptimal, vk::ImageLayout::eShaderReadOnlyOptimal); + + // Cleanup KTX resources + ktxTexture_Destroy(kTexture); +} +---- + +The key differences in this implementation compared to the stb_image version are: + +1. **Loading API**: We use the KTX API to load the texture +2. **Texture metadata**: KTX provides metadata about the texture's properties +3. **Resource cleanup**: We need to explicitly destroy the KTX texture object + +=== Advanced KTX Features + +This basic implementation only handles simple 2D textures, but KTX2 supports many more features: + +==== Handling Mipmaps + +KTX2 files can contain pre-generated mipmaps. Here's how to use them: + +[,c{pp}] +---- +// Get mipmap levels +uint32_t mipLevels = kTexture->numLevels; + +// Create image with mipmap support +vk::ImageCreateInfo imageInfo{ + // ... other parameters ... + .mipLevels = mipLevels, + // ... other parameters ... 
+}; + +// Copy each mip level +for (uint32_t i = 0; i < mipLevels; i++) { + ktx_size_t offset; + KTX_error_code result = ktxTexture_GetImageOffset(kTexture, i, 0, 0, &offset); + + // ... copy this mip level to the image ... +} +---- + +==== Using Compressed Texture Formats + +KTX2 supports GPU texture compression formats. Here's how to handle them: + +[,c{pp}] +---- +// Determine the Vulkan format from KTX format +vk::Format textureFormat; +switch (kTexture->vkFormat) { + case VK_FORMAT_BC7_SRGB_BLOCK: + textureFormat = vk::Format::eBc7SrgbBlock; + break; + case VK_FORMAT_BC5_UNORM_BLOCK: + textureFormat = vk::Format::eBc5UnormBlock; + break; + // ... other format mappings ... + default: + textureFormat = vk::Format::eR8G8B8A8Srgb; + break; +} +---- + +==== Handling Cubemaps and Texture Arrays + +KTX2 can store cubemaps and texture arrays: + +[,c{pp}] +---- +// Check if the texture is a cubemap +bool isCubemap = kTexture->isCubemap; + +// Get the number of layers +uint32_t layerCount = kTexture->numLayers; + +// Create appropriate image +vk::ImageCreateInfo imageInfo{ + // ... other parameters ... + .imageType = vk::ImageType::e2D, + .arrayLayers = layerCount, + .flags = isCubemap ? vk::ImageCreateFlagBits::eCubeCompatible : vk::ImageCreateFlags(), + // ... other parameters ... 
+}; +---- + +== Converting Assets to glTF and KTX2 + +=== Converting OBJ to glTF + +To convert existing OBJ files to glTF, you can use various tools: + +* **Blender**: Open the OBJ file and export as glTF +* **obj2gltf**: A command-line tool for converting OBJ to glTF +* **assimp**: A library that can convert between various 3D formats + +Example using obj2gltf: + +[,bash] +---- +obj2gltf -i model.obj -o model.gltf +---- + +== Working with KTX2 Files + +=== Creating KTX2 Files + +There are several ways to create KTX2 files: + +==== Using the KTX-Software Tools + +The KTX-Software package provides command-line tools for creating KTX2 files: + +* **toktx**: The primary tool for creating KTX2 files from existing images + +Basic usage: + +[,bash] +---- +# Create a basic KTX2 file +toktx texture.ktx2 texture.png + +# Create a KTX2 file with mipmaps +toktx --mipmap texture.ktx2 texture.png + +# Create a KTX2 file with Basis Universal compression +toktx --bcmp texture.ktx2 texture.png + +# Create a KTX2 file with specific GPU compression format (BC7) +toktx --bcmp --format BC7_RGBA texture.ktx2 texture.png + +# Create a cubemap KTX2 file +toktx --cubemap cubemap.ktx2 posx.png negx.png posy.png negy.png posz.png negz.png +---- + +==== Using the KTX Library API + +You can also create KTX2 files programmatically using the KTX library API: + +[,c{pp}] +---- +#include + +// Create a new KTX2 texture +ktxTexture2* texture; +ktxTextureCreateInfo createInfo = { + .vkFormat = VK_FORMAT_R8G8B8A8_SRGB, + .baseWidth = 512, + .baseHeight = 512, + .baseDepth = 1, + .numDimensions = 2, + .numLevels = 1, + .numLayers = 1, + .numFaces = 1, + .isArray = KTX_FALSE, + .generateMipmaps = KTX_FALSE +}; + +KTX_error_code result = ktxTexture2_Create(&createInfo, KTX_TEXTURE_CREATE_ALLOC_STORAGE, &texture); + +// Set image data +uint32_t* imageData = new uint32_t[512 * 512]; +// ... fill image data ... 
+ktxTexture_SetImageFromMemory(ktxTexture(texture), 0, 0, 0, imageData, 512 * 512 * 4); + +// Write to file +ktxTexture_WriteToNamedFile(ktxTexture(texture), "output.ktx2"); + +// Clean up +ktxTexture_Destroy(ktxTexture(texture)); +delete[] imageData; +---- + +==== Using Image Editing Software + +Some image editing and 3D modeling software can export directly to KTX2: + +* **Substance Designer**: Can export textures directly to KTX2 format +* **Blender**: With plugins, can export textures to KTX2 +* **GIMP**: With the KTX plugin, can save images as KTX2 + +=== Converting from Other Formats to KTX2 + +KTX2 files can be created from various popular image formats: + +==== From PNG/JPEG/TIFF + +The simplest conversion is from standard image formats using toktx: + +[,bash] +---- +# Convert PNG to KTX2 +toktx texture.ktx2 texture.png + +# Convert JPEG to KTX2 +toktx texture.ktx2 texture.jpg + +# Convert TIFF to KTX2 +toktx texture.ktx2 texture.tiff +---- + +==== From DDS (DirectX Texture Format) + +DDS is another GPU-optimized texture format commonly used with DirectX: + +[,bash] +---- +# Using texconv to convert DDS to PNG first +texconv -ft png texture.dds + +# Then convert PNG to KTX2 +toktx texture.ktx2 texture.png +---- + +Alternatively, you can use the Khronos Texture Tools: + +[,bash] +---- +ktx2ktx2 --convert texture.dds texture.ktx2 +---- + +==== From HDR/EXR (High Dynamic Range Formats) + +For HDR textures: + +[,bash] +---- +# Convert HDR to KTX2 +toktx --hdr texture.ktx2 texture.hdr + +# Convert EXR to KTX2 (may require intermediate conversion) +toktx --hdr texture.ktx2 texture.exr +---- + +==== From PSD (Photoshop) + +For Photoshop files: + +[,bash] +---- +# Export PSD as PNG first +# Then convert to KTX2 +toktx texture.ktx2 texture.png +---- + +=== Optimizing KTX2 Files + +To get the most out of KTX2 files, consider these optimization techniques: + +==== Compression Options + +KTX2 supports various compression methods: + +[,bash] +---- +# Basis Universal 
compression (highly portable) +toktx --bcmp texture.ktx2 texture.png + +# ASTC compression (good for mobile) +toktx --format ASTC_4x4_RGBA texture.ktx2 texture.png + +# BC7 compression (good for desktop) +toktx --format BC7_RGBA texture.ktx2 texture.png + +# ETC2 compression (good for Android) +toktx --format ETC2_RGBA texture.ktx2 texture.png +---- + +==== Mipmap Generation + +Mipmaps improve rendering performance and quality: + +[,bash] +---- +# Generate mipmaps +toktx --mipmap texture.ktx2 texture.png + +# Generate mipmaps with specific filter +toktx --mipmap --filter lanczos texture.ktx2 texture.png +---- + +==== Metadata + +KTX2 files can include metadata: + +[,bash] +---- +# Add key-value metadata +toktx --mipmap --key "author" --value "Your Name" texture.ktx2 texture.png +---- + +=== Tools for Working with KTX2 Files + +Several tools are available for working with KTX2 files: + +==== Command-line Tools + +* **KTX-Software Suite**: + * `toktx`: Create KTX2 files + * `ktx2ktx2`: Convert between KTX versions + * `ktxinfo`: Display information about KTX files + * `ktxsc`: Apply supercompression to KTX2 files + * `ktxunpack`: Unpack a KTX file to individual images + +==== Libraries and SDKs + +* **KTX-Software Library**: C/C++ library for reading, writing, and processing KTX files +* **libktx**: The core library used by KTX-Software +* **Basis Universal**: Compression technology used in KTX2 +* **Vulkan SDK**: Includes KTX tools and libraries +* **glTF-Transform**: JavaScript library that can process KTX2 textures in glTF files + +==== Viewers and Debuggers + +* **KTX Load Test**: Part of KTX-Software, for viewing KTX files +* **RenderDoc**: Graphics debugger that can inspect KTX2 textures +* **Khronos Texture Tools**: Includes viewers for KTX files +* **glTF Viewer**: Many glTF viewers support KTX2 textures + +==== Integration with Game Engines + +* **Unity**: Supports KTX2 through plugins +* **Unreal Engine**: Supports KTX2 through plugins +* **Godot**: Has 
KTX2 support in development +* **Three.js**: Supports KTX2 textures +* **Babylon.js**: Supports KTX2 textures + +=== Converting Images to KTX2 + +To convert existing image files to KTX2, you can use: + +* **toktx**: A command-line tool included with the KTX-Software package +* **KTX-Software**: A library with tools for creating and manipulating KTX files + +Example using toktx to create a KTX2 file with Basis Universal compression: + +[,bash] +---- +toktx --bcmp texture.ktx2 texture.png +---- + +== Conclusion + +Migrating from OBJ/PNG to glTF/KTX2 brings significant benefits for modern graphics applications: + +* **Better performance**: Optimized formats for GPU usage +* **More features**: Support for advanced 3D features and texture formats +* **Industry standards**: Formats designed specifically for modern graphics APIs + +While the migration requires some code changes, the benefits in terms of performance, features, and future-proofing make it worthwhile for serious graphics applications. + +link:/attachments/35_gltf_ktx.cpp[C{pp} code] diff --git a/en/16_Multiple_Objects.adoc b/en/16_Multiple_Objects.adoc new file mode 100644 index 00000000..4bc0f590 --- /dev/null +++ b/en/16_Multiple_Objects.adoc @@ -0,0 +1,328 @@ +:pp: {plus}{plus} + += Rendering Multiple Objects + +== Introduction + +In this chapter, we'll extend our Vulkan application to render multiple objects in the scene. So far, we've been rendering a single model, but real-world applications typically need to display many objects. This tutorial will show you how to efficiently manage and render multiple objects while reusing as many resources as possible. + +== Overview + +When rendering multiple objects, we need to consider which resources should be: +1. *Shared across all objects* - to minimize memory usage and state changes +2. 
*Duplicated for each object* - to allow for independent positioning and appearance + +Here's a quick reference for what typically falls into each category: + +*Shared resources:* + +* Vertex and index buffers (when objects use the same mesh) +* Textures and samplers (when objects use the same textures) +* Pipeline objects and pipeline layouts +* Render passes +* Command pools + +*Per-object resources:* + +* Transformation matrices (position, rotation, scale) +* Uniform buffers containing those matrices +* Descriptor sets that reference those uniform buffers +* Push constants (for small, frequently changing data) + +== Implementation + +Let's walk through the key changes needed to render multiple objects: + +=== Define a GameObject Structure + +First, we'll create a structure to hold per-object data: + +[,c{pp}] +---- +// Define a structure to hold per-object data +struct GameObject { + // Transform properties + glm::vec3 position = {0.0f, 0.0f, 0.0f}; + glm::vec3 rotation = {0.0f, 0.0f, 0.0f}; + glm::vec3 scale = {1.0f, 1.0f, 1.0f}; + + // Uniform buffer for this object (one per frame in flight) + std::vector<vk::raii::Buffer> uniformBuffers; + std::vector<vk::raii::DeviceMemory> uniformBuffersMemory; + std::vector<void*> uniformBuffersMapped; + + // Descriptor sets for this object (one per frame in flight) + std::vector<vk::raii::DescriptorSet> descriptorSets; + + // Calculate model matrix based on position, rotation, and scale + glm::mat4 getModelMatrix() const { + glm::mat4 model = glm::mat4(1.0f); + model = glm::translate(model, position); + model = glm::rotate(model, rotation.x, glm::vec3(1.0f, 0.0f, 0.0f)); + model = glm::rotate(model, rotation.y, glm::vec3(0.0f, 1.0f, 0.0f)); + model = glm::rotate(model, rotation.z, glm::vec3(0.0f, 0.0f, 1.0f)); + model = glm::scale(model, scale); + return model; + } +}; +---- + +This structure encapsulates: +* The object's transform (position, rotation, scale) +* Per-object uniform buffers (one for each frame in flight) +* Per-object descriptor sets (one for each frame in flight) +* A helper
method to calculate the model matrix + +=== Create an Array of GameObjects + +In our application class, we'll replace the single set of uniform buffers and descriptor sets with an array of GameObjects: + +[,c{pp}] +---- +// Define the number of objects to render +constexpr int MAX_OBJECTS = 3; + +// In the VulkanApplication class: +// Array of game objects to render +std::array gameObjects; +---- + +=== Initialize the GameObjects + +We'll add a new method to set up our game objects with different positions, rotations, and scales: + +[,c{pp}] +---- +// Initialize the game objects with different positions, rotations, and scales +void setupGameObjects() { + // Object 1 - Center + gameObjects[0].position = {0.0f, 0.0f, 0.0f}; + gameObjects[0].rotation = {0.0f, 0.0f, 0.0f}; + gameObjects[0].scale = {1.0f, 1.0f, 1.0f}; + + // Object 2 - Left + gameObjects[1].position = {-2.0f, 0.0f, -1.0f}; + gameObjects[1].rotation = {0.0f, glm::radians(45.0f), 0.0f}; + gameObjects[1].scale = {0.75f, 0.75f, 0.75f}; + + // Object 3 - Right + gameObjects[2].position = {2.0f, 0.0f, -1.0f}; + gameObjects[2].rotation = {0.0f, glm::radians(-45.0f), 0.0f}; + gameObjects[2].scale = {0.75f, 0.75f, 0.75f}; +} +---- + +This method is called from `initVulkan()` after loading the model but before creating uniform buffers. 
+ +=== Create Uniform Buffers for Each Object + +Instead of creating a single set of uniform buffers, we'll create them for each object: + +[,c{pp}] +---- +// Create uniform buffers for each object +void createUniformBuffers() { + // For each game object + for (auto& gameObject : gameObjects) { + gameObject.uniformBuffers.clear(); + gameObject.uniformBuffersMemory.clear(); + gameObject.uniformBuffersMapped.clear(); + + // Create uniform buffers for each frame in flight + for (size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) { + vk::DeviceSize bufferSize = sizeof(UniformBufferObject); + vk::raii::Buffer buffer({}); + vk::raii::DeviceMemory bufferMem({}); + createBuffer(bufferSize, vk::BufferUsageFlagBits::eUniformBuffer, + vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent, + buffer, bufferMem); + gameObject.uniformBuffers.emplace_back(std::move(buffer)); + gameObject.uniformBuffersMemory.emplace_back(std::move(bufferMem)); + gameObject.uniformBuffersMapped.emplace_back(gameObject.uniformBuffersMemory[i].mapMemory(0, bufferSize)); + } + } +} +---- + +=== Update the Descriptor Pool Size + +We need to increase the descriptor pool size to accommodate all objects: + +[,c{pp}] +---- +void createDescriptorPool() { + // We need MAX_OBJECTS * MAX_FRAMES_IN_FLIGHT descriptor sets + std::array<vk::DescriptorPoolSize, 2> poolSize { + vk::DescriptorPoolSize(vk::DescriptorType::eUniformBuffer, MAX_OBJECTS * MAX_FRAMES_IN_FLIGHT), + vk::DescriptorPoolSize(vk::DescriptorType::eCombinedImageSampler, MAX_OBJECTS * MAX_FRAMES_IN_FLIGHT) + }; + vk::DescriptorPoolCreateInfo poolInfo{ + .flags = vk::DescriptorPoolCreateFlagBits::eFreeDescriptorSet, + .maxSets = MAX_OBJECTS * MAX_FRAMES_IN_FLIGHT, + .poolSizeCount = static_cast<uint32_t>(poolSize.size()), + .pPoolSizes = poolSize.data() + }; + descriptorPool = vk::raii::DescriptorPool(device, poolInfo); +} +---- + +=== Create Descriptor Sets for Each Object + +Similarly, we'll create descriptor sets for each object: + +[,c{pp}] +---- +void
createDescriptorSets() { + // For each game object + for (auto& gameObject : gameObjects) { + // Create descriptor sets for each frame in flight + std::vector layouts(MAX_FRAMES_IN_FLIGHT, *descriptorSetLayout); + vk::DescriptorSetAllocateInfo allocInfo{ + .descriptorPool = *descriptorPool, + .descriptorSetCount = static_cast(layouts.size()), + .pSetLayouts = layouts.data() + }; + + gameObject.descriptorSets.clear(); + gameObject.descriptorSets = device.allocateDescriptorSets(allocInfo); + + for (size_t i = 0; i < MAX_FRAMES_IN_FLIGHT; i++) { + vk::DescriptorBufferInfo bufferInfo{ + .buffer = *gameObject.uniformBuffers[i], + .offset = 0, + .range = sizeof(UniformBufferObject) + }; + vk::DescriptorImageInfo imageInfo{ + .sampler = *textureSampler, + .imageView = *textureImageView, + .imageLayout = vk::ImageLayout::eShaderReadOnlyOptimal + }; + std::array descriptorWrites{ + vk::WriteDescriptorSet{ + .dstSet = *gameObject.descriptorSets[i], + .dstBinding = 0, + .dstArrayElement = 0, + .descriptorCount = 1, + .descriptorType = vk::DescriptorType::eUniformBuffer, + .pBufferInfo = &bufferInfo + }, + vk::WriteDescriptorSet{ + .dstSet = *gameObject.descriptorSets[i], + .dstBinding = 1, + .dstArrayElement = 0, + .descriptorCount = 1, + .descriptorType = vk::DescriptorType::eCombinedImageSampler, + .pImageInfo = &imageInfo + } + }; + device.updateDescriptorSets(descriptorWrites, {}); + } + } +} +---- + +=== Update Uniform Buffers for All Objects + +We'll modify the uniform buffer update to handle all objects: + +[,c{pp}] +---- +void updateUniformBuffers() { + static auto startTime = std::chrono::high_resolution_clock::now(); + auto currentTime = std::chrono::high_resolution_clock::now(); + float time = std::chrono::duration(currentTime - startTime).count(); + + // Camera and projection matrices (shared by all objects) + glm::mat4 view = glm::lookAt(glm::vec3(2.0f, 2.0f, 6.0f), glm::vec3(0.0f, 0.0f, 0.0f), glm::vec3(0.0f, 1.0f, 0.0f)); + glm::mat4 proj = 
glm::perspective(glm::radians(45.0f), + static_cast<float>(swapChainExtent.width) / static_cast<float>(swapChainExtent.height), + 0.1f, 20.0f); + proj[1][1] *= -1; // Flip Y for Vulkan + + // Update uniform buffers for each object + for (auto& gameObject : gameObjects) { + // Apply continuous rotation to the object + gameObject.rotation.y += 0.001f; // Slow rotation around Y axis + + // Get the model matrix for this object + glm::mat4 initialRotation = glm::rotate(glm::mat4(1.0f), glm::radians(-90.0f), glm::vec3(1.0f, 0.0f, 0.0f)); + glm::mat4 model = gameObject.getModelMatrix() * initialRotation; + + // Create and update the UBO + UniformBufferObject ubo{ + .model = model, + .view = view, + .proj = proj + }; + + // Copy the UBO data to the mapped memory + memcpy(gameObject.uniformBuffersMapped[currentFrame], &ubo, sizeof(ubo)); + } +} +---- + +Note that we're sharing the view and projection matrices across all objects, but each object has its own model matrix. + +=== Modify the Command Buffer Recording + +Finally, we'll update the command buffer recording to draw each object: + +[,c{pp}] +---- +void recordCommandBuffer(uint32_t imageIndex) { + // ... (beginning of the method remains the same) + + // Bind vertex and index buffers (shared by all objects) + commandBuffers[currentFrame].bindVertexBuffers(0, *vertexBuffer, {0}); + commandBuffers[currentFrame].bindIndexBuffer(*indexBuffer, 0, vk::IndexType::eUint32); + + // Draw each object with its own descriptor set + for (const auto& gameObject : gameObjects) { + // Bind the descriptor set for this object + commandBuffers[currentFrame].bindDescriptorSets( + vk::PipelineBindPoint::eGraphics, + *pipelineLayout, + 0, + *gameObject.descriptorSets[currentFrame], + nullptr + ); + + // Draw the object + commandBuffers[currentFrame].drawIndexed(indices.size(), 1, 0, 0, 0); + } + + // ...
(end of the method remains the same) +} +---- + +== Performance Considerations + +When rendering multiple objects, keep these performance considerations in mind: + +1. *Minimize state changes*: Group objects by material/texture to reduce binding changes. +2. *Use instancing* for many identical objects (not covered in this tutorial). +3. *Consider push constants* for small, frequently changing data instead of uniform buffers. +4. *Batch draw calls* where possible to reduce CPU overhead. +5. *Use indirect drawing* for large numbers of objects (not covered here). + +== Conclusion + +You've now learned how to render multiple objects in Vulkan by: + +1. Creating a structure to hold per-object data +2. Duplicating the necessary resources with (uniform buffers, descriptor sets) for each object +3. Sharing resources that can be reused (vertex/index buffers, pipeline, textures) +4. Updating the rendering loop to draw each object with its own transformation + +This approach gives you the flexibility to position, rotate, and scale objects independently while maintaining good performance by sharing resources where appropriate. + +In a real-world application, you might extend this system with: + +* Object hierarchies (parent-child relationships) +* Different meshes and materials for different objects +* Frustum culling to avoid rendering objects outside the camera view +* Level-of-detail systems for objects at different distances + +The foundation you've built here will serve as a solid starting point for these more advanced techniques. + +link:/attachments/36_multiple_objects.cpp[C{pp} code] diff --git a/en/17_Multithreading.adoc b/en/17_Multithreading.adoc new file mode 100644 index 00000000..45a5b957 --- /dev/null +++ b/en/17_Multithreading.adoc @@ -0,0 +1,528 @@ +:pp: {plus}{plus} + += Multithreading with Vulkan + +== Introduction + +In this chapter, we'll explore how to leverage multithreading with Vulkan to improve performance in your applications. 
Modern CPUs have multiple cores, and efficiently utilizing these cores can significantly enhance your application's performance, especially for computationally intensive tasks. Vulkan's explicit design makes it well-suited for multithreaded architectures, allowing for fine-grained control over synchronization and resource access. + +== Overview + +Vulkan was designed with multithreading in mind, offering several advantages over older APIs: + +1. *Thread-safe command buffer recording*: Multiple threads can record commands to different command buffers simultaneously. +2. *Explicit synchronization*: Vulkan requires explicit synchronization, giving you precise control over resource access across threads. +3. *Queue-based architecture*: Different operations can be submitted to different queues, potentially executing in parallel. + +However, multithreading in Vulkan requires careful consideration of: + +1. *Resource sharing*: Ensuring safe access to shared resources across threads. +2. *Synchronization*: Properly synchronizing operations between threads. +3. *Work distribution*: Effectively distributing work to maximize parallelism. + +In this chapter, we'll implement a multithreaded rendering system that builds upon our previous work with compute shaders. We'll create a particle system where: + +1. One thread handles window events and presentation +2. Multiple worker threads record command buffers for different particle groups +3. A dedicated thread submits work to the GPU + +== Implementation + +Let's walk through the key components needed to implement multithreading in our Vulkan application: + +=== Thread-Safe Resource Management + +First, we need to ensure our resources are accessed safely across threads. 
We'll use a combination of techniques: + +[,c{pp}] +---- +// Thread-safe resource manager +class ThreadSafeResourceManager { +private: + std::mutex resourceMutex; + // Resources that need thread-safe access + std::vector<vk::raii::CommandPool> commandPools; + std::vector<vk::raii::CommandBuffer> commandBuffers; + +public: + // Create a command pool for each worker thread + void createThreadCommandPools(vk::raii::Device& device, uint32_t queueFamilyIndex, uint32_t threadCount) { + std::lock_guard<std::mutex> lock(resourceMutex); + + commandPools.clear(); + for (uint32_t i = 0; i < threadCount; i++) { + vk::CommandPoolCreateInfo poolInfo{ + .flags = vk::CommandPoolCreateFlagBits::eResetCommandBuffer, + .queueFamilyIndex = queueFamilyIndex + }; + commandPools.emplace_back(device, poolInfo); + } + } + + // Get a command pool for a specific thread + vk::raii::CommandPool& getCommandPool(uint32_t threadIndex) { + std::lock_guard<std::mutex> lock(resourceMutex); + return commandPools[threadIndex]; + } + + // Allocate command buffers for each thread + void allocateCommandBuffers(vk::raii::Device& device, uint32_t threadCount, uint32_t buffersPerThread) { + std::lock_guard<std::mutex> lock(resourceMutex); + + commandBuffers.clear(); + for (uint32_t i = 0; i < threadCount; i++) { + vk::CommandBufferAllocateInfo allocInfo{ + .commandPool = *commandPools[i], + .level = vk::CommandBufferLevel::ePrimary, + .commandBufferCount = buffersPerThread + }; + auto threadBuffers = device.allocateCommandBuffers(allocInfo); + for (auto& buffer : threadBuffers) { + commandBuffers.emplace_back(std::move(buffer)); + } + } + } + + // Get a command buffer + vk::raii::CommandBuffer& getCommandBuffer(uint32_t index) { + std::lock_guard<std::mutex> lock(resourceMutex); + return commandBuffers[index]; + } +}; +---- + +=== Worker Thread Implementation + +Next, we'll implement worker threads that record command buffers for different particle groups: + +[,c{pp}] +---- +class MultithreadedApplication { +private: + // Thread-related members + uint32_t threadCount; + std::vector<std::thread> workerThreads; + 
std::atomic<bool> shouldExit{false}; + std::vector<std::atomic<bool>> threadWorkReady; + std::vector<std::atomic<bool>> threadWorkDone; + + // Synchronization primitives + std::mutex queueSubmitMutex; + std::condition_variable workCompleteCv; + + // Resource manager + ThreadSafeResourceManager resourceManager; + + // Particle system data + struct ParticleGroup { + uint32_t startIndex; + uint32_t count; + }; + std::vector<ParticleGroup> particleGroups; + + // ... other Vulkan resources ... + +public: + void initThreads() { + // Determine the number of threads to use (leave one core for the main thread) + threadCount = std::max(1u, std::thread::hardware_concurrency() - 1); + + // Initialize synchronization primitives + threadWorkReady.resize(threadCount); + threadWorkDone.resize(threadCount); + + for (uint32_t i = 0; i < threadCount; i++) { + threadWorkReady[i] = false; + threadWorkDone[i] = true; + } + + // Create command pools for each thread + resourceManager.createThreadCommandPools(device, graphicsQueueFamilyIndex, threadCount); + + // Divide particles into groups, one for each thread + const uint32_t particlesPerThread = PARTICLE_COUNT / threadCount; + particleGroups.resize(threadCount); + + for (uint32_t i = 0; i < threadCount; i++) { + particleGroups[i].startIndex = i * particlesPerThread; + particleGroups[i].count = (i == threadCount - 1) ?
+ (PARTICLE_COUNT - i * particlesPerThread) : particlesPerThread; + } + + // Start worker threads + for (uint32_t i = 0; i < threadCount; i++) { + workerThreads.emplace_back(&MultithreadedApplication::workerThreadFunc, this, i); + } + } + + void workerThreadFunc(uint32_t threadIndex) { + while (!shouldExit) { + // Wait for work to be ready + if (!threadWorkReady[threadIndex]) { + std::this_thread::yield(); + continue; + } + + // Get the particle group for this thread + const ParticleGroup& group = particleGroups[threadIndex]; + + // Get the command buffer for this thread + vk::raii::CommandBuffer& cmdBuffer = resourceManager.getCommandBuffer(threadIndex); + + // Record commands for this particle group + recordComputeCommandBuffer(cmdBuffer, group.startIndex, group.count); + + // Mark work as done + threadWorkDone[threadIndex] = true; + threadWorkReady[threadIndex] = false; + + // Notify main thread + workCompleteCv.notify_one(); + } + } + + void recordComputeCommandBuffer(vk::raii::CommandBuffer& cmdBuffer, uint32_t startIndex, uint32_t count) { + cmdBuffer.reset(); + cmdBuffer.begin({}); + + // Bind compute pipeline and descriptor sets + cmdBuffer.bindPipeline(vk::PipelineBindPoint::eCompute, *computePipeline); + cmdBuffer.bindDescriptorSets(vk::PipelineBindPoint::eCompute, *computePipelineLayout, 0, {*computeDescriptorSets[currentFrame]}, {}); + + // Add a push constant to specify the particle range for this thread + struct PushConstants { + uint32_t startIndex; + uint32_t count; + } pushConstants{startIndex, count}; + + cmdBuffer.pushConstants(*computePipelineLayout, vk::ShaderStageFlagBits::eCompute, 0, pushConstants); + + // Dispatch compute work + uint32_t groupCount = (count + 255) / 256; + cmdBuffer.dispatch(groupCount, 1, 1); + + cmdBuffer.end(); + } + + void signalThreadsToWork() { + // Signal all threads to start working + for (uint32_t i = 0; i < threadCount; i++) { + threadWorkDone[i] = false; + threadWorkReady[i] = true; + } + } + + void 
waitForThreadsToComplete() { + // Wait for all threads to complete their work + std::unique_lock lock(queueSubmitMutex); + workCompleteCv.wait(lock, [this]() { + for (uint32_t i = 0; i < threadCount; i++) { + if (!threadWorkDone[i]) { + return false; + } + } + return true; + }); + } + + void cleanup() { + // Signal threads to exit and join them + shouldExit = true; + for (auto& thread : workerThreads) { + if (thread.joinable()) { + thread.join(); + } + } + + // ... cleanup other resources ... + } +}; +---- + +=== Modifying the Compute Shader + +We need to modify our compute shader to work with particle ranges specified by push constants: + +[,c{pp}] +---- +// In the compute shader (31_shader_compute.slang) +[[vk::push_constant]] +struct PushConstants { + uint startIndex; + uint count; +}; + +[[vk::binding(0, 0)]] ConstantBuffer<UniformBufferObject> ubo; +[[vk::binding(1, 0)]] RWStructuredBuffer<Particle> particlesIn; +[[vk::binding(2, 0)]] RWStructuredBuffer<Particle> particlesOut; +PushConstants pushConstants; + +[numthreads(256,1,1)] +void compMain(uint3 threadId : SV_DispatchThreadID) +{ + uint index = threadId.x; + + // Only process particles within our assigned range + if (index >= pushConstants.count) { + return; + } + + // Adjust index to start from our assigned start index + uint globalIndex = pushConstants.startIndex + index; + + // Process the particle + Particle particle = particlesIn[globalIndex]; + + // Update particle position based on velocity and delta time + particle.position += particle.velocity * ubo.deltaTime; + + // Simple boundary check with velocity inversion + if (abs(particle.position.x) > 1.0) { + particle.velocity.x *= -1.0; + } + if (abs(particle.position.y) > 1.0) { + particle.velocity.y *= -1.0; + } + + // Write the updated particle to the output buffer + particlesOut[globalIndex] = particle; +} +---- + +== Updating the Main Loop + +Finally, we'll update our main loop to coordinate the worker threads: + +[,c{pp}] +---- +void drawFrame() { + // Wait for the previous frame to
finish + while (vk::Result::eTimeout == device.waitForFences(*inFlightFences[currentFrame], vk::True, UINT64_MAX)); + device.resetFences(*inFlightFences[currentFrame]); + + // Acquire the next image + auto [result, imageIndex] = swapChain.acquireNextImage(UINT64_MAX, *imageAvailableSemaphores[currentFrame], nullptr); + + if (result == vk::Result::eErrorOutOfDateKHR || result == vk::Result::eSuboptimalKHR || framebufferResized) { + framebufferResized = false; + recreateSwapChain(); + return; + } + + // Update uniform buffers + updateUniformBuffer(currentFrame); + + // Signal worker threads to start recording compute command buffers + signalThreadsToWork(); + + // While worker threads are busy, record the graphics command buffer on the main thread + recordGraphicsCommandBuffer(imageIndex); + + // Wait for all worker threads to complete + waitForThreadsToComplete(); + + // Collect command buffers from all threads + std::vector<vk::CommandBuffer> computeCmdBuffers; + for (uint32_t i = 0; i < threadCount; i++) { + computeCmdBuffers.push_back(*resourceManager.getCommandBuffer(i)); + } + + // Submit compute work + vk::SubmitInfo computeSubmitInfo{ + .commandBufferCount = static_cast<uint32_t>(computeCmdBuffers.size()), + .pCommandBuffers = computeCmdBuffers.data() + }; + + { + std::lock_guard lock(queueSubmitMutex); + computeQueue.submit(computeSubmitInfo, nullptr); + } + + // Wait for compute to finish before graphics + vk::PipelineStageFlags waitStages[] = {vk::PipelineStageFlagBits::eVertexInput}; + + // Submit graphics work + vk::SubmitInfo graphicsSubmitInfo{ + .waitSemaphoreCount = 1, + .pWaitSemaphores = &*imageAvailableSemaphores[currentFrame], + .pWaitDstStageMask = waitStages, + .commandBufferCount = 1, + .pCommandBuffers = &*graphicsCommandBuffers[currentFrame], + .signalSemaphoreCount = 1, + .pSignalSemaphores = &*renderFinishedSemaphores[currentFrame] + }; + + { + std::lock_guard lock(queueSubmitMutex); + graphicsQueue.submit(graphicsSubmitInfo, *inFlightFences[currentFrame]); + } + + //
Present the image + vk::PresentInfoKHR presentInfo{ + .waitSemaphoreCount = 1, + .pWaitSemaphores = &*renderFinishedSemaphores[currentFrame], + .swapchainCount = 1, + .pSwapchains = &*swapChain, + .pImageIndices = &imageIndex + }; + + result = presentQueue.presentKHR(presentInfo); + + if (result == vk::Result::eErrorOutOfDateKHR || result == vk::Result::eSuboptimalKHR || framebufferResized) { + framebufferResized = false; + recreateSwapChain(); + } else if (result != vk::Result::eSuccess) { + throw std::runtime_error("failed to present swap chain image!"); + } + + currentFrame = (currentFrame + 1) % MAX_FRAMES_IN_FLIGHT; +} +---- + +== Advanced Multithreading Techniques + +Beyond the basic implementation above, there are several advanced techniques you can use to further optimize your multithreaded Vulkan application: + +=== Secondary Command Buffers + +Secondary command buffers can be recorded in parallel and then executed by a primary command buffer: + +[,c{pp}] +---- +// In worker thread: +vk::CommandBufferInheritanceInfo inheritanceInfo{ + .renderPass = *renderPass, + .subpass = 0, + .framebuffer = *framebuffers[imageIndex] +}; + +vk::CommandBufferBeginInfo beginInfo{ + .flags = vk::CommandBufferUsageFlagBits::eRenderPassContinue, + .pInheritanceInfo = &inheritanceInfo +}; + +secondaryCommandBuffer.begin(beginInfo); +// Record rendering commands... 
+secondaryCommandBuffer.end(); + +// In main thread: +primaryCommandBuffer.begin({}); +primaryCommandBuffer.beginRenderPass(...); +primaryCommandBuffer.executeCommands(secondaryCommandBuffers); +primaryCommandBuffer.endRenderPass(); +primaryCommandBuffer.end(); +---- + +=== Thread Pool for Dynamic Work Distribution + +Instead of assigning fixed work to each thread, you can use a thread pool to dynamically distribute work: + +[,c{pp}] +---- +class ThreadPool { +private: + std::vector<std::thread> workers; + std::queue<std::function<void()>> tasks; + std::mutex queueMutex; + std::condition_variable condition; + bool stop; + +public: + ThreadPool(size_t threads) : stop(false) { + for (size_t i = 0; i < threads; ++i) { + workers.emplace_back([this] { + while (true) { + std::function<void()> task; + { + std::unique_lock lock(queueMutex); + condition.wait(lock, [this] { return stop || !tasks.empty(); }); + if (stop && tasks.empty()) { + return; + } + task = std::move(tasks.front()); + tasks.pop(); + } + task(); + } + }); + } + } + + template <typename F> + void enqueue(F&& f) { + { + std::unique_lock lock(queueMutex); + tasks.emplace(std::forward<F>(f)); + } + condition.notify_one(); + } + + ~ThreadPool() { + { + std::unique_lock lock(queueMutex); + stop = true; + } + condition.notify_all(); + for (std::thread& worker : workers) { + worker.join(); + } + } +}; +---- + +=== Asynchronous Resource Loading + +You can use multithreading to load resources asynchronously: + +[,c{pp}] +---- +std::future<TextureData> loadTextureAsync(const std::string& filename) { + return std::async(std::launch::async, [filename]() { + TextureData data; + // Load texture data from file + return data; + }); +} + +// Later in your code: +auto textureDataFuture = loadTextureAsync("texture.ktx"); +// Do other work...
+TextureData textureData = textureDataFuture.get(); // Wait for completion if needed +// Create Vulkan texture from the loaded data +---- + +== Performance Considerations + +When implementing multithreading in Vulkan, keep these performance considerations in mind: + +1. *Thread Creation Overhead*: Creating threads has overhead, so create them once at startup rather than per-frame. +2. *Work Granularity*: Ensure each thread has enough work to justify the threading overhead. +3. *False Sharing*: Be aware of cache line contention when multiple threads access adjacent memory. +4. *Queue Submissions*: Queue submissions should be synchronized to avoid race conditions. +5. *Memory Barriers*: Use memory barriers correctly to ensure visibility of memory operations across threads. +6. *Command Pool Per Thread*: Each thread should have its own command pool to avoid synchronization overhead. +7. *Measure Performance*: Always measure to ensure your multithreading actually improves performance. + +== Debugging Multithreaded Vulkan Applications + +Debugging multithreaded applications can be challenging. Here are some tips: + +1. *Validation Layers*: Enable Vulkan validation layers to catch synchronization issues. +2. *Thread Sanitizers*: Use tools like ThreadSanitizer to detect data races. +3. *Logging*: Implement thread-safe logging to track execution flow. +4. *Simplify*: Start with a simpler threading model and gradually add complexity. +5. *Atomic Operations*: Use atomic operations for thread-safe counters and flags. + +== Conclusion + +In this chapter, we've explored how to leverage multithreading with Vulkan to improve performance. We've implemented a multithreaded particle system where: + +1. Multiple worker threads record command buffers in parallel +2. The main thread coordinates work and handles presentation +3. 
Proper synchronization ensures thread safety + +By distributing work across multiple CPU cores, we can significantly improve performance, especially for computationally intensive applications. Vulkan's explicit design makes it well-suited for multithreaded architectures, allowing for fine-grained control over synchronization and resource access. + +As you continue to develop your Vulkan applications, consider how multithreading can help you leverage the full power of modern CPUs, and remember to always measure performance to ensure your threading model is actually beneficial for your specific use case. + +link:/attachments/37_multithreading.cpp[C{pp} code] diff --git a/scripts/install_dependencies_linux.sh b/scripts/install_dependencies_linux.sh index 0c532604..ab1a2c55 100755 --- a/scripts/install_dependencies_linux.sh +++ b/scripts/install_dependencies_linux.sh @@ -37,6 +37,12 @@ case $PACKAGE_MANAGER in echo "Installing stb..." sudo apt-get install -y libstb-dev || echo "stb not found in apt, will need to be installed manually or via CMake FetchContent" + echo "Installing tinygltf..." + sudo apt-get install -y libtinygltf-dev || echo "tinygltf not found in apt, will need to be installed manually or via CMake FetchContent" + + echo "Installing nlohmann-json..." + sudo apt-get install -y nlohmann-json3-dev || echo "nlohmann-json not found in apt, will need to be installed manually or via CMake FetchContent" + echo "Installing X Window System dependencies..." sudo apt-get install -y libxxf86vm-dev libxi-dev ;; @@ -54,6 +60,12 @@ case $PACKAGE_MANAGER in echo "Installing tinyobjloader..." sudo dnf install -y tinyobjloader-devel || echo "tinyobjloader not found in dnf, will need to be installed manually or via CMake FetchContent" + echo "Installing tinygltf..." + sudo dnf install -y tinygltf-devel || echo "tinygltf not found in dnf, will need to be installed manually or via CMake FetchContent" + + echo "Installing nlohmann-json..." 
+ sudo dnf install -y nlohmann-json-devel || echo "nlohmann-json not found in dnf, will need to be installed manually or via CMake FetchContent" + echo "Installing X Window System dependencies..." sudo dnf install -y libXxf86vm-devel libXi-devel ;; @@ -70,6 +82,12 @@ case $PACKAGE_MANAGER in echo "Installing tinyobjloader..." sudo pacman -S --needed tinyobjloader || echo "tinyobjloader not found in pacman, will need to be installed manually or via CMake FetchContent" + + echo "Installing tinygltf..." + sudo pacman -S --needed tinygltf || echo "tinygltf not found in pacman, will need to be installed manually or via CMake FetchContent" + + echo "Installing nlohmann-json..." + sudo pacman -S --needed nlohmann-json || echo "nlohmann-json not found in pacman, will need to be installed manually or via CMake FetchContent" ;; *) echo "Unsupported package manager. Please install the following packages manually:" @@ -80,6 +98,8 @@ case $PACKAGE_MANAGER in echo "- libglm-dev or equivalent" echo "- libtinyobjloader-dev or equivalent" echo "- libstb-dev or equivalent" + echo "- libtinygltf-dev or equivalent" + echo "- nlohmann-json3-dev or equivalent" echo "- libxxf86vm-dev and libxi-dev or equivalent" exit 1 ;; diff --git a/scripts/install_dependencies_windows.bat b/scripts/install_dependencies_windows.bat index 4527c0b0..bd8ff37c 100644 --- a/scripts/install_dependencies_windows.bat +++ b/scripts/install_dependencies_windows.bat @@ -14,18 +14,16 @@ if %ERRORLEVEL% neq 0 ( exit /b 1 ) -:: Install dependencies using vcpkg -echo Installing GLFW... -vcpkg install glfw3:x64-windows +:: Enable binary caching for vcpkg +echo Enabling binary caching for vcpkg... +set VCPKG_BINARY_SOURCES=clear;files,%TEMP%\vcpkg-cache,readwrite -echo Installing GLM... -vcpkg install glm:x64-windows +:: Create cache directory if it doesn't exist +if not exist %TEMP%\vcpkg-cache mkdir %TEMP%\vcpkg-cache -echo Installing tinyobjloader... -vcpkg install tinyobjloader:x64-windows - -echo Installing stb... 
-vcpkg install stb:x64-windows +:: Install all dependencies at once using vcpkg with parallel installation +echo Installing all dependencies... +vcpkg install --triplet=x64-windows --x-manifest-root=%~dp0\.. --feature-flags=binarycaching,manifests --x-install-root=%VCPKG_INSTALLATION_ROOT%/installed :: Remind about Vulkan SDK echo. diff --git a/scripts/vcpkg.json b/scripts/vcpkg.json new file mode 100644 index 00000000..0a90e24d --- /dev/null +++ b/scripts/vcpkg.json @@ -0,0 +1,13 @@ +{ + "name": "vulkan-tutorial", + "version": "1.0.0", + "dependencies": [ + "glfw3", + "glm", + "tinyobjloader", + "stb", + "tinygltf", + "nlohmann-json", + "ktx" + ] +} diff --git a/vcpkg.json b/vcpkg.json new file mode 100644 index 00000000..0a90e24d --- /dev/null +++ b/vcpkg.json @@ -0,0 +1,13 @@ +{ + "name": "vulkan-tutorial", + "version": "1.0.0", + "dependencies": [ + "glfw3", + "glm", + "tinyobjloader", + "stb", + "tinygltf", + "nlohmann-json", + "ktx" + ] +}